// SPDX-License-Identifier: GPL-2.0+
/*
* mxser.c -- MOXA Smartio/Industio family multiport serial driver.
*
* Copyright (C) 1999-2006 Moxa Technologies ([email protected]).
* Copyright (C) 2006-2008 Jiri Slaby <[email protected]>
*
 * This code is loosely based on the 1.8 moxa driver, which is in turn based
 * on the Linux serial driver written by Linus Torvalds, Theodore Ts'o and
 * others.
*
 * Fed through a cleanup, re-indent and removal of non-2.6 code by Alan Cox
* <[email protected]>. The original 1.8 code is available on
* www.moxa.com.
* - Fixed x86_64 cleanness
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
/*
* Semi-public control interfaces
*/
/*
* MOXA ioctls
*/
#define MOXA 0x400
#define MOXA_SET_OP_MODE (MOXA + 66)
#define MOXA_GET_OP_MODE (MOXA + 67)
#define RS232_MODE 0
#define RS485_2WIRE_MODE 1
#define RS422_MODE 2
#define RS485_4WIRE_MODE 3
#define OP_MODE_MASK 3
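/*
 * A minimal user-space sketch of driving these ioctls; the device path,
 * error handling, and the visibility of the numeric mode values to the
 * caller are assumptions for illustration, not part of this driver. Only
 * MU860-based boards honor these requests (see mxser_ioctl_op_mode()):
 *
 *	int fd = open("/dev/ttyMI0", O_RDWR);		// hypothetical node
 *	int mode = RS485_2WIRE_MODE;			// i.e. the value 1
 *	if (ioctl(fd, MOXA_SET_OP_MODE, &mode) < 0)	// select transceiver mode
 *		perror("MOXA_SET_OP_MODE");
 *	if (ioctl(fd, MOXA_GET_OP_MODE, &mode) == 0)	// read it back
 *		printf("op mode: %d\n", mode);
 */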
/* --------------------------------------------------- */
/*
 * The following definitions match what the Moxa MUST chip defines.
 *
 * When the LCR register (offset 0x03) is written with the value below, the
 * MUST chip enters enhanced mode, and a write to EFR (offset 0x02) bits 6-7
 * then selects the register bank.
 */
#define MOXA_MUST_ENTER_ENHANCED 0xBF
/* when enhanced mode is enabled, the general bank registers are accessible */
#define MOXA_MUST_GDL_REGISTER 0x07
#define MOXA_MUST_GDL_MASK 0x7F
#define MOXA_MUST_GDL_HAS_BAD_DATA 0x80
#define MOXA_MUST_LSR_RERR 0x80 /* error in receive FIFO */
/* enhanced register bank select and enhanced mode setting register */
/* This works only when the LCR register equals 0xBF */
#define MOXA_MUST_EFR_REGISTER 0x02
#define MOXA_MUST_EFR_EFRB_ENABLE 0x10 /* enhanced mode enable */
/* enhanced register bank set 0, 1, 2 */
#define MOXA_MUST_EFR_BANK0 0x00
#define MOXA_MUST_EFR_BANK1 0x40
#define MOXA_MUST_EFR_BANK2 0x80
#define MOXA_MUST_EFR_BANK3 0xC0
#define MOXA_MUST_EFR_BANK_MASK 0xC0
/* set XON1 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XON1_REGISTER 0x04
/* set XON2 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XON2_REGISTER 0x05
/* set XOFF1 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XOFF1_REGISTER 0x06
/* set XOFF2 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XOFF2_REGISTER 0x07
#define MOXA_MUST_RBRTL_REGISTER 0x04
#define MOXA_MUST_RBRTH_REGISTER 0x05
#define MOXA_MUST_RBRTI_REGISTER 0x06
#define MOXA_MUST_THRTL_REGISTER 0x07
#define MOXA_MUST_ENUM_REGISTER 0x04
#define MOXA_MUST_HWID_REGISTER 0x05
#define MOXA_MUST_ECR_REGISTER 0x06
#define MOXA_MUST_CSR_REGISTER 0x07
#define MOXA_MUST_FCR_GDA_MODE_ENABLE 0x20 /* good data mode enable */
#define MOXA_MUST_FCR_GDA_ONLY_ENABLE 0x10 /* only good data put into RxFIFO */
#define MOXA_MUST_IER_ECTSI 0x80 /* enable CTS interrupt */
#define MOXA_MUST_IER_ERTSI 0x40 /* enable RTS interrupt */
#define MOXA_MUST_IER_XINT 0x20 /* enable Xon/Xoff interrupt */
#define MOXA_MUST_IER_EGDAI 0x10 /* enable GDA interrupt */
#define MOXA_MUST_RECV_ISR (UART_IER_RDI | MOXA_MUST_IER_EGDAI)
/* GDA interrupt pending */
#define MOXA_MUST_IIR_GDA 0x1C
#define MOXA_MUST_IIR_RDA 0x04
#define MOXA_MUST_IIR_RTO 0x0C
#define MOXA_MUST_IIR_LSR 0x06
/* received Xon/Xoff or special interrupt pending */
#define MOXA_MUST_IIR_XSC 0x10
/* RTS/CTS change state interrupt pending */
#define MOXA_MUST_IIR_RTSCTS 0x20
#define MOXA_MUST_IIR_MASK 0x3E
#define MOXA_MUST_MCR_XON_FLAG 0x40
#define MOXA_MUST_MCR_XON_ANY 0x80
#define MOXA_MUST_MCR_TX_XON 0x08
#define MOXA_MUST_EFR_SF_MASK 0x0F /* software flow control on chip mask value */
#define MOXA_MUST_EFR_SF_TX1 0x08 /* send Xon1/Xoff1 */
#define MOXA_MUST_EFR_SF_TX2 0x04 /* send Xon2/Xoff2 */
#define MOXA_MUST_EFR_SF_TX12 0x0C /* send Xon1,Xon2/Xoff1,Xoff2 */
#define MOXA_MUST_EFR_SF_TX_NO 0x00 /* don't send Xon/Xoff */
#define MOXA_MUST_EFR_SF_TX_MASK 0x0C /* Tx software flow control mask */
#define MOXA_MUST_EFR_SF_RX_NO 0x00 /* don't receive Xon/Xoff */
#define MOXA_MUST_EFR_SF_RX1 0x02 /* receive Xon1/Xoff1 */
#define MOXA_MUST_EFR_SF_RX2 0x01 /* receive Xon2/Xoff2 */
#define MOXA_MUST_EFR_SF_RX12 0x03 /* receive Xon1,Xon2/Xoff1,Xoff2 */
#define MOXA_MUST_EFR_SF_RX_MASK 0x03 /* Rx software flow control mask */
#define MXSERMAJOR 174
#define MXSER_BOARDS 4 /* Max. boards */
#define MXSER_PORTS_PER_BOARD 8 /* Max. ports per board */
#define MXSER_PORTS (MXSER_BOARDS * MXSER_PORTS_PER_BOARD)
#define MXSER_ISR_PASS_LIMIT 100
#define WAKEUP_CHARS 256
#define MXSER_BAUD_BASE 921600
#define MXSER_CUSTOM_DIVISOR (MXSER_BAUD_BASE * 16)
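/*
 * Divisor arithmetic, worked through as a sketch: with MXSER_BAUD_BASE at
 * 921600, a request for 115200 baud yields quot = 921600 / 115200 = 8 and
 * reproduces 115200 exactly, while 38400 yields quot = 24. The B134 special
 * case in mxser_set_baud() uses quot = 2 * 921600 / 269 to approximate the
 * historical 134.5 baud rate. Under ASYNC_SPD_CUST the effective rate is
 * baud_base / custom_divisor, e.g. 921600 / 96 = 9600.
 */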
#define PCI_DEVICE_ID_MOXA_RC7000 0x0001
#define PCI_DEVICE_ID_MOXA_CP102 0x1020
#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021
#define PCI_DEVICE_ID_MOXA_CP102U 0x1022
#define PCI_DEVICE_ID_MOXA_CP102UF 0x1023
#define PCI_DEVICE_ID_MOXA_C104 0x1040
#define PCI_DEVICE_ID_MOXA_CP104U 0x1041
#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042
#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043
#define PCI_DEVICE_ID_MOXA_POS104UL 0x1044
#define PCI_DEVICE_ID_MOXA_CB108 0x1080
#define PCI_DEVICE_ID_MOXA_CP112UL 0x1120
#define PCI_DEVICE_ID_MOXA_CT114 0x1140
#define PCI_DEVICE_ID_MOXA_CP114 0x1141
#define PCI_DEVICE_ID_MOXA_CB114 0x1142
#define PCI_DEVICE_ID_MOXA_CP114UL 0x1143
#define PCI_DEVICE_ID_MOXA_CP118U 0x1180
#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181
#define PCI_DEVICE_ID_MOXA_CP132 0x1320
#define PCI_DEVICE_ID_MOXA_CP132U 0x1321
#define PCI_DEVICE_ID_MOXA_CP134U 0x1340
#define PCI_DEVICE_ID_MOXA_CB134I 0x1341
#define PCI_DEVICE_ID_MOXA_CP138U 0x1380
#define PCI_DEVICE_ID_MOXA_C168 0x1680
#define PCI_DEVICE_ID_MOXA_CP168U 0x1681
#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682
#define MXSER_NPORTS(ddata) ((ddata) & 0xffU)
#define MXSER_HIGHBAUD 0x0100
enum mxser_must_hwid {
MOXA_OTHER_UART = 0x00,
MOXA_MUST_MU150_HWID = 0x01,
MOXA_MUST_MU860_HWID = 0x02,
};
static const struct {
u8 type;
u8 fifo_size;
u8 rx_high_water;
u8 rx_low_water;
speed_t max_baud;
} Gpci_uart_info[] = {
{ MOXA_OTHER_UART, 16, 14, 1, 921600 },
{ MOXA_MUST_MU150_HWID, 64, 48, 16, 230400 },
{ MOXA_MUST_MU860_HWID, 128, 96, 32, 921600 }
};
#define UART_INFO_NUM ARRAY_SIZE(Gpci_uart_info)
/* driver_data corresponds to the lines in the structure above;
 * see also the ISA probe function before you change something */
static const struct pci_device_id mxser_pcibrds[] = {
{ PCI_DEVICE_DATA(MOXA, C168, 8) },
{ PCI_DEVICE_DATA(MOXA, C104, 4) },
{ PCI_DEVICE_DATA(MOXA, CP132, 2) },
{ PCI_DEVICE_DATA(MOXA, CP114, 4) },
{ PCI_DEVICE_DATA(MOXA, CT114, 4) },
{ PCI_DEVICE_DATA(MOXA, CP102, 2 | MXSER_HIGHBAUD) },
{ PCI_DEVICE_DATA(MOXA, CP104U, 4) },
{ PCI_DEVICE_DATA(MOXA, CP168U, 8) },
{ PCI_DEVICE_DATA(MOXA, CP132U, 2) },
{ PCI_DEVICE_DATA(MOXA, CP134U, 4) },
{ PCI_DEVICE_DATA(MOXA, CP104JU, 4) },
{ PCI_DEVICE_DATA(MOXA, RC7000, 8) }, /* RC7000 */
{ PCI_DEVICE_DATA(MOXA, CP118U, 8) },
{ PCI_DEVICE_DATA(MOXA, CP102UL, 2) },
{ PCI_DEVICE_DATA(MOXA, CP102U, 2) },
{ PCI_DEVICE_DATA(MOXA, CP118EL, 8) },
{ PCI_DEVICE_DATA(MOXA, CP168EL, 8) },
{ PCI_DEVICE_DATA(MOXA, CP104EL, 4) },
{ PCI_DEVICE_DATA(MOXA, CB108, 8) },
{ PCI_DEVICE_DATA(MOXA, CB114, 4) },
{ PCI_DEVICE_DATA(MOXA, CB134I, 4) },
{ PCI_DEVICE_DATA(MOXA, CP138U, 8) },
{ PCI_DEVICE_DATA(MOXA, POS104UL, 4) },
{ PCI_DEVICE_DATA(MOXA, CP114UL, 4) },
{ PCI_DEVICE_DATA(MOXA, CP102UF, 2) },
{ PCI_DEVICE_DATA(MOXA, CP112UL, 2) },
{ }
};
MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
static int ttymajor = MXSERMAJOR;
/* Variables for insmod */
MODULE_AUTHOR("Casper Yang");
MODULE_DESCRIPTION("MOXA Smartio/Industio Family Multiport Board Device Driver");
module_param(ttymajor, int, 0);
MODULE_LICENSE("GPL");
struct mxser_board;
struct mxser_port {
struct tty_port port;
struct mxser_board *board;
unsigned long ioaddr;
unsigned long opmode_ioaddr;
u8 rx_high_water;
u8 rx_low_water;
int type; /* UART type */
unsigned char x_char; /* xon/xoff character */
u8 IER; /* Interrupt Enable Register */
u8 MCR; /* Modem control register */
u8 FCR; /* FIFO control register */
struct async_icount icount; /* kernel counters for 4 input interrupts */
unsigned int timeout;
u8 read_status_mask;
u8 ignore_status_mask;
u8 xmit_fifo_size;
spinlock_t slock;
};
struct mxser_board {
unsigned int idx;
unsigned short nports;
int irq;
unsigned long vector;
enum mxser_must_hwid must_hwid;
speed_t max_baud;
struct mxser_port ports[];
};
static DECLARE_BITMAP(mxser_boards, MXSER_BOARDS);
static struct tty_driver *mxvar_sdriver;
static u8 __mxser_must_set_EFR(unsigned long baseio, u8 clear, u8 set,
bool restore_LCR)
{
u8 oldlcr, efr;
oldlcr = inb(baseio + UART_LCR);
outb(MOXA_MUST_ENTER_ENHANCED, baseio + UART_LCR);
efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
efr &= ~clear;
efr |= set;
outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
if (restore_LCR)
outb(oldlcr, baseio + UART_LCR);
return oldlcr;
}
static u8 mxser_must_select_bank(unsigned long baseio, u8 bank)
{
return __mxser_must_set_EFR(baseio, MOXA_MUST_EFR_BANK_MASK, bank,
false);
}
static void mxser_set_must_xon1_value(unsigned long baseio, u8 value)
{
u8 oldlcr = mxser_must_select_bank(baseio, MOXA_MUST_EFR_BANK0);
outb(value, baseio + MOXA_MUST_XON1_REGISTER);
outb(oldlcr, baseio + UART_LCR);
}
static void mxser_set_must_xoff1_value(unsigned long baseio, u8 value)
{
u8 oldlcr = mxser_must_select_bank(baseio, MOXA_MUST_EFR_BANK0);
outb(value, baseio + MOXA_MUST_XOFF1_REGISTER);
outb(oldlcr, baseio + UART_LCR);
}
static void mxser_set_must_fifo_value(struct mxser_port *info)
{
u8 oldlcr = mxser_must_select_bank(info->ioaddr, MOXA_MUST_EFR_BANK1);
outb(info->rx_high_water, info->ioaddr + MOXA_MUST_RBRTH_REGISTER);
outb(info->rx_high_water, info->ioaddr + MOXA_MUST_RBRTI_REGISTER);
outb(info->rx_low_water, info->ioaddr + MOXA_MUST_RBRTL_REGISTER);
outb(oldlcr, info->ioaddr + UART_LCR);
}
static void mxser_set_must_enum_value(unsigned long baseio, u8 value)
{
u8 oldlcr = mxser_must_select_bank(baseio, MOXA_MUST_EFR_BANK2);
outb(value, baseio + MOXA_MUST_ENUM_REGISTER);
outb(oldlcr, baseio + UART_LCR);
}
static u8 mxser_get_must_hardware_id(unsigned long baseio)
{
u8 oldlcr = mxser_must_select_bank(baseio, MOXA_MUST_EFR_BANK2);
u8 id = inb(baseio + MOXA_MUST_HWID_REGISTER);
outb(oldlcr, baseio + UART_LCR);
return id;
}
static void mxser_must_set_EFR(unsigned long baseio, u8 clear, u8 set)
{
__mxser_must_set_EFR(baseio, clear, set, true);
}
static void mxser_must_set_enhance_mode(unsigned long baseio, bool enable)
{
mxser_must_set_EFR(baseio,
enable ? 0 : MOXA_MUST_EFR_EFRB_ENABLE,
enable ? MOXA_MUST_EFR_EFRB_ENABLE : 0);
}
static void mxser_must_no_sw_flow_control(unsigned long baseio)
{
mxser_must_set_EFR(baseio, MOXA_MUST_EFR_SF_MASK, 0);
}
static void mxser_must_set_tx_sw_flow_control(unsigned long baseio, bool enable)
{
mxser_must_set_EFR(baseio, MOXA_MUST_EFR_SF_TX_MASK,
enable ? MOXA_MUST_EFR_SF_TX1 : 0);
}
static void mxser_must_set_rx_sw_flow_control(unsigned long baseio, bool enable)
{
mxser_must_set_EFR(baseio, MOXA_MUST_EFR_SF_RX_MASK,
enable ? MOXA_MUST_EFR_SF_RX1 : 0);
}
static enum mxser_must_hwid mxser_must_get_hwid(unsigned long io)
{
u8 oldmcr, hwid;
int i;
outb(0, io + UART_LCR);
mxser_must_set_enhance_mode(io, false);
oldmcr = inb(io + UART_MCR);
outb(0, io + UART_MCR);
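	/*
	 * Probe trick (as read from this code, not vendor documentation):
	 * XON1 sits at offset 0x04 in enhanced-mode bank 0, the same offset
	 * as MCR in the normal register map. On a genuine MUST chip the
	 * banked write below leaves MCR at zero; on a plain 16550-class UART
	 * it lands in MCR instead, which is how the two are told apart.
	 */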
mxser_set_must_xon1_value(io, 0x11);
if (inb(io + UART_MCR) != 0) {
outb(oldmcr, io + UART_MCR);
return MOXA_OTHER_UART;
}
hwid = mxser_get_must_hardware_id(io);
for (i = 1; i < UART_INFO_NUM; i++) /* 0 = OTHER_UART */
if (hwid == Gpci_uart_info[i].type)
return hwid;
return MOXA_OTHER_UART;
}
static bool mxser_16550A_or_MUST(struct mxser_port *info)
{
return info->type == PORT_16550A || info->board->must_hwid;
}
static void mxser_process_txrx_fifo(struct mxser_port *info)
{
unsigned int i;
if (info->type == PORT_16450 || info->type == PORT_8250) {
info->rx_high_water = 1;
info->rx_low_water = 1;
info->xmit_fifo_size = 1;
return;
}
for (i = 0; i < UART_INFO_NUM; i++)
if (info->board->must_hwid == Gpci_uart_info[i].type) {
info->rx_low_water = Gpci_uart_info[i].rx_low_water;
info->rx_high_water = Gpci_uart_info[i].rx_high_water;
info->xmit_fifo_size = Gpci_uart_info[i].fifo_size;
break;
}
}
static void __mxser_start_tx(struct mxser_port *info)
{
outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
info->IER |= UART_IER_THRI;
outb(info->IER, info->ioaddr + UART_IER);
}
static void mxser_start_tx(struct mxser_port *info)
{
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
__mxser_start_tx(info);
spin_unlock_irqrestore(&info->slock, flags);
}
static void __mxser_stop_tx(struct mxser_port *info)
{
info->IER &= ~UART_IER_THRI;
outb(info->IER, info->ioaddr + UART_IER);
}
static bool mxser_carrier_raised(struct tty_port *port)
{
struct mxser_port *mp = container_of(port, struct mxser_port, port);
return inb(mp->ioaddr + UART_MSR) & UART_MSR_DCD;
}
static void mxser_dtr_rts(struct tty_port *port, bool active)
{
struct mxser_port *mp = container_of(port, struct mxser_port, port);
unsigned long flags;
u8 mcr;
spin_lock_irqsave(&mp->slock, flags);
mcr = inb(mp->ioaddr + UART_MCR);
if (active)
mcr |= UART_MCR_DTR | UART_MCR_RTS;
else
mcr &= ~(UART_MCR_DTR | UART_MCR_RTS);
outb(mcr, mp->ioaddr + UART_MCR);
spin_unlock_irqrestore(&mp->slock, flags);
}
static int mxser_set_baud(struct tty_struct *tty, speed_t newspd)
{
struct mxser_port *info = tty->driver_data;
unsigned int quot = 0, baud;
unsigned char cval;
u64 timeout;
if (newspd > info->board->max_baud)
return -1;
if (newspd == 134) {
quot = 2 * MXSER_BAUD_BASE / 269;
tty_encode_baud_rate(tty, 134, 134);
} else if (newspd) {
quot = MXSER_BAUD_BASE / newspd;
if (quot == 0)
quot = 1;
baud = MXSER_BAUD_BASE / quot;
tty_encode_baud_rate(tty, baud, baud);
} else {
quot = 0;
}
/*
* worst case (128 * 1000 * 10 * 18432) needs 35 bits, so divide in the
* u64 domain
*/
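	/*
	 * (Unpacking the worst case as a sketch: 128 is the deepest FIFO,
	 * 1000 the highest HZ, 10 the bits per character, and 18432 the
	 * largest divisor this driver can produce, i.e. 921600 / 50 baud.)
	 */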
timeout = (u64)info->xmit_fifo_size * HZ * 10 * quot;
do_div(timeout, MXSER_BAUD_BASE);
info->timeout = timeout + HZ / 50; /* Add .02 seconds of slop */
if (quot) {
info->MCR |= UART_MCR_DTR;
outb(info->MCR, info->ioaddr + UART_MCR);
} else {
info->MCR &= ~UART_MCR_DTR;
outb(info->MCR, info->ioaddr + UART_MCR);
return 0;
}
cval = inb(info->ioaddr + UART_LCR);
outb(cval | UART_LCR_DLAB, info->ioaddr + UART_LCR); /* set DLAB */
outb(quot & 0xff, info->ioaddr + UART_DLL); /* LS of divisor */
outb(quot >> 8, info->ioaddr + UART_DLM); /* MS of divisor */
outb(cval, info->ioaddr + UART_LCR); /* reset DLAB */
if (C_BAUD(tty) == BOTHER) {
quot = MXSER_BAUD_BASE % newspd;
quot *= 8;
if (quot % newspd > newspd / 2) {
quot /= newspd;
quot++;
} else
quot /= newspd;
mxser_set_must_enum_value(info->ioaddr, quot);
} else {
mxser_set_must_enum_value(info->ioaddr, 0);
}
return 0;
}
static void mxser_handle_cts(struct tty_struct *tty, struct mxser_port *info,
u8 msr)
{
bool cts = msr & UART_MSR_CTS;
if (tty->hw_stopped) {
if (cts) {
tty->hw_stopped = false;
if (!mxser_16550A_or_MUST(info))
__mxser_start_tx(info);
tty_wakeup(tty);
}
return;
} else if (cts)
return;
tty->hw_stopped = true;
if (!mxser_16550A_or_MUST(info))
__mxser_stop_tx(info);
}
/*
* This routine is called to set the UART divisor registers to match
* the specified baud rate for a serial port.
*/
static void mxser_change_speed(struct tty_struct *tty,
const struct ktermios *old_termios)
{
struct mxser_port *info = tty->driver_data;
unsigned cflag, cval;
cflag = tty->termios.c_cflag;
if (mxser_set_baud(tty, tty_get_baud_rate(tty))) {
/* Use previous rate on a failure */
if (old_termios) {
speed_t baud = tty_termios_baud_rate(old_termios);
tty_encode_baud_rate(tty, baud, baud);
}
}
/* byte size and parity */
cval = UART_LCR_WLEN(tty_get_char_size(tty->termios.c_cflag));
if (cflag & CSTOPB)
cval |= UART_LCR_STOP;
if (cflag & PARENB)
cval |= UART_LCR_PARITY;
if (!(cflag & PARODD))
cval |= UART_LCR_EPAR;
if (cflag & CMSPAR)
cval |= UART_LCR_SPAR;
info->FCR = 0;
if (info->board->must_hwid) {
info->FCR |= UART_FCR_ENABLE_FIFO |
MOXA_MUST_FCR_GDA_MODE_ENABLE;
mxser_set_must_fifo_value(info);
} else if (info->type != PORT_8250 && info->type != PORT_16450) {
info->FCR |= UART_FCR_ENABLE_FIFO;
switch (info->rx_high_water) {
case 1:
info->FCR |= UART_FCR_TRIGGER_1;
break;
case 4:
info->FCR |= UART_FCR_TRIGGER_4;
break;
case 8:
info->FCR |= UART_FCR_TRIGGER_8;
break;
default:
info->FCR |= UART_FCR_TRIGGER_14;
break;
}
}
/* CTS flow control flag and modem status interrupts */
info->IER &= ~UART_IER_MSI;
info->MCR &= ~UART_MCR_AFE;
tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
if (cflag & CRTSCTS) {
info->IER |= UART_IER_MSI;
if (mxser_16550A_or_MUST(info)) {
info->MCR |= UART_MCR_AFE;
} else {
mxser_handle_cts(tty, info,
inb(info->ioaddr + UART_MSR));
}
}
outb(info->MCR, info->ioaddr + UART_MCR);
tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
if (~cflag & CLOCAL)
info->IER |= UART_IER_MSI;
outb(info->IER, info->ioaddr + UART_IER);
/*
* Set up parity check flag
*/
info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (I_INPCK(tty))
info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (I_BRKINT(tty) || I_PARMRK(tty))
info->read_status_mask |= UART_LSR_BI;
info->ignore_status_mask = 0;
if (I_IGNBRK(tty)) {
info->ignore_status_mask |= UART_LSR_BI;
info->read_status_mask |= UART_LSR_BI;
		/*
		 * If we're ignoring parity and break indicators, ignore
		 * overruns too (for real raw support).
		 */
if (I_IGNPAR(tty)) {
info->ignore_status_mask |=
UART_LSR_OE |
UART_LSR_PE |
UART_LSR_FE;
info->read_status_mask |=
UART_LSR_OE |
UART_LSR_PE |
UART_LSR_FE;
}
}
if (info->board->must_hwid) {
mxser_set_must_xon1_value(info->ioaddr, START_CHAR(tty));
mxser_set_must_xoff1_value(info->ioaddr, STOP_CHAR(tty));
mxser_must_set_rx_sw_flow_control(info->ioaddr, I_IXON(tty));
mxser_must_set_tx_sw_flow_control(info->ioaddr, I_IXOFF(tty));
}
outb(info->FCR, info->ioaddr + UART_FCR);
outb(cval, info->ioaddr + UART_LCR);
}
static u8 mxser_check_modem_status(struct tty_struct *tty,
struct mxser_port *port)
{
u8 msr = inb(port->ioaddr + UART_MSR);
if (!(msr & UART_MSR_ANY_DELTA))
return msr;
/* update input line counters */
if (msr & UART_MSR_TERI)
port->icount.rng++;
if (msr & UART_MSR_DDSR)
port->icount.dsr++;
if (msr & UART_MSR_DDCD)
port->icount.dcd++;
if (msr & UART_MSR_DCTS)
port->icount.cts++;
wake_up_interruptible(&port->port.delta_msr_wait);
if (tty_port_check_carrier(&port->port) && (msr & UART_MSR_DDCD)) {
if (msr & UART_MSR_DCD)
wake_up_interruptible(&port->port.open_wait);
}
if (tty_port_cts_enabled(&port->port))
mxser_handle_cts(tty, port, msr);
return msr;
}
static void mxser_disable_and_clear_FIFO(struct mxser_port *info)
{
u8 fcr = UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT;
if (info->board->must_hwid)
fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
outb(fcr, info->ioaddr + UART_FCR);
}
static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
{
struct mxser_port *info = container_of(port, struct mxser_port, port);
unsigned long flags;
int ret;
ret = tty_port_alloc_xmit_buf(port);
if (ret < 0)
return ret;
spin_lock_irqsave(&info->slock, flags);
if (!info->type) {
set_bit(TTY_IO_ERROR, &tty->flags);
spin_unlock_irqrestore(&info->slock, flags);
ret = 0;
goto err_free_xmit;
}
/*
* Clear the FIFO buffers and disable them
* (they will be reenabled in mxser_change_speed())
*/
mxser_disable_and_clear_FIFO(info);
/*
* At this point there's no way the LSR could still be 0xFF;
* if it is, then bail out, because there's likely no UART
* here.
*/
if (inb(info->ioaddr + UART_LSR) == 0xff) {
spin_unlock_irqrestore(&info->slock, flags);
if (capable(CAP_SYS_ADMIN)) {
set_bit(TTY_IO_ERROR, &tty->flags);
return 0;
}
ret = -ENODEV;
goto err_free_xmit;
}
/*
* Clear the interrupt registers.
*/
(void) inb(info->ioaddr + UART_LSR);
(void) inb(info->ioaddr + UART_RX);
(void) inb(info->ioaddr + UART_IIR);
(void) inb(info->ioaddr + UART_MSR);
/*
* Now, initialize the UART
*/
outb(UART_LCR_WLEN8, info->ioaddr + UART_LCR); /* reset DLAB */
info->MCR = UART_MCR_DTR | UART_MCR_RTS;
outb(info->MCR, info->ioaddr + UART_MCR);
/*
* Finally, enable interrupts
*/
info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
if (info->board->must_hwid)
info->IER |= MOXA_MUST_IER_EGDAI;
outb(info->IER, info->ioaddr + UART_IER); /* enable interrupts */
/*
* And clear the interrupt registers again for luck.
*/
(void) inb(info->ioaddr + UART_LSR);
(void) inb(info->ioaddr + UART_RX);
(void) inb(info->ioaddr + UART_IIR);
(void) inb(info->ioaddr + UART_MSR);
clear_bit(TTY_IO_ERROR, &tty->flags);
kfifo_reset(&port->xmit_fifo);
/*
* and set the speed of the serial port
*/
mxser_change_speed(tty, NULL);
spin_unlock_irqrestore(&info->slock, flags);
return 0;
err_free_xmit:
tty_port_free_xmit_buf(port);
return ret;
}
/*
* To stop accepting input, we disable the receive line status interrupts, and
* tell the interrupt driver to stop checking the data ready bit in the line
* status register.
*/
static void mxser_stop_rx(struct mxser_port *info)
{
info->IER &= ~UART_IER_RLSI;
if (info->board->must_hwid)
info->IER &= ~MOXA_MUST_RECV_ISR;
outb(info->IER, info->ioaddr + UART_IER);
}
/*
* This routine will shutdown a serial port
*/
static void mxser_shutdown_port(struct tty_port *port)
{
struct mxser_port *info = container_of(port, struct mxser_port, port);
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
mxser_stop_rx(info);
/*
* clear delta_msr_wait queue to avoid mem leaks: we may free the irq
	 * here so the queue might never be woken up
*/
wake_up_interruptible(&info->port.delta_msr_wait);
info->IER = 0;
outb(0x00, info->ioaddr + UART_IER);
/* clear Rx/Tx FIFO's */
mxser_disable_and_clear_FIFO(info);
/* read data port to reset things */
(void) inb(info->ioaddr + UART_RX);
if (info->board->must_hwid)
mxser_must_no_sw_flow_control(info->ioaddr);
spin_unlock_irqrestore(&info->slock, flags);
/* make sure ISR is not running while we free the buffer */
synchronize_irq(info->board->irq);
tty_port_free_xmit_buf(port);
}
/*
 * This routine is called whenever a serial port is opened. It enables
 * interrupts for the port and links its async structure into the IRQ
 * chain. It also performs the serial-specific initialization for the
 * tty structure.
 */
static int mxser_open(struct tty_struct *tty, struct file *filp)
{
struct tty_port *tport = tty->port;
struct mxser_port *port = container_of(tport, struct mxser_port, port);
tty->driver_data = port;
return tty_port_open(tport, tty, filp);
}
static void mxser_flush_buffer(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
kfifo_reset(&info->port.xmit_fifo);
outb(info->FCR | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
info->ioaddr + UART_FCR);
spin_unlock_irqrestore(&info->slock, flags);
tty_wakeup(tty);
}
static void mxser_close(struct tty_struct *tty, struct file *filp)
{
tty_port_close(tty->port, tty, filp);
}
static ssize_t mxser_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
int written;
bool is_empty;
spin_lock_irqsave(&info->slock, flags);
written = kfifo_in(&info->port.xmit_fifo, buf, count);
is_empty = kfifo_is_empty(&info->port.xmit_fifo);
spin_unlock_irqrestore(&info->slock, flags);
if (!is_empty && !tty->flow.stopped)
if (!tty->hw_stopped || mxser_16550A_or_MUST(info))
mxser_start_tx(info);
return written;
}
static int mxser_put_char(struct tty_struct *tty, u8 ch)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
int ret;
spin_lock_irqsave(&info->slock, flags);
ret = kfifo_put(&info->port.xmit_fifo, ch);
spin_unlock_irqrestore(&info->slock, flags);
return ret;
}
static void mxser_flush_chars(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
if (kfifo_is_empty(&info->port.xmit_fifo) || tty->flow.stopped ||
(tty->hw_stopped && !mxser_16550A_or_MUST(info)))
return;
mxser_start_tx(info);
}
static unsigned int mxser_write_room(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
return kfifo_avail(&info->port.xmit_fifo);
}
static unsigned int mxser_chars_in_buffer(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
return kfifo_len(&info->port.xmit_fifo);
}
/*
* ------------------------------------------------------------
* friends of mxser_ioctl()
* ------------------------------------------------------------
*/
static int mxser_get_serial_info(struct tty_struct *tty,
struct serial_struct *ss)
{
struct mxser_port *info = tty->driver_data;
struct tty_port *port = &info->port;
unsigned int closing_wait, close_delay;
mutex_lock(&port->mutex);
close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
closing_wait = info->port.closing_wait;
if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
closing_wait = jiffies_to_msecs(closing_wait) / 10;
ss->type = info->type;
ss->line = tty->index;
ss->port = info->ioaddr;
ss->irq = info->board->irq;
ss->flags = info->port.flags;
ss->baud_base = MXSER_BAUD_BASE;
ss->close_delay = close_delay;
ss->closing_wait = closing_wait;
	ss->custom_divisor = MXSER_CUSTOM_DIVISOR;
mutex_unlock(&port->mutex);
return 0;
}
static int mxser_set_serial_info(struct tty_struct *tty,
struct serial_struct *ss)
{
struct mxser_port *info = tty->driver_data;
struct tty_port *port = &info->port;
speed_t baud;
unsigned long sl_flags;
unsigned int old_speed, close_delay, closing_wait;
int retval = 0;
if (tty_io_error(tty))
return -EIO;
mutex_lock(&port->mutex);
if (ss->irq != info->board->irq ||
ss->port != info->ioaddr) {
mutex_unlock(&port->mutex);
return -EINVAL;
}
old_speed = port->flags & ASYNC_SPD_MASK;
close_delay = msecs_to_jiffies(ss->close_delay * 10);
closing_wait = ss->closing_wait;
if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
closing_wait = msecs_to_jiffies(closing_wait * 10);
if (!capable(CAP_SYS_ADMIN)) {
if ((ss->baud_base != MXSER_BAUD_BASE) ||
(close_delay != port->close_delay) ||
(closing_wait != port->closing_wait) ||
((ss->flags & ~ASYNC_USR_MASK) != (port->flags & ~ASYNC_USR_MASK))) {
mutex_unlock(&port->mutex);
return -EPERM;
}
port->flags = (port->flags & ~ASYNC_USR_MASK) |
(ss->flags & ASYNC_USR_MASK);
} else {
/*
* OK, past this point, all the error checking has been done.
* At this point, we start making changes.....
*/
port->flags = ((port->flags & ~ASYNC_FLAGS) |
(ss->flags & ASYNC_FLAGS));
port->close_delay = close_delay;
port->closing_wait = closing_wait;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
(ss->baud_base != MXSER_BAUD_BASE ||
ss->custom_divisor !=
MXSER_CUSTOM_DIVISOR)) {
if (ss->custom_divisor == 0) {
mutex_unlock(&port->mutex);
return -EINVAL;
}
baud = ss->baud_base / ss->custom_divisor;
tty_encode_baud_rate(tty, baud, baud);
}
info->type = ss->type;
mxser_process_txrx_fifo(info);
}
if (tty_port_initialized(port)) {
if (old_speed != (port->flags & ASYNC_SPD_MASK)) {
spin_lock_irqsave(&info->slock, sl_flags);
mxser_change_speed(tty, NULL);
spin_unlock_irqrestore(&info->slock, sl_flags);
}
} else {
retval = mxser_activate(port, tty);
if (retval == 0)
tty_port_set_initialized(port, true);
}
mutex_unlock(&port->mutex);
return retval;
}
/*
 * mxser_get_lsr_info - get line status register info
 *
 * Purpose: Let user call ioctl() to get info when the UART is physically
 * emptied. On bus types like RS485, the transmitter must release the
 * bus after transmitting. This must be done when the transmit shift
 * register is empty, not when the transmit holding register is empty.
 * This functionality allows an RS485 driver to be written in user space.
 */
static int mxser_get_lsr_info(struct mxser_port *info,
unsigned int __user *value)
{
unsigned char status;
unsigned int result;
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
status = inb(info->ioaddr + UART_LSR);
spin_unlock_irqrestore(&info->slock, flags);
result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
return put_user(result, value);
}
static int mxser_tiocmget(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
unsigned char control;
unsigned long flags;
u8 msr;
if (tty_io_error(tty))
return -EIO;
spin_lock_irqsave(&info->slock, flags);
control = info->MCR;
msr = mxser_check_modem_status(tty, info);
spin_unlock_irqrestore(&info->slock, flags);
return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
((msr & UART_MSR_DCD) ? TIOCM_CAR : 0) |
((msr & UART_MSR_RI) ? TIOCM_RNG : 0) |
((msr & UART_MSR_DSR) ? TIOCM_DSR : 0) |
((msr & UART_MSR_CTS) ? TIOCM_CTS : 0);
}
static int mxser_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
if (tty_io_error(tty))
return -EIO;
spin_lock_irqsave(&info->slock, flags);
if (set & TIOCM_RTS)
info->MCR |= UART_MCR_RTS;
if (set & TIOCM_DTR)
info->MCR |= UART_MCR_DTR;
if (clear & TIOCM_RTS)
info->MCR &= ~UART_MCR_RTS;
if (clear & TIOCM_DTR)
info->MCR &= ~UART_MCR_DTR;
outb(info->MCR, info->ioaddr + UART_MCR);
spin_unlock_irqrestore(&info->slock, flags);
return 0;
}
static int mxser_cflags_changed(struct mxser_port *info, unsigned long arg,
struct async_icount *cprev)
{
struct async_icount cnow;
unsigned long flags;
int ret;
spin_lock_irqsave(&info->slock, flags);
cnow = info->icount; /* atomic copy */
spin_unlock_irqrestore(&info->slock, flags);
ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev->cts));
*cprev = cnow;
return ret;
}
/* We should likely switch to TIOCGRS485/TIOCSRS485. */
static int mxser_ioctl_op_mode(struct mxser_port *port, int index, bool set,
int __user *u_opmode)
{
int opmode, p = index % 4;
int shiftbit = p * 2;
u8 val;
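	/*
	 * Each opmode register packs four ports at two bits per port:
	 * port index 5 gives p = 1 and shiftbit = 2, i.e. bits 3:2 of the
	 * second register. OP_MODE_MASK covers one port's two bits.
	 */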
if (port->board->must_hwid != MOXA_MUST_MU860_HWID)
return -EFAULT;
if (set) {
if (get_user(opmode, u_opmode))
return -EFAULT;
if (opmode & ~OP_MODE_MASK)
return -EINVAL;
spin_lock_irq(&port->slock);
val = inb(port->opmode_ioaddr);
val &= ~(OP_MODE_MASK << shiftbit);
val |= (opmode << shiftbit);
outb(val, port->opmode_ioaddr);
spin_unlock_irq(&port->slock);
return 0;
}
spin_lock_irq(&port->slock);
opmode = inb(port->opmode_ioaddr) >> shiftbit;
spin_unlock_irq(&port->slock);
return put_user(opmode & OP_MODE_MASK, u_opmode);
}
static int mxser_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct mxser_port *info = tty->driver_data;
struct async_icount cnow;
unsigned long flags;
void __user *argp = (void __user *)arg;
if (cmd == MOXA_SET_OP_MODE || cmd == MOXA_GET_OP_MODE)
return mxser_ioctl_op_mode(info, tty->index,
cmd == MOXA_SET_OP_MODE, argp);
if (cmd != TIOCMIWAIT && tty_io_error(tty))
return -EIO;
switch (cmd) {
case TIOCSERGETLSR: /* Get line status register */
return mxser_get_lsr_info(info, argp);
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
* (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
* Caller should use TIOCGICOUNT to see which one it was
*/
case TIOCMIWAIT:
spin_lock_irqsave(&info->slock, flags);
cnow = info->icount; /* note the counters on entry */
spin_unlock_irqrestore(&info->slock, flags);
return wait_event_interruptible(info->port.delta_msr_wait,
mxser_cflags_changed(info, arg, &cnow));
default:
return -ENOIOCTLCMD;
}
return 0;
}
/*
* Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
* Return: write counters to the user passed counter struct
* NB: both 1->0 and 0->1 transitions are counted except for
* RI where only 0->1 is counted.
*/
static int mxser_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct mxser_port *info = tty->driver_data;
struct async_icount cnow;
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
cnow = info->icount;
spin_unlock_irqrestore(&info->slock, flags);
icount->frame = cnow.frame;
icount->brk = cnow.brk;
icount->overrun = cnow.overrun;
icount->buf_overrun = cnow.buf_overrun;
icount->parity = cnow.parity;
icount->rx = cnow.rx;
icount->tx = cnow.tx;
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
return 0;
}
/*
* This routine is called by the upper-layer tty layer to signal that
* incoming characters should be throttled.
*/
static void mxser_throttle(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
if (I_IXOFF(tty)) {
if (info->board->must_hwid) {
info->IER &= ~MOXA_MUST_RECV_ISR;
outb(info->IER, info->ioaddr + UART_IER);
} else {
info->x_char = STOP_CHAR(tty);
outb(0, info->ioaddr + UART_IER);
info->IER |= UART_IER_THRI;
outb(info->IER, info->ioaddr + UART_IER);
}
}
if (C_CRTSCTS(tty)) {
info->MCR &= ~UART_MCR_RTS;
outb(info->MCR, info->ioaddr + UART_MCR);
}
}
static void mxser_unthrottle(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
/* startrx */
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
else {
if (info->board->must_hwid) {
info->IER |= MOXA_MUST_RECV_ISR;
outb(info->IER, info->ioaddr + UART_IER);
} else {
info->x_char = START_CHAR(tty);
outb(0, info->ioaddr + UART_IER);
info->IER |= UART_IER_THRI;
outb(info->IER, info->ioaddr + UART_IER);
}
}
}
if (C_CRTSCTS(tty)) {
info->MCR |= UART_MCR_RTS;
outb(info->MCR, info->ioaddr + UART_MCR);
}
}
/*
* mxser_stop() and mxser_start()
*
 * These routines are called before setting or resetting tty->flow.stopped.
* They enable or disable transmitter interrupts, as necessary.
*/
static void mxser_stop(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
if (info->IER & UART_IER_THRI)
__mxser_stop_tx(info);
spin_unlock_irqrestore(&info->slock, flags);
}
static void mxser_start(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
if (!kfifo_is_empty(&info->port.xmit_fifo))
__mxser_start_tx(info);
spin_unlock_irqrestore(&info->slock, flags);
}
static void mxser_set_termios(struct tty_struct *tty,
const struct ktermios *old_termios)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
mxser_change_speed(tty, old_termios);
spin_unlock_irqrestore(&info->slock, flags);
if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
tty->hw_stopped = false;
mxser_start(tty);
}
/* Handle sw stopped */
if ((old_termios->c_iflag & IXON) && !I_IXON(tty)) {
tty->flow.stopped = 0;
if (info->board->must_hwid) {
spin_lock_irqsave(&info->slock, flags);
mxser_must_set_rx_sw_flow_control(info->ioaddr, false);
spin_unlock_irqrestore(&info->slock, flags);
}
mxser_start(tty);
}
}
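/*
 * Note: despite its name, this returns true while the transmitter is NOT
 * yet empty (TEMT still clear); the polling loop in mxser_wait_until_sent()
 * relies on that inverted sense.
 */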
static bool mxser_tx_empty(struct mxser_port *info)
{
unsigned long flags;
u8 lsr;
spin_lock_irqsave(&info->slock, flags);
lsr = inb(info->ioaddr + UART_LSR);
spin_unlock_irqrestore(&info->slock, flags);
return !(lsr & UART_LSR_TEMT);
}
/*
* mxser_wait_until_sent() --- wait until the transmitter is empty
*/
static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct mxser_port *info = tty->driver_data;
unsigned long expire, char_time;
if (info->type == PORT_UNKNOWN)
return;
if (info->xmit_fifo_size == 0)
return; /* Just in case.... */
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
* interval should also be less than the timeout.
*
* Note: we have to use pretty tight timings here to satisfy
* the NIST-PCTS.
*/
char_time = (info->timeout - HZ / 50) / info->xmit_fifo_size;
char_time = char_time / 5;
if (char_time == 0)
char_time = 1;
if (timeout && timeout < char_time)
char_time = timeout;
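	/*
	 * Worked numbers as a sketch, assuming a 16-byte FIFO at 9600 baud:
	 * quot = 921600 / 9600 = 96, so info->timeout is roughly
	 * 16 * HZ * 10 * 96 / 921600 + HZ / 50 = HZ / 60 + HZ / 50 jiffies,
	 * and char_time starts out near HZ / 960 before the division by 5;
	 * that is about one character time at ten bits per character.
	 */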
char_time = jiffies_to_msecs(char_time);
/*
* If the transmitter hasn't cleared in twice the approximate
* amount of time to send the entire FIFO, it probably won't
* ever clear. This assumes the UART isn't doing flow
* control, which is currently the case. Hence, if it ever
* takes longer than info->timeout, this is probably due to a
* UART bug of some kind. So, we clamp the timeout parameter at
* 2*info->timeout.
*/
if (!timeout || timeout > 2 * info->timeout)
timeout = 2 * info->timeout;
expire = jiffies + timeout;
while (mxser_tx_empty(info)) {
msleep_interruptible(char_time);
if (signal_pending(current))
break;
if (time_after(jiffies, expire))
break;
}
}
/*
* This routine is called by tty_hangup() when a hangup is signaled.
*/
static void mxser_hangup(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
mxser_flush_buffer(tty);
tty_port_hangup(&info->port);
}
/*
* mxser_rs_break() --- routine which turns the break handling on or off
*/
static int mxser_rs_break(struct tty_struct *tty, int break_state)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
u8 lcr;
spin_lock_irqsave(&info->slock, flags);
lcr = inb(info->ioaddr + UART_LCR);
if (break_state == -1)
lcr |= UART_LCR_SBC;
else
lcr &= ~UART_LCR_SBC;
outb(lcr, info->ioaddr + UART_LCR);
spin_unlock_irqrestore(&info->slock, flags);
return 0;
}
static bool mxser_receive_chars_new(struct mxser_port *port, u8 status)
{
enum mxser_must_hwid hwid = port->board->must_hwid;
u8 gdl;
if (hwid == MOXA_OTHER_UART)
return false;
if (status & (UART_LSR_BRK_ERROR_BITS | MOXA_MUST_LSR_RERR))
return false;
gdl = inb(port->ioaddr + MOXA_MUST_GDL_REGISTER);
if (hwid == MOXA_MUST_MU150_HWID)
gdl &= MOXA_MUST_GDL_MASK;
while (gdl--) {
u8 ch = inb(port->ioaddr + UART_RX);
if (!tty_insert_flip_char(&port->port, ch, 0))
port->icount.buf_overrun++;
}
return true;
}
static u8 mxser_receive_chars_old(struct tty_struct *tty,
struct mxser_port *port, u8 status)
{
enum mxser_must_hwid hwid = port->board->must_hwid;
int ignored = 0;
int max = 256;
u8 ch;
do {
if (max-- < 0)
break;
ch = inb(port->ioaddr + UART_RX);
if (hwid && (status & UART_LSR_OE))
outb(port->FCR | UART_FCR_CLEAR_RCVR,
port->ioaddr + UART_FCR);
status &= port->read_status_mask;
if (status & port->ignore_status_mask) {
if (++ignored > 100)
break;
} else {
char flag = 0;
if (status & UART_LSR_BRK_ERROR_BITS) {
if (status & UART_LSR_BI) {
flag = TTY_BREAK;
port->icount.brk++;
if (port->port.flags & ASYNC_SAK)
do_SAK(tty);
} else if (status & UART_LSR_PE) {
flag = TTY_PARITY;
port->icount.parity++;
} else if (status & UART_LSR_FE) {
flag = TTY_FRAME;
port->icount.frame++;
} else if (status & UART_LSR_OE) {
flag = TTY_OVERRUN;
port->icount.overrun++;
}
}
if (!tty_insert_flip_char(&port->port, ch, flag)) {
port->icount.buf_overrun++;
break;
}
}
if (hwid)
break;
status = inb(port->ioaddr + UART_LSR);
} while (status & UART_LSR_DR);
return status;
}
static u8 mxser_receive_chars(struct tty_struct *tty,
struct mxser_port *port, u8 status)
{
if (!mxser_receive_chars_new(port, status))
status = mxser_receive_chars_old(tty, port, status);
tty_flip_buffer_push(&port->port);
return status;
}
static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port)
{
int count;
if (port->x_char) {
outb(port->x_char, port->ioaddr + UART_TX);
port->x_char = 0;
port->icount.tx++;
return;
}
if (kfifo_is_empty(&port->port.xmit_fifo) || tty->flow.stopped ||
(tty->hw_stopped && !mxser_16550A_or_MUST(port))) {
__mxser_stop_tx(port);
return;
}
count = port->xmit_fifo_size;
do {
unsigned char c;
if (!kfifo_get(&port->port.xmit_fifo, &c))
break;
outb(c, port->ioaddr + UART_TX);
port->icount.tx++;
} while (--count > 0);
if (kfifo_len(&port->port.xmit_fifo) < WAKEUP_CHARS)
tty_wakeup(tty);
if (kfifo_is_empty(&port->port.xmit_fifo))
__mxser_stop_tx(port);
}
static bool mxser_port_isr(struct mxser_port *port)
{
struct tty_struct *tty;
u8 iir, status;
bool error = false;
iir = inb(port->ioaddr + UART_IIR);
if (iir & UART_IIR_NO_INT)
return true;
iir &= MOXA_MUST_IIR_MASK;
tty = tty_port_tty_get(&port->port);
if (!tty) {
status = inb(port->ioaddr + UART_LSR);
outb(port->FCR | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
port->ioaddr + UART_FCR);
inb(port->ioaddr + UART_MSR);
error = true;
goto put_tty;
}
status = inb(port->ioaddr + UART_LSR);
if (port->board->must_hwid) {
if (iir == MOXA_MUST_IIR_GDA ||
iir == MOXA_MUST_IIR_RDA ||
iir == MOXA_MUST_IIR_RTO ||
iir == MOXA_MUST_IIR_LSR)
status = mxser_receive_chars(tty, port, status);
} else {
status &= port->read_status_mask;
if (status & UART_LSR_DR)
status = mxser_receive_chars(tty, port, status);
}
mxser_check_modem_status(tty, port);
if (port->board->must_hwid) {
if (iir == 0x02 && (status & UART_LSR_THRE))
mxser_transmit_chars(tty, port);
} else {
if (status & UART_LSR_THRE)
mxser_transmit_chars(tty, port);
}
put_tty:
tty_kref_put(tty);
return error;
}
/*
* This is the serial driver's generic interrupt routine
*/
static irqreturn_t mxser_interrupt(int irq, void *dev_id)
{
struct mxser_board *brd = dev_id;
struct mxser_port *port;
unsigned int int_cnt, pass_counter = 0;
unsigned int i, max = brd->nports;
int handled = IRQ_NONE;
u8 irqbits, bits, mask = BIT(max) - 1;
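	/*
	 * As read from the loop below (not vendor documentation): the
	 * board's vector register exposes one bit per port and a cleared
	 * bit marks a pending interrupt, so reading all ones means nothing
	 * is pending and the handler can stop.
	 */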
while (pass_counter++ < MXSER_ISR_PASS_LIMIT) {
irqbits = inb(brd->vector) & mask;
if (irqbits == mask)
break;
handled = IRQ_HANDLED;
for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) {
if (irqbits == mask)
break;
if (bits & irqbits)
continue;
port = &brd->ports[i];
int_cnt = 0;
spin_lock(&port->slock);
do {
if (mxser_port_isr(port))
break;
} while (int_cnt++ < MXSER_ISR_PASS_LIMIT);
spin_unlock(&port->slock);
}
}
return handled;
}
static const struct tty_operations mxser_ops = {
.open = mxser_open,
.close = mxser_close,
.write = mxser_write,
.put_char = mxser_put_char,
.flush_chars = mxser_flush_chars,
.write_room = mxser_write_room,
.chars_in_buffer = mxser_chars_in_buffer,
.flush_buffer = mxser_flush_buffer,
.ioctl = mxser_ioctl,
.throttle = mxser_throttle,
.unthrottle = mxser_unthrottle,
.set_termios = mxser_set_termios,
.stop = mxser_stop,
.start = mxser_start,
.hangup = mxser_hangup,
.break_ctl = mxser_rs_break,
.wait_until_sent = mxser_wait_until_sent,
.tiocmget = mxser_tiocmget,
.tiocmset = mxser_tiocmset,
.set_serial = mxser_set_serial_info,
.get_serial = mxser_get_serial_info,
.get_icount = mxser_get_icount,
};
static const struct tty_port_operations mxser_port_ops = {
.carrier_raised = mxser_carrier_raised,
.dtr_rts = mxser_dtr_rts,
.activate = mxser_activate,
.shutdown = mxser_shutdown_port,
};
/*
* The MOXA Smartio/Industio serial driver boot-time initialization code!
*/
static void mxser_initbrd(struct mxser_board *brd, bool high_baud)
{
struct mxser_port *info;
unsigned int i;
bool is_mu860;
brd->must_hwid = mxser_must_get_hwid(brd->ports[0].ioaddr);
is_mu860 = brd->must_hwid == MOXA_MUST_MU860_HWID;
for (i = 0; i < UART_INFO_NUM; i++) {
if (Gpci_uart_info[i].type == brd->must_hwid) {
brd->max_baud = Gpci_uart_info[i].max_baud;
			/* exception: CP-102 */
if (high_baud)
brd->max_baud = 921600;
break;
}
}
if (is_mu860) {
/* set to RS232 mode by default */
outb(0, brd->vector + 4);
outb(0, brd->vector + 0x0c);
}
for (i = 0; i < brd->nports; i++) {
info = &brd->ports[i];
if (is_mu860) {
if (i < 4)
info->opmode_ioaddr = brd->vector + 4;
else
info->opmode_ioaddr = brd->vector + 0x0c;
}
tty_port_init(&info->port);
info->port.ops = &mxser_port_ops;
info->board = brd;
/* Enhance mode enabled here */
if (brd->must_hwid != MOXA_OTHER_UART)
mxser_must_set_enhance_mode(info->ioaddr, true);
info->type = PORT_16550A;
mxser_process_txrx_fifo(info);
info->port.close_delay = 5 * HZ / 10;
info->port.closing_wait = 30 * HZ;
spin_lock_init(&info->slock);
		/* before installing the ISR, disable all interrupts */
outb(inb(info->ioaddr + UART_IER) & 0xf0,
info->ioaddr + UART_IER);
}
}
static int mxser_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mxser_board *brd;
unsigned int i, base;
unsigned long ioaddress;
unsigned short nports = MXSER_NPORTS(ent->driver_data);
struct device *tty_dev;
int retval = -EINVAL;
i = find_first_zero_bit(mxser_boards, MXSER_BOARDS);
if (i >= MXSER_BOARDS) {
dev_err(&pdev->dev, "too many boards found (maximum %d), board "
"not configured\n", MXSER_BOARDS);
goto err;
}
brd = devm_kzalloc(&pdev->dev, struct_size(brd, ports, nports),
GFP_KERNEL);
if (!brd)
goto err;
brd->idx = i;
__set_bit(brd->idx, mxser_boards);
base = i * MXSER_PORTS_PER_BOARD;
retval = pcim_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "PCI enable failed\n");
goto err_zero;
}
/* io address */
ioaddress = pci_resource_start(pdev, 2);
retval = pci_request_region(pdev, 2, "mxser(IO)");
if (retval)
goto err_zero;
brd->nports = nports;
for (i = 0; i < nports; i++)
brd->ports[i].ioaddr = ioaddress + 8 * i;
/* vector */
ioaddress = pci_resource_start(pdev, 3);
retval = pci_request_region(pdev, 3, "mxser(vector)");
if (retval)
goto err_zero;
brd->vector = ioaddress;
/* irq */
brd->irq = pdev->irq;
mxser_initbrd(brd, ent->driver_data & MXSER_HIGHBAUD);
retval = devm_request_irq(&pdev->dev, brd->irq, mxser_interrupt,
IRQF_SHARED, "mxser", brd);
if (retval) {
dev_err(&pdev->dev, "request irq failed");
goto err_relbrd;
}
for (i = 0; i < nports; i++) {
tty_dev = tty_port_register_device(&brd->ports[i].port,
mxvar_sdriver, base + i, &pdev->dev);
if (IS_ERR(tty_dev)) {
retval = PTR_ERR(tty_dev);
for (; i > 0; i--)
tty_unregister_device(mxvar_sdriver,
base + i - 1);
goto err_relbrd;
}
}
pci_set_drvdata(pdev, brd);
return 0;
err_relbrd:
for (i = 0; i < nports; i++)
tty_port_destroy(&brd->ports[i].port);
err_zero:
__clear_bit(brd->idx, mxser_boards);
err:
return retval;
}
static void mxser_remove(struct pci_dev *pdev)
{
struct mxser_board *brd = pci_get_drvdata(pdev);
unsigned int i, base = brd->idx * MXSER_PORTS_PER_BOARD;
for (i = 0; i < brd->nports; i++) {
tty_unregister_device(mxvar_sdriver, base + i);
tty_port_destroy(&brd->ports[i].port);
}
__clear_bit(brd->idx, mxser_boards);
}
static struct pci_driver mxser_driver = {
.name = "mxser",
.id_table = mxser_pcibrds,
.probe = mxser_probe,
.remove = mxser_remove
};
static int __init mxser_module_init(void)
{
int retval;
mxvar_sdriver = tty_alloc_driver(MXSER_PORTS, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(mxvar_sdriver))
return PTR_ERR(mxvar_sdriver);
/* Initialize the tty_driver structure */
mxvar_sdriver->name = "ttyMI";
mxvar_sdriver->major = ttymajor;
mxvar_sdriver->minor_start = 0;
mxvar_sdriver->type = TTY_DRIVER_TYPE_SERIAL;
mxvar_sdriver->subtype = SERIAL_TYPE_NORMAL;
mxvar_sdriver->init_termios = tty_std_termios;
mxvar_sdriver->init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
tty_set_operations(mxvar_sdriver, &mxser_ops);
retval = tty_register_driver(mxvar_sdriver);
if (retval) {
printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family "
"tty driver !\n");
goto err_put;
}
retval = pci_register_driver(&mxser_driver);
if (retval) {
printk(KERN_ERR "mxser: can't register pci driver\n");
goto err_unr;
}
return 0;
err_unr:
tty_unregister_driver(mxvar_sdriver);
err_put:
tty_driver_kref_put(mxvar_sdriver);
return retval;
}
static void __exit mxser_module_exit(void)
{
pci_unregister_driver(&mxser_driver);
tty_unregister_driver(mxvar_sdriver);
tty_driver_kref_put(mxvar_sdriver);
}
module_init(mxser_module_init);
module_exit(mxser_module_exit);
/* linux-master: drivers/tty/mxser.c */
// SPDX-License-Identifier: GPL-2.0
/* ePAPR hypervisor byte channel device driver
*
* Copyright 2009-2011 Freescale Semiconductor, Inc.
*
* Author: Timur Tabi <[email protected]>
*
 * This driver supports three distinct interfaces, all of which are related to
* ePAPR hypervisor byte channels.
*
* 1) An early-console (udbg) driver. This provides early console output
* through a byte channel. The byte channel handle must be specified in a
* Kconfig option.
*
* 2) A normal console driver. Output is sent to the byte channel designated
* for stdout in the device tree. The console driver is for handling kernel
* printk calls.
*
* 3) A tty driver, which is used to handle user-space input and output. The
* byte channel used for the console is designated as the default tty.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/epapr_hcalls.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/cdev.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/circ_buf.h>
#include <asm/udbg.h>
/* The size of the transmit circular buffer. This must be a power of two. */
#define BUF_SIZE 2048
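/*
 * Because BUF_SIZE is a power of two, the head/tail indices can wrap with
 * a cheap mask instead of a modulo. A sketch of the invariant used by the
 * transmit path below:
 *
 *	head = (head + n) & (BUF_SIZE - 1);	// producer advances by n bytes
 *	tail = (tail + n) & (BUF_SIZE - 1);	// consumer advances by n bytes
 *
 * CIRC_CNT()/CIRC_SPACE() from <linux/circ_buf.h> then report fill level
 * and free room directly from the two indices.
 */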
/* Per-byte channel private data */
struct ehv_bc_data {
struct device *dev;
struct tty_port port;
uint32_t handle;
unsigned int rx_irq;
unsigned int tx_irq;
spinlock_t lock; /* lock for transmit buffer */
unsigned char buf[BUF_SIZE]; /* transmit circular buffer */
unsigned int head; /* circular buffer head */
unsigned int tail; /* circular buffer tail */
int tx_irq_enabled; /* true == TX interrupt is enabled */
};
/* Array of byte channel objects */
static struct ehv_bc_data *bcs;
/* Byte channel handle for stdout (and stdin), taken from device tree */
static unsigned int stdout_bc;
/* Virtual IRQ for the byte channel handle for stdin, taken from device tree */
static unsigned int stdout_irq;
/**************************** SUPPORT FUNCTIONS ****************************/
/*
* Enable the transmit interrupt
*
* Unlike a serial device, byte channels have no mechanism for disabling their
* own receive or transmit interrupts. To emulate that feature, we toggle
* the IRQ in the kernel.
*
* We cannot just blindly call enable_irq() or disable_irq(), because these
* calls are reference counted. This means that we cannot call enable_irq()
* if interrupts are already enabled. This can happen in two situations:
*
* 1. The tty layer makes two back-to-back calls to ehv_bc_tty_write()
* 2. A transmit interrupt occurs while executing ehv_bc_tx_dequeue()
*
* To work around this, we keep a flag to tell us if the IRQ is enabled or not.
*/
static void enable_tx_interrupt(struct ehv_bc_data *bc)
{
if (!bc->tx_irq_enabled) {
enable_irq(bc->tx_irq);
bc->tx_irq_enabled = 1;
}
}
static void disable_tx_interrupt(struct ehv_bc_data *bc)
{
if (bc->tx_irq_enabled) {
disable_irq_nosync(bc->tx_irq);
bc->tx_irq_enabled = 0;
}
}
/*
* find the byte channel handle to use for the console
*
* The byte channel to be used for the console is specified via a "stdout"
* property in the /chosen node.
*/
static int find_console_handle(void)
{
struct device_node *np = of_stdout;
const uint32_t *iprop;
/* We don't care what the aliased node is actually called. We only
* care if it's compatible with "epapr,hv-byte-channel", because that
* indicates that it's a byte channel node.
*/
if (!np || !of_device_is_compatible(np, "epapr,hv-byte-channel"))
return 0;
stdout_irq = irq_of_parse_and_map(np, 0);
if (!stdout_irq) {
pr_err("ehv-bc: no 'interrupts' property in %pOF node\n", np);
return 0;
}
/*
* The 'hv-handle' property contains the handle for this byte channel.
*/
iprop = of_get_property(np, "hv-handle", NULL);
if (!iprop) {
pr_err("ehv-bc: no 'hv-handle' property in %pOFn node\n",
np);
return 0;
}
stdout_bc = be32_to_cpu(*iprop);
return 1;
}
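/*
 * Bounce short sends through a zeroed, full-size local buffer. The working
 * assumption is that the hypercall may read a fixed EV_BYTE_CHANNEL_MAX_BYTES
 * block regardless of *count, so padding keeps it from reading past the end
 * of the caller's data.
 */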
static unsigned int local_ev_byte_channel_send(unsigned int handle,
unsigned int *count,
const char *p)
{
char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
unsigned int c = *count;
if (c < sizeof(buffer)) {
memcpy(buffer, p, c);
memset(&buffer[c], 0, sizeof(buffer) - c);
p = buffer;
}
return ev_byte_channel_send(handle, count, p);
}
/*************************** EARLY CONSOLE DRIVER ***************************/
#ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC
/*
* send a byte to a byte channel, wait if necessary
*
* This function sends a byte to a byte channel, and it waits and
 * retries if the byte channel is full. It returns once the character
 * has been sent, or if some error has occurred.
*
*/
static void byte_channel_spin_send(const char data)
{
int ret, count;
do {
count = 1;
ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
&count, &data);
} while (ret == EV_EAGAIN);
}
/*
* The udbg subsystem calls this function to display a single character.
* We convert CR to a CR/LF.
*/
static void ehv_bc_udbg_putc(char c)
{
if (c == '\n')
byte_channel_spin_send('\r');
byte_channel_spin_send(c);
}
/*
* early console initialization
*
* PowerPC kernels support an early printk console, also known as udbg.
* This function must be called via the ppc_md.init_early function pointer.
* At this point, the device tree has been unflattened, so we can obtain the
* byte channel handle for stdout.
*
* We only support displaying of characters (putc). We do not support
* keyboard input.
*/
void __init udbg_init_ehv_bc(void)
{
unsigned int rx_count, tx_count;
unsigned int ret;
/* Verify the byte channel handle */
ret = ev_byte_channel_poll(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
&rx_count, &tx_count);
if (ret)
return;
udbg_putc = ehv_bc_udbg_putc;
register_early_udbg_console();
udbg_printf("ehv-bc: early console using byte channel handle %u\n",
CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE);
}
#endif
/****************************** CONSOLE DRIVER ******************************/
static struct tty_driver *ehv_bc_driver;
/*
* Byte channel console sending worker function.
*
* For consoles, if the output buffer is full, we should just spin until it
* clears.
*/
static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s,
unsigned int count)
{
unsigned int len;
int ret = 0;
while (count) {
len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES);
do {
ret = local_ev_byte_channel_send(handle, &len, s);
} while (ret == EV_EAGAIN);
count -= len;
s += len;
}
return ret;
}
/*
* write a string to the console
*
* This function gets called to write a string from the kernel, typically from
* a printk(). This function spins until all data is written.
*
* We copy the data to a temporary buffer because we need to insert a \r in
* front of every \n. It's more efficient to copy the data to the buffer than
* it is to make multiple hcalls for each character or each newline.
*/
static void ehv_bc_console_write(struct console *co, const char *s,
unsigned int count)
{
char s2[EV_BYTE_CHANNEL_MAX_BYTES];
unsigned int i, j = 0;
char c;
for (i = 0; i < count; i++) {
c = *s++;
if (c == '\n')
s2[j++] = '\r';
s2[j++] = c;
if (j >= (EV_BYTE_CHANNEL_MAX_BYTES - 1)) {
if (ehv_bc_console_byte_channel_send(stdout_bc, s2, j))
return;
j = 0;
}
}
if (j)
ehv_bc_console_byte_channel_send(stdout_bc, s2, j);
}
/*
* When /dev/console is opened, the kernel iterates the console list looking
* for one with ->device and then calls that method. On success, it expects
* the passed-in int* to contain the minor number to use.
*/
static struct tty_driver *ehv_bc_console_device(struct console *co, int *index)
{
*index = co->index;
return ehv_bc_driver;
}
static struct console ehv_bc_console = {
.name = "ttyEHV",
.write = ehv_bc_console_write,
.device = ehv_bc_console_device,
.flags = CON_PRINTBUFFER | CON_ENABLED,
};
/*
* Console initialization
*
* This is the first function that is called after the device tree is
* available, so here is where we determine the byte channel handle and IRQ for
* stdout/stdin, even though that information is used by the tty and character
* drivers.
*/
static int __init ehv_bc_console_init(void)
{
if (!find_console_handle()) {
pr_debug("ehv-bc: stdout is not a byte channel\n");
return -ENODEV;
}
#ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC
/* Print a friendly warning if the user chose the wrong byte channel
* handle for udbg.
*/
if (stdout_bc != CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE)
pr_warn("ehv-bc: udbg handle %u is not the stdout handle\n",
CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE);
#endif
/* add_preferred_console() must be called before register_console(),
otherwise it won't work. However, we don't want to enumerate all the
byte channels here, either, since we only care about one. */
add_preferred_console(ehv_bc_console.name, ehv_bc_console.index, NULL);
register_console(&ehv_bc_console);
pr_info("ehv-bc: registered console driver for byte channel %u\n",
stdout_bc);
return 0;
}
console_initcall(ehv_bc_console_init);
/******************************** TTY DRIVER ********************************/
/*
* byte channel receive interrupt handler
*
* This ISR is called whenever data is available on a byte channel.
*/
static irqreturn_t ehv_bc_tty_rx_isr(int irq, void *data)
{
struct ehv_bc_data *bc = data;
unsigned int rx_count, tx_count, len;
int count;
char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
int ret;
/* Find out how much data needs to be read, and then ask the TTY layer
* if it can handle that much. We want to ensure that every byte we
* read from the byte channel will be accepted by the TTY layer.
*/
ev_byte_channel_poll(bc->handle, &rx_count, &tx_count);
count = tty_buffer_request_room(&bc->port, rx_count);
/* 'count' is the maximum amount of data the TTY layer can accept at
* this time. However, during testing, I was never able to get 'count'
* to be less than 'rx_count'. I'm not sure whether I'm calling it
* correctly.
*/
while (count > 0) {
len = min_t(unsigned int, count, sizeof(buffer));
/* Read some data from the byte channel. This function will
* never return more than EV_BYTE_CHANNEL_MAX_BYTES bytes.
*/
ev_byte_channel_receive(bc->handle, &len, buffer);
/* 'len' is now the amount of data that's been received. 'len'
* can't be zero, and most likely it's equal to one.
*/
/* Pass the received data to the tty layer. */
ret = tty_insert_flip_string(&bc->port, buffer, len);
/* 'ret' is the number of bytes that the TTY layer accepted.
* If it's not equal to 'len', then it means the buffer is
* full, which should never happen. If it does happen, we can
* exit gracefully, but we drop the last 'len - ret' characters
* that we read from the byte channel.
*/
if (ret != len)
break;
count -= len;
}
/* Tell the tty layer that we're done. */
tty_flip_buffer_push(&bc->port);
return IRQ_HANDLED;
}
/*
* dequeue the transmit buffer to the hypervisor
*
* This function, which can be called in interrupt context, dequeues as much
* data as possible from the transmit buffer to the byte channel.
*/
static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc)
{
unsigned int count;
unsigned int len, ret;
unsigned long flags;
do {
spin_lock_irqsave(&bc->lock, flags);
len = min_t(unsigned int,
CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE),
EV_BYTE_CHANNEL_MAX_BYTES);
ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
/* 'len' is valid only if the return code is 0 or EV_EAGAIN */
if (!ret || (ret == EV_EAGAIN))
bc->tail = (bc->tail + len) & (BUF_SIZE - 1);
count = CIRC_CNT(bc->head, bc->tail, BUF_SIZE);
spin_unlock_irqrestore(&bc->lock, flags);
} while (count && !ret);
spin_lock_irqsave(&bc->lock, flags);
if (CIRC_CNT(bc->head, bc->tail, BUF_SIZE))
/*
* If we haven't emptied the buffer, then enable the TX IRQ.
* We'll get an interrupt when there's more room in the
* hypervisor's output buffer.
*/
enable_tx_interrupt(bc);
else
disable_tx_interrupt(bc);
spin_unlock_irqrestore(&bc->lock, flags);
}
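/*
 * Worked example (illustrative only; assumes BUF_SIZE is 2048, a power of
 * two, as the masking requires): with head = 10 and tail = 2040,
 * CIRC_CNT_TO_END() returns 8 -- only the bytes up to the end of the
 * buffer -- so one hcall sends those 8 bytes, and the masked update
 * "tail = (2040 + 8) & (BUF_SIZE - 1)" wraps tail to 0. The next loop
 * iteration then sends the remaining 10 bytes from the start of buf[].
 */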
/*
* byte channel transmit interrupt handler
*
* This ISR is called whenever space becomes available for transmitting
* characters on a byte channel.
*/
static irqreturn_t ehv_bc_tty_tx_isr(int irq, void *data)
{
struct ehv_bc_data *bc = data;
ehv_bc_tx_dequeue(bc);
tty_port_tty_wakeup(&bc->port);
return IRQ_HANDLED;
}
/*
 * This function is called when the tty layer has data for us to send. We
 * store
* the data first in a circular buffer, and then dequeue as much of that data
* as possible.
*
* We don't need to worry about whether there is enough room in the buffer for
* all the data. The purpose of ehv_bc_tty_write_room() is to tell the tty
* layer how much data it can safely send to us. We guarantee that
* ehv_bc_tty_write_room() will never lie, so the tty layer will never send us
* too much data.
*/
static ssize_t ehv_bc_tty_write(struct tty_struct *ttys, const u8 *s,
size_t count)
{
struct ehv_bc_data *bc = ttys->driver_data;
unsigned long flags;
unsigned int len;
unsigned int written = 0;
while (1) {
spin_lock_irqsave(&bc->lock, flags);
len = CIRC_SPACE_TO_END(bc->head, bc->tail, BUF_SIZE);
if (count < len)
len = count;
if (len) {
memcpy(bc->buf + bc->head, s, len);
bc->head = (bc->head + len) & (BUF_SIZE - 1);
}
spin_unlock_irqrestore(&bc->lock, flags);
if (!len)
break;
s += len;
count -= len;
written += len;
}
ehv_bc_tx_dequeue(bc);
return written;
}
/*
 * This function can be called multiple times for a given tty_struct, which is
 * why we initialize ttys->driver_data in ehv_bc_tty_port_activate() instead.
*
* The tty layer will still call this function even if the device was not
* registered (i.e. tty_register_device() was not called). This happens
* because tty_register_device() is optional and some legacy drivers don't
* use it. So we need to check for that.
*/
static int ehv_bc_tty_open(struct tty_struct *ttys, struct file *filp)
{
struct ehv_bc_data *bc = &bcs[ttys->index];
if (!bc->dev)
return -ENODEV;
return tty_port_open(&bc->port, ttys, filp);
}
/*
* Amazingly, if ehv_bc_tty_open() returns an error code, the tty layer will
* still call this function to close the tty device. So we can't assume that
* the tty port has been initialized.
*/
static void ehv_bc_tty_close(struct tty_struct *ttys, struct file *filp)
{
struct ehv_bc_data *bc = &bcs[ttys->index];
if (bc->dev)
tty_port_close(&bc->port, ttys, filp);
}
/*
* Return the amount of space in the output buffer
*
* This is actually a contract between the driver and the tty layer outlining
* how much write room the driver can guarantee will be sent OR BUFFERED. This
* driver MUST honor the return value.
*/
static unsigned int ehv_bc_tty_write_room(struct tty_struct *ttys)
{
struct ehv_bc_data *bc = ttys->driver_data;
unsigned long flags;
unsigned int count;
spin_lock_irqsave(&bc->lock, flags);
count = CIRC_SPACE(bc->head, bc->tail, BUF_SIZE);
spin_unlock_irqrestore(&bc->lock, flags);
return count;
}
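/*
 * Illustrative note (added, not from the original source): together with
 * ehv_bc_tty_write(), this gives the usual tty contract: a caller that
 * reads write_room() and then writes at most that many bytes never sees a
 * short write, because the circular buffer can only gain space between
 * the two calls (the TX side may drain it, but nothing else fills it).
 */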
/*
* Stop sending data to the tty layer
*
* This function is called when the tty layer's input buffers are getting full,
* so the driver should stop sending it data. The easiest way to do this is to
* disable the RX IRQ, which will prevent ehv_bc_tty_rx_isr() from being
* called.
*
* The hypervisor will continue to queue up any incoming data. If there is any
* data in the queue when the RX interrupt is enabled, we'll immediately get an
* RX interrupt.
*/
static void ehv_bc_tty_throttle(struct tty_struct *ttys)
{
struct ehv_bc_data *bc = ttys->driver_data;
disable_irq(bc->rx_irq);
}
/*
* Resume sending data to the tty layer
*
* This function is called after previously calling ehv_bc_tty_throttle(). The
* tty layer's input buffers now have more room, so the driver can resume
* sending it data.
*/
static void ehv_bc_tty_unthrottle(struct tty_struct *ttys)
{
struct ehv_bc_data *bc = ttys->driver_data;
/* If there is any data in the queue when the RX interrupt is enabled,
* we'll immediately get an RX interrupt.
*/
enable_irq(bc->rx_irq);
}
static void ehv_bc_tty_hangup(struct tty_struct *ttys)
{
struct ehv_bc_data *bc = ttys->driver_data;
ehv_bc_tx_dequeue(bc);
tty_port_hangup(&bc->port);
}
/*
* TTY driver operations
*
* If we could ask the hypervisor how much data is still in the TX buffer, or
* at least how big the TX buffers are, then we could implement the
* .wait_until_sent and .chars_in_buffer functions.
*/
static const struct tty_operations ehv_bc_ops = {
.open = ehv_bc_tty_open,
.close = ehv_bc_tty_close,
.write = ehv_bc_tty_write,
.write_room = ehv_bc_tty_write_room,
.throttle = ehv_bc_tty_throttle,
.unthrottle = ehv_bc_tty_unthrottle,
.hangup = ehv_bc_tty_hangup,
};
/*
* initialize the TTY port
*
* This function will only be called once, no matter how many times
* ehv_bc_tty_open() is called. That's why we register the ISR here, and also
* why we initialize tty_struct-related variables here.
*/
static int ehv_bc_tty_port_activate(struct tty_port *port,
struct tty_struct *ttys)
{
struct ehv_bc_data *bc = container_of(port, struct ehv_bc_data, port);
int ret;
ttys->driver_data = bc;
ret = request_irq(bc->rx_irq, ehv_bc_tty_rx_isr, 0, "ehv-bc", bc);
if (ret < 0) {
dev_err(bc->dev, "could not request rx irq %u (ret=%i)\n",
bc->rx_irq, ret);
return ret;
}
/* request_irq also enables the IRQ */
bc->tx_irq_enabled = 1;
ret = request_irq(bc->tx_irq, ehv_bc_tty_tx_isr, 0, "ehv-bc", bc);
if (ret < 0) {
dev_err(bc->dev, "could not request tx irq %u (ret=%i)\n",
bc->tx_irq, ret);
free_irq(bc->rx_irq, bc);
return ret;
}
/* The TX IRQ is enabled only when we can't write all the data to the
* byte channel at once, so by default it's disabled.
*/
disable_tx_interrupt(bc);
return 0;
}
static void ehv_bc_tty_port_shutdown(struct tty_port *port)
{
struct ehv_bc_data *bc = container_of(port, struct ehv_bc_data, port);
free_irq(bc->tx_irq, bc);
free_irq(bc->rx_irq, bc);
}
static const struct tty_port_operations ehv_bc_tty_port_ops = {
.activate = ehv_bc_tty_port_activate,
.shutdown = ehv_bc_tty_port_shutdown,
};
static int ehv_bc_tty_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ehv_bc_data *bc;
const uint32_t *iprop;
unsigned int handle;
int ret;
static unsigned int index = 1;
unsigned int i;
iprop = of_get_property(np, "hv-handle", NULL);
if (!iprop) {
dev_err(&pdev->dev, "no 'hv-handle' property in %pOFn node\n",
np);
return -ENODEV;
}
/* We already told the console layer that the index for the console
* device is zero, so we need to make sure that we use that index when
* we probe the console byte channel node.
*/
handle = be32_to_cpu(*iprop);
i = (handle == stdout_bc) ? 0 : index++;
bc = &bcs[i];
bc->handle = handle;
bc->head = 0;
bc->tail = 0;
spin_lock_init(&bc->lock);
bc->rx_irq = irq_of_parse_and_map(np, 0);
bc->tx_irq = irq_of_parse_and_map(np, 1);
if (!bc->rx_irq || !bc->tx_irq) {
dev_err(&pdev->dev, "no 'interrupts' property in %pOFn node\n",
np);
ret = -ENODEV;
goto error;
}
tty_port_init(&bc->port);
bc->port.ops = &ehv_bc_tty_port_ops;
bc->dev = tty_port_register_device(&bc->port, ehv_bc_driver, i,
&pdev->dev);
if (IS_ERR(bc->dev)) {
ret = PTR_ERR(bc->dev);
dev_err(&pdev->dev, "could not register tty (ret=%i)\n", ret);
goto error;
}
dev_set_drvdata(&pdev->dev, bc);
dev_info(&pdev->dev, "registered /dev/%s%u for byte channel %u\n",
ehv_bc_driver->name, i, bc->handle);
return 0;
error:
tty_port_destroy(&bc->port);
irq_dispose_mapping(bc->tx_irq);
irq_dispose_mapping(bc->rx_irq);
memset(bc, 0, sizeof(struct ehv_bc_data));
return ret;
}
static const struct of_device_id ehv_bc_tty_of_ids[] = {
{ .compatible = "epapr,hv-byte-channel" },
{}
};
static struct platform_driver ehv_bc_tty_driver = {
.driver = {
.name = "ehv-bc",
.of_match_table = ehv_bc_tty_of_ids,
.suppress_bind_attrs = true,
},
.probe = ehv_bc_tty_probe,
};
/**
* ehv_bc_init - ePAPR hypervisor byte channel driver initialization
*
* This function is called when this driver is loaded.
*/
static int __init ehv_bc_init(void)
{
struct tty_driver *driver;
struct device_node *np;
unsigned int count = 0; /* Number of elements in bcs[] */
int ret;
pr_info("ePAPR hypervisor byte channel driver\n");
/* Count the number of byte channels */
for_each_compatible_node(np, NULL, "epapr,hv-byte-channel")
count++;
if (!count)
return -ENODEV;
/* The array index of an element in bcs[] is the same as the tty index
* for that element. If you know the address of an element in the
* array, then you can use pointer math (e.g. "bc - bcs") to get its
* tty index.
*/
bcs = kcalloc(count, sizeof(struct ehv_bc_data), GFP_KERNEL);
if (!bcs)
return -ENOMEM;
driver = tty_alloc_driver(count, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(driver)) {
ret = PTR_ERR(driver);
goto err_free_bcs;
}
driver->driver_name = "ehv-bc";
driver->name = ehv_bc_console.name;
driver->type = TTY_DRIVER_TYPE_CONSOLE;
driver->subtype = SYSTEM_TYPE_CONSOLE;
driver->init_termios = tty_std_termios;
tty_set_operations(driver, &ehv_bc_ops);
ret = tty_register_driver(driver);
if (ret) {
pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret);
goto err_tty_driver_kref_put;
}
ehv_bc_driver = driver;
ret = platform_driver_register(&ehv_bc_tty_driver);
if (ret) {
pr_err("ehv-bc: could not register platform driver (ret=%i)\n",
ret);
goto err_deregister_tty_driver;
}
return 0;
err_deregister_tty_driver:
ehv_bc_driver = NULL;
tty_unregister_driver(driver);
err_tty_driver_kref_put:
tty_driver_kref_put(driver);
err_free_bcs:
kfree(bcs);
return ret;
}
device_initcall(ehv_bc_init);
| linux-master | drivers/tty/ehv_bytechan.c |
// SPDX-License-Identifier: GPL-2.0
/* vcc.c: sun4v virtual channel concentrator
*
* Copyright (C) 2017 Oracle. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/termios_internal.h>
#include <asm/vio.h>
#include <asm/ldc.h>
MODULE_DESCRIPTION("Sun LDOM virtual console concentrator driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");
struct vcc_port {
struct vio_driver_state vio;
spinlock_t lock;
char *domain;
struct tty_struct *tty; /* only populated while dev is open */
unsigned long index; /* index into the vcc_table */
u64 refcnt;
bool excl_locked;
bool removed;
/* This buffer is required to support the tty write_room interface
* and guarantee that any characters that the driver accepts will
* be eventually sent, either immediately or later.
*/
size_t chars_in_buffer;
struct vio_vcc buffer;
struct timer_list rx_timer;
struct timer_list tx_timer;
};
/* Microseconds that thread will delay waiting for a vcc port ref */
#define VCC_REF_DELAY 100
#define VCC_MAX_PORTS 1024
#define VCC_MINOR_START 0 /* must be zero */
#define VCC_BUFF_LEN VIO_VCC_MTU_SIZE
#define VCC_CTL_BREAK -1
#define VCC_CTL_HUP -2
static struct tty_driver *vcc_tty_driver;
static struct vcc_port *vcc_table[VCC_MAX_PORTS];
static DEFINE_SPINLOCK(vcc_table_lock);
static unsigned int vcc_dbg;
static unsigned int vcc_dbg_ldc;
static unsigned int vcc_dbg_vio;
module_param(vcc_dbg, uint, 0664);
module_param(vcc_dbg_ldc, uint, 0664);
module_param(vcc_dbg_vio, uint, 0664);
#define VCC_DBG_DRV 0x1
#define VCC_DBG_LDC 0x2
#define VCC_DBG_PKT 0x4
#define vccdbg(f, a...) \
do { \
if (vcc_dbg & VCC_DBG_DRV) \
pr_info(f, ## a); \
	} while (0)
#define vccdbgl(l) \
do { \
if (vcc_dbg & VCC_DBG_LDC) \
ldc_print(l); \
	} while (0)
#define vccdbgp(pkt) \
do { \
if (vcc_dbg & VCC_DBG_PKT) { \
int i; \
for (i = 0; i < pkt.tag.stype; i++) \
pr_info("[%c]", pkt.data[i]); \
} \
	} while (0)
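/*
 * Usage sketch (assumed, standard module_param() layout): the debug masks
 * above can be set at load time or at run time, e.g.:
 *
 *	modprobe vcc vcc_dbg=0x1
 *	echo 7 > /sys/module/vcc/parameters/vcc_dbg
 *
 * where 0x7 enables driver, LDC and packet tracing (VCC_DBG_DRV |
 * VCC_DBG_LDC | VCC_DBG_PKT).
 */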
/* Note: Be careful when adding flags to this line discipline. Don't
* add anything that will cause echoing or we'll go into recursive
* loop echoing chars back and forth with the console drivers.
*/
static const struct ktermios vcc_tty_termios = {
.c_iflag = IGNBRK | IGNPAR,
.c_oflag = OPOST,
.c_cflag = B38400 | CS8 | CREAD | HUPCL,
.c_cc = INIT_C_CC,
.c_ispeed = 38400,
.c_ospeed = 38400
};
/**
* vcc_table_add() - Add VCC port to the VCC table
* @port: pointer to the VCC port
*
* Return: index of the port in the VCC table on success,
* -1 on failure
*/
static int vcc_table_add(struct vcc_port *port)
{
unsigned long flags;
int i;
spin_lock_irqsave(&vcc_table_lock, flags);
for (i = VCC_MINOR_START; i < VCC_MAX_PORTS; i++) {
if (!vcc_table[i]) {
vcc_table[i] = port;
break;
}
}
spin_unlock_irqrestore(&vcc_table_lock, flags);
if (i < VCC_MAX_PORTS)
return i;
else
return -1;
}
/**
* vcc_table_remove() - Removes a VCC port from the VCC table
* @index: Index into the VCC table
*/
static void vcc_table_remove(unsigned long index)
{
unsigned long flags;
if (WARN_ON(index >= VCC_MAX_PORTS))
return;
spin_lock_irqsave(&vcc_table_lock, flags);
vcc_table[index] = NULL;
spin_unlock_irqrestore(&vcc_table_lock, flags);
}
/**
* vcc_get() - Gets a reference to VCC port
* @index: Index into the VCC table
* @excl: Indicates if an exclusive access is requested
*
* Return: reference to the VCC port, if found
* NULL, if port not found
*/
static struct vcc_port *vcc_get(unsigned long index, bool excl)
{
struct vcc_port *port;
unsigned long flags;
try_again:
spin_lock_irqsave(&vcc_table_lock, flags);
port = vcc_table[index];
if (!port) {
spin_unlock_irqrestore(&vcc_table_lock, flags);
return NULL;
}
if (!excl) {
if (port->excl_locked) {
spin_unlock_irqrestore(&vcc_table_lock, flags);
udelay(VCC_REF_DELAY);
goto try_again;
}
port->refcnt++;
spin_unlock_irqrestore(&vcc_table_lock, flags);
return port;
}
if (port->refcnt) {
spin_unlock_irqrestore(&vcc_table_lock, flags);
/* Threads wanting exclusive access will wait half the time,
* probably giving them higher priority in the case of
* multiple waiters.
*/
udelay(VCC_REF_DELAY/2);
goto try_again;
}
port->refcnt++;
port->excl_locked = true;
spin_unlock_irqrestore(&vcc_table_lock, flags);
return port;
}
/**
* vcc_put() - Returns a reference to VCC port
* @port: pointer to VCC port
* @excl: Indicates if the returned reference is an exclusive reference
*
* Note: It's the caller's responsibility to ensure the correct value
* for the excl flag
*/
static void vcc_put(struct vcc_port *port, bool excl)
{
unsigned long flags;
if (!port)
return;
spin_lock_irqsave(&vcc_table_lock, flags);
/* check if caller attempted to put with the wrong flags */
if (WARN_ON((excl && !port->excl_locked) ||
(!excl && port->excl_locked)))
goto done;
port->refcnt--;
if (excl)
port->excl_locked = false;
done:
spin_unlock_irqrestore(&vcc_table_lock, flags);
}
/**
* vcc_get_ne() - Get a non-exclusive reference to VCC port
* @index: Index into the VCC table
*
* Gets a non-exclusive reference to VCC port, if it's not removed
*
* Return: pointer to the VCC port, if found
* NULL, if port not found
*/
static struct vcc_port *vcc_get_ne(unsigned long index)
{
struct vcc_port *port;
port = vcc_get(index, false);
if (port && port->removed) {
vcc_put(port, false);
return NULL;
}
return port;
}
static void vcc_kick_rx(struct vcc_port *port)
{
struct vio_driver_state *vio = &port->vio;
assert_spin_locked(&port->lock);
if (!timer_pending(&port->rx_timer) && !port->removed) {
disable_irq_nosync(vio->vdev->rx_irq);
port->rx_timer.expires = (jiffies + 1);
add_timer(&port->rx_timer);
}
}
static void vcc_kick_tx(struct vcc_port *port)
{
assert_spin_locked(&port->lock);
if (!timer_pending(&port->tx_timer) && !port->removed) {
port->tx_timer.expires = (jiffies + 1);
add_timer(&port->tx_timer);
}
}
static int vcc_rx_check(struct tty_struct *tty, int size)
{
if (WARN_ON(!tty || !tty->port))
return 1;
/* tty_buffer_request_room won't sleep because it uses
* GFP_ATOMIC flag to allocate buffer
*/
if (test_bit(TTY_THROTTLED, &tty->flags) ||
(tty_buffer_request_room(tty->port, VCC_BUFF_LEN) < VCC_BUFF_LEN))
return 0;
return 1;
}
static int vcc_rx(struct tty_struct *tty, char *buf, int size)
{
int len = 0;
if (WARN_ON(!tty || !tty->port))
return len;
len = tty_insert_flip_string(tty->port, buf, size);
if (len)
tty_flip_buffer_push(tty->port);
return len;
}
static int vcc_ldc_read(struct vcc_port *port)
{
struct vio_driver_state *vio = &port->vio;
struct tty_struct *tty;
struct vio_vcc pkt;
int rv = 0;
tty = port->tty;
if (!tty) {
rv = ldc_rx_reset(vio->lp);
vccdbg("VCC: reset rx q: rv=%d\n", rv);
goto done;
}
/* Read as long as LDC has incoming data. */
while (1) {
if (!vcc_rx_check(tty, VIO_VCC_MTU_SIZE)) {
vcc_kick_rx(port);
break;
}
vccdbgl(vio->lp);
rv = ldc_read(vio->lp, &pkt, sizeof(pkt));
if (rv <= 0)
break;
vccdbg("VCC: ldc_read()=%d\n", rv);
vccdbg("TAG [%02x:%02x:%04x:%08x]\n",
pkt.tag.type, pkt.tag.stype,
pkt.tag.stype_env, pkt.tag.sid);
if (pkt.tag.type == VIO_TYPE_DATA) {
vccdbgp(pkt);
/* vcc_rx_check ensures memory availability */
vcc_rx(tty, pkt.data, pkt.tag.stype);
} else {
pr_err("VCC: unknown msg [%02x:%02x:%04x:%08x]\n",
pkt.tag.type, pkt.tag.stype,
pkt.tag.stype_env, pkt.tag.sid);
rv = -ECONNRESET;
break;
}
WARN_ON(rv != LDC_PACKET_SIZE);
}
done:
return rv;
}
static void vcc_rx_timer(struct timer_list *t)
{
struct vcc_port *port = from_timer(port, t, rx_timer);
struct vio_driver_state *vio;
unsigned long flags;
int rv;
spin_lock_irqsave(&port->lock, flags);
port->rx_timer.expires = 0;
vio = &port->vio;
enable_irq(vio->vdev->rx_irq);
if (!port->tty || port->removed)
goto done;
rv = vcc_ldc_read(port);
if (rv == -ECONNRESET)
vio_conn_reset(vio);
done:
spin_unlock_irqrestore(&port->lock, flags);
vcc_put(port, false);
}
static void vcc_tx_timer(struct timer_list *t)
{
struct vcc_port *port = from_timer(port, t, tx_timer);
struct vio_vcc *pkt;
unsigned long flags;
size_t tosend = 0;
int rv;
spin_lock_irqsave(&port->lock, flags);
port->tx_timer.expires = 0;
if (!port->tty || port->removed)
goto done;
tosend = min(VCC_BUFF_LEN, port->chars_in_buffer);
if (!tosend)
goto done;
pkt = &port->buffer;
pkt->tag.type = VIO_TYPE_DATA;
pkt->tag.stype = tosend;
vccdbgl(port->vio.lp);
rv = ldc_write(port->vio.lp, pkt, (VIO_TAG_SIZE + tosend));
WARN_ON(!rv);
if (rv < 0) {
vccdbg("VCC: ldc_write()=%d\n", rv);
vcc_kick_tx(port);
} else {
struct tty_struct *tty = port->tty;
port->chars_in_buffer = 0;
if (tty)
tty_wakeup(tty);
}
done:
spin_unlock_irqrestore(&port->lock, flags);
vcc_put(port, false);
}
/**
* vcc_event() - LDC event processing engine
* @arg: VCC private data
* @event: LDC event
*
* Handles LDC events for VCC
*/
static void vcc_event(void *arg, int event)
{
struct vio_driver_state *vio;
struct vcc_port *port;
unsigned long flags;
int rv;
port = arg;
vio = &port->vio;
spin_lock_irqsave(&port->lock, flags);
switch (event) {
case LDC_EVENT_RESET:
case LDC_EVENT_UP:
vio_link_state_change(vio, event);
break;
case LDC_EVENT_DATA_READY:
rv = vcc_ldc_read(port);
if (rv == -ECONNRESET)
vio_conn_reset(vio);
break;
default:
pr_err("VCC: unexpected LDC event(%d)\n", event);
}
spin_unlock_irqrestore(&port->lock, flags);
}
static struct ldc_channel_config vcc_ldc_cfg = {
.event = vcc_event,
.mtu = VIO_VCC_MTU_SIZE,
.mode = LDC_MODE_RAW,
.debug = 0,
};
/* Ordered from largest major to lowest */
static struct vio_version vcc_versions[] = {
{ .major = 1, .minor = 0 },
};
static struct tty_port_operations vcc_port_ops = { 0 };
static ssize_t domain_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct vcc_port *port;
int rv;
port = dev_get_drvdata(dev);
if (!port)
return -ENODEV;
rv = scnprintf(buf, PAGE_SIZE, "%s\n", port->domain);
return rv;
}
static int vcc_send_ctl(struct vcc_port *port, int ctl)
{
struct vio_vcc pkt;
int rv;
pkt.tag.type = VIO_TYPE_CTRL;
pkt.tag.sid = ctl;
pkt.tag.stype = 0;
rv = ldc_write(port->vio.lp, &pkt, sizeof(pkt.tag));
WARN_ON(!rv);
vccdbg("VCC: ldc_write(%ld)=%d\n", sizeof(pkt.tag), rv);
return rv;
}
static ssize_t break_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct vcc_port *port;
unsigned long flags;
int rv = count;
int brk;
port = dev_get_drvdata(dev);
if (!port)
return -ENODEV;
spin_lock_irqsave(&port->lock, flags);
	if (sscanf(buf, "%d", &brk) != 1 || brk != 1)
rv = -EINVAL;
else if (vcc_send_ctl(port, VCC_CTL_BREAK) < 0)
vcc_kick_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
return rv;
}
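/*
 * Usage sketch (path assumed from the standard VIO sysfs layout): writing
 * "1" to the "break" attribute created below sends a break to the guest:
 *
 *	echo 1 > /sys/bus/vio/devices/<vcc-port>/break
 *
 * Any other value is rejected with -EINVAL by the sscanf() check above.
 */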
static DEVICE_ATTR_ADMIN_RO(domain);
static DEVICE_ATTR_WO(break);
static struct attribute *vcc_sysfs_entries[] = {
&dev_attr_domain.attr,
&dev_attr_break.attr,
NULL
};
static struct attribute_group vcc_attribute_group = {
.name = NULL,
.attrs = vcc_sysfs_entries,
};
/**
* vcc_probe() - Initialize VCC port
* @vdev: Pointer to VIO device of the new VCC port
* @id: VIO device ID
*
* Initializes a VCC port to receive serial console data from
* the guest domain. Sets up a TTY end point on the control
* domain. Sets up VIO/LDC link between the guest & control
* domain endpoints.
*
* Return: status of the probe
*/
static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
struct vcc_port *port;
struct device *dev;
const char *domain;
char *name;
u64 node;
int rv;
vccdbg("VCC: name=%s\n", dev_name(&vdev->dev));
if (!vcc_tty_driver) {
pr_err("VCC: TTY driver not registered\n");
return -ENODEV;
}
port = kzalloc(sizeof(struct vcc_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL);
rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions,
ARRAY_SIZE(vcc_versions), NULL, name);
if (rv)
goto free_port;
port->vio.debug = vcc_dbg_vio;
vcc_ldc_cfg.debug = vcc_dbg_ldc;
rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port);
if (rv)
goto free_port;
spin_lock_init(&port->lock);
port->index = vcc_table_add(port);
if (port->index == -1) {
pr_err("VCC: no more TTY indices left for allocation\n");
rv = -ENOMEM;
goto free_ldc;
}
/* Register the device using VCC table index as TTY index */
dev = tty_register_device(vcc_tty_driver, port->index, &vdev->dev);
if (IS_ERR(dev)) {
rv = PTR_ERR(dev);
goto free_table;
}
hp = mdesc_grab();
node = vio_vdev_node(hp, vdev);
if (node == MDESC_NODE_NULL) {
rv = -ENXIO;
mdesc_release(hp);
goto unreg_tty;
}
domain = mdesc_get_property(hp, node, "vcc-domain-name", NULL);
if (!domain) {
rv = -ENXIO;
mdesc_release(hp);
goto unreg_tty;
}
port->domain = kstrdup(domain, GFP_KERNEL);
mdesc_release(hp);
rv = sysfs_create_group(&vdev->dev.kobj, &vcc_attribute_group);
if (rv)
goto free_domain;
timer_setup(&port->rx_timer, vcc_rx_timer, 0);
timer_setup(&port->tx_timer, vcc_tx_timer, 0);
dev_set_drvdata(&vdev->dev, port);
/* It's possible to receive IRQs in the middle of vio_port_up. Disable
* IRQs until the port is up.
*/
disable_irq_nosync(vdev->rx_irq);
vio_port_up(&port->vio);
enable_irq(vdev->rx_irq);
return 0;
free_domain:
kfree(port->domain);
unreg_tty:
tty_unregister_device(vcc_tty_driver, port->index);
free_table:
vcc_table_remove(port->index);
free_ldc:
vio_ldc_free(&port->vio);
free_port:
kfree(name);
kfree(port);
return rv;
}
/**
* vcc_remove() - Terminate a VCC port
* @vdev: Pointer to VIO device of the VCC port
*
* Terminates a VCC port. Sets up the teardown of TTY and
* VIO/LDC link between guest and primary domains.
*
* Return: status of removal
*/
static void vcc_remove(struct vio_dev *vdev)
{
struct vcc_port *port = dev_get_drvdata(&vdev->dev);
del_timer_sync(&port->rx_timer);
del_timer_sync(&port->tx_timer);
/* If there's a process with the device open, do a synchronous
* hangup of the TTY. This *may* cause the process to call close
* asynchronously, but it's not guaranteed.
*/
if (port->tty)
tty_vhangup(port->tty);
/* Get exclusive reference to VCC, ensures that there are no other
* clients to this port. This cannot fail.
*/
vcc_get(port->index, true);
tty_unregister_device(vcc_tty_driver, port->index);
del_timer_sync(&port->vio.timer);
vio_ldc_free(&port->vio);
sysfs_remove_group(&vdev->dev.kobj, &vcc_attribute_group);
dev_set_drvdata(&vdev->dev, NULL);
if (port->tty) {
port->removed = true;
vcc_put(port, true);
} else {
vcc_table_remove(port->index);
kfree(port->vio.name);
kfree(port->domain);
kfree(port);
}
}
static const struct vio_device_id vcc_match[] = {
{
.type = "vcc-port",
},
{},
};
MODULE_DEVICE_TABLE(vio, vcc_match);
static struct vio_driver vcc_driver = {
.id_table = vcc_match,
.probe = vcc_probe,
.remove = vcc_remove,
.name = "vcc",
};
static int vcc_open(struct tty_struct *tty, struct file *vcc_file)
{
struct vcc_port *port;
if (tty->count > 1)
return -EBUSY;
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: open: Failed to find VCC port\n");
return -ENODEV;
}
if (unlikely(!port->vio.lp)) {
pr_err("VCC: open: LDC channel not configured\n");
vcc_put(port, false);
return -EPIPE;
}
vccdbgl(port->vio.lp);
vcc_put(port, false);
if (unlikely(!tty->port)) {
pr_err("VCC: open: TTY port not found\n");
return -ENXIO;
}
if (unlikely(!tty->port->ops)) {
pr_err("VCC: open: TTY ops not defined\n");
return -ENXIO;
}
return tty_port_open(tty->port, tty, vcc_file);
}
static void vcc_close(struct tty_struct *tty, struct file *vcc_file)
{
if (unlikely(tty->count > 1))
return;
if (unlikely(!tty->port)) {
pr_err("VCC: close: TTY port not found\n");
return;
}
tty_port_close(tty->port, tty, vcc_file);
}
static void vcc_ldc_hup(struct vcc_port *port)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (vcc_send_ctl(port, VCC_CTL_HUP) < 0)
vcc_kick_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
}
static void vcc_hangup(struct tty_struct *tty)
{
struct vcc_port *port;
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: hangup: Failed to find VCC port\n");
return;
}
if (unlikely(!tty->port)) {
pr_err("VCC: hangup: TTY port not found\n");
vcc_put(port, false);
return;
}
vcc_ldc_hup(port);
vcc_put(port, false);
tty_port_hangup(tty->port);
}
static ssize_t vcc_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct vcc_port *port;
struct vio_vcc *pkt;
unsigned long flags;
size_t total_sent = 0;
size_t tosend = 0;
int rv = -EINVAL;
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: write: Failed to find VCC port");
return -ENODEV;
}
spin_lock_irqsave(&port->lock, flags);
pkt = &port->buffer;
pkt->tag.type = VIO_TYPE_DATA;
while (count > 0) {
/* Minimum of data to write and space available */
tosend = min_t(size_t, count,
(VCC_BUFF_LEN - port->chars_in_buffer));
if (!tosend)
break;
memcpy(&pkt->data[port->chars_in_buffer], &buf[total_sent],
tosend);
port->chars_in_buffer += tosend;
pkt->tag.stype = tosend;
vccdbg("TAG [%02x:%02x:%04x:%08x]\n", pkt->tag.type,
pkt->tag.stype, pkt->tag.stype_env, pkt->tag.sid);
vccdbg("DATA [%s]\n", pkt->data);
vccdbgl(port->vio.lp);
		/* Since we know the VCC buffer has enough room for 'tosend'
		 * bytes, record them as sent regardless of whether the
		 * hypervisor actually took them: the data stays buffered
		 * and will be retried from the TX timer.
		 */
rv = ldc_write(port->vio.lp, pkt, (VIO_TAG_SIZE + tosend));
vccdbg("VCC: write: ldc_write(%zu)=%d\n",
(VIO_TAG_SIZE + tosend), rv);
total_sent += tosend;
count -= tosend;
if (rv < 0) {
vcc_kick_tx(port);
break;
}
port->chars_in_buffer = 0;
}
spin_unlock_irqrestore(&port->lock, flags);
vcc_put(port, false);
vccdbg("VCC: write: total=%zu rv=%d", total_sent, rv);
return total_sent ? total_sent : rv;
}
static unsigned int vcc_write_room(struct tty_struct *tty)
{
struct vcc_port *port;
unsigned int num;
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: write_room: Failed to find VCC port\n");
return 0;
}
num = VCC_BUFF_LEN - port->chars_in_buffer;
vcc_put(port, false);
return num;
}
static unsigned int vcc_chars_in_buffer(struct tty_struct *tty)
{
struct vcc_port *port;
unsigned int num;
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: chars_in_buffer: Failed to find VCC port\n");
return 0;
}
num = port->chars_in_buffer;
vcc_put(port, false);
return num;
}
static int vcc_break_ctl(struct tty_struct *tty, int state)
{
struct vcc_port *port;
unsigned long flags;
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: break_ctl: Failed to find VCC port\n");
return -ENODEV;
}
/* Turn off break */
if (state == 0) {
vcc_put(port, false);
return 0;
}
spin_lock_irqsave(&port->lock, flags);
if (vcc_send_ctl(port, VCC_CTL_BREAK) < 0)
vcc_kick_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
vcc_put(port, false);
return 0;
}
static int vcc_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct vcc_port *port_vcc;
struct tty_port *port_tty;
int ret;
if (tty->index >= VCC_MAX_PORTS)
return -EINVAL;
ret = tty_standard_install(driver, tty);
if (ret)
return ret;
port_tty = kzalloc(sizeof(struct tty_port), GFP_KERNEL);
if (!port_tty)
return -ENOMEM;
port_vcc = vcc_get(tty->index, true);
if (!port_vcc) {
pr_err("VCC: install: Failed to find VCC port\n");
tty->port = NULL;
kfree(port_tty);
return -ENODEV;
}
tty_port_init(port_tty);
port_tty->ops = &vcc_port_ops;
tty->port = port_tty;
port_vcc->tty = tty;
vcc_put(port_vcc, true);
return 0;
}
static void vcc_cleanup(struct tty_struct *tty)
{
struct vcc_port *port;
port = vcc_get(tty->index, true);
if (port) {
port->tty = NULL;
if (port->removed) {
vcc_table_remove(tty->index);
kfree(port->vio.name);
kfree(port->domain);
kfree(port);
} else {
vcc_put(port, true);
}
}
tty_port_destroy(tty->port);
kfree(tty->port);
tty->port = NULL;
}
static const struct tty_operations vcc_ops = {
.open = vcc_open,
.close = vcc_close,
.hangup = vcc_hangup,
.write = vcc_write,
.write_room = vcc_write_room,
.chars_in_buffer = vcc_chars_in_buffer,
.break_ctl = vcc_break_ctl,
.install = vcc_install,
.cleanup = vcc_cleanup,
};
#define VCC_TTY_FLAGS (TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_REAL_RAW)
static int vcc_tty_init(void)
{
int rv;
vcc_tty_driver = tty_alloc_driver(VCC_MAX_PORTS, VCC_TTY_FLAGS);
if (IS_ERR(vcc_tty_driver)) {
pr_err("VCC: TTY driver alloc failed\n");
return PTR_ERR(vcc_tty_driver);
}
vcc_tty_driver->driver_name = "vcc";
vcc_tty_driver->name = "vcc";
vcc_tty_driver->minor_start = VCC_MINOR_START;
vcc_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
vcc_tty_driver->init_termios = vcc_tty_termios;
tty_set_operations(vcc_tty_driver, &vcc_ops);
rv = tty_register_driver(vcc_tty_driver);
if (rv) {
pr_err("VCC: TTY driver registration failed\n");
tty_driver_kref_put(vcc_tty_driver);
vcc_tty_driver = NULL;
return rv;
}
vccdbg("VCC: TTY driver registered\n");
return 0;
}
static void vcc_tty_exit(void)
{
tty_unregister_driver(vcc_tty_driver);
tty_driver_kref_put(vcc_tty_driver);
vccdbg("VCC: TTY driver unregistered\n");
vcc_tty_driver = NULL;
}
static int __init vcc_init(void)
{
int rv;
rv = vcc_tty_init();
if (rv) {
pr_err("VCC: TTY init failed\n");
return rv;
}
rv = vio_register_driver(&vcc_driver);
if (rv) {
pr_err("VCC: VIO driver registration failed\n");
vcc_tty_exit();
} else {
vccdbg("VCC: VIO driver registered successfully\n");
}
return rv;
}
static void __exit vcc_exit(void)
{
vio_unregister_driver(&vcc_driver);
vccdbg("VCC: VIO driver unregistered\n");
vcc_tty_exit();
vccdbg("VCC: TTY driver unregistered\n");
}
module_init(vcc_init);
module_exit(vcc_exit);
| linux-master | drivers/tty/vcc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2017 Imagination Technologies Ltd.
*/
#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/goldfish.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/serial_core.h>
/* Goldfish tty register's offsets */
#define GOLDFISH_TTY_REG_BYTES_READY 0x04
#define GOLDFISH_TTY_REG_CMD 0x08
#define GOLDFISH_TTY_REG_DATA_PTR 0x10
#define GOLDFISH_TTY_REG_DATA_LEN 0x14
#define GOLDFISH_TTY_REG_DATA_PTR_HIGH 0x18
#define GOLDFISH_TTY_REG_VERSION 0x20
/* Goldfish tty commands */
#define GOLDFISH_TTY_CMD_INT_DISABLE 0
#define GOLDFISH_TTY_CMD_INT_ENABLE 1
#define GOLDFISH_TTY_CMD_WRITE_BUFFER 2
#define GOLDFISH_TTY_CMD_READ_BUFFER 3
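/*
 * Illustrative write transaction (sketch derived from the registers and
 * commands above): the host consumes a buffer once its address, length
 * and the write command have been programmed, roughly:
 *
 *	gf_write_ptr(buf, base + GOLDFISH_TTY_REG_DATA_PTR,
 *		     base + GOLDFISH_TTY_REG_DATA_PTR_HIGH);
 *	gf_iowrite32(len, base + GOLDFISH_TTY_REG_DATA_LEN);
 *	gf_iowrite32(GOLDFISH_TTY_CMD_WRITE_BUFFER, base + GOLDFISH_TTY_REG_CMD);
 *
 * do_rw_io() below performs exactly this sequence under qtty->lock.
 */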
struct goldfish_tty {
struct tty_port port;
spinlock_t lock;
void __iomem *base;
u32 irq;
int opencount;
struct console console;
u32 version;
struct device *dev;
};
static DEFINE_MUTEX(goldfish_tty_lock);
static struct tty_driver *goldfish_tty_driver;
static u32 goldfish_tty_line_count = 8;
static u32 goldfish_tty_current_line_count;
static struct goldfish_tty *goldfish_ttys;
static void do_rw_io(struct goldfish_tty *qtty,
unsigned long address,
unsigned int count,
int is_write)
{
unsigned long irq_flags;
void __iomem *base = qtty->base;
spin_lock_irqsave(&qtty->lock, irq_flags);
gf_write_ptr((void *)address, base + GOLDFISH_TTY_REG_DATA_PTR,
base + GOLDFISH_TTY_REG_DATA_PTR_HIGH);
gf_iowrite32(count, base + GOLDFISH_TTY_REG_DATA_LEN);
if (is_write)
gf_iowrite32(GOLDFISH_TTY_CMD_WRITE_BUFFER,
base + GOLDFISH_TTY_REG_CMD);
else
gf_iowrite32(GOLDFISH_TTY_CMD_READ_BUFFER,
base + GOLDFISH_TTY_REG_CMD);
spin_unlock_irqrestore(&qtty->lock, irq_flags);
}
static void goldfish_tty_rw(struct goldfish_tty *qtty,
unsigned long addr,
unsigned int count,
int is_write)
{
dma_addr_t dma_handle;
enum dma_data_direction dma_dir;
dma_dir = (is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (qtty->version > 0) {
/*
* Goldfish TTY for Ranchu platform uses
* physical addresses and DMA for read/write operations
*/
unsigned long addr_end = addr + count;
while (addr < addr_end) {
unsigned long pg_end = (addr & PAGE_MASK) + PAGE_SIZE;
unsigned long next =
pg_end < addr_end ? pg_end : addr_end;
unsigned long avail = next - addr;
/*
* Map the buffer's virtual address to the DMA address
* so the buffer can be accessed by the device.
*/
dma_handle = dma_map_single(qtty->dev, (void *)addr,
avail, dma_dir);
if (dma_mapping_error(qtty->dev, dma_handle)) {
dev_err(qtty->dev, "tty: DMA mapping error.\n");
return;
}
do_rw_io(qtty, dma_handle, avail, is_write);
/*
* Unmap the previously mapped region after
* the completion of the read/write operation.
*/
dma_unmap_single(qtty->dev, dma_handle, avail, dma_dir);
addr += avail;
}
} else {
/*
* Old style Goldfish TTY used on the Goldfish platform
* uses virtual addresses.
*/
do_rw_io(qtty, addr, count, is_write);
}
}
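/*
 * Worked example (illustrative only): with 4 KiB pages, a 6000-byte
 * buffer starting 1000 bytes into a page is split by the loop above into
 * a 3096-byte chunk (up to the first page boundary) followed by a
 * 2904-byte chunk, each mapped and unmapped separately so that every
 * dma_map_single() call covers a region within a single page.
 */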
static void goldfish_tty_do_write(int line, const u8 *buf, unsigned int count)
{
struct goldfish_tty *qtty = &goldfish_ttys[line];
unsigned long address = (unsigned long)(void *)buf;
goldfish_tty_rw(qtty, address, count, 1);
}
static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
{
struct goldfish_tty *qtty = dev_id;
void __iomem *base = qtty->base;
unsigned long address;
unsigned char *buf;
u32 count;
count = gf_ioread32(base + GOLDFISH_TTY_REG_BYTES_READY);
if (count == 0)
return IRQ_NONE;
count = tty_prepare_flip_string(&qtty->port, &buf, count);
address = (unsigned long)(void *)buf;
goldfish_tty_rw(qtty, address, count, 0);
tty_flip_buffer_push(&qtty->port);
return IRQ_HANDLED;
}
static int goldfish_tty_activate(struct tty_port *port, struct tty_struct *tty)
{
struct goldfish_tty *qtty = container_of(port, struct goldfish_tty,
port);
gf_iowrite32(GOLDFISH_TTY_CMD_INT_ENABLE, qtty->base + GOLDFISH_TTY_REG_CMD);
return 0;
}
static void goldfish_tty_shutdown(struct tty_port *port)
{
struct goldfish_tty *qtty = container_of(port, struct goldfish_tty,
port);
gf_iowrite32(GOLDFISH_TTY_CMD_INT_DISABLE, qtty->base + GOLDFISH_TTY_REG_CMD);
}
static int goldfish_tty_open(struct tty_struct *tty, struct file *filp)
{
struct goldfish_tty *qtty = &goldfish_ttys[tty->index];
return tty_port_open(&qtty->port, tty, filp);
}
static void goldfish_tty_close(struct tty_struct *tty, struct file *filp)
{
tty_port_close(tty->port, tty, filp);
}
static void goldfish_tty_hangup(struct tty_struct *tty)
{
tty_port_hangup(tty->port);
}
static ssize_t goldfish_tty_write(struct tty_struct *tty, const u8 *buf,
size_t count)
{
goldfish_tty_do_write(tty->index, buf, count);
return count;
}
static unsigned int goldfish_tty_write_room(struct tty_struct *tty)
{
return 0x10000;
}
static unsigned int goldfish_tty_chars_in_buffer(struct tty_struct *tty)
{
struct goldfish_tty *qtty = &goldfish_ttys[tty->index];
void __iomem *base = qtty->base;
return gf_ioread32(base + GOLDFISH_TTY_REG_BYTES_READY);
}
static void goldfish_tty_console_write(struct console *co, const char *b,
unsigned count)
{
goldfish_tty_do_write(co->index, b, count);
}
static struct tty_driver *goldfish_tty_console_device(struct console *c,
int *index)
{
*index = c->index;
return goldfish_tty_driver;
}
static int goldfish_tty_console_setup(struct console *co, char *options)
{
if ((unsigned)co->index >= goldfish_tty_line_count)
return -ENODEV;
if (!goldfish_ttys[co->index].base)
return -ENODEV;
return 0;
}
static const struct tty_port_operations goldfish_port_ops = {
.activate = goldfish_tty_activate,
.shutdown = goldfish_tty_shutdown
};
static const struct tty_operations goldfish_tty_ops = {
.open = goldfish_tty_open,
.close = goldfish_tty_close,
.hangup = goldfish_tty_hangup,
.write = goldfish_tty_write,
.write_room = goldfish_tty_write_room,
.chars_in_buffer = goldfish_tty_chars_in_buffer,
};
static int goldfish_tty_create_driver(void)
{
int ret;
struct tty_driver *tty;
goldfish_ttys = kcalloc(goldfish_tty_line_count,
sizeof(*goldfish_ttys),
GFP_KERNEL);
if (goldfish_ttys == NULL) {
ret = -ENOMEM;
goto err_alloc_goldfish_ttys_failed;
}
tty = tty_alloc_driver(goldfish_tty_line_count,
TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(tty)) {
ret = PTR_ERR(tty);
goto err_tty_alloc_driver_failed;
}
tty->driver_name = "goldfish";
tty->name = "ttyGF";
tty->type = TTY_DRIVER_TYPE_SERIAL;
tty->subtype = SERIAL_TYPE_NORMAL;
tty->init_termios = tty_std_termios;
tty_set_operations(tty, &goldfish_tty_ops);
ret = tty_register_driver(tty);
if (ret)
goto err_tty_register_driver_failed;
goldfish_tty_driver = tty;
return 0;
err_tty_register_driver_failed:
tty_driver_kref_put(tty);
err_tty_alloc_driver_failed:
kfree(goldfish_ttys);
goldfish_ttys = NULL;
err_alloc_goldfish_ttys_failed:
return ret;
}
static void goldfish_tty_delete_driver(void)
{
tty_unregister_driver(goldfish_tty_driver);
tty_driver_kref_put(goldfish_tty_driver);
goldfish_tty_driver = NULL;
kfree(goldfish_ttys);
goldfish_ttys = NULL;
}
static int goldfish_tty_probe(struct platform_device *pdev)
{
struct goldfish_tty *qtty;
int ret = -ENODEV;
struct resource *r;
struct device *ttydev;
void __iomem *base;
int irq;
unsigned int line;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
pr_err("goldfish_tty: No MEM resource available!\n");
return -ENOMEM;
}
base = ioremap(r->start, 0x1000);
if (!base) {
pr_err("goldfish_tty: Unable to ioremap base!\n");
return -ENOMEM;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto err_unmap;
}
mutex_lock(&goldfish_tty_lock);
if (pdev->id == PLATFORM_DEVID_NONE)
line = goldfish_tty_current_line_count;
else
line = pdev->id;
if (line >= goldfish_tty_line_count) {
pr_err("goldfish_tty: Reached maximum tty number of %d.\n",
		       goldfish_tty_line_count);
ret = -ENOMEM;
goto err_unlock;
}
if (goldfish_tty_current_line_count == 0) {
ret = goldfish_tty_create_driver();
if (ret)
goto err_unlock;
}
goldfish_tty_current_line_count++;
qtty = &goldfish_ttys[line];
spin_lock_init(&qtty->lock);
tty_port_init(&qtty->port);
qtty->port.ops = &goldfish_port_ops;
qtty->base = base;
qtty->irq = irq;
qtty->dev = &pdev->dev;
/*
* Goldfish TTY device used by the Goldfish emulator
* should identify itself with 0, forcing the driver
* to use virtual addresses. Goldfish TTY device
* on Ranchu emulator (qemu2) returns 1 here and
* driver will use physical addresses.
*/
qtty->version = gf_ioread32(base + GOLDFISH_TTY_REG_VERSION);
/*
* Goldfish TTY device on Ranchu emulator (qemu2)
* will use DMA for read/write IO operations.
*/
if (qtty->version > 0) {
/*
* Initialize dma_mask to 32-bits.
*/
if (!pdev->dev.dma_mask)
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto err_dec_line_count;
}
}
gf_iowrite32(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_REG_CMD);
ret = request_irq(irq, goldfish_tty_interrupt, IRQF_SHARED,
"goldfish_tty", qtty);
if (ret) {
pr_err("goldfish_tty: No IRQ available!\n");
goto err_dec_line_count;
}
ttydev = tty_port_register_device(&qtty->port, goldfish_tty_driver,
line, &pdev->dev);
if (IS_ERR(ttydev)) {
ret = PTR_ERR(ttydev);
goto err_tty_register_device_failed;
}
strcpy(qtty->console.name, "ttyGF");
qtty->console.write = goldfish_tty_console_write;
qtty->console.device = goldfish_tty_console_device;
qtty->console.setup = goldfish_tty_console_setup;
qtty->console.flags = CON_PRINTBUFFER;
qtty->console.index = line;
register_console(&qtty->console);
platform_set_drvdata(pdev, qtty);
mutex_unlock(&goldfish_tty_lock);
return 0;
err_tty_register_device_failed:
free_irq(irq, qtty);
err_dec_line_count:
tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
err_unlock:
mutex_unlock(&goldfish_tty_lock);
err_unmap:
iounmap(base);
return ret;
}
static int goldfish_tty_remove(struct platform_device *pdev)
{
struct goldfish_tty *qtty = platform_get_drvdata(pdev);
mutex_lock(&goldfish_tty_lock);
unregister_console(&qtty->console);
tty_unregister_device(goldfish_tty_driver, qtty->console.index);
iounmap(qtty->base);
qtty->base = NULL;
free_irq(qtty->irq, qtty);
tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
mutex_unlock(&goldfish_tty_lock);
return 0;
}
#ifdef CONFIG_GOLDFISH_TTY_EARLY_CONSOLE
static void gf_early_console_putchar(struct uart_port *port, unsigned char ch)
{
gf_iowrite32(ch, port->membase);
}
static void gf_early_write(struct console *con, const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, gf_early_console_putchar);
}
static int __init gf_earlycon_setup(struct earlycon_device *device,
const char *opt)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = gf_early_write;
return 0;
}
OF_EARLYCON_DECLARE(early_gf_tty, "google,goldfish-tty", gf_earlycon_setup);
#endif
static const struct of_device_id goldfish_tty_of_match[] = {
{ .compatible = "google,goldfish-tty", },
{},
};
MODULE_DEVICE_TABLE(of, goldfish_tty_of_match);
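/*
 * Illustrative device-tree node (sketch; the address and interrupt number
 * are made up for the example):
 *
 *	serial@1f004000 {
 *		compatible = "google,goldfish-tty";
 *		reg = <0x1f004000 0x1000>;
 *		interrupts = <12>;
 *	};
 */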
static struct platform_driver goldfish_tty_platform_driver = {
.probe = goldfish_tty_probe,
.remove = goldfish_tty_remove,
.driver = {
.name = "goldfish_tty",
.of_match_table = goldfish_tty_of_match,
}
};
module_platform_driver(goldfish_tty_platform_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/tty/goldfish.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 STMicroelectronics - All Rights Reserved
*
 * The rpmsg tty driver implements serial communication on the RPMsg bus, making it
 * possible for user-space programs to send and receive rpmsg messages as a standard
 * tty protocol.
*
* The remote processor can instantiate a new tty by requesting a "rpmsg-tty" RPMsg service.
* The "rpmsg-tty" service is directly used for data exchange. No flow control is implemented yet.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rpmsg.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#define RPMSG_TTY_NAME "ttyRPMSG"
#define MAX_TTY_RPMSG 32
static DEFINE_IDR(tty_idr); /* tty instance id */
static DEFINE_MUTEX(idr_lock); /* protects tty_idr */
static struct tty_driver *rpmsg_tty_driver;
struct rpmsg_tty_port {
struct tty_port port; /* TTY port data */
int id; /* TTY rpmsg index */
struct rpmsg_device *rpdev; /* rpmsg device */
};
static int rpmsg_tty_cb(struct rpmsg_device *rpdev, void *data, int len, void *priv, u32 src)
{
struct rpmsg_tty_port *cport = dev_get_drvdata(&rpdev->dev);
int copied;
if (!len)
return -EINVAL;
copied = tty_insert_flip_string(&cport->port, data, len);
if (copied != len)
dev_err_ratelimited(&rpdev->dev, "Trunc buffer: available space is %d\n", copied);
tty_flip_buffer_push(&cport->port);
return 0;
}
static int rpmsg_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct rpmsg_tty_port *cport = idr_find(&tty_idr, tty->index);
struct tty_port *port;
tty->driver_data = cport;
port = tty_port_get(&cport->port);
return tty_port_install(port, driver, tty);
}
static void rpmsg_tty_cleanup(struct tty_struct *tty)
{
tty_port_put(tty->port);
}
static int rpmsg_tty_open(struct tty_struct *tty, struct file *filp)
{
return tty_port_open(tty->port, tty, filp);
}
static void rpmsg_tty_close(struct tty_struct *tty, struct file *filp)
{
return tty_port_close(tty->port, tty, filp);
}
static ssize_t rpmsg_tty_write(struct tty_struct *tty, const u8 *buf,
size_t len)
{
struct rpmsg_tty_port *cport = tty->driver_data;
struct rpmsg_device *rpdev;
int msg_max_size, msg_size;
int ret;
rpdev = cport->rpdev;
msg_max_size = rpmsg_get_mtu(rpdev->ept);
if (msg_max_size < 0)
return msg_max_size;
msg_size = min_t(unsigned int, len, msg_max_size);
/*
* Use rpmsg_trysend instead of rpmsg_send to send the message so the caller is not
* hung until a rpmsg buffer is available. In such case rpmsg_trysend returns -ENOMEM.
*/
ret = rpmsg_trysend(rpdev->ept, (void *)buf, msg_size);
if (ret) {
dev_dbg_ratelimited(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
return ret;
}
return msg_size;
}
static unsigned int rpmsg_tty_write_room(struct tty_struct *tty)
{
struct rpmsg_tty_port *cport = tty->driver_data;
int size;
size = rpmsg_get_mtu(cport->rpdev->ept);
if (size < 0)
return 0;
return size;
}
static void rpmsg_tty_hangup(struct tty_struct *tty)
{
tty_port_hangup(tty->port);
}
static const struct tty_operations rpmsg_tty_ops = {
.install = rpmsg_tty_install,
.open = rpmsg_tty_open,
.close = rpmsg_tty_close,
.write = rpmsg_tty_write,
.write_room = rpmsg_tty_write_room,
.hangup = rpmsg_tty_hangup,
.cleanup = rpmsg_tty_cleanup,
};
static struct rpmsg_tty_port *rpmsg_tty_alloc_cport(void)
{
struct rpmsg_tty_port *cport;
int ret;
cport = kzalloc(sizeof(*cport), GFP_KERNEL);
if (!cport)
return ERR_PTR(-ENOMEM);
mutex_lock(&idr_lock);
ret = idr_alloc(&tty_idr, cport, 0, MAX_TTY_RPMSG, GFP_KERNEL);
mutex_unlock(&idr_lock);
if (ret < 0) {
kfree(cport);
return ERR_PTR(ret);
}
cport->id = ret;
return cport;
}
static void rpmsg_tty_destruct_port(struct tty_port *port)
{
struct rpmsg_tty_port *cport = container_of(port, struct rpmsg_tty_port, port);
mutex_lock(&idr_lock);
idr_remove(&tty_idr, cport->id);
mutex_unlock(&idr_lock);
kfree(cport);
}
static const struct tty_port_operations rpmsg_tty_port_ops = {
.destruct = rpmsg_tty_destruct_port,
};
static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
{
struct rpmsg_tty_port *cport;
struct device *dev = &rpdev->dev;
struct device *tty_dev;
int ret;
cport = rpmsg_tty_alloc_cport();
if (IS_ERR(cport))
return dev_err_probe(dev, PTR_ERR(cport), "Failed to alloc tty port\n");
tty_port_init(&cport->port);
cport->port.ops = &rpmsg_tty_port_ops;
tty_dev = tty_port_register_device(&cport->port, rpmsg_tty_driver,
cport->id, dev);
if (IS_ERR(tty_dev)) {
ret = dev_err_probe(dev, PTR_ERR(tty_dev), "Failed to register tty port\n");
tty_port_put(&cport->port);
return ret;
}
cport->rpdev = rpdev;
dev_set_drvdata(dev, cport);
dev_dbg(dev, "New channel: 0x%x -> 0x%x: " RPMSG_TTY_NAME "%d\n",
rpdev->src, rpdev->dst, cport->id);
return 0;
}
static void rpmsg_tty_remove(struct rpmsg_device *rpdev)
{
struct rpmsg_tty_port *cport = dev_get_drvdata(&rpdev->dev);
dev_dbg(&rpdev->dev, "Removing rpmsg tty device %d\n", cport->id);
/* User hang up to release the tty */
tty_port_tty_hangup(&cport->port, false);
tty_unregister_device(rpmsg_tty_driver, cport->id);
tty_port_put(&cport->port);
}
static struct rpmsg_device_id rpmsg_driver_tty_id_table[] = {
{ .name = "rpmsg-tty" },
{ },
};
MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_tty_id_table);
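/*
 * Usage sketch (assumed): when the remote firmware announces a channel
 * named "rpmsg-tty", the probe below creates /dev/ttyRPMSG<n>, which can
 * then be used like any raw tty, e.g.:
 *
 *	stty -F /dev/ttyRPMSG0 raw
 *	cat /dev/ttyRPMSG0 &
 *	echo hello > /dev/ttyRPMSG0
 */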
static struct rpmsg_driver rpmsg_tty_rpmsg_drv = {
.drv.name = KBUILD_MODNAME,
.id_table = rpmsg_driver_tty_id_table,
.probe = rpmsg_tty_probe,
.callback = rpmsg_tty_cb,
.remove = rpmsg_tty_remove,
};
static int __init rpmsg_tty_init(void)
{
int ret;
rpmsg_tty_driver = tty_alloc_driver(MAX_TTY_RPMSG, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(rpmsg_tty_driver))
return PTR_ERR(rpmsg_tty_driver);
rpmsg_tty_driver->driver_name = "rpmsg_tty";
rpmsg_tty_driver->name = RPMSG_TTY_NAME;
rpmsg_tty_driver->major = 0;
rpmsg_tty_driver->type = TTY_DRIVER_TYPE_CONSOLE;
/* Disable unused mode by default */
rpmsg_tty_driver->init_termios = tty_std_termios;
rpmsg_tty_driver->init_termios.c_lflag &= ~(ECHO | ICANON);
rpmsg_tty_driver->init_termios.c_oflag &= ~(OPOST | ONLCR);
tty_set_operations(rpmsg_tty_driver, &rpmsg_tty_ops);
ret = tty_register_driver(rpmsg_tty_driver);
if (ret < 0) {
pr_err("Couldn't install driver: %d\n", ret);
goto error_put;
}
ret = register_rpmsg_driver(&rpmsg_tty_rpmsg_drv);
if (ret < 0) {
pr_err("Couldn't register driver: %d\n", ret);
goto error_unregister;
}
return 0;
error_unregister:
tty_unregister_driver(rpmsg_tty_driver);
error_put:
tty_driver_kref_put(rpmsg_tty_driver);
return ret;
}
static void __exit rpmsg_tty_exit(void)
{
unregister_rpmsg_driver(&rpmsg_tty_rpmsg_drv);
tty_unregister_driver(rpmsg_tty_driver);
tty_driver_kref_put(rpmsg_tty_driver);
idr_destroy(&tty_idr);
}
module_init(rpmsg_tty_init);
module_exit(rpmsg_tty_exit);
MODULE_AUTHOR("Arnaud Pouliquen <[email protected]>");
MODULE_DESCRIPTION("remote processor messaging tty driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/tty/rpmsg_tty.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/module.h>
/*
* n_null.c - Null line discipline used in the failure path
*
* Copyright (C) Intel 2017
*/
static ssize_t n_null_read(struct tty_struct *tty, struct file *file, u8 *buf,
size_t nr, void **cookie, unsigned long offset)
{
return -EOPNOTSUPP;
}
static ssize_t n_null_write(struct tty_struct *tty, struct file *file,
const u8 *buf, size_t nr)
{
return -EOPNOTSUPP;
}
static struct tty_ldisc_ops null_ldisc = {
.owner = THIS_MODULE,
.num = N_NULL,
.name = "n_null",
.read = n_null_read,
.write = n_null_write,
};
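/*
 * Usage sketch (assumed): a line discipline is normally selected with the
 * TIOCSETD ioctl, e.g.:
 *
 *	int ldisc = N_NULL;
 *	ioctl(fd, TIOCSETD, &ldisc);
 *
 * In practice N_NULL mostly serves as the kernel-internal fallback noted
 * above, installed when switching to a requested discipline fails.
 */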
static int __init n_null_init(void)
{
BUG_ON(tty_register_ldisc(&null_ldisc));
return 0;
}
static void __exit n_null_exit(void)
{
tty_unregister_ldisc(&null_ldisc);
}
module_init(n_null_init);
module_exit(n_null_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alan Cox");
MODULE_ALIAS_LDISC(N_NULL);
MODULE_DESCRIPTION("Null ldisc driver");
| linux-master | drivers/tty/n_null.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Added support for a Unix98-style ptmx device.
* -- C. Scott Ananian <[email protected]>, 14-Jan-1998
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/fcntl.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/major.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/devpts_fs.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/compat.h>
#include "tty.h"
#undef TTY_DEBUG_HANGUP
#ifdef TTY_DEBUG_HANGUP
# define tty_debug_hangup(tty, f, args...) tty_debug(tty, f, ##args)
#else
# define tty_debug_hangup(tty, f, args...) do {} while (0)
#endif
#ifdef CONFIG_UNIX98_PTYS
static struct tty_driver *ptm_driver;
static struct tty_driver *pts_driver;
static DEFINE_MUTEX(devpts_mutex);
#endif
static void pty_close(struct tty_struct *tty, struct file *filp)
{
if (tty->driver->subtype == PTY_TYPE_MASTER)
WARN_ON(tty->count > 1);
else {
if (tty_io_error(tty))
return;
if (tty->count > 2)
return;
}
set_bit(TTY_IO_ERROR, &tty->flags);
wake_up_interruptible(&tty->read_wait);
wake_up_interruptible(&tty->write_wait);
spin_lock_irq(&tty->ctrl.lock);
tty->ctrl.packet = false;
spin_unlock_irq(&tty->ctrl.lock);
/* Review - krefs on tty_link ?? */
if (!tty->link)
return;
set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
wake_up_interruptible(&tty->link->read_wait);
wake_up_interruptible(&tty->link->write_wait);
if (tty->driver->subtype == PTY_TYPE_MASTER) {
set_bit(TTY_OTHER_CLOSED, &tty->flags);
#ifdef CONFIG_UNIX98_PTYS
if (tty->driver == ptm_driver) {
mutex_lock(&devpts_mutex);
if (tty->link->driver_data)
devpts_pty_kill(tty->link->driver_data);
mutex_unlock(&devpts_mutex);
}
#endif
tty_vhangup(tty->link);
}
}
/*
* The unthrottle routine is called by the line discipline to signal
* that it can receive more characters. For PTY's, the TTY_THROTTLED
* flag is always set, to force the line discipline to always call the
* unthrottle routine when there are fewer than TTY_THRESHOLD_UNTHROTTLE
* characters in the queue. This is necessary since each time this
* happens, we need to wake up any sleeping processes that could be
* (1) trying to send data to the pty, or (2) waiting in wait_until_sent()
* for the pty buffer to be drained.
*/
static void pty_unthrottle(struct tty_struct *tty)
{
tty_wakeup(tty->link);
set_bit(TTY_THROTTLED, &tty->flags);
}
/**
* pty_write - write to a pty
* @tty: the tty we write from
* @buf: kernel buffer of data
* @c: bytes to write
*
* Our "hardware" write method. Data is coming from the ldisc which
* may be in a non sleeping state. We simply throw this at the other
* end of the link as if we were an IRQ handler receiving stuff for
* the other side of the pty/tty pair.
*/
static ssize_t pty_write(struct tty_struct *tty, const u8 *buf, size_t c)
{
struct tty_struct *to = tty->link;
if (tty->flow.stopped || !c)
return 0;
return tty_insert_flip_string_and_push_buffer(to->port, buf, c);
}
/**
* pty_write_room - write space
* @tty: tty we are writing from
*
* Report how many bytes the ldisc can send into the queue for
* the other device.
*/
static unsigned int pty_write_room(struct tty_struct *tty)
{
if (tty->flow.stopped)
return 0;
return tty_buffer_space_avail(tty->link->port);
}
/* Set the lock flag on a pty */
static int pty_set_lock(struct tty_struct *tty, int __user *arg)
{
int val;
if (get_user(val, arg))
return -EFAULT;
if (val)
set_bit(TTY_PTY_LOCK, &tty->flags);
else
clear_bit(TTY_PTY_LOCK, &tty->flags);
return 0;
}
static int pty_get_lock(struct tty_struct *tty, int __user *arg)
{
int locked = test_bit(TTY_PTY_LOCK, &tty->flags);
return put_user(locked, arg);
}
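/*
 * Usage sketch (assumed, standard Unix98 flow): these handlers back the
 * TIOCSPTLCK/TIOCGPTLCK ioctls. Userspace usually clears the lock via
 * unlockpt(), which is roughly:
 *
 *	int zero = 0;
 *	ioctl(master_fd, TIOCSPTLCK, &zero);
 *
 * done before the slave side of the pair can be opened.
 */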
/* Set the packet mode on a pty */
static int pty_set_pktmode(struct tty_struct *tty, int __user *arg)
{
int pktmode;
if (get_user(pktmode, arg))
return -EFAULT;
spin_lock_irq(&tty->ctrl.lock);
if (pktmode) {
if (!tty->ctrl.packet) {
tty->link->ctrl.pktstatus = 0;
smp_mb();
tty->ctrl.packet = true;
}
} else
tty->ctrl.packet = false;
spin_unlock_irq(&tty->ctrl.lock);
return 0;
}
/* Get the packet mode of a pty */
static int pty_get_pktmode(struct tty_struct *tty, int __user *arg)
{
int pktmode = tty->ctrl.packet;
return put_user(pktmode, arg);
}
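/*
 * Illustrative sketch, not part of the driver: userspace typically
 * turns packet mode on via the TIOCPKT ioctl on the master side.
 * Afterwards every read() from the master is prefixed with a single
 * status byte (TIOCPKT_DATA or an OR of the TIOCPKT_* flags).
 *
 *	int on = 1;
 *	if (ioctl(ptm_fd, TIOCPKT, &on) < 0)
 *		perror("TIOCPKT");
 */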
/* Send a signal to the slave */
static int pty_signal(struct tty_struct *tty, int sig)
{
struct pid *pgrp;
if (sig != SIGINT && sig != SIGQUIT && sig != SIGTSTP)
return -EINVAL;
if (tty->link) {
pgrp = tty_get_pgrp(tty->link);
if (pgrp)
kill_pgrp(pgrp, sig, 1);
put_pid(pgrp);
}
return 0;
}
static void pty_flush_buffer(struct tty_struct *tty)
{
struct tty_struct *to = tty->link;
if (!to)
return;
tty_buffer_flush(to, NULL);
if (to->ctrl.packet) {
spin_lock_irq(&tty->ctrl.lock);
tty->ctrl.pktstatus |= TIOCPKT_FLUSHWRITE;
wake_up_interruptible(&to->read_wait);
spin_unlock_irq(&tty->ctrl.lock);
}
}
static int pty_open(struct tty_struct *tty, struct file *filp)
{
if (!tty || !tty->link)
return -ENODEV;
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
goto out;
if (test_bit(TTY_PTY_LOCK, &tty->link->flags))
goto out;
if (tty->driver->subtype == PTY_TYPE_SLAVE && tty->link->count != 1)
goto out;
clear_bit(TTY_IO_ERROR, &tty->flags);
clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
set_bit(TTY_THROTTLED, &tty->flags);
return 0;
out:
set_bit(TTY_IO_ERROR, &tty->flags);
return -EIO;
}
static void pty_set_termios(struct tty_struct *tty,
const struct ktermios *old_termios)
{
/* See if packet mode change of state. */
if (tty->link && tty->link->ctrl.packet) {
int extproc = (old_termios->c_lflag & EXTPROC) | L_EXTPROC(tty);
int old_flow = ((old_termios->c_iflag & IXON) &&
(old_termios->c_cc[VSTOP] == '\023') &&
(old_termios->c_cc[VSTART] == '\021'));
int new_flow = (I_IXON(tty) &&
STOP_CHAR(tty) == '\023' &&
START_CHAR(tty) == '\021');
if ((old_flow != new_flow) || extproc) {
spin_lock_irq(&tty->ctrl.lock);
if (old_flow != new_flow) {
tty->ctrl.pktstatus &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
if (new_flow)
tty->ctrl.pktstatus |= TIOCPKT_DOSTOP;
else
tty->ctrl.pktstatus |= TIOCPKT_NOSTOP;
}
if (extproc)
tty->ctrl.pktstatus |= TIOCPKT_IOCTL;
spin_unlock_irq(&tty->ctrl.lock);
wake_up_interruptible(&tty->link->read_wait);
}
}
tty->termios.c_cflag &= ~(CSIZE | PARENB);
tty->termios.c_cflag |= (CS8 | CREAD);
}
/**
* pty_resize - resize event
* @tty: tty being resized
* @ws: window size being set.
*
* Update the termios variables and send the necessary signals to
* perform a terminal resize correctly
*/
static int pty_resize(struct tty_struct *tty, struct winsize *ws)
{
struct pid *pgrp, *rpgrp;
struct tty_struct *pty = tty->link;
/* For a PTY we need to lock the tty side */
mutex_lock(&tty->winsize_mutex);
if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
goto done;
/* Signal the foreground process group of both ptys */
pgrp = tty_get_pgrp(tty);
rpgrp = tty_get_pgrp(pty);
if (pgrp)
kill_pgrp(pgrp, SIGWINCH, 1);
if (rpgrp != pgrp && rpgrp)
kill_pgrp(rpgrp, SIGWINCH, 1);
put_pid(pgrp);
put_pid(rpgrp);
tty->winsize = *ws;
pty->winsize = *ws; /* Never used so will go away soon */
done:
mutex_unlock(&tty->winsize_mutex);
return 0;
}
/**
* pty_start - start() handler
* pty_stop - stop() handler
* @tty: tty being flow-controlled
*
* Propagates the TIOCPKT status to the master pty.
*
* NB: only the master pty can be in packet mode so only the slave
* needs start()/stop() handlers
*/
static void pty_start(struct tty_struct *tty)
{
unsigned long flags;
if (tty->link && tty->link->ctrl.packet) {
spin_lock_irqsave(&tty->ctrl.lock, flags);
tty->ctrl.pktstatus &= ~TIOCPKT_STOP;
tty->ctrl.pktstatus |= TIOCPKT_START;
spin_unlock_irqrestore(&tty->ctrl.lock, flags);
wake_up_interruptible_poll(&tty->link->read_wait, EPOLLIN);
}
}
static void pty_stop(struct tty_struct *tty)
{
unsigned long flags;
if (tty->link && tty->link->ctrl.packet) {
spin_lock_irqsave(&tty->ctrl.lock, flags);
tty->ctrl.pktstatus &= ~TIOCPKT_START;
tty->ctrl.pktstatus |= TIOCPKT_STOP;
spin_unlock_irqrestore(&tty->ctrl.lock, flags);
wake_up_interruptible_poll(&tty->link->read_wait, EPOLLIN);
}
}
/**
* pty_common_install - set up the pty pair
* @driver: the pty driver
* @tty: the tty being instantiated
* @legacy: true if this is BSD style
*
* Perform the initial set up for the tty/pty pair. Called from the
* tty layer when the port is first opened.
*
* Locking: the caller must hold the tty_mutex
*/
static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
bool legacy)
{
struct tty_struct *o_tty;
struct tty_port *ports[2];
int idx = tty->index;
int retval = -ENOMEM;
/* Opening the slave first has always returned -EIO */
if (driver->subtype != PTY_TYPE_MASTER)
return -EIO;
ports[0] = kmalloc(sizeof **ports, GFP_KERNEL);
ports[1] = kmalloc(sizeof **ports, GFP_KERNEL);
if (!ports[0] || !ports[1])
goto err;
if (!try_module_get(driver->other->owner)) {
/* This cannot in fact currently happen */
goto err;
}
o_tty = alloc_tty_struct(driver->other, idx);
if (!o_tty)
goto err_put_module;
tty_set_lock_subclass(o_tty);
lockdep_set_subclass(&o_tty->termios_rwsem, TTY_LOCK_SLAVE);
if (legacy) {
/* We always use new tty termios data so we can do this
the easy way. */
tty_init_termios(tty);
tty_init_termios(o_tty);
driver->other->ttys[idx] = o_tty;
driver->ttys[idx] = tty;
} else {
memset(&tty->termios_locked, 0, sizeof(tty->termios_locked));
tty->termios = driver->init_termios;
memset(&o_tty->termios_locked, 0, sizeof(tty->termios_locked));
o_tty->termios = driver->other->init_termios;
}
/*
* Everything allocated ... set up the o_tty structure.
*/
tty_driver_kref_get(driver->other);
/* Establish the links in both directions */
tty->link = o_tty;
o_tty->link = tty;
tty_port_init(ports[0]);
tty_port_init(ports[1]);
tty_buffer_set_limit(ports[0], 8192);
tty_buffer_set_limit(ports[1], 8192);
o_tty->port = ports[0];
tty->port = ports[1];
o_tty->port->itty = o_tty;
tty_buffer_set_lock_subclass(o_tty->port);
tty_driver_kref_get(driver);
tty->count++;
o_tty->count++;
return 0;
err_put_module:
module_put(driver->other->owner);
err:
kfree(ports[0]);
kfree(ports[1]);
return retval;
}
static void pty_cleanup(struct tty_struct *tty)
{
tty_port_put(tty->port);
}
/* Traditional BSD devices */
#ifdef CONFIG_LEGACY_PTYS
static int pty_install(struct tty_driver *driver, struct tty_struct *tty)
{
return pty_common_install(driver, tty, true);
}
static void pty_remove(struct tty_driver *driver, struct tty_struct *tty)
{
struct tty_struct *pair = tty->link;
driver->ttys[tty->index] = NULL;
if (pair)
pair->driver->ttys[pair->index] = NULL;
}
static int pty_bsd_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case TIOCSPTLCK: /* Set PT Lock (disallow slave open) */
return pty_set_lock(tty, (int __user *) arg);
case TIOCGPTLCK: /* Get PT Lock status */
return pty_get_lock(tty, (int __user *)arg);
case TIOCPKT: /* Set PT packet mode */
return pty_set_pktmode(tty, (int __user *)arg);
case TIOCGPKT: /* Get PT packet mode */
return pty_get_pktmode(tty, (int __user *)arg);
case TIOCSIG: /* Send signal to other side of pty */
return pty_signal(tty, (int) arg);
case TIOCGPTN: /* TTY returns ENOTTY, but glibc expects EINVAL here */
return -EINVAL;
}
return -ENOIOCTLCMD;
}
#ifdef CONFIG_COMPAT
static long pty_bsd_compat_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
/*
* PTY ioctls don't require any special translation between 32-bit and
* 64-bit userspace, they are already compatible.
*/
return pty_bsd_ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define pty_bsd_compat_ioctl NULL
#endif
static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
/*
* not really modular, but the easiest way to keep compat with existing
* bootargs behaviour is to continue using module_param here.
*/
module_param(legacy_count, int, 0);
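/*
 * Example (illustrative): with the driver built in, the parameter is
 * set from the kernel command line using the usual builtin-module
 * syntax, e.g. pty.legacy_count=16 to limit the number of legacy BSD
 * pty pairs.
 */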
/*
* The master side of a pty can do TIOCSPTLCK and thus
* has pty_bsd_ioctl.
*/
static const struct tty_operations master_pty_ops_bsd = {
.install = pty_install,
.open = pty_open,
.close = pty_close,
.write = pty_write,
.write_room = pty_write_room,
.flush_buffer = pty_flush_buffer,
.unthrottle = pty_unthrottle,
.ioctl = pty_bsd_ioctl,
.compat_ioctl = pty_bsd_compat_ioctl,
.cleanup = pty_cleanup,
.resize = pty_resize,
.remove = pty_remove
};
static const struct tty_operations slave_pty_ops_bsd = {
.install = pty_install,
.open = pty_open,
.close = pty_close,
.write = pty_write,
.write_room = pty_write_room,
.flush_buffer = pty_flush_buffer,
.unthrottle = pty_unthrottle,
.set_termios = pty_set_termios,
.cleanup = pty_cleanup,
.resize = pty_resize,
.start = pty_start,
.stop = pty_stop,
.remove = pty_remove
};
static void __init legacy_pty_init(void)
{
struct tty_driver *pty_driver, *pty_slave_driver;
if (legacy_count <= 0)
return;
pty_driver = tty_alloc_driver(legacy_count,
TTY_DRIVER_RESET_TERMIOS |
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_ALLOC);
if (IS_ERR(pty_driver))
panic("Couldn't allocate pty driver");
pty_slave_driver = tty_alloc_driver(legacy_count,
TTY_DRIVER_RESET_TERMIOS |
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_ALLOC);
if (IS_ERR(pty_slave_driver))
panic("Couldn't allocate pty slave driver");
pty_driver->driver_name = "pty_master";
pty_driver->name = "pty";
pty_driver->major = PTY_MASTER_MAJOR;
pty_driver->minor_start = 0;
pty_driver->type = TTY_DRIVER_TYPE_PTY;
pty_driver->subtype = PTY_TYPE_MASTER;
pty_driver->init_termios = tty_std_termios;
pty_driver->init_termios.c_iflag = 0;
pty_driver->init_termios.c_oflag = 0;
pty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
pty_driver->init_termios.c_lflag = 0;
pty_driver->init_termios.c_ispeed = 38400;
pty_driver->init_termios.c_ospeed = 38400;
pty_driver->other = pty_slave_driver;
tty_set_operations(pty_driver, &master_pty_ops_bsd);
pty_slave_driver->driver_name = "pty_slave";
pty_slave_driver->name = "ttyp";
pty_slave_driver->major = PTY_SLAVE_MAJOR;
pty_slave_driver->minor_start = 0;
pty_slave_driver->type = TTY_DRIVER_TYPE_PTY;
pty_slave_driver->subtype = PTY_TYPE_SLAVE;
pty_slave_driver->init_termios = tty_std_termios;
pty_slave_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
pty_slave_driver->init_termios.c_ispeed = 38400;
pty_slave_driver->init_termios.c_ospeed = 38400;
pty_slave_driver->other = pty_driver;
tty_set_operations(pty_slave_driver, &slave_pty_ops_bsd);
if (tty_register_driver(pty_driver))
panic("Couldn't register pty driver");
if (tty_register_driver(pty_slave_driver))
panic("Couldn't register pty slave driver");
}
#else
static inline void legacy_pty_init(void) { }
#endif
/* Unix98 devices */
#ifdef CONFIG_UNIX98_PTYS
static struct cdev ptmx_cdev;
/**
* ptm_open_peer - open the peer of a pty
* @master: the open struct file of the ptmx device node
* @tty: the master of the pty being opened
* @flags: the flags for open
*
* Provide a race free way for userspace to open the slave end of a pty
* (where they have the master fd and cannot access or trust the mount
* namespace /dev/pts was mounted inside).
*/
int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
{
int fd;
struct file *filp;
int retval = -EINVAL;
struct path path;
if (tty->driver != ptm_driver)
return -EIO;
fd = get_unused_fd_flags(flags);
if (fd < 0) {
retval = fd;
goto err;
}
/* Compute the slave's path */
path.mnt = devpts_mntget(master, tty->driver_data);
if (IS_ERR(path.mnt)) {
retval = PTR_ERR(path.mnt);
goto err_put;
}
path.dentry = tty->link->driver_data;
filp = dentry_open(&path, flags, current_cred());
mntput(path.mnt);
if (IS_ERR(filp)) {
retval = PTR_ERR(filp);
goto err_put;
}
fd_install(fd, filp);
return fd;
err_put:
put_unused_fd(fd);
err:
return retval;
}
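/*
 * Illustrative userspace sketch: ptm_open_peer() backs the
 * TIOCGPTPEER ioctl, so the slave can be opened directly from the
 * master fd without resolving a /dev/pts path:
 *
 *	int pts_fd = ioctl(ptm_fd, TIOCGPTPEER, O_RDWR | O_NOCTTY);
 *	if (pts_fd < 0)
 *		perror("TIOCGPTPEER");
 */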
static int pty_unix98_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case TIOCSPTLCK: /* Set PT Lock (disallow slave open) */
return pty_set_lock(tty, (int __user *)arg);
case TIOCGPTLCK: /* Get PT Lock status */
return pty_get_lock(tty, (int __user *)arg);
case TIOCPKT: /* Set PT packet mode */
return pty_set_pktmode(tty, (int __user *)arg);
case TIOCGPKT: /* Get PT packet mode */
return pty_get_pktmode(tty, (int __user *)arg);
case TIOCGPTN: /* Get PT Number */
return put_user(tty->index, (unsigned int __user *)arg);
case TIOCSIG: /* Send signal to other side of pty */
return pty_signal(tty, (int) arg);
}
return -ENOIOCTLCMD;
}
#ifdef CONFIG_COMPAT
static long pty_unix98_compat_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
/*
* PTY ioctls don't require any special translation between 32-bit and
* 64-bit userspace, they are already compatible.
*/
return pty_unix98_ioctl(tty, cmd,
cmd == TIOCSIG ? arg : (unsigned long)compat_ptr(arg));
}
#else
#define pty_unix98_compat_ioctl NULL
#endif
/**
* ptm_unix98_lookup - find a pty master
* @driver: ptm driver
* @file: unused
* @idx: tty index
*
* Look up a pty master device. Called under the tty_mutex for now.
* This provides our locking.
*/
static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
struct file *file, int idx)
{
/* Master must be open via /dev/ptmx */
return ERR_PTR(-EIO);
}
/**
* pts_unix98_lookup - find a pty slave
* @driver: pts driver
* @file: file pointer to tty
* @idx: tty index
*
* Look up a pty slave device. Called under the tty_mutex for now.
* This provides our locking for the tty pointer.
*/
static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
struct file *file, int idx)
{
struct tty_struct *tty;
mutex_lock(&devpts_mutex);
tty = devpts_get_priv(file->f_path.dentry);
mutex_unlock(&devpts_mutex);
/* Master must be open before slave */
if (!tty)
return ERR_PTR(-EIO);
return tty;
}
static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
{
return pty_common_install(driver, tty, false);
}
/* this is called once with whichever end is closed last */
static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
{
struct pts_fs_info *fsi;
if (tty->driver->subtype == PTY_TYPE_MASTER)
fsi = tty->driver_data;
else
fsi = tty->link->driver_data;
if (fsi) {
devpts_kill_index(fsi, tty->index);
devpts_release(fsi);
}
}
static void pty_show_fdinfo(struct tty_struct *tty, struct seq_file *m)
{
seq_printf(m, "tty-index:\t%d\n", tty->index);
}
static const struct tty_operations ptm_unix98_ops = {
.lookup = ptm_unix98_lookup,
.install = pty_unix98_install,
.remove = pty_unix98_remove,
.open = pty_open,
.close = pty_close,
.write = pty_write,
.write_room = pty_write_room,
.flush_buffer = pty_flush_buffer,
.unthrottle = pty_unthrottle,
.ioctl = pty_unix98_ioctl,
.compat_ioctl = pty_unix98_compat_ioctl,
.resize = pty_resize,
.cleanup = pty_cleanup,
.show_fdinfo = pty_show_fdinfo,
};
static const struct tty_operations pty_unix98_ops = {
.lookup = pts_unix98_lookup,
.install = pty_unix98_install,
.remove = pty_unix98_remove,
.open = pty_open,
.close = pty_close,
.write = pty_write,
.write_room = pty_write_room,
.flush_buffer = pty_flush_buffer,
.unthrottle = pty_unthrottle,
.set_termios = pty_set_termios,
.start = pty_start,
.stop = pty_stop,
.cleanup = pty_cleanup,
};
/**
* ptmx_open - open a unix 98 pty master
* @inode: inode of device file
* @filp: file pointer to tty
*
* Allocate a unix98 pty master device from the ptmx driver.
*
* Locking: tty_mutex protects the init_dev work. tty->count should
* protect the rest.
* allocated_ptys_lock handles the list of free pty numbers
*/
static int ptmx_open(struct inode *inode, struct file *filp)
{
struct pts_fs_info *fsi;
struct tty_struct *tty;
struct dentry *dentry;
int retval;
int index;
nonseekable_open(inode, filp);
/* We refuse fsnotify events on ptmx, since it's a shared resource */
filp->f_mode |= FMODE_NONOTIFY;
retval = tty_alloc_file(filp);
if (retval)
return retval;
fsi = devpts_acquire(filp);
if (IS_ERR(fsi)) {
retval = PTR_ERR(fsi);
goto out_free_file;
}
/* find a device that is not in use. */
mutex_lock(&devpts_mutex);
index = devpts_new_index(fsi);
mutex_unlock(&devpts_mutex);
retval = index;
if (index < 0)
goto out_put_fsi;
mutex_lock(&tty_mutex);
tty = tty_init_dev(ptm_driver, index);
/* The tty returned here is locked so we can safely
drop the mutex */
mutex_unlock(&tty_mutex);
retval = PTR_ERR(tty);
if (IS_ERR(tty))
goto out;
/*
* From here on out, the tty is "live", and the index and
* fsi will be killed/put by the tty_release()
*/
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
tty->driver_data = fsi;
tty_add_file(tty, filp);
dentry = devpts_pty_new(fsi, index, tty->link);
if (IS_ERR(dentry)) {
retval = PTR_ERR(dentry);
goto err_release;
}
tty->link->driver_data = dentry;
retval = ptm_driver->ops->open(tty, filp);
if (retval)
goto err_release;
tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);
tty_unlock(tty);
return 0;
err_release:
tty_unlock(tty);
/* This will also put-ref the fsi */
tty_release(inode, filp);
return retval;
out:
devpts_kill_index(fsi, index);
out_put_fsi:
devpts_release(fsi);
out_free_file:
tty_free_file(filp);
return retval;
}
static struct file_operations ptmx_fops __ro_after_init;
static void __init unix98_pty_init(void)
{
ptm_driver = tty_alloc_driver(NR_UNIX98_PTY_MAX,
TTY_DRIVER_RESET_TERMIOS |
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV |
TTY_DRIVER_DEVPTS_MEM |
TTY_DRIVER_DYNAMIC_ALLOC);
if (IS_ERR(ptm_driver))
panic("Couldn't allocate Unix98 ptm driver");
pts_driver = tty_alloc_driver(NR_UNIX98_PTY_MAX,
TTY_DRIVER_RESET_TERMIOS |
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV |
TTY_DRIVER_DEVPTS_MEM |
TTY_DRIVER_DYNAMIC_ALLOC);
if (IS_ERR(pts_driver))
panic("Couldn't allocate Unix98 pts driver");
ptm_driver->driver_name = "pty_master";
ptm_driver->name = "ptm";
ptm_driver->major = UNIX98_PTY_MASTER_MAJOR;
ptm_driver->minor_start = 0;
ptm_driver->type = TTY_DRIVER_TYPE_PTY;
ptm_driver->subtype = PTY_TYPE_MASTER;
ptm_driver->init_termios = tty_std_termios;
ptm_driver->init_termios.c_iflag = 0;
ptm_driver->init_termios.c_oflag = 0;
ptm_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
ptm_driver->init_termios.c_lflag = 0;
ptm_driver->init_termios.c_ispeed = 38400;
ptm_driver->init_termios.c_ospeed = 38400;
ptm_driver->other = pts_driver;
tty_set_operations(ptm_driver, &ptm_unix98_ops);
pts_driver->driver_name = "pty_slave";
pts_driver->name = "pts";
pts_driver->major = UNIX98_PTY_SLAVE_MAJOR;
pts_driver->minor_start = 0;
pts_driver->type = TTY_DRIVER_TYPE_PTY;
pts_driver->subtype = PTY_TYPE_SLAVE;
pts_driver->init_termios = tty_std_termios;
pts_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
pts_driver->init_termios.c_ispeed = 38400;
pts_driver->init_termios.c_ospeed = 38400;
pts_driver->other = ptm_driver;
tty_set_operations(pts_driver, &pty_unix98_ops);
if (tty_register_driver(ptm_driver))
panic("Couldn't register Unix98 ptm driver");
if (tty_register_driver(pts_driver))
panic("Couldn't register Unix98 pts driver");
/* Now create the /dev/ptmx special device */
tty_default_fops(&ptmx_fops);
ptmx_fops.open = ptmx_open;
cdev_init(&ptmx_cdev, &ptmx_fops);
if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
panic("Couldn't register /dev/ptmx driver");
device_create(&tty_class, NULL, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx");
}
#else
static inline void unix98_pty_init(void) { }
#endif
static int __init pty_init(void)
{
legacy_pty_init();
unix98_pty_init();
return 0;
}
device_initcall(pty_init);
| linux-master | drivers/tty/pty.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Axis Communications AB
*
* Based on ttyprintk.c:
* Copyright (C) 2010 Samo Pogacnik
*/
#include <linux/console.h>
#include <linux/module.h>
#include <linux/tty.h>
static const struct tty_port_operations ttynull_port_ops;
static struct tty_driver *ttynull_driver;
static struct tty_port ttynull_port;
static int ttynull_open(struct tty_struct *tty, struct file *filp)
{
return tty_port_open(&ttynull_port, tty, filp);
}
static void ttynull_close(struct tty_struct *tty, struct file *filp)
{
tty_port_close(&ttynull_port, tty, filp);
}
static void ttynull_hangup(struct tty_struct *tty)
{
tty_port_hangup(&ttynull_port);
}
static ssize_t ttynull_write(struct tty_struct *tty, const u8 *buf,
size_t count)
{
return count;
}
static unsigned int ttynull_write_room(struct tty_struct *tty)
{
return 65536;
}
static const struct tty_operations ttynull_ops = {
.open = ttynull_open,
.close = ttynull_close,
.hangup = ttynull_hangup,
.write = ttynull_write,
.write_room = ttynull_write_room,
};
static struct tty_driver *ttynull_device(struct console *c, int *index)
{
*index = 0;
return ttynull_driver;
}
static struct console ttynull_console = {
.name = "ttynull",
.device = ttynull_device,
};
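/*
 * Usage note (illustrative): the console above is selected with the
 * standard console= boot parameter, e.g. console=ttynull, which
 * silently discards all console output.
 */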
static int __init ttynull_init(void)
{
struct tty_driver *driver;
int ret;
driver = tty_alloc_driver(1,
TTY_DRIVER_RESET_TERMIOS |
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_UNNUMBERED_NODE);
if (IS_ERR(driver))
return PTR_ERR(driver);
tty_port_init(&ttynull_port);
ttynull_port.ops = &ttynull_port_ops;
driver->driver_name = "ttynull";
driver->name = "ttynull";
driver->type = TTY_DRIVER_TYPE_CONSOLE;
driver->init_termios = tty_std_termios;
driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET;
tty_set_operations(driver, &ttynull_ops);
tty_port_link_device(&ttynull_port, driver, 0);
ret = tty_register_driver(driver);
if (ret < 0) {
tty_driver_kref_put(driver);
tty_port_destroy(&ttynull_port);
return ret;
}
ttynull_driver = driver;
register_console(&ttynull_console);
return 0;
}
static void __exit ttynull_exit(void)
{
unregister_console(&ttynull_console);
tty_unregister_driver(ttynull_driver);
tty_driver_kref_put(ttynull_driver);
tty_port_destroy(&ttynull_port);
}
module_init(ttynull_init);
module_exit(ttynull_exit);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/tty/ttynull.c |
// SPDX-License-Identifier: GPL-2.0
/*
* n_gsm.c GSM 0710 tty multiplexor
* Copyright (c) 2009/10 Intel Corporation
*
* * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
*
* Outgoing path:
* tty -> DLCI fifo -> scheduler -> GSM MUX data queue ---o-> ldisc
* control message -> GSM MUX control queue --´
*
* Incoming path:
* ldisc -> gsm_queue() -o--> tty
* `-> gsm_control_response()
*
* TO DO:
* Mostly done: ioctls for setting modes/timing
* Partly done: hooks so you can pull off frames to non tty devs
* Restart DLCI 0 when it closes ?
* Improve the tx engine
* Resolve tx side locking by adding a queue_head and routing
* all control traffic via it
* General tidy/document
* Review the locking/move to refcounts more (mux now moved to an
* alloc/free model ready)
* Use newest tty open/close port helpers and install hooks
* What to do about power functions ?
* Termios setting and negotiation
* Do we need a 'which mux are you' ioctl to correlate mux and tty sets
*
*/
#include <linux/types.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/fcntl.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/math.h>
#include <linux/nospec.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/bitops.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/serial.h>
#include <linux/kfifo.h>
#include <linux/skbuff.h>
#include <net/arp.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/gsmmux.h>
#include "tty.h"
static int debug;
module_param(debug, int, 0600);
/* Module debug bits */
#define DBG_DUMP BIT(0) /* Data transmission dump. */
#define DBG_CD_ON BIT(1) /* Always assume CD line on. */
#define DBG_DATA BIT(2) /* Data transmission details. */
#define DBG_ERRORS BIT(3) /* Details for fail conditions. */
#define DBG_TTY BIT(4) /* Transmission statistics for DLCI TTYs. */
#define DBG_PAYLOAD BIT(5) /* Limits DBG_DUMP to payload frames. */
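/*
 * Example (illustrative): the bits above can be combined and written
 * at load time or at runtime, e.g.
 *
 *	modprobe n_gsm debug=0x05
 *	echo 5 > /sys/module/n_gsm/parameters/debug
 *
 * to enable DBG_DUMP and DBG_DATA together.
 */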
/* Defaults: these are from the specification */
#define T1 10 /* 100 ms */
#define T2 34 /* 333 ms */
#define T3 10 /* 10s */
#define N2 3 /* Retry 3 times */
#define K 2 /* outstanding I frames */
#define MAX_T3 255 /* In seconds. */
#define MAX_WINDOW_SIZE 7 /* Limit of K in error recovery mode. */
/* Use long timers for testing at low speed with debug on */
#ifdef DEBUG_TIMING
#define T1 100
#define T2 200
#endif
/*
* Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte
* limits so this is plenty
*/
#define MAX_MRU 1500
#define MAX_MTU 1500
#define MIN_MTU (PROT_OVERHEAD + 1)
/* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */
#define PROT_OVERHEAD 7
#define GSM_NET_TX_TIMEOUT (HZ*10)
/*
* struct gsm_mux_net - network interface
*
* Created when net interface is initialized.
*/
struct gsm_mux_net {
struct kref ref;
struct gsm_dlci *dlci;
};
/*
* Each block of data we have queued to go out is in the form of
* a gsm_msg which holds everything we need in a link layer independent
* format
*/
struct gsm_msg {
struct list_head list;
u8 addr; /* DLCI address + flags */
u8 ctrl; /* Control byte + flags */
unsigned int len; /* Length of data block (can be zero) */
unsigned char *data; /* Points into buffer but not at the start */
unsigned char buffer[];
};
enum gsm_dlci_state {
DLCI_CLOSED,
DLCI_WAITING_CONFIG, /* Waiting for DLCI configuration from user */
DLCI_CONFIGURE, /* Sending PN (for adaption > 1) */
DLCI_OPENING, /* Sending SABM not seen UA */
DLCI_OPEN, /* SABM/UA complete */
DLCI_CLOSING, /* Sending DISC not seen UA/DM */
};
enum gsm_dlci_mode {
DLCI_MODE_ABM, /* Normal Asynchronous Balanced Mode */
DLCI_MODE_ADM, /* Asynchronous Disconnected Mode */
};
/*
* Each active data link has a gsm_dlci structure associated which ties
* the link layer to an optional tty (if the tty side is open). To avoid
* complexity right now these are only ever freed up when the mux is
* shut down.
*
* At the moment we don't free DLCI objects until the mux is torn down.
* This avoids object lifetime issues but might be worth reviewing later.
*/
struct gsm_dlci {
struct gsm_mux *gsm;
int addr;
enum gsm_dlci_state state;
struct mutex mutex;
/* Link layer */
enum gsm_dlci_mode mode;
spinlock_t lock; /* Protects the internal state */
struct timer_list t1; /* Retransmit timer for SABM and UA */
int retries;
/* Uplink tty if active */
struct tty_port port; /* The tty bound to this DLCI if there is one */
#define TX_SIZE 4096 /* Must be power of 2. */
struct kfifo fifo; /* Queue fifo for the DLCI */
int adaption; /* Adaption layer in use */
int prev_adaption;
u32 modem_rx; /* Our incoming virtual modem lines */
u32 modem_tx; /* Our outgoing modem lines */
unsigned int mtu;
bool dead; /* Refuse re-open */
/* Configuration */
u8 prio; /* Priority */
u8 ftype; /* Frame type */
u8 k; /* Window size */
/* Flow control */
bool throttled; /* Private copy of throttle state */
bool constipated; /* Throttle status for outgoing */
/* Packetised I/O */
struct sk_buff *skb; /* Frame being sent */
struct sk_buff_head skb_list; /* Queued frames */
/* Data handling callback */
void (*data)(struct gsm_dlci *dlci, const u8 *data, int len);
void (*prev_data)(struct gsm_dlci *dlci, const u8 *data, int len);
struct net_device *net; /* network interface, if created */
};
/*
* Parameter bits used for parameter negotiation according to 3GPP 27.010
* chapter 5.4.6.3.1.
*/
struct gsm_dlci_param_bits {
u8 d_bits;
u8 i_cl_bits;
u8 p_bits;
u8 t_bits;
__le16 n_bits;
u8 na_bits;
u8 k_bits;
};
static_assert(sizeof(struct gsm_dlci_param_bits) == 8);
#define PN_D_FIELD_DLCI GENMASK(5, 0)
#define PN_I_CL_FIELD_FTYPE GENMASK(3, 0)
#define PN_I_CL_FIELD_ADAPTION GENMASK(7, 4)
#define PN_P_FIELD_PRIO GENMASK(5, 0)
#define PN_T_FIELD_T1 GENMASK(7, 0)
#define PN_N_FIELD_N1 GENMASK(15, 0)
#define PN_NA_FIELD_N2 GENMASK(7, 0)
#define PN_K_FIELD_K GENMASK(2, 0)
/* Total number of supported devices */
#define GSM_TTY_MINORS 256
/* DLCI 0, 62/63 are special or reserved see gsmtty_open */
#define NUM_DLCI 64
/*
* DLCI 0 is used to pass control blocks out of band of the data
* flow (and with a higher link priority). One command can be outstanding
* at a time and we use this structure to manage them. They are created
* and destroyed by the user context, and updated by the receive paths
* and timers
*/
struct gsm_control {
u8 cmd; /* Command we are issuing */
u8 *data; /* Data for the command in case we retransmit */
int len; /* Length of block for retransmission */
int done; /* Done flag */
int error; /* Error if any */
};
enum gsm_encoding {
GSM_BASIC_OPT,
GSM_ADV_OPT,
};
enum gsm_mux_state {
GSM_SEARCH,
GSM_START,
GSM_ADDRESS,
GSM_CONTROL,
GSM_LEN,
GSM_DATA,
GSM_FCS,
GSM_OVERRUN,
GSM_LEN0,
GSM_LEN1,
GSM_SSOF,
};
/*
* Each GSM mux we have is represented by this structure. If we are
* operating as an ldisc then we use this structure as our ldisc
* state. We need to sort out lifetimes and locking with respect
* to the gsm mux array. For now we don't free DLCI objects that
* have been instantiated until the mux itself is terminated.
*
* To consider further: tty open versus mux shutdown.
*/
struct gsm_mux {
struct tty_struct *tty; /* The tty our ldisc is bound to */
spinlock_t lock;
struct mutex mutex;
unsigned int num;
struct kref ref;
/* Events on the GSM channel */
wait_queue_head_t event;
/* ldisc send work */
struct work_struct tx_work;
/* Bits for GSM mode decoding */
/* Framing Layer */
unsigned char *buf;
enum gsm_mux_state state;
unsigned int len;
unsigned int address;
unsigned int count;
bool escape;
enum gsm_encoding encoding;
u8 control;
u8 fcs;
u8 *txframe; /* TX framing buffer */
/* Method for the receiver side */
void (*receive)(struct gsm_mux *gsm, u8 ch);
/* Link Layer */
unsigned int mru;
unsigned int mtu;
int initiator; /* Did we initiate connection */
bool dead; /* Has the mux been shut down */
struct gsm_dlci *dlci[NUM_DLCI];
int old_c_iflag; /* termios c_iflag value before attach */
bool constipated; /* Asked by remote to shut up */
bool has_devices; /* Devices were registered */
spinlock_t tx_lock;
unsigned int tx_bytes; /* TX data outstanding */
#define TX_THRESH_HI 8192
#define TX_THRESH_LO 2048
struct list_head tx_ctrl_list; /* Pending control packets */
struct list_head tx_data_list; /* Pending data packets */
/* Control messages */
struct timer_list kick_timer; /* Kick TX queuing on timeout */
struct timer_list t2_timer; /* Retransmit timer for commands */
int cretries; /* Command retry counter */
struct gsm_control *pending_cmd;/* Our current pending command */
spinlock_t control_lock; /* Protects the pending command */
/* Keep-alive */
struct timer_list ka_timer; /* Keep-alive response timer */
u8 ka_num; /* Keep-alive match pattern */
signed int ka_retries; /* Keep-alive retry counter, -1 if not yet initialized */
/* Configuration */
int adaption; /* 1 or 2 supported */
u8 ftype; /* UI or UIH */
int t1, t2; /* Timers in 1/100th of a sec */
unsigned int t3; /* Power wake-up timer in seconds. */
int n2; /* Retry count */
u8 k; /* Window size */
bool wait_config; /* Wait for configuration by ioctl before DLCI open */
u32 keep_alive; /* Control channel keep-alive in 10ms */
/* Statistics (not currently exposed) */
unsigned long bad_fcs;
unsigned long malformed;
unsigned long io_error;
unsigned long open_error;
unsigned long bad_size;
unsigned long unsupported;
};
/*
* Mux objects - needed so that we can translate a tty index into the
* relevant mux and DLCI.
*/
#define MAX_MUX 4 /* 256 minors */
static struct gsm_mux *gsm_mux[MAX_MUX]; /* GSM muxes */
static DEFINE_SPINLOCK(gsm_mux_lock);
static struct tty_driver *gsm_tty_driver;
/*
* This section of the driver logic implements the GSM encodings
* both the basic and the 'advanced'. Reliable transport is not
* supported.
*/
#define CR 0x02
#define EA 0x01
#define PF 0x10
/* I is special: the rest are... */
#define RR 0x01
#define UI 0x03
#define RNR 0x05
#define REJ 0x09
#define DM 0x0F
#define SABM 0x2F
#define DISC 0x43
#define UA 0x63
#define UIH 0xEF
/* Channel commands */
#define CMD_NSC 0x09
#define CMD_TEST 0x11
#define CMD_PSC 0x21
#define CMD_RLS 0x29
#define CMD_FCOFF 0x31
#define CMD_PN 0x41
#define CMD_RPN 0x49
#define CMD_FCON 0x51
#define CMD_CLD 0x61
#define CMD_SNC 0x69
#define CMD_MSC 0x71
/* Virtual modem bits */
#define MDM_FC 0x01
#define MDM_RTC 0x02
#define MDM_RTR 0x04
#define MDM_IC 0x20
#define MDM_DV 0x40
#define GSM0_SOF 0xF9
#define GSM1_SOF 0x7E
#define GSM1_ESCAPE 0x7D
#define GSM1_ESCAPE_BITS 0x20
#define XON 0x11
#define XOFF 0x13
#define ISO_IEC_646_MASK 0x7F
static const struct tty_port_operations gsm_port_ops;
/*
* CRC table for GSM 0710
*/
static const u8 gsm_fcs8[256] = {
0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75,
0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B,
0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69,
0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67,
0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D,
0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43,
0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51,
0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F,
0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05,
0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B,
0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19,
0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17,
0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D,
0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33,
0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21,
0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F,
0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95,
0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B,
0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89,
0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87,
0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD,
0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3,
0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1,
0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF,
0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5,
0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB,
0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9,
0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7,
0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD,
0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3,
0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1,
0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF
};
#define INIT_FCS 0xFF
#define GOOD_FCS 0xCF
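/*
 * A minimal sketch (hypothetical helper, not part of the driver) of
 * how a receiver validates a frame with the table above: running the
 * FCS over the protected bytes and then over the received FCS octet
 * itself must yield the GOOD_FCS constant.
 *
 *	static bool gsm_fcs_is_good(u8 *data, int len, u8 recv_fcs)
 *	{
 *		u8 fcs = gsm_fcs_add_block(INIT_FCS, data, len);
 *		return gsm_fcs_add(fcs, recv_fcs) == GOOD_FCS;
 *	}
 */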
static void gsm_dlci_close(struct gsm_dlci *dlci);
static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len);
static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk);
static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
u8 ctrl);
static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg);
static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr);
static void gsmld_write_trigger(struct gsm_mux *gsm);
static void gsmld_write_task(struct work_struct *work);
/**
* gsm_fcs_add - update FCS
* @fcs: Current FCS
* @c: Next data
*
* Update the FCS to include c. Uses the algorithm in the specification
* notes.
*/
static inline u8 gsm_fcs_add(u8 fcs, u8 c)
{
return gsm_fcs8[fcs ^ c];
}
/**
* gsm_fcs_add_block - update FCS for a block
* @fcs: Current FCS
* @c: buffer of data
* @len: length of buffer
*
* Update the FCS to include c. Uses the algorithm in the specification
* notes.
*/
static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len)
{
while (len--)
fcs = gsm_fcs8[fcs ^ *c++];
return fcs;
}
/**
* gsm_read_ea - read a byte into an EA
* @val: variable holding value
* @c: byte going into the EA
*
* Processes one byte of an EA. Updates the passed variable
* and returns 1 if the EA is now completely read
*/
static int gsm_read_ea(unsigned int *val, u8 c)
{
/* Add the next 7 bits into the value */
*val <<= 7;
*val |= c >> 1;
/* Was this the last byte of the EA? 1 = yes */
return c & EA;
}
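/*
 * Worked example (illustrative): decoding the two-byte EA sequence
 * 0x06, 0x01. The first byte has EA clear and contributes 0x06 >> 1
 * = 3; the second has EA set and contributes 0, so the value becomes
 * (3 << 7) | 0 = 384 and gsm_read_ea() returns 1 on the second byte
 * to signal completion.
 */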
/**
* gsm_read_ea_val - read a value until EA
* @val: variable holding value
* @data: buffer of data
* @dlen: length of data
*
* Processes an EA value. Updates the passed variable and
* returns the processed data length.
*/
static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen)
{
unsigned int len = 0;
for (; dlen > 0; dlen--) {
len++;
if (gsm_read_ea(val, *data++))
break;
}
return len;
}
/**
* gsm_encode_modem - encode modem data bits
* @dlci: DLCI to encode from
*
* Returns the correct GSM encoded modem status bits (6 bit field) for
* the current status of the DLCI and attached tty object
*/
static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
{
u8 modembits = 0;
/* FC is true flow control not modem bits */
if (dlci->throttled)
modembits |= MDM_FC;
if (dlci->modem_tx & TIOCM_DTR)
modembits |= MDM_RTC;
if (dlci->modem_tx & TIOCM_RTS)
modembits |= MDM_RTR;
if (dlci->modem_tx & TIOCM_RI)
modembits |= MDM_IC;
if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator)
modembits |= MDM_DV;
/* special mappings for passive side to operate as UE */
if (dlci->modem_tx & TIOCM_OUT1)
modembits |= MDM_IC;
if (dlci->modem_tx & TIOCM_OUT2)
modembits |= MDM_DV;
return modembits;
}
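/*
 * Worked example (illustrative): a DLCI with TIOCM_DTR and TIOCM_RTS
 * asserted and no throttling encodes to MDM_RTC | MDM_RTR = 0x06; on
 * the initiator side MDM_DV is forced as well, giving 0x46.
 */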
static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
unsigned long len)
{
char *prefix;
if (!fname) {
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, data, len,
true);
return;
}
prefix = kasprintf(GFP_ATOMIC, "%s: ", fname);
if (!prefix)
return;
print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len,
true);
kfree(prefix);
}
/**
* gsm_encode_params - encode DLCI parameters
* @dlci: DLCI to encode from
* @params: buffer to fill with the encoded parameters
*
* Encodes the parameters according to GSM 07.10 section 5.4.6.3.1
* table 3.
*/
static int gsm_encode_params(const struct gsm_dlci *dlci,
struct gsm_dlci_param_bits *params)
{
const struct gsm_mux *gsm = dlci->gsm;
unsigned int i, cl;
switch (dlci->ftype) {
case UIH:
i = 0; /* UIH */
break;
case UI:
i = 1; /* UI */
break;
default:
pr_debug("unsupported frame type %d\n", dlci->ftype);
return -EINVAL;
}
switch (dlci->adaption) {
case 1: /* Unstructured */
cl = 0; /* convergence layer type 1 */
break;
case 2: /* Unstructured with modem bits. */
cl = 1; /* convergence layer type 2 */
break;
default:
pr_debug("unsupported adaption %d\n", dlci->adaption);
return -EINVAL;
}
params->d_bits = FIELD_PREP(PN_D_FIELD_DLCI, dlci->addr);
/* Frame type and convergence layer */
params->i_cl_bits = FIELD_PREP(PN_I_CL_FIELD_FTYPE, i) |
FIELD_PREP(PN_I_CL_FIELD_ADAPTION, cl);
params->p_bits = FIELD_PREP(PN_P_FIELD_PRIO, dlci->prio);
params->t_bits = FIELD_PREP(PN_T_FIELD_T1, gsm->t1);
params->n_bits = cpu_to_le16(FIELD_PREP(PN_N_FIELD_N1, dlci->mtu));
params->na_bits = FIELD_PREP(PN_NA_FIELD_N2, gsm->n2);
params->k_bits = FIELD_PREP(PN_K_FIELD_K, dlci->k);
return 0;
}
/**
* gsm_register_devices - register all tty devices for a given mux index
*
* @driver: the tty driver that describes the tty devices
* @index: the mux number is used to calculate the minor numbers of the
* ttys for this mux and may differ from the position in the
* mux array.
*/
static int gsm_register_devices(struct tty_driver *driver, unsigned int index)
{
struct device *dev;
int i;
unsigned int base;
if (!driver || index >= MAX_MUX)
return -EINVAL;
base = index * NUM_DLCI; /* first minor for this index */
for (i = 1; i < NUM_DLCI; i++) {
/* Don't register device 0 - this is the control channel
* and not a usable tty interface
*/
dev = tty_register_device(gsm_tty_driver, base + i, NULL);
if (IS_ERR(dev)) {
if (debug & DBG_ERRORS)
pr_info("%s failed to register device minor %u",
__func__, base + i);
for (i--; i >= 1; i--)
tty_unregister_device(gsm_tty_driver, base + i);
return PTR_ERR(dev);
}
}
return 0;
}
/**
* gsm_unregister_devices - unregister all tty devices for a given mux index
*
* @driver: the tty driver that describes the tty devices
* @index: the mux number is used to calculate the minor numbers of the
* ttys for this mux and may differ from the position in the
* mux array.
*/
static void gsm_unregister_devices(struct tty_driver *driver,
unsigned int index)
{
int i;
unsigned int base;
if (!driver || index >= MAX_MUX)
return;
base = index * NUM_DLCI; /* first minor for this index */
for (i = 1; i < NUM_DLCI; i++) {
/* Don't unregister device 0 - this is the control
* channel and not a usable tty interface
*/
tty_unregister_device(gsm_tty_driver, base + i);
}
}
/**
* gsm_print_packet - display a frame for debug
* @hdr: header to print before decode
* @addr: address EA from the frame
* @cr: C/R bit seen as initiator
* @control: control including PF bit
* @data: following data bytes
* @dlen: length of data
*
* Displays a packet in human readable format for debugging purposes. The
* style is based on amateur radio LAP-B dump display.
*/
static void gsm_print_packet(const char *hdr, int addr, int cr,
u8 control, const u8 *data, int dlen)
{
if (!(debug & DBG_DUMP))
return;
/* Only show user payload frames if debug & DBG_PAYLOAD */
if (!(debug & DBG_PAYLOAD) && addr != 0)
if ((control & ~PF) == UI || (control & ~PF) == UIH)
return;
pr_info("%s %d) %c: ", hdr, addr, "RC"[cr]);
switch (control & ~PF) {
case SABM:
pr_cont("SABM");
break;
case UA:
pr_cont("UA");
break;
case DISC:
pr_cont("DISC");
break;
case DM:
pr_cont("DM");
break;
case UI:
pr_cont("UI");
break;
case UIH:
pr_cont("UIH");
break;
default:
if (!(control & 0x01)) {
pr_cont("I N(S)%d N(R)%d",
(control & 0x0E) >> 1, (control & 0xE0) >> 5);
} else switch (control & 0x0F) {
case RR:
pr_cont("RR(%d)", (control & 0xE0) >> 5);
break;
case RNR:
pr_cont("RNR(%d)", (control & 0xE0) >> 5);
break;
case REJ:
pr_cont("REJ(%d)", (control & 0xE0) >> 5);
break;
default:
pr_cont("[%02X]", control);
}
}
if (control & PF)
pr_cont("(P)");
else
pr_cont("(F)");
gsm_hex_dump_bytes(NULL, data, dlen);
}
/*
* Link level transmission side
*/
/**
* gsm_stuff_frame - bytestuff a packet
* @input: input buffer
* @output: output buffer
* @len: length of input
*
* Expand a buffer by bytestuffing it. The worst case size change
* is doubling and the caller is responsible for handing out
* suitable sized buffers.
*/
static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
{
int olen = 0;
while (len--) {
if (*input == GSM1_SOF || *input == GSM1_ESCAPE
|| (*input & ISO_IEC_646_MASK) == XON
|| (*input & ISO_IEC_646_MASK) == XOFF) {
*output++ = GSM1_ESCAPE;
*output++ = *input++ ^ GSM1_ESCAPE_BITS;
olen++;
} else
*output++ = *input++;
olen++;
}
return olen;
}
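/*
 * Worked example (illustrative): a data byte equal to GSM1_SOF (0x7E)
 * is sent as the pair GSM1_ESCAPE, 0x7E ^ GSM1_ESCAPE_BITS = 0x7D,
 * 0x5E. XON/XOFF are matched on their low 7 bits, so 0x91 (XON with
 * the top bit set) is escaped too. In the worst case every byte is
 * escaped and the output is twice the input length.
 */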
/**
* gsm_send - send a control frame
* @gsm: our GSM mux
* @addr: address for control frame
* @cr: command/response bit seen as initiator
* @control: control byte including PF bit
*
* Format up and transmit a control frame. These should be transmitted
* ahead of data when they are needed.
*/
static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
{
struct gsm_msg *msg;
u8 *dp;
int ocr;
unsigned long flags;
msg = gsm_data_alloc(gsm, addr, 0, control);
if (!msg)
return -ENOMEM;
/* toggle C/R coding if not initiator */
ocr = cr ^ (gsm->initiator ? 0 : 1);
msg->data -= 3;
dp = msg->data;
*dp++ = (addr << 2) | (ocr << 1) | EA;
*dp++ = control;
if (gsm->encoding == GSM_BASIC_OPT)
*dp++ = EA; /* Length of data = 0 */
*dp = 0xFF - gsm_fcs_add_block(INIT_FCS, msg->data, dp - msg->data);
msg->len = (dp - msg->data) + 1;
gsm_print_packet("Q->", addr, cr, control, NULL, 0);
spin_lock_irqsave(&gsm->tx_lock, flags);
list_add_tail(&msg->list, &gsm->tx_ctrl_list);
gsm->tx_bytes += msg->len;
spin_unlock_irqrestore(&gsm->tx_lock, flags);
gsmld_write_trigger(gsm);
return 0;
}
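/*
 * Frame layout sketch (illustrative, basic option): a SABM command
 * from the initiator to DLCI 0 is queued as
 *
 *	0x03	address: (0 << 2) | (1 << 1) | EA
 *	0x3F	control: SABM | PF
 *	0x01	length 0 with EA set
 *	fcs	0xFF minus the table checksum of the bytes above
 *
 * and gsm_send_packet() later brackets the frame with GSM0_SOF (0xF9)
 * flag bytes.
 */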
/**
* gsm_dlci_clear_queues - remove outstanding data for a DLCI
* @gsm: mux
* @dlci: clear for this DLCI
*
* Clears the data queues for a given DLCI.
*/
static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
struct gsm_msg *msg, *nmsg;
int addr = dlci->addr;
unsigned long flags;
/* Clear DLCI write fifo first */
spin_lock_irqsave(&dlci->lock, flags);
kfifo_reset(&dlci->fifo);
spin_unlock_irqrestore(&dlci->lock, flags);
/* Clear data packets in MUX write queue */
spin_lock_irqsave(&gsm->tx_lock, flags);
list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
if (msg->addr != addr)
continue;
gsm->tx_bytes -= msg->len;
list_del(&msg->list);
kfree(msg);
}
spin_unlock_irqrestore(&gsm->tx_lock, flags);
}
/**
* gsm_response - send a control response
* @gsm: our GSM mux
* @addr: address for control frame
* @control: control byte including PF bit
*
* Format up and transmit a link level response frame.
*/
static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
{
gsm_send(gsm, addr, 0, control);
}
/**
* gsm_command - send a control command
* @gsm: our GSM mux
* @addr: address for control frame
* @control: control byte including PF bit
*
* Format up and transmit a link level command frame.
*/
static inline void gsm_command(struct gsm_mux *gsm, int addr, int control)
{
gsm_send(gsm, addr, 1, control);
}
/* Data transmission */
#define HDR_LEN 6 /* ADDR CTRL [LEN.2] DATA FCS */
/**
* gsm_data_alloc - allocate data frame
* @gsm: GSM mux
* @addr: DLCI address
* @len: length excluding header and FCS
* @ctrl: control byte
*
* Allocate a new data buffer for sending frames with data. Space is left
* at the front for header bytes but that is treated as an implementation
* detail and not for the high level code to use
*/
static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
u8 ctrl)
{
struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN,
GFP_ATOMIC);
if (m == NULL)
return NULL;
m->data = m->buffer + HDR_LEN - 1; /* Allow for FCS */
m->len = len;
m->addr = addr;
m->ctrl = ctrl;
INIT_LIST_HEAD(&m->list);
return m;
}
/**
* gsm_send_packet - sends a single packet
* @gsm: GSM Mux
* @msg: packet to send
*
* The given packet is encoded and sent out. No memory is freed.
* The caller must hold the gsm tx lock.
*/
static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg)
{
int len, ret;
if (gsm->encoding == GSM_BASIC_OPT) {
gsm->txframe[0] = GSM0_SOF;
memcpy(gsm->txframe + 1, msg->data, msg->len);
gsm->txframe[msg->len + 1] = GSM0_SOF;
len = msg->len + 2;
} else {
gsm->txframe[0] = GSM1_SOF;
len = gsm_stuff_frame(msg->data, gsm->txframe + 1, msg->len);
gsm->txframe[len + 1] = GSM1_SOF;
len += 2;
}
if (debug & DBG_DATA)
gsm_hex_dump_bytes(__func__, gsm->txframe, len);
gsm_print_packet("-->", msg->addr, gsm->initiator, msg->ctrl, msg->data,
msg->len);
ret = gsmld_output(gsm, gsm->txframe, len);
if (ret <= 0)
return ret;
/* FIXME: Can eliminate one SOF in many more cases */
gsm->tx_bytes -= msg->len;
return 0;
}
/**
* gsm_is_flow_ctrl_msg - checks if flow control message
* @msg: message to check
*
* Returns true if the given message is a flow control command of the
* control channel. False is returned in any other case.
*/
static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg)
{
unsigned int cmd;
if (msg->addr > 0)
return false;
switch (msg->ctrl & ~PF) {
case UI:
case UIH:
cmd = 0;
if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1)
break;
switch (cmd & ~PF) {
case CMD_FCOFF:
case CMD_FCON:
return true;
}
break;
}
return false;
}
/**
* gsm_data_kick - poke the queue
* @gsm: GSM Mux
*
* The tty device has called us to indicate that room has appeared in
* the transmit queue. Ram more data into the pipe if we have any.
* If we have been flow-stopped by a CMD_FCOFF, then we can only
* send messages on DLCI0 until CMD_FCON. The caller must hold
* the gsm tx lock.
*/
static int gsm_data_kick(struct gsm_mux *gsm)
{
struct gsm_msg *msg, *nmsg;
struct gsm_dlci *dlci;
int ret;
clear_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
/* Serialize control messages and control channel messages first */
list_for_each_entry_safe(msg, nmsg, &gsm->tx_ctrl_list, list) {
if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg))
continue;
ret = gsm_send_packet(gsm, msg);
switch (ret) {
case -ENOSPC:
return -ENOSPC;
case -ENODEV:
/* ldisc not open */
gsm->tx_bytes -= msg->len;
list_del(&msg->list);
kfree(msg);
continue;
default:
if (ret >= 0) {
list_del(&msg->list);
kfree(msg);
}
break;
}
}
if (gsm->constipated)
return -EAGAIN;
/* Serialize other channels */
if (list_empty(&gsm->tx_data_list))
return 0;
list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
dlci = gsm->dlci[msg->addr];
/* Send only messages for DLCIs with valid state */
if (dlci->state != DLCI_OPEN) {
gsm->tx_bytes -= msg->len;
list_del(&msg->list);
kfree(msg);
continue;
}
ret = gsm_send_packet(gsm, msg);
switch (ret) {
case -ENOSPC:
return -ENOSPC;
case -ENODEV:
/* ldisc not open */
gsm->tx_bytes -= msg->len;
list_del(&msg->list);
kfree(msg);
continue;
default:
if (ret >= 0) {
list_del(&msg->list);
kfree(msg);
}
break;
}
}
return 1;
}
/**
* __gsm_data_queue - queue a UI or UIH frame
* @dlci: DLCI sending the data
* @msg: message queued
*
* Add data to the transmit queue and try and get stuff moving
* out of the mux tty if not already doing so. The caller must hold
* the gsm tx lock.
*/
static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
{
struct gsm_mux *gsm = dlci->gsm;
u8 *dp = msg->data;
u8 *fcs = dp + msg->len;
/* Fill in the header */
if (gsm->encoding == GSM_BASIC_OPT) {
if (msg->len < 128)
*--dp = (msg->len << 1) | EA;
else {
*--dp = (msg->len >> 7); /* bits 7 - 15 */
*--dp = (msg->len & 127) << 1; /* bits 0 - 6 */
}
}
*--dp = msg->ctrl;
if (gsm->initiator)
*--dp = (msg->addr << 2) | CR | EA;
else
*--dp = (msg->addr << 2) | EA;
*fcs = gsm_fcs_add_block(INIT_FCS, dp, msg->data - dp);
/* Ugly protocol layering violation */
if (msg->ctrl == UI || msg->ctrl == (UI|PF))
*fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len);
*fcs = 0xFF - *fcs;
gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl,
msg->data, msg->len);
/* Move the header back and adjust the length, also allow for the FCS
now tacked on the end */
msg->len += (msg->data - dp) + 1;
msg->data = dp;
/* Add to the actual output queue */
switch (msg->ctrl & ~PF) {
case UI:
case UIH:
if (msg->addr > 0) {
list_add_tail(&msg->list, &gsm->tx_data_list);
break;
}
fallthrough;
default:
list_add_tail(&msg->list, &gsm->tx_ctrl_list);
break;
}
gsm->tx_bytes += msg->len;
gsmld_write_trigger(gsm);
mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100);
}
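/*
 * Worked example (illustrative): for the basic option a 5-byte UIH
 * data frame on DLCI 1 sent by the initiator gets the header
 *
 *	0x07	address: (1 << 2) | CR | EA
 *	0xEF	control: UIH
 *	0x0B	length: (5 << 1) | EA
 *
 * Payloads of 128 bytes or more use two length octets: the first
 * carries bits 0-6 shifted left with EA clear, the second carries
 * bits 7 and up.
 */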
/**
* gsm_data_queue - queue a UI or UIH frame
* @dlci: DLCI sending the data
* @msg: message queued
*
* Add data to the transmit queue and try and get stuff moving
* out of the mux tty if not already doing so. Takes the
* gsm tx lock and dlci lock.
*/
static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
{
unsigned long flags;
spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
__gsm_data_queue(dlci, msg);
spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
}
/**
* gsm_dlci_data_output - try and push data out of a DLCI
* @gsm: mux
* @dlci: the DLCI to pull data from
*
* Pull data from a DLCI and send it into the transmit queue if there
* is data. Keep to the MRU of the mux. This path handles the usual tty
* interface which is a byte stream with optional modem data.
*
* Caller must hold the tx_lock of the mux.
*/
static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
struct gsm_msg *msg;
u8 *dp;
int h, len, size;
/* for modem bits without break data */
h = ((dlci->adaption == 1) ? 0 : 1);
len = kfifo_len(&dlci->fifo);
if (len == 0)
return 0;
/* MTU/MRU count only the data bits but watch adaption mode */
if ((len + h) > dlci->mtu)
len = dlci->mtu - h;
size = len + h;
msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype);
if (!msg)
return -ENOMEM;
dp = msg->data;
switch (dlci->adaption) {
case 1: /* Unstructured */
break;
case 2: /* Unstructured with modem bits.
* Always one byte as we never send inline break data
*/
*dp++ = (gsm_encode_modem(dlci) << 1) | EA;
break;
default:
pr_err("%s: unsupported adaption %d\n", __func__,
dlci->adaption);
break;
}
WARN_ON(len != kfifo_out_locked(&dlci->fifo, dp, len,
&dlci->lock));
/* Notify upper layer about available send space. */
tty_port_tty_wakeup(&dlci->port);
__gsm_data_queue(dlci, msg);
/* Bytes of data we used up */
return size;
}
/**
* gsm_dlci_data_output_framed - try and push data out of a DLCI
* @gsm: mux
* @dlci: the DLCI to pull data from
*
* Pull data from a DLCI and send it into the transmit queue if there
* is data. Keep to the MRU of the mux. This path handles framed data
* queued as skbuffs to the DLCI.
*
* Caller must hold the tx_lock of the mux.
*/
static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
struct gsm_dlci *dlci)
{
struct gsm_msg *msg;
u8 *dp;
int len, size;
int last = 0, first = 0;
int overhead = 0;
/* One byte per frame is used for B/F flags */
if (dlci->adaption == 4)
overhead = 1;
/* dlci->skb is locked by tx_lock */
if (dlci->skb == NULL) {
dlci->skb = skb_dequeue_tail(&dlci->skb_list);
if (dlci->skb == NULL)
return 0;
first = 1;
}
len = dlci->skb->len + overhead;
/* MTU/MRU count only the data bits */
if (len > dlci->mtu) {
if (dlci->adaption == 3) {
/* Over long frame, bin it */
dev_kfree_skb_any(dlci->skb);
dlci->skb = NULL;
return 0;
}
len = dlci->mtu;
} else
last = 1;
size = len + overhead;
msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype);
if (msg == NULL) {
skb_queue_tail(&dlci->skb_list, dlci->skb);
dlci->skb = NULL;
return -ENOMEM;
}
dp = msg->data;
if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */
/* Flag byte to carry the start/end info */
*dp++ = last << 7 | first << 6 | 1; /* EA */
len--;
}
memcpy(dp, dlci->skb->data, len);
skb_pull(dlci->skb, len);
__gsm_data_queue(dlci, msg);
if (last) {
dev_kfree_skb_any(dlci->skb);
dlci->skb = NULL;
}
return size;
}
/**
* gsm_dlci_modem_output - try and push modem status out of a DLCI
* @gsm: mux
* @dlci: the DLCI to pull modem status from
* @brk: break signal
*
* Push an empty frame in to the transmit queue to update the modem status
* bits and to transmit an optional break.
*
* Caller must hold the tx_lock of the mux.
*/
static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
u8 brk)
{
u8 *dp = NULL;
struct gsm_msg *msg;
int size = 0;
/* for modem bits without break data */
switch (dlci->adaption) {
case 1: /* Unstructured */
break;
case 2: /* Unstructured with modem bits. */
size++;
if (brk > 0)
size++;
break;
default:
pr_err("%s: unsupported adaption %d\n", __func__,
dlci->adaption);
return -EINVAL;
}
msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype);
if (!msg) {
pr_err("%s: gsm_data_alloc error", __func__);
return -ENOMEM;
}
dp = msg->data;
switch (dlci->adaption) {
case 1: /* Unstructured */
break;
case 2: /* Unstructured with modem bits. */
if (brk == 0) {
*dp++ = (gsm_encode_modem(dlci) << 1) | EA;
} else {
*dp++ = gsm_encode_modem(dlci) << 1;
*dp++ = (brk << 4) | 2 | EA; /* Length, Break, EA */
}
break;
default:
/* Handled above */
break;
}
__gsm_data_queue(dlci, msg);
return size;
}
/**
* gsm_dlci_data_sweep - look for data to send
* @gsm: the GSM mux
*
* Sweep the GSM mux channels in priority order looking for ones with
* data to send. We could do with optimising this scan a bit. We aim
* to fill the queue totally or up to TX_THRESH_HI bytes. Once the
* queue drains below TX_THRESH_LO we get called again.
*
* FIXME: We should round robin between groups and in theory you can
* renegotiate DLCI priorities with optional stuff. Needs optimising.
*/
static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
{
/* Priority ordering: We should do priority with RR of the groups */
int i, len, ret = 0;
bool sent;
struct gsm_dlci *dlci;
while (gsm->tx_bytes < TX_THRESH_HI) {
for (sent = false, i = 1; i < NUM_DLCI; i++) {
dlci = gsm->dlci[i];
/* skip unused or blocked channel */
if (!dlci || dlci->constipated)
continue;
/* skip channels with invalid state */
if (dlci->state != DLCI_OPEN)
continue;
/* count the sent data per adaption */
if (dlci->adaption < 3 && !dlci->net)
len = gsm_dlci_data_output(gsm, dlci);
else
len = gsm_dlci_data_output_framed(gsm, dlci);
/* on error exit */
if (len < 0)
return ret;
if (len > 0) {
ret++;
sent = true;
/* The lower DLCs can starve the higher DLCs! */
break;
}
/* try next */
}
if (!sent)
break;
}
return ret;
}
/**
* gsm_dlci_data_kick - transmit if possible
* @dlci: DLCI to kick
*
* Transmit data from this DLCI if the queue is empty. We can't rely on
* a tty wakeup except when we filled the pipe so we need to fire off
* new data ourselves in other cases.
*/
static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
{
unsigned long flags;
int sweep;
if (dlci->constipated)
return;
spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
/* If we have nothing running then we need to fire up */
sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
if (dlci->gsm->tx_bytes == 0) {
if (dlci->net)
gsm_dlci_data_output_framed(dlci->gsm, dlci);
else
gsm_dlci_data_output(dlci->gsm, dlci);
}
if (sweep)
gsm_dlci_data_sweep(dlci->gsm);
spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
}
/*
* Control message processing
*/
/**
* gsm_control_command - send a command frame to a control
* @gsm: gsm channel
* @cmd: the command to use
* @data: data to follow encoded info
* @dlen: length of data
*
* Encode up and queue a UI/UIH frame containing our command.
*/
static int gsm_control_command(struct gsm_mux *gsm, int cmd, const u8 *data,
int dlen)
{
struct gsm_msg *msg;
struct gsm_dlci *dlci = gsm->dlci[0];
msg = gsm_data_alloc(gsm, 0, dlen + 2, dlci->ftype);
if (msg == NULL)
return -ENOMEM;
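	/* Control message layout on the wire: a type octet
	 * ((cmd << 1) | C/R | EA), a length octet ((dlen << 1) | EA) and
	 * then dlen value octets.
	 */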
msg->data[0] = (cmd << 1) | CR | EA; /* Set C/R */
msg->data[1] = (dlen << 1) | EA;
memcpy(msg->data + 2, data, dlen);
gsm_data_queue(dlci, msg);
return 0;
}
/**
* gsm_control_reply - send a response frame to a control
* @gsm: gsm channel
* @cmd: the command to use
* @data: data to follow encoded info
* @dlen: length of data
*
* Encode up and queue a UI/UIH frame containing our response.
*/
static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data,
int dlen)
{
struct gsm_msg *msg;
struct gsm_dlci *dlci = gsm->dlci[0];
msg = gsm_data_alloc(gsm, 0, dlen + 2, dlci->ftype);
if (msg == NULL)
return;
msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */
msg->data[1] = (dlen << 1) | EA;
memcpy(msg->data + 2, data, dlen);
gsm_data_queue(dlci, msg);
}
/**
* gsm_process_modem - process received modem status
* @tty: virtual tty bound to the DLCI
* @dlci: DLCI to affect
* @modem: modem bits (full EA)
* @slen: number of signal octets
*
 * Used when a modem control message or a line state inline in adaption
 * layer 2 is processed. Sorts out the local modem state and throttling.
*/
static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
u32 modem, int slen)
{
int mlines = 0;
u8 brk = 0;
int fc;
/* The modem status command can either contain one octet (V.24 signals)
* or two octets (V.24 signals + break signals). This is specified in
* section 5.4.6.3.7 of the 07.10 mux spec.
*/
if (slen == 1)
modem = modem & 0x7f;
else {
brk = modem & 0x7f;
modem = (modem >> 7) & 0x7f;
}
/* Flow control/ready to communicate */
fc = (modem & MDM_FC) || !(modem & MDM_RTR);
if (fc && !dlci->constipated) {
/* Need to throttle our output on this device */
dlci->constipated = true;
} else if (!fc && dlci->constipated) {
dlci->constipated = false;
gsm_dlci_data_kick(dlci);
}
/* Map modem bits */
if (modem & MDM_RTC)
mlines |= TIOCM_DSR | TIOCM_DTR;
if (modem & MDM_RTR)
mlines |= TIOCM_RTS | TIOCM_CTS;
if (modem & MDM_IC)
mlines |= TIOCM_RI;
if (modem & MDM_DV)
mlines |= TIOCM_CD;
/* Carrier drop -> hangup */
if (tty) {
if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
if (!C_CLOCAL(tty))
tty_hangup(tty);
}
if (brk & 0x01)
tty_insert_flip_char(&dlci->port, 0, TTY_BREAK);
dlci->modem_rx = mlines;
wake_up_interruptible(&dlci->gsm->event);
}
/**
* gsm_process_negotiation - process received parameters
* @gsm: GSM channel
* @addr: DLCI address
* @cr: command/response
* @params: encoded parameters from the parameter negotiation message
*
* Used when the response for our parameter negotiation command was
* received.
*/
static int gsm_process_negotiation(struct gsm_mux *gsm, unsigned int addr,
unsigned int cr,
const struct gsm_dlci_param_bits *params)
{
struct gsm_dlci *dlci = gsm->dlci[addr];
unsigned int ftype, i, adaption, prio, n1, k;
i = FIELD_GET(PN_I_CL_FIELD_FTYPE, params->i_cl_bits);
adaption = FIELD_GET(PN_I_CL_FIELD_ADAPTION, params->i_cl_bits) + 1;
prio = FIELD_GET(PN_P_FIELD_PRIO, params->p_bits);
n1 = FIELD_GET(PN_N_FIELD_N1, get_unaligned_le16(¶ms->n_bits));
k = FIELD_GET(PN_K_FIELD_K, params->k_bits);
if (n1 < MIN_MTU) {
if (debug & DBG_ERRORS)
pr_info("%s N1 out of range in PN\n", __func__);
return -EINVAL;
}
switch (i) {
case 0x00:
ftype = UIH;
break;
case 0x01:
ftype = UI;
break;
case 0x02: /* I frames are not supported */
if (debug & DBG_ERRORS)
pr_info("%s unsupported I frame request in PN\n",
__func__);
gsm->unsupported++;
return -EINVAL;
default:
if (debug & DBG_ERRORS)
pr_info("%s i out of range in PN\n", __func__);
return -EINVAL;
}
if (!cr && gsm->initiator) {
if (adaption != dlci->adaption) {
if (debug & DBG_ERRORS)
pr_info("%s invalid adaption %d in PN\n",
__func__, adaption);
return -EINVAL;
}
if (prio != dlci->prio) {
if (debug & DBG_ERRORS)
pr_info("%s invalid priority %d in PN",
__func__, prio);
return -EINVAL;
}
if (n1 > gsm->mru || n1 > dlci->mtu) {
/* We requested a frame size but the other party wants
* to send larger frames. The standard allows only a
* smaller response value than requested (5.4.6.3.1).
*/
if (debug & DBG_ERRORS)
pr_info("%s invalid N1 %d in PN\n", __func__,
n1);
return -EINVAL;
}
dlci->mtu = n1;
if (ftype != dlci->ftype) {
if (debug & DBG_ERRORS)
pr_info("%s invalid i %d in PN\n", __func__, i);
return -EINVAL;
}
if (ftype != UI && ftype != UIH && k > dlci->k) {
if (debug & DBG_ERRORS)
pr_info("%s invalid k %d in PN\n", __func__, k);
return -EINVAL;
}
dlci->k = k;
} else if (cr && !gsm->initiator) {
/* Only convergence layer type 1 and 2 are supported. */
if (adaption != 1 && adaption != 2) {
if (debug & DBG_ERRORS)
pr_info("%s invalid adaption %d in PN\n",
__func__, adaption);
return -EINVAL;
}
dlci->adaption = adaption;
if (n1 > gsm->mru) {
/* Propose a smaller value */
dlci->mtu = gsm->mru;
} else if (n1 > MAX_MTU) {
/* Propose a smaller value */
dlci->mtu = MAX_MTU;
} else {
dlci->mtu = n1;
}
dlci->prio = prio;
dlci->ftype = ftype;
dlci->k = k;
} else {
return -EINVAL;
}
return 0;
}
/**
* gsm_control_modem - modem status received
* @gsm: GSM channel
* @data: data following command
* @clen: command length
*
* We have received a modem status control message. This is used by
* the GSM mux protocol to pass virtual modem line status and optionally
* to indicate break signals. Unpack it, convert to Linux representation
* and if need be stuff a break message down the tty.
*/
static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
{
unsigned int addr = 0;
unsigned int modem = 0;
struct gsm_dlci *dlci;
int len = clen;
int cl = clen;
const u8 *dp = data;
struct tty_struct *tty;
len = gsm_read_ea_val(&addr, data, cl);
if (len < 1)
return;
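	/* Only the upper bits of the EA-decoded address field carry the DLCI
	 * number; drop the low bit.
	 */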
addr >>= 1;
/* Closed port, or invalid ? */
if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
return;
dlci = gsm->dlci[addr];
/* Must be at least one byte following the EA */
if ((cl - len) < 1)
return;
dp += len;
cl -= len;
/* get the modem status */
len = gsm_read_ea_val(&modem, dp, cl);
if (len < 1)
return;
tty = tty_port_tty_get(&dlci->port);
gsm_process_modem(tty, dlci, modem, cl);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
}
gsm_control_reply(gsm, CMD_MSC, data, clen);
}
/**
* gsm_control_negotiation - parameter negotiation received
* @gsm: GSM channel
* @cr: command/response flag
* @data: data following command
* @dlen: data length
*
* We have received a parameter negotiation message. This is used by
* the GSM mux protocol to configure protocol parameters for a new DLCI.
*/
static void gsm_control_negotiation(struct gsm_mux *gsm, unsigned int cr,
const u8 *data, unsigned int dlen)
{
unsigned int addr;
struct gsm_dlci_param_bits pn_reply;
struct gsm_dlci *dlci;
struct gsm_dlci_param_bits *params;
if (dlen < sizeof(struct gsm_dlci_param_bits)) {
gsm->open_error++;
return;
}
/* Invalid DLCI? */
params = (struct gsm_dlci_param_bits *)data;
addr = FIELD_GET(PN_D_FIELD_DLCI, params->d_bits);
if (addr == 0 || addr >= NUM_DLCI || !gsm->dlci[addr]) {
gsm->open_error++;
return;
}
dlci = gsm->dlci[addr];
/* Too late for parameter negotiation? */
if ((!cr && dlci->state == DLCI_OPENING) || dlci->state == DLCI_OPEN) {
gsm->open_error++;
return;
}
/* Process the received parameters */
if (gsm_process_negotiation(gsm, addr, cr, params) != 0) {
/* Negotiation failed. Close the link. */
if (debug & DBG_ERRORS)
pr_info("%s PN failed\n", __func__);
gsm->open_error++;
gsm_dlci_close(dlci);
return;
}
if (cr) {
/* Reply command with accepted parameters. */
if (gsm_encode_params(dlci, &pn_reply) == 0)
gsm_control_reply(gsm, CMD_PN, (const u8 *)&pn_reply,
sizeof(pn_reply));
else if (debug & DBG_ERRORS)
pr_info("%s PN invalid\n", __func__);
} else if (dlci->state == DLCI_CONFIGURE) {
/* Proceed with link setup by sending SABM before UA */
dlci->state = DLCI_OPENING;
gsm_command(gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else {
if (debug & DBG_ERRORS)
pr_info("%s PN in invalid state\n", __func__);
gsm->open_error++;
}
}
/**
* gsm_control_rls - remote line status
* @gsm: GSM channel
* @data: data bytes
* @clen: data length
*
* The modem sends us a two byte message on the control channel whenever
* it wishes to send us an error state from the virtual link. Stuff
* this into the uplink tty if present
*/
static void gsm_control_rls(struct gsm_mux *gsm, const u8 *data, int clen)
{
struct tty_port *port;
unsigned int addr = 0;
u8 bits;
int len = clen;
const u8 *dp = data;
while (gsm_read_ea(&addr, *dp++) == 0) {
len--;
if (len == 0)
return;
}
/* Must be at least one byte following ea */
len--;
if (len <= 0)
return;
addr >>= 1;
/* Closed port, or invalid ? */
if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
return;
/* No error ? */
bits = *dp;
if ((bits & 1) == 0)
return;
port = &gsm->dlci[addr]->port;
if (bits & 2)
tty_insert_flip_char(port, 0, TTY_OVERRUN);
if (bits & 4)
tty_insert_flip_char(port, 0, TTY_PARITY);
if (bits & 8)
tty_insert_flip_char(port, 0, TTY_FRAME);
tty_flip_buffer_push(port);
gsm_control_reply(gsm, CMD_RLS, data, clen);
}
static void gsm_dlci_begin_close(struct gsm_dlci *dlci);
/**
* gsm_control_message - DLCI 0 control processing
* @gsm: our GSM mux
* @command: the command EA
* @data: data beyond the command/length EAs
* @clen: length
*
* Input processor for control messages from the other end of the link.
* Processes the incoming request and queues a response frame or an
* NSC response if not supported
*/
static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
const u8 *data, int clen)
{
u8 buf[1];
switch (command) {
case CMD_CLD: {
struct gsm_dlci *dlci = gsm->dlci[0];
/* Modem wishes to close down */
if (dlci) {
dlci->dead = true;
gsm->dead = true;
gsm_dlci_begin_close(dlci);
}
}
break;
case CMD_TEST:
/* Modem wishes to test, reply with the data */
gsm_control_reply(gsm, CMD_TEST, data, clen);
break;
case CMD_FCON:
/* Modem can accept data again */
gsm->constipated = false;
gsm_control_reply(gsm, CMD_FCON, NULL, 0);
/* Kick the link in case it is idling */
gsmld_write_trigger(gsm);
break;
case CMD_FCOFF:
/* Modem wants us to STFU */
gsm->constipated = true;
gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
break;
case CMD_MSC:
/* Out of band modem line change indicator for a DLCI */
gsm_control_modem(gsm, data, clen);
break;
case CMD_RLS:
/* Out of band error reception for a DLCI */
gsm_control_rls(gsm, data, clen);
break;
case CMD_PSC:
/* Modem wishes to enter power saving state */
gsm_control_reply(gsm, CMD_PSC, NULL, 0);
break;
/* Optional commands */
case CMD_PN:
/* Modem sends a parameter negotiation command */
gsm_control_negotiation(gsm, 1, data, clen);
break;
/* Optional unsupported commands */
case CMD_RPN: /* Remote port negotiation */
case CMD_SNC: /* Service negotiation command */
gsm->unsupported++;
fallthrough;
default:
/* Reply to bad commands with an NSC */
buf[0] = command;
gsm_control_reply(gsm, CMD_NSC, buf, 1);
break;
}
}
/**
* gsm_control_response - process a response to our control
* @gsm: our GSM mux
* @command: the command (response) EA
* @data: data beyond the command/length EA
* @clen: length
*
* Process a response to an outstanding command. We only allow a single
* control message in flight so this is fairly easy. All the clean up
* is done by the caller, we just update the fields, flag it as done
* and return
*/
static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
const u8 *data, int clen)
{
struct gsm_control *ctrl;
struct gsm_dlci *dlci;
unsigned long flags;
spin_lock_irqsave(&gsm->control_lock, flags);
ctrl = gsm->pending_cmd;
dlci = gsm->dlci[0];
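	/* gsm_control_reply() clears the C/R bit in the type octet, so set it
	 * here to let the value compare equal to the command we sent out.
	 */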
command |= 1;
/* Does the reply match our command */
if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) {
/* Our command was replied to, kill the retry timer */
del_timer(&gsm->t2_timer);
gsm->pending_cmd = NULL;
/* Rejected by the other end */
if (command == CMD_NSC)
ctrl->error = -EOPNOTSUPP;
ctrl->done = 1;
wake_up(&gsm->event);
/* Or did we receive the PN response to our PN command */
} else if (command == CMD_PN) {
gsm_control_negotiation(gsm, 0, data, clen);
/* Or did we receive the TEST response to our TEST command */
} else if (command == CMD_TEST && clen == 1 && *data == gsm->ka_num) {
gsm->ka_retries = -1; /* trigger new keep-alive message */
if (dlci && !dlci->dead)
mod_timer(&gsm->ka_timer, jiffies + gsm->keep_alive * HZ / 100);
}
spin_unlock_irqrestore(&gsm->control_lock, flags);
}
/**
* gsm_control_keep_alive - check timeout or start keep-alive
* @t: timer contained in our gsm object
*
* Called off the keep-alive timer expiry signaling that our link
* partner is not responding anymore. Link will be closed.
 * This is also called to start up our timer.
*/
static void gsm_control_keep_alive(struct timer_list *t)
{
struct gsm_mux *gsm = from_timer(gsm, t, ka_timer);
unsigned long flags;
spin_lock_irqsave(&gsm->control_lock, flags);
if (gsm->ka_num && gsm->ka_retries == 0) {
/* Keep-alive expired -> close the link */
if (debug & DBG_ERRORS)
pr_debug("%s keep-alive timed out\n", __func__);
spin_unlock_irqrestore(&gsm->control_lock, flags);
if (gsm->dlci[0])
gsm_dlci_begin_close(gsm->dlci[0]);
return;
} else if (gsm->keep_alive && gsm->dlci[0] && !gsm->dlci[0]->dead) {
if (gsm->ka_retries > 0) {
/* T2 expired for keep-alive -> resend */
gsm->ka_retries--;
} else {
/* Start keep-alive timer */
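			/* ka_num 0 means "no keep-alive pending" (see the
			 * check above), so skip it when the counter wraps.
			 */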
gsm->ka_num++;
if (!gsm->ka_num)
gsm->ka_num++;
gsm->ka_retries = (signed int)gsm->n2;
}
gsm_control_command(gsm, CMD_TEST, &gsm->ka_num,
sizeof(gsm->ka_num));
mod_timer(&gsm->ka_timer,
jiffies + gsm->t2 * HZ / 100);
}
spin_unlock_irqrestore(&gsm->control_lock, flags);
}
/**
* gsm_control_transmit - send control packet
* @gsm: gsm mux
* @ctrl: frame to send
*
* Send out a pending control command (called under control lock)
*/
static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
{
gsm_control_command(gsm, ctrl->cmd, ctrl->data, ctrl->len);
}
/**
* gsm_control_retransmit - retransmit a control frame
* @t: timer contained in our gsm object
*
* Called off the T2 timer expiry in order to retransmit control frames
* that have been lost in the system somewhere. The control_lock protects
* us from colliding with another sender or a receive completion event.
 * In that situation the timer may still fire in a small window but
* gsm->pending_cmd will be NULL and we just let the timer expire.
*/
static void gsm_control_retransmit(struct timer_list *t)
{
struct gsm_mux *gsm = from_timer(gsm, t, t2_timer);
struct gsm_control *ctrl;
unsigned long flags;
spin_lock_irqsave(&gsm->control_lock, flags);
ctrl = gsm->pending_cmd;
if (ctrl) {
if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) {
gsm->pending_cmd = NULL;
ctrl->error = -ETIMEDOUT;
ctrl->done = 1;
spin_unlock_irqrestore(&gsm->control_lock, flags);
wake_up(&gsm->event);
return;
}
gsm->cretries--;
gsm_control_transmit(gsm, ctrl);
mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
}
spin_unlock_irqrestore(&gsm->control_lock, flags);
}
/**
* gsm_control_send - send a control frame on DLCI 0
* @gsm: the GSM channel
* @command: command to send including CR bit
* @data: bytes of data (must be kmalloced)
* @clen: length of the block to send
*
* Queue and dispatch a control command. Only one command can be
* active at a time. In theory more can be outstanding but the matching
* gets really complicated so for now stick to one outstanding.
*/
static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
unsigned int command, u8 *data, int clen)
{
struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
GFP_ATOMIC);
unsigned long flags;
if (ctrl == NULL)
return NULL;
retry:
wait_event(gsm->event, gsm->pending_cmd == NULL);
spin_lock_irqsave(&gsm->control_lock, flags);
if (gsm->pending_cmd != NULL) {
spin_unlock_irqrestore(&gsm->control_lock, flags);
goto retry;
}
ctrl->cmd = command;
ctrl->data = data;
ctrl->len = clen;
gsm->pending_cmd = ctrl;
/* If DLCI0 is in ADM mode skip retries, it won't respond */
if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
gsm->cretries = 0;
else
gsm->cretries = gsm->n2;
mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
gsm_control_transmit(gsm, ctrl);
spin_unlock_irqrestore(&gsm->control_lock, flags);
return ctrl;
}
/**
* gsm_control_wait - wait for a control to finish
* @gsm: GSM mux
* @control: control we are waiting on
*
* Waits for the control to complete or time out. Frees any used
* resources and returns 0 for success, or an error if the remote
* rejected or ignored the request.
*/
static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
{
int err;
wait_event(gsm->event, control->done == 1);
err = control->error;
kfree(control);
return err;
}
/*
* DLCI level handling: Needs krefs
*/
/*
* State transitions and timers
*/
/**
* gsm_dlci_close - a DLCI has closed
* @dlci: DLCI that closed
*
* Perform processing when moving a DLCI into closed state. If there
* is an attached tty this is hung up
*/
static void gsm_dlci_close(struct gsm_dlci *dlci)
{
del_timer(&dlci->t1);
if (debug & DBG_ERRORS)
pr_debug("DLCI %d goes closed.\n", dlci->addr);
dlci->state = DLCI_CLOSED;
/* Prevent us from sending data before the link is up again */
dlci->constipated = true;
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
gsm_dlci_clear_queues(dlci->gsm, dlci);
/* Ensure that gsmtty_open() can return. */
tty_port_set_initialized(&dlci->port, false);
wake_up_interruptible(&dlci->port.open_wait);
} else {
del_timer(&dlci->gsm->ka_timer);
dlci->gsm->dead = true;
}
/* A DLCI 0 close is a MUX termination so we need to kick that
back to userspace somehow */
gsm_dlci_data_kick(dlci);
wake_up_all(&dlci->gsm->event);
}
/**
* gsm_dlci_open - a DLCI has opened
* @dlci: DLCI that opened
*
* Perform processing when moving a DLCI into open state.
*/
static void gsm_dlci_open(struct gsm_dlci *dlci)
{
struct gsm_mux *gsm = dlci->gsm;
/* Note that SABM UA .. SABM UA first UA lost can mean that we go
open -> open */
del_timer(&dlci->t1);
/* This will let a tty open continue */
dlci->state = DLCI_OPEN;
dlci->constipated = false;
if (debug & DBG_ERRORS)
pr_debug("DLCI %d goes open.\n", dlci->addr);
/* Send current modem state */
if (dlci->addr) {
gsm_modem_update(dlci, 0);
} else {
/* Start keep-alive control */
gsm->ka_num = 0;
gsm->ka_retries = -1;
mod_timer(&gsm->ka_timer,
jiffies + gsm->keep_alive * HZ / 100);
}
gsm_dlci_data_kick(dlci);
wake_up(&dlci->gsm->event);
}
/**
* gsm_dlci_negotiate - start parameter negotiation
* @dlci: DLCI to open
*
* Starts the parameter negotiation for the new DLCI. This needs to be done
 * before the DLCI initializes the channel via SABM.
*/
static int gsm_dlci_negotiate(struct gsm_dlci *dlci)
{
struct gsm_mux *gsm = dlci->gsm;
struct gsm_dlci_param_bits params;
int ret;
ret = gsm_encode_params(dlci, ¶ms);
if (ret != 0)
return ret;
	/* We cannot wait asynchronously for the command response with
* gsm_command() and gsm_control_wait() at this point.
*/
ret = gsm_control_command(gsm, CMD_PN, (const u8 *)¶ms,
sizeof(params));
return ret;
}
/**
* gsm_dlci_t1 - T1 timer expiry
* @t: timer contained in the DLCI that opened
*
* The T1 timer handles retransmits of control frames (essentially of
* SABM and DISC). We resend the command until the retry count runs out
* in which case an opening port goes back to closed and a closing port
* is simply put into closed state (any further frames from the other
* end will get a DM response)
*
* Some control dlci can stay in ADM mode with other dlci working just
* fine. In that case we can just keep the control dlci open after the
* DLCI_OPENING retries time out.
*/
static void gsm_dlci_t1(struct timer_list *t)
{
struct gsm_dlci *dlci = from_timer(dlci, t, t1);
struct gsm_mux *gsm = dlci->gsm;
switch (dlci->state) {
case DLCI_CONFIGURE:
if (dlci->retries && gsm_dlci_negotiate(dlci) == 0) {
dlci->retries--;
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else {
gsm->open_error++;
gsm_dlci_begin_close(dlci); /* prevent half open link */
}
break;
case DLCI_OPENING:
if (dlci->retries) {
dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else if (!dlci->addr && gsm->control == (DM | PF)) {
if (debug & DBG_ERRORS)
pr_info("DLCI %d opening in ADM mode.\n",
dlci->addr);
dlci->mode = DLCI_MODE_ADM;
gsm_dlci_open(dlci);
} else {
gsm->open_error++;
gsm_dlci_begin_close(dlci); /* prevent half open link */
}
break;
case DLCI_CLOSING:
if (dlci->retries) {
dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, DISC|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else
gsm_dlci_close(dlci);
break;
default:
pr_debug("%s: unhandled state: %d\n", __func__, dlci->state);
break;
}
}
/**
* gsm_dlci_begin_open - start channel open procedure
* @dlci: DLCI to open
*
* Commence opening a DLCI from the Linux side. We issue SABM messages
* to the modem which should then reply with a UA or ADM, at which point
* we will move into open state. Opening is done asynchronously with retry
* running off timers and the responses.
* Parameter negotiation is performed before SABM if required.
*/
static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
{
struct gsm_mux *gsm = dlci ? dlci->gsm : NULL;
bool need_pn = false;
if (!gsm)
return;
if (dlci->addr != 0) {
if (gsm->adaption != 1 || gsm->adaption != dlci->adaption)
need_pn = true;
if (dlci->prio != (roundup(dlci->addr + 1, 8) - 1))
need_pn = true;
if (gsm->ftype != dlci->ftype)
need_pn = true;
}
switch (dlci->state) {
case DLCI_CLOSED:
case DLCI_WAITING_CONFIG:
case DLCI_CLOSING:
dlci->retries = gsm->n2;
if (!need_pn) {
dlci->state = DLCI_OPENING;
gsm_command(gsm, dlci->addr, SABM|PF);
} else {
/* Configure DLCI before setup */
dlci->state = DLCI_CONFIGURE;
if (gsm_dlci_negotiate(dlci) != 0) {
gsm_dlci_close(dlci);
return;
}
}
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
break;
default:
break;
}
}
/**
* gsm_dlci_set_opening - change state to opening
* @dlci: DLCI to open
*
* Change internal state to wait for DLCI open from initiator side.
* We set off timers and responses upon reception of an SABM.
*/
static void gsm_dlci_set_opening(struct gsm_dlci *dlci)
{
switch (dlci->state) {
case DLCI_CLOSED:
case DLCI_WAITING_CONFIG:
case DLCI_CLOSING:
dlci->state = DLCI_OPENING;
break;
default:
break;
}
}
/**
* gsm_dlci_set_wait_config - wait for channel configuration
* @dlci: DLCI to configure
*
* Wait for a DLCI configuration from the application.
*/
static void gsm_dlci_set_wait_config(struct gsm_dlci *dlci)
{
switch (dlci->state) {
case DLCI_CLOSED:
case DLCI_CLOSING:
dlci->state = DLCI_WAITING_CONFIG;
break;
default:
break;
}
}
/**
 * gsm_dlci_begin_close - start channel close procedure
 * @dlci: DLCI to close
*
* Commence closing a DLCI from the Linux side. We issue DISC messages
* to the modem which should then reply with a UA, at which point we
* will move into closed state. Closing is done asynchronously with retry
* off timers. We may also receive a DM reply from the other end which
* indicates the channel was already closed.
*/
static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
{
struct gsm_mux *gsm = dlci->gsm;
if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING)
return;
dlci->retries = gsm->n2;
dlci->state = DLCI_CLOSING;
gsm_command(dlci->gsm, dlci->addr, DISC|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
wake_up_interruptible(&gsm->event);
}
/**
* gsm_dlci_data - data arrived
* @dlci: channel
* @data: block of bytes received
* @clen: length of received block
*
* A UI or UIH frame has arrived which contains data for a channel
* other than the control channel. If the relevant virtual tty is
* open we shovel the bits down it, if not we drop them.
*/
static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
{
/* krefs .. */
struct tty_port *port = &dlci->port;
struct tty_struct *tty;
unsigned int modem = 0;
int len;
if (debug & DBG_TTY)
pr_debug("%d bytes for tty\n", clen);
switch (dlci->adaption) {
/* Unsupported types */
case 4: /* Packetised interruptible data */
break;
	case 3: /* Packetised uninterruptible voice/data */
break;
case 2: /* Asynchronous serial with line state in each frame */
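		/* The frame starts with EA-encoded line state octets; consume
		 * them before handing the remaining payload to the tty.
		 */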
len = gsm_read_ea_val(&modem, data, clen);
if (len < 1)
return;
tty = tty_port_tty_get(port);
if (tty) {
gsm_process_modem(tty, dlci, modem, len);
tty_wakeup(tty);
tty_kref_put(tty);
}
/* Skip processed modem data */
data += len;
clen -= len;
fallthrough;
case 1: /* Line state will go via DLCI 0 controls only */
default:
tty_insert_flip_string(port, data, clen);
tty_flip_buffer_push(port);
}
}
/**
* gsm_dlci_command - data arrived on control channel
* @dlci: channel
* @data: block of bytes received
* @len: length of received block
*
* A UI or UIH frame has arrived which contains data for DLCI 0 the
* control channel. This should contain a command EA followed by
* control data bytes. The command EA contains a command/response bit
* and we divide up the work accordingly.
*/
static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
{
/* See what command is involved */
unsigned int command = 0;
unsigned int clen = 0;
unsigned int dlen;
/* read the command */
dlen = gsm_read_ea_val(&command, data, len);
len -= dlen;
data += dlen;
/* read any control data */
dlen = gsm_read_ea_val(&clen, data, len);
len -= dlen;
data += dlen;
/* Malformed command? */
if (clen > len) {
dlci->gsm->malformed++;
return;
}
if (command & 1)
gsm_control_message(dlci->gsm, command, data, clen);
else
gsm_control_response(dlci->gsm, command, data, clen);
}
/**
* gsm_kick_timer - transmit if possible
* @t: timer contained in our gsm object
*
* Transmit data from DLCIs if the queue is empty. We can't rely on
* a tty wakeup except when we filled the pipe so we need to fire off
* new data ourselves in other cases.
*/
static void gsm_kick_timer(struct timer_list *t)
{
struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
unsigned long flags;
int sent = 0;
spin_lock_irqsave(&gsm->tx_lock, flags);
/* If we have nothing running then we need to fire up */
if (gsm->tx_bytes < TX_THRESH_LO)
sent = gsm_dlci_data_sweep(gsm);
spin_unlock_irqrestore(&gsm->tx_lock, flags);
if (sent && debug & DBG_DATA)
pr_info("%s TX queue stalled\n", __func__);
}
/**
* gsm_dlci_copy_config_values - copy DLCI configuration
* @dlci: source DLCI
* @dc: configuration structure to fill
*/
static void gsm_dlci_copy_config_values(struct gsm_dlci *dlci, struct gsm_dlci_config *dc)
{
memset(dc, 0, sizeof(*dc));
dc->channel = (u32)dlci->addr;
dc->adaption = (u32)dlci->adaption;
dc->mtu = (u32)dlci->mtu;
dc->priority = (u32)dlci->prio;
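	/* Report the frame type with the same encoding that
	 * gsm_dlci_config() accepts: 1 = UIH, 2 = UI.
	 */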
if (dlci->ftype == UIH)
dc->i = 1;
else
dc->i = 2;
dc->k = (u32)dlci->k;
}
/**
* gsm_dlci_config - configure DLCI from configuration
* @dlci: DLCI to configure
* @dc: DLCI configuration
* @open: open DLCI after configuration?
*/
static int gsm_dlci_config(struct gsm_dlci *dlci, struct gsm_dlci_config *dc, int open)
{
struct gsm_mux *gsm;
bool need_restart = false;
bool need_open = false;
unsigned int i;
/*
* Check that userspace doesn't put stuff in here to prevent breakages
* in the future.
*/
for (i = 0; i < ARRAY_SIZE(dc->reserved); i++)
if (dc->reserved[i])
return -EINVAL;
if (!dlci)
return -EINVAL;
gsm = dlci->gsm;
/* Stuff we don't support yet - I frame transport */
if (dc->adaption != 1 && dc->adaption != 2)
return -EOPNOTSUPP;
if (dc->mtu > MAX_MTU || dc->mtu < MIN_MTU || dc->mtu > gsm->mru)
return -EINVAL;
if (dc->priority >= 64)
return -EINVAL;
if (dc->i == 0 || dc->i > 2) /* UIH and UI only */
return -EINVAL;
if (dc->k > 7)
return -EINVAL;
if (dc->flags & ~GSM_FL_RESTART) /* allow future extensions */
return -EINVAL;
/*
* See what is needed for reconfiguration
*/
/* Framing fields */
if (dc->adaption != dlci->adaption)
need_restart = true;
if (dc->mtu != dlci->mtu)
need_restart = true;
if (dc->i != dlci->ftype)
need_restart = true;
/* Requires care */
if (dc->priority != dlci->prio)
need_restart = true;
if (dc->flags & GSM_FL_RESTART)
need_restart = true;
if ((open && gsm->wait_config) || need_restart)
need_open = true;
if (dlci->state == DLCI_WAITING_CONFIG) {
need_restart = false;
need_open = true;
}
/*
* Close down what is needed, restart and initiate the new
* configuration.
*/
if (need_restart) {
gsm_dlci_begin_close(dlci);
wait_event_interruptible(gsm->event, dlci->state == DLCI_CLOSED);
if (signal_pending(current))
return -EINTR;
}
/*
* Setup the new configuration values
*/
dlci->adaption = (int)dc->adaption;
if (dc->mtu)
dlci->mtu = (unsigned int)dc->mtu;
else
dlci->mtu = gsm->mtu;
if (dc->priority)
dlci->prio = (u8)dc->priority;
else
dlci->prio = roundup(dlci->addr + 1, 8) - 1;
if (dc->i == 1)
dlci->ftype = UIH;
else if (dc->i == 2)
dlci->ftype = UI;
if (dc->k)
dlci->k = (u8)dc->k;
else
dlci->k = gsm->k;
if (need_open) {
if (gsm->initiator)
gsm_dlci_begin_open(dlci);
else
gsm_dlci_set_opening(dlci);
}
return 0;
}
/*
* Allocate/Free DLCI channels
*/
/**
* gsm_dlci_alloc - allocate a DLCI
* @gsm: GSM mux
* @addr: address of the DLCI
*
* Allocate and install a new DLCI object into the GSM mux.
*
* FIXME: review locking races
*/
static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
{
struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC);
if (dlci == NULL)
return NULL;
spin_lock_init(&dlci->lock);
mutex_init(&dlci->mutex);
if (kfifo_alloc(&dlci->fifo, TX_SIZE, GFP_KERNEL) < 0) {
kfree(dlci);
return NULL;
}
skb_queue_head_init(&dlci->skb_list);
timer_setup(&dlci->t1, gsm_dlci_t1, 0);
tty_port_init(&dlci->port);
dlci->port.ops = &gsm_port_ops;
dlci->gsm = gsm;
dlci->addr = addr;
dlci->adaption = gsm->adaption;
dlci->mtu = gsm->mtu;
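	/* Default priority: 0 for the control channel, otherwise the top
	 * value of the DLCI's block of eight (e.g. 1..7 -> 7), the same
	 * formula tested in gsm_dlci_begin_open().
	 */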
if (addr == 0)
dlci->prio = 0;
else
dlci->prio = roundup(addr + 1, 8) - 1;
dlci->ftype = gsm->ftype;
dlci->k = gsm->k;
dlci->state = DLCI_CLOSED;
if (addr) {
dlci->data = gsm_dlci_data;
/* Prevent us from sending data before the link is up */
dlci->constipated = true;
} else {
dlci->data = gsm_dlci_command;
}
gsm->dlci[addr] = dlci;
return dlci;
}
/**
* gsm_dlci_free - free DLCI
* @port: tty port for DLCI to free
*
* Free up a DLCI.
*
* Can sleep.
*/
static void gsm_dlci_free(struct tty_port *port)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
timer_shutdown_sync(&dlci->t1);
dlci->gsm->dlci[dlci->addr] = NULL;
kfifo_free(&dlci->fifo);
while ((dlci->skb = skb_dequeue(&dlci->skb_list)))
dev_kfree_skb(dlci->skb);
kfree(dlci);
}
static inline void dlci_get(struct gsm_dlci *dlci)
{
tty_port_get(&dlci->port);
}
static inline void dlci_put(struct gsm_dlci *dlci)
{
tty_port_put(&dlci->port);
}
static void gsm_destroy_network(struct gsm_dlci *dlci);
/**
* gsm_dlci_release - release DLCI
* @dlci: DLCI to destroy
*
* Release a DLCI. Actual free is deferred until either
* mux is closed or tty is closed - whichever is last.
*
* Can sleep.
*/
static void gsm_dlci_release(struct gsm_dlci *dlci)
{
struct tty_struct *tty = tty_port_tty_get(&dlci->port);
if (tty) {
mutex_lock(&dlci->mutex);
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
/* We cannot use tty_hangup() because in tty_kref_put() the tty
* driver assumes that the hangup queue is free and reuses it to
* queue release_one_tty() -> NULL pointer panic in
* process_one_work().
*/
tty_vhangup(tty);
tty_port_tty_set(&dlci->port, NULL);
tty_kref_put(tty);
}
dlci->state = DLCI_CLOSED;
dlci_put(dlci);
}
/*
* LAPBish link layer logic
*/
/**
* gsm_queue - a GSM frame is ready to process
* @gsm: pointer to our gsm mux
*
* At this point in time a frame has arrived and been demangled from
* the line encoding. All the differences between the encodings have
* been handled below us and the frame is unpacked into the structures.
* The fcs holds the header FCS but any data FCS must be added here.
*/
static void gsm_queue(struct gsm_mux *gsm)
{
struct gsm_dlci *dlci;
u8 cr;
int address;
if (gsm->fcs != GOOD_FCS) {
gsm->bad_fcs++;
if (debug & DBG_DATA)
pr_debug("BAD FCS %02x\n", gsm->fcs);
return;
}
address = gsm->address >> 1;
if (address >= NUM_DLCI)
goto invalid;
cr = gsm->address & 1; /* C/R bit */
cr ^= gsm->initiator ? 0 : 1; /* Flip so 1 always means command */
gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
dlci = gsm->dlci[address];
switch (gsm->control) {
case SABM|PF:
if (cr == 1) {
gsm->open_error++;
goto invalid;
}
if (dlci == NULL)
dlci = gsm_dlci_alloc(gsm, address);
if (dlci == NULL) {
gsm->open_error++;
return;
}
if (dlci->dead)
gsm_response(gsm, address, DM|PF);
else {
gsm_response(gsm, address, UA|PF);
gsm_dlci_open(dlci);
}
break;
case DISC|PF:
if (cr == 1)
goto invalid;
if (dlci == NULL || dlci->state == DLCI_CLOSED) {
gsm_response(gsm, address, DM|PF);
return;
}
/* Real close complete */
gsm_response(gsm, address, UA|PF);
gsm_dlci_close(dlci);
break;
case UA|PF:
if (cr == 0 || dlci == NULL)
break;
switch (dlci->state) {
case DLCI_CLOSING:
gsm_dlci_close(dlci);
break;
case DLCI_OPENING:
gsm_dlci_open(dlci);
break;
default:
pr_debug("%s: unhandled state: %d\n", __func__,
dlci->state);
break;
}
break;
case DM: /* DM can be valid unsolicited */
case DM|PF:
if (cr)
goto invalid;
if (dlci == NULL)
return;
gsm_dlci_close(dlci);
break;
case UI:
case UI|PF:
case UIH:
case UIH|PF:
if (dlci == NULL || dlci->state != DLCI_OPEN) {
gsm_response(gsm, address, DM|PF);
return;
}
dlci->data(dlci, gsm->buf, gsm->len);
break;
default:
goto invalid;
}
return;
invalid:
gsm->malformed++;
return;
}
/**
* gsm0_receive - perform processing for non-transparency
* @gsm: gsm data for this ldisc instance
* @c: character
*
* Receive bytes in gsm mode 0
*/
static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
{
unsigned int len;
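	/* Basic option framing as parsed below:
	 *   SOF | address EA | control | length EA (one or two octets) |
	 *   data | FCS | SOF
	 */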
switch (gsm->state) {
case GSM_SEARCH: /* SOF marker */
if (c == GSM0_SOF) {
gsm->state = GSM_ADDRESS;
gsm->address = 0;
gsm->len = 0;
gsm->fcs = INIT_FCS;
}
break;
case GSM_ADDRESS: /* Address EA */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
if (gsm_read_ea(&gsm->address, c))
gsm->state = GSM_CONTROL;
break;
case GSM_CONTROL: /* Control Byte */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
gsm->control = c;
gsm->state = GSM_LEN0;
break;
case GSM_LEN0: /* Length EA */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
if (gsm_read_ea(&gsm->len, c)) {
if (gsm->len > gsm->mru) {
gsm->bad_size++;
gsm->state = GSM_SEARCH;
break;
}
gsm->count = 0;
if (!gsm->len)
gsm->state = GSM_FCS;
else
gsm->state = GSM_DATA;
break;
}
gsm->state = GSM_LEN1;
break;
case GSM_LEN1:
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
len = c;
gsm->len |= len << 7;
if (gsm->len > gsm->mru) {
gsm->bad_size++;
gsm->state = GSM_SEARCH;
break;
}
gsm->count = 0;
if (!gsm->len)
gsm->state = GSM_FCS;
else
gsm->state = GSM_DATA;
break;
case GSM_DATA: /* Data */
gsm->buf[gsm->count++] = c;
if (gsm->count == gsm->len) {
/* Calculate final FCS for UI frames over all data */
if ((gsm->control & ~PF) != UIH) {
gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
gsm->count);
}
gsm->state = GSM_FCS;
}
break;
case GSM_FCS: /* FCS follows the packet */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
gsm->state = GSM_SSOF;
break;
case GSM_SSOF:
gsm->state = GSM_SEARCH;
if (c == GSM0_SOF)
gsm_queue(gsm);
else
gsm->bad_size++;
break;
default:
pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
break;
}
}
/**
* gsm1_receive - perform processing for non-transparency
* @gsm: gsm data for this ldisc instance
* @c: character
*
* Receive bytes in mode 1 (Advanced option)
*/
static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
{
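	/* Advanced option framing: SOF | address | control | data | FCS | SOF.
	 * Reserved octets inside the body are sent as GSM1_ESCAPE followed by
	 * the octet XORed with GSM1_ESCAPE_BITS; the XOR is undone below.
	 */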
/* handle XON/XOFF */
if ((c & ISO_IEC_646_MASK) == XON) {
gsm->constipated = true;
return;
} else if ((c & ISO_IEC_646_MASK) == XOFF) {
gsm->constipated = false;
/* Kick the link in case it is idling */
gsmld_write_trigger(gsm);
return;
}
if (c == GSM1_SOF) {
		/* An SOF ends the frame; only valid once in the data state */
if (gsm->state == GSM_DATA) {
if (gsm->count < 1) {
				/* Missing FCS */
gsm->malformed++;
gsm->state = GSM_START;
return;
}
/* Remove the FCS from data */
gsm->count--;
if ((gsm->control & ~PF) != UIH) {
/* Calculate final FCS for UI frames over all
* data but FCS
*/
gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
gsm->count);
}
/* Add the FCS itself to test against GOOD_FCS */
gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
gsm->len = gsm->count;
gsm_queue(gsm);
gsm->state = GSM_START;
return;
}
/* Any partial frame was a runt so go back to start */
if (gsm->state != GSM_START) {
if (gsm->state != GSM_SEARCH)
gsm->malformed++;
gsm->state = GSM_START;
}
/* A SOF in GSM_START means we are still reading idling or
framing bytes */
return;
}
if (c == GSM1_ESCAPE) {
gsm->escape = true;
return;
}
/* Only an unescaped SOF gets us out of GSM search */
if (gsm->state == GSM_SEARCH)
return;
if (gsm->escape) {
c ^= GSM1_ESCAPE_BITS;
gsm->escape = false;
}
switch (gsm->state) {
case GSM_START: /* First byte after SOF */
gsm->address = 0;
gsm->state = GSM_ADDRESS;
gsm->fcs = INIT_FCS;
fallthrough;
case GSM_ADDRESS: /* Address continuation */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
if (gsm_read_ea(&gsm->address, c))
gsm->state = GSM_CONTROL;
break;
case GSM_CONTROL: /* Control Byte */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
gsm->control = c;
gsm->count = 0;
gsm->state = GSM_DATA;
break;
case GSM_DATA: /* Data */
if (gsm->count > gsm->mru) { /* Allow one for the FCS */
gsm->state = GSM_OVERRUN;
gsm->bad_size++;
} else
gsm->buf[gsm->count++] = c;
break;
case GSM_OVERRUN: /* Over-long - eg a dropped SOF */
break;
default:
pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
break;
}
}
/**
* gsm_error - handle tty error
* @gsm: ldisc data
*
* Handle an error in the receipt of data for a frame. Currently we just
* go back to hunting for a SOF.
*
 * FIXME: better diagnostics?
*/
static void gsm_error(struct gsm_mux *gsm)
{
gsm->state = GSM_SEARCH;
gsm->io_error++;
}
/**
* gsm_cleanup_mux - generic GSM protocol cleanup
* @gsm: our mux
* @disc: disconnect link?
*
* Clean up the bits of the mux which are the same for all framing
* protocols. Remove the mux from the mux table, stop all the timers
* and then shut down each device hanging up the channels as we go.
*/
static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
{
int i;
struct gsm_dlci *dlci;
struct gsm_msg *txq, *ntxq;
gsm->dead = true;
mutex_lock(&gsm->mutex);
dlci = gsm->dlci[0];
if (dlci) {
if (disc && dlci->state != DLCI_CLOSED) {
gsm_dlci_begin_close(dlci);
wait_event(gsm->event, dlci->state == DLCI_CLOSED);
}
dlci->dead = true;
}
/* Finish outstanding timers, making sure they are done */
del_timer_sync(&gsm->kick_timer);
del_timer_sync(&gsm->t2_timer);
del_timer_sync(&gsm->ka_timer);
/* Finish writing to ldisc */
flush_work(&gsm->tx_work);
/* Free up any link layer users and finally the control channel */
if (gsm->has_devices) {
gsm_unregister_devices(gsm_tty_driver, gsm->num);
gsm->has_devices = false;
}
for (i = NUM_DLCI - 1; i >= 0; i--)
if (gsm->dlci[i]) {
gsm_dlci_release(gsm->dlci[i]);
gsm->dlci[i] = NULL;
}
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
tty_ldisc_flush(gsm->tty);
list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list)
kfree(txq);
INIT_LIST_HEAD(&gsm->tx_ctrl_list);
list_for_each_entry_safe(txq, ntxq, &gsm->tx_data_list, list)
kfree(txq);
INIT_LIST_HEAD(&gsm->tx_data_list);
}
/**
* gsm_activate_mux - generic GSM setup
* @gsm: our mux
*
* Set up the bits of the mux which are the same for all framing
* protocols. Add the mux to the mux table so it can be opened and
* finally kick off connecting to DLCI 0 on the modem.
*/
static int gsm_activate_mux(struct gsm_mux *gsm)
{
struct gsm_dlci *dlci;
int ret;
dlci = gsm_dlci_alloc(gsm, 0);
if (dlci == NULL)
return -ENOMEM;
if (gsm->encoding == GSM_BASIC_OPT)
gsm->receive = gsm0_receive;
else
gsm->receive = gsm1_receive;
ret = gsm_register_devices(gsm_tty_driver, gsm->num);
if (ret)
return ret;
gsm->has_devices = true;
gsm->dead = false; /* Tty opens are now permissible */
return 0;
}
/**
* gsm_free_mux - free up a mux
* @gsm: mux to free
*
* Dispose of allocated resources for a dead mux
*/
static void gsm_free_mux(struct gsm_mux *gsm)
{
int i;
for (i = 0; i < MAX_MUX; i++) {
if (gsm == gsm_mux[i]) {
gsm_mux[i] = NULL;
break;
}
}
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
kfree(gsm);
}
/**
* gsm_free_muxr - free up a mux
* @ref: kreference to the mux to free
*
* Dispose of allocated resources for a dead mux
*/
static void gsm_free_muxr(struct kref *ref)
{
struct gsm_mux *gsm = container_of(ref, struct gsm_mux, ref);
gsm_free_mux(gsm);
}
static inline void mux_get(struct gsm_mux *gsm)
{
unsigned long flags;
spin_lock_irqsave(&gsm_mux_lock, flags);
kref_get(&gsm->ref);
spin_unlock_irqrestore(&gsm_mux_lock, flags);
}
static inline void mux_put(struct gsm_mux *gsm)
{
unsigned long flags;
spin_lock_irqsave(&gsm_mux_lock, flags);
kref_put(&gsm->ref, gsm_free_muxr);
spin_unlock_irqrestore(&gsm_mux_lock, flags);
}
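/* Each mux instance owns NUM_DLCI consecutive tty lines; these helpers map
 * between a mux number and its base line.
 */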
static inline unsigned int mux_num_to_base(struct gsm_mux *gsm)
{
return gsm->num * NUM_DLCI;
}
static inline unsigned int mux_line_to_num(unsigned int line)
{
return line / NUM_DLCI;
}
/**
* gsm_alloc_mux - allocate a mux
*
* Creates a new mux ready for activation.
*/
static struct gsm_mux *gsm_alloc_mux(void)
{
int i;
struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
if (gsm == NULL)
return NULL;
gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL);
if (gsm->buf == NULL) {
kfree(gsm);
return NULL;
}
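	/* Transmit frame buffer sized for the worst case where, under the
	 * advanced option, every octet needs escaping - hence the factor of
	 * two (an estimate; see the GSM1_ESCAPE handling).
	 */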
gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL);
if (gsm->txframe == NULL) {
kfree(gsm->buf);
kfree(gsm);
return NULL;
}
spin_lock_init(&gsm->lock);
mutex_init(&gsm->mutex);
kref_init(&gsm->ref);
INIT_LIST_HEAD(&gsm->tx_ctrl_list);
INIT_LIST_HEAD(&gsm->tx_data_list);
timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
timer_setup(&gsm->ka_timer, gsm_control_keep_alive, 0);
INIT_WORK(&gsm->tx_work, gsmld_write_task);
init_waitqueue_head(&gsm->event);
spin_lock_init(&gsm->control_lock);
spin_lock_init(&gsm->tx_lock);
gsm->t1 = T1;
gsm->t2 = T2;
gsm->t3 = T3;
gsm->n2 = N2;
gsm->k = K;
gsm->ftype = UIH;
gsm->adaption = 1;
gsm->encoding = GSM_ADV_OPT;
gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
gsm->mtu = 64;
gsm->dead = true; /* Avoid early tty opens */
gsm->wait_config = false; /* Disabled */
gsm->keep_alive = 0; /* Disabled */
/* Store the instance to the mux array or abort if no space is
* available.
*/
spin_lock(&gsm_mux_lock);
for (i = 0; i < MAX_MUX; i++) {
if (!gsm_mux[i]) {
gsm_mux[i] = gsm;
gsm->num = i;
break;
}
}
spin_unlock(&gsm_mux_lock);
if (i == MAX_MUX) {
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
kfree(gsm);
return NULL;
}
return gsm;
}
static void gsm_copy_config_values(struct gsm_mux *gsm,
struct gsm_config *c)
{
memset(c, 0, sizeof(*c));
c->adaption = gsm->adaption;
c->encapsulation = gsm->encoding;
c->initiator = gsm->initiator;
c->t1 = gsm->t1;
c->t2 = gsm->t2;
c->t3 = gsm->t3;
c->n2 = gsm->n2;
if (gsm->ftype == UIH)
c->i = 1;
else
c->i = 2;
pr_debug("Ftype %d i %d\n", gsm->ftype, c->i);
c->mru = gsm->mru;
c->mtu = gsm->mtu;
c->k = gsm->k;
}
static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
{
int need_close = 0;
int need_restart = 0;
/* Stuff we don't support yet - UI or I frame transport */
if (c->adaption != 1 && c->adaption != 2)
return -EOPNOTSUPP;
/* Check the MRU/MTU range looks sane */
if (c->mru < MIN_MTU || c->mtu < MIN_MTU)
return -EINVAL;
if (c->mru > MAX_MRU || c->mtu > MAX_MTU)
return -EINVAL;
if (c->t3 > MAX_T3)
return -EINVAL;
if (c->n2 > 255)
return -EINVAL;
if (c->encapsulation > 1) /* Basic, advanced, no I */
return -EINVAL;
if (c->initiator > 1)
return -EINVAL;
if (c->k > MAX_WINDOW_SIZE)
return -EINVAL;
if (c->i == 0 || c->i > 2) /* UIH and UI only */
return -EINVAL;
/*
* See what is needed for reconfiguration
*/
/* Timing fields */
if (c->t1 != 0 && c->t1 != gsm->t1)
need_restart = 1;
if (c->t2 != 0 && c->t2 != gsm->t2)
need_restart = 1;
if (c->encapsulation != gsm->encoding)
need_restart = 1;
if (c->adaption != gsm->adaption)
need_restart = 1;
/* Requires care */
if (c->initiator != gsm->initiator)
need_close = 1;
if (c->mru != gsm->mru)
need_restart = 1;
if (c->mtu != gsm->mtu)
need_restart = 1;
/*
* Close down what is needed, restart and initiate the new
* configuration. On the first time there is no DLCI[0]
* and closing or cleaning up is not necessary.
*/
if (need_close || need_restart)
gsm_cleanup_mux(gsm, true);
gsm->initiator = c->initiator;
gsm->mru = c->mru;
gsm->mtu = c->mtu;
gsm->encoding = c->encapsulation ? GSM_ADV_OPT : GSM_BASIC_OPT;
gsm->adaption = c->adaption;
gsm->n2 = c->n2;
if (c->i == 1)
gsm->ftype = UIH;
else if (c->i == 2)
gsm->ftype = UI;
if (c->t1)
gsm->t1 = c->t1;
if (c->t2)
gsm->t2 = c->t2;
if (c->t3)
gsm->t3 = c->t3;
if (c->k)
gsm->k = c->k;
/*
* FIXME: We need to separate activation/deactivation from adding
* and removing from the mux array
*/
if (gsm->dead) {
int ret = gsm_activate_mux(gsm);
if (ret)
return ret;
if (gsm->initiator)
gsm_dlci_begin_open(gsm->dlci[0]);
}
return 0;
}
static void gsm_copy_config_ext_values(struct gsm_mux *gsm,
struct gsm_config_ext *ce)
{
memset(ce, 0, sizeof(*ce));
ce->wait_config = gsm->wait_config ? 1 : 0;
ce->keep_alive = gsm->keep_alive;
}
static int gsm_config_ext(struct gsm_mux *gsm, struct gsm_config_ext *ce)
{
bool need_restart = false;
unsigned int i;
/*
* Check that userspace doesn't put stuff in here to prevent breakages
* in the future.
*/
for (i = 0; i < ARRAY_SIZE(ce->reserved); i++)
if (ce->reserved[i])
return -EINVAL;
if (ce->flags & ~GSM_FL_RESTART)
return -EINVAL;
/* Requires care */
if (ce->flags & GSM_FL_RESTART)
need_restart = true;
/*
* Close down what is needed, restart and initiate the new
* configuration. On the first time there is no DLCI[0]
* and closing or cleaning up is not necessary.
*/
if (need_restart)
gsm_cleanup_mux(gsm, true);
/*
* Setup the new configuration values
*/
gsm->wait_config = ce->wait_config ? true : false;
gsm->keep_alive = ce->keep_alive;
if (gsm->dead) {
int ret = gsm_activate_mux(gsm);
if (ret)
return ret;
if (gsm->initiator)
gsm_dlci_begin_open(gsm->dlci[0]);
}
return 0;
}
/**
* gsmld_output - write to link
* @gsm: our mux
* @data: bytes to output
* @len: size
*
* Write a block of data from the GSM mux to the data channel. This
* will eventually be serialized from above but at the moment isn't.
*/
static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
{
if (tty_write_room(gsm->tty) < len) {
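		/* Not enough room: request a write wakeup so that the tx work
		 * can retry once space frees up.
		 */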
set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
return -ENOSPC;
}
if (debug & DBG_DATA)
gsm_hex_dump_bytes(__func__, data, len);
return gsm->tty->ops->write(gsm->tty, data, len);
}
/**
* gsmld_write_trigger - schedule ldisc write task
* @gsm: our mux
*/
static void gsmld_write_trigger(struct gsm_mux *gsm)
{
if (!gsm || !gsm->dlci[0] || gsm->dlci[0]->dead)
return;
schedule_work(&gsm->tx_work);
}
/**
* gsmld_write_task - ldisc write task
* @work: our tx write work
*
* Writes out data to the ldisc if possible. We are doing this here to
* avoid dead-locking. This returns if no space or data is left for output.
*/
static void gsmld_write_task(struct work_struct *work)
{
struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
unsigned long flags;
int i, ret;
	/* All outstanding control channel messages and one data frame
	 * are sent.
*/
ret = -ENODEV;
spin_lock_irqsave(&gsm->tx_lock, flags);
if (gsm->tty)
ret = gsm_data_kick(gsm);
spin_unlock_irqrestore(&gsm->tx_lock, flags);
if (ret >= 0)
for (i = 0; i < NUM_DLCI; i++)
if (gsm->dlci[i])
tty_port_tty_wakeup(&gsm->dlci[i]->port);
}
/**
* gsmld_attach_gsm - mode set up
* @tty: our tty structure
* @gsm: our mux
*
* Set up the MUX for basic mode and commence connecting to the
* modem. Currently called from the line discipline set up but
* will need moving to an ioctl path.
*/
static void gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
gsm->tty = tty_kref_get(tty);
/* Turn off tty XON/XOFF handling to handle it explicitly. */
gsm->old_c_iflag = tty->termios.c_iflag;
tty->termios.c_iflag &= (IXON | IXOFF);
}
/**
* gsmld_detach_gsm - stop doing 0710 mux
* @tty: tty attached to the mux
* @gsm: mux
*
* Shutdown and then clean up the resources used by the line discipline
*/
static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
WARN_ON(tty != gsm->tty);
/* Restore tty XON/XOFF handling. */
gsm->tty->termios.c_iflag = gsm->old_c_iflag;
tty_kref_put(gsm->tty);
gsm->tty = NULL;
}
static void gsmld_receive_buf(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct gsm_mux *gsm = tty->disc_data;
char flags = TTY_NORMAL;
if (debug & DBG_DATA)
gsm_hex_dump_bytes(__func__, cp, count);
for (; count; count--, cp++) {
if (fp)
flags = *fp++;
switch (flags) {
case TTY_NORMAL:
if (gsm->receive)
gsm->receive(gsm, *cp);
break;
case TTY_OVERRUN:
case TTY_BREAK:
case TTY_PARITY:
case TTY_FRAME:
gsm_error(gsm);
break;
default:
WARN_ONCE(1, "%s: unknown flag %d\n",
tty_name(tty), flags);
break;
}
}
/* FASYNC if needed ? */
/* If clogged call tty_throttle(tty); */
}
/**
* gsmld_flush_buffer - clean input queue
* @tty: terminal device
*
 * Flush the input buffer. Called when the line discipline is
 * being closed or when the tty layer wants the buffer flushed
 * (e.g. at hangup).
*/
static void gsmld_flush_buffer(struct tty_struct *tty)
{
}
/**
* gsmld_close - close the ldisc for this tty
* @tty: device
*
* Called from the terminal layer when this line discipline is
 * being shut down, either because of a close or because of a
* discipline change. The function will not be called while other
* ldisc methods are in progress.
*/
static void gsmld_close(struct tty_struct *tty)
{
struct gsm_mux *gsm = tty->disc_data;
/* The ldisc locks and closes the port before calling our close. This
* means we have no way to do a proper disconnect. We will not bother
* to do one.
*/
gsm_cleanup_mux(gsm, false);
gsmld_detach_gsm(tty, gsm);
gsmld_flush_buffer(tty);
/* Do other clean up here */
mux_put(gsm);
}
/**
* gsmld_open - open an ldisc
* @tty: terminal to open
*
* Called when this line discipline is being attached to the
* terminal device. Can sleep. Called serialized so that no
* other events will occur in parallel. No further open will occur
* until a close.
*/
static int gsmld_open(struct tty_struct *tty)
{
struct gsm_mux *gsm;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (tty->ops->write == NULL)
return -EINVAL;
/* Attach our ldisc data */
gsm = gsm_alloc_mux();
if (gsm == NULL)
return -ENOMEM;
tty->disc_data = gsm;
tty->receive_room = 65536;
/* Attach the initial passive connection */
gsmld_attach_gsm(tty, gsm);
/* The mux will not be activated yet, we wait for correct
* configuration first.
*/
if (gsm->encoding == GSM_BASIC_OPT)
gsm->receive = gsm0_receive;
else
gsm->receive = gsm1_receive;
return 0;
}
/**
* gsmld_write_wakeup - asynchronous I/O notifier
* @tty: tty device
*
* Required for the ptys, serial driver etc. since processes
* that attach themselves to the master and rely on ASYNC
 * IO must be woken up.
*/
static void gsmld_write_wakeup(struct tty_struct *tty)
{
struct gsm_mux *gsm = tty->disc_data;
/* Queue poll */
gsmld_write_trigger(gsm);
}
/**
* gsmld_read - read function for tty
* @tty: tty device
* @file: file object
* @buf: userspace buffer pointer
* @nr: size of I/O
* @cookie: unused
* @offset: unused
*
* Perform reads for the line discipline. We are guaranteed that the
* line discipline will not be closed under us but we may get multiple
* parallel readers and must handle this ourselves. We may also get
* a hangup. Always called in user context, may sleep.
*
* This code must be sure never to sleep through a hangup.
*/
static ssize_t gsmld_read(struct tty_struct *tty, struct file *file, u8 *buf,
size_t nr, void **cookie, unsigned long offset)
{
return -EOPNOTSUPP;
}
/**
* gsmld_write - write function for tty
* @tty: tty device
* @file: file object
* @buf: userspace buffer pointer
* @nr: size of I/O
*
* Called when the owner of the device wants to send a frame
* itself (or some other control data). The data is transferred
* as-is and must be properly framed and checksummed as appropriate
* by userspace. Frames are either sent whole or not at all as this
 * avoids pain on the user side.
*/
static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
const u8 *buf, size_t nr)
{
struct gsm_mux *gsm = tty->disc_data;
unsigned long flags;
int space;
int ret;
if (!gsm)
return -ENODEV;
ret = -ENOBUFS;
spin_lock_irqsave(&gsm->tx_lock, flags);
space = tty_write_room(tty);
if (space >= nr)
ret = tty->ops->write(tty, buf, nr);
else
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
spin_unlock_irqrestore(&gsm->tx_lock, flags);
return ret;
}
/**
* gsmld_poll - poll method for N_GSM0710
* @tty: terminal device
* @file: file accessing it
* @wait: poll table
*
* Called when the line discipline is asked to poll() for data or
* for special events. This code is not serialized with respect to
* other events save open/close.
*
* This code must be sure never to sleep through a hangup.
* Called without the kernel lock held - fine
*/
static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
__poll_t mask = 0;
struct gsm_mux *gsm = tty->disc_data;
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
if (gsm->dead)
mask |= EPOLLHUP;
if (tty_hung_up_p(file))
mask |= EPOLLHUP;
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
mask |= EPOLLHUP;
if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
static int gsmld_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct gsm_config c;
struct gsm_config_ext ce;
struct gsm_dlci_config dc;
struct gsm_mux *gsm = tty->disc_data;
unsigned int base, addr;
struct gsm_dlci *dlci;
switch (cmd) {
case GSMIOC_GETCONF:
gsm_copy_config_values(gsm, &c);
if (copy_to_user((void __user *)arg, &c, sizeof(c)))
return -EFAULT;
return 0;
case GSMIOC_SETCONF:
if (copy_from_user(&c, (void __user *)arg, sizeof(c)))
return -EFAULT;
return gsm_config(gsm, &c);
case GSMIOC_GETFIRST:
base = mux_num_to_base(gsm);
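		/* First usable line is base + 1: DLCI 0 is the control channel */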
return put_user(base + 1, (__u32 __user *)arg);
case GSMIOC_GETCONF_EXT:
gsm_copy_config_ext_values(gsm, &ce);
if (copy_to_user((void __user *)arg, &ce, sizeof(ce)))
return -EFAULT;
return 0;
case GSMIOC_SETCONF_EXT:
if (copy_from_user(&ce, (void __user *)arg, sizeof(ce)))
return -EFAULT;
return gsm_config_ext(gsm, &ce);
case GSMIOC_GETCONF_DLCI:
if (copy_from_user(&dc, (void __user *)arg, sizeof(dc)))
return -EFAULT;
if (dc.channel == 0 || dc.channel >= NUM_DLCI)
return -EINVAL;
addr = array_index_nospec(dc.channel, NUM_DLCI);
dlci = gsm->dlci[addr];
if (!dlci) {
dlci = gsm_dlci_alloc(gsm, addr);
if (!dlci)
return -ENOMEM;
}
gsm_dlci_copy_config_values(dlci, &dc);
if (copy_to_user((void __user *)arg, &dc, sizeof(dc)))
return -EFAULT;
return 0;
case GSMIOC_SETCONF_DLCI:
if (copy_from_user(&dc, (void __user *)arg, sizeof(dc)))
return -EFAULT;
if (dc.channel == 0 || dc.channel >= NUM_DLCI)
return -EINVAL;
addr = array_index_nospec(dc.channel, NUM_DLCI);
dlci = gsm->dlci[addr];
if (!dlci) {
dlci = gsm_dlci_alloc(gsm, addr);
if (!dlci)
return -ENOMEM;
}
return gsm_dlci_config(dlci, &dc, 0);
default:
return n_tty_ioctl_helper(tty, cmd, arg);
}
}
/*
* Network interface
*
*/
static int gsm_mux_net_open(struct net_device *net)
{
pr_debug("%s called\n", __func__);
netif_start_queue(net);
return 0;
}
static int gsm_mux_net_close(struct net_device *net)
{
netif_stop_queue(net);
return 0;
}
static void dlci_net_free(struct gsm_dlci *dlci)
{
if (!dlci->net) {
WARN_ON(1);
return;
}
dlci->adaption = dlci->prev_adaption;
dlci->data = dlci->prev_data;
free_netdev(dlci->net);
dlci->net = NULL;
}
static void net_free(struct kref *ref)
{
struct gsm_mux_net *mux_net;
struct gsm_dlci *dlci;
mux_net = container_of(ref, struct gsm_mux_net, ref);
dlci = mux_net->dlci;
if (dlci->net) {
unregister_netdev(dlci->net);
dlci_net_free(dlci);
}
}
static inline void muxnet_get(struct gsm_mux_net *mux_net)
{
kref_get(&mux_net->ref);
}
static inline void muxnet_put(struct gsm_mux_net *mux_net)
{
kref_put(&mux_net->ref, net_free);
}
static netdev_tx_t gsm_mux_net_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
struct gsm_mux_net *mux_net = netdev_priv(net);
struct gsm_dlci *dlci = mux_net->dlci;
muxnet_get(mux_net);
skb_queue_head(&dlci->skb_list, skb);
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
gsm_dlci_data_kick(dlci);
/* And tell the kernel when the last transmit started. */
netif_trans_update(net);
muxnet_put(mux_net);
return NETDEV_TX_OK;
}
/* Called when a packet has not been acked within the watchdog timeout */
static void gsm_mux_net_tx_timeout(struct net_device *net, unsigned int txqueue)
{
/* Tell syslog we are hosed. */
dev_dbg(&net->dev, "Tx timed out.\n");
/* Update statistics */
net->stats.tx_errors++;
}
static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
const unsigned char *in_buf, int size)
{
struct net_device *net = dlci->net;
struct sk_buff *skb;
struct gsm_mux_net *mux_net = netdev_priv(net);
muxnet_get(mux_net);
/* Allocate an sk_buff */
skb = dev_alloc_skb(size + NET_IP_ALIGN);
if (!skb) {
/* We got no receive buffer. */
net->stats.rx_dropped++;
muxnet_put(mux_net);
return;
}
skb_reserve(skb, NET_IP_ALIGN);
skb_put_data(skb, in_buf, size);
skb->dev = net;
skb->protocol = htons(ETH_P_IP);
/* Ship it off to the kernel */
netif_rx(skb);
	/* update our statistics */
net->stats.rx_packets++;
net->stats.rx_bytes += size;
muxnet_put(mux_net);
return;
}
static void gsm_mux_net_init(struct net_device *net)
{
static const struct net_device_ops gsm_netdev_ops = {
.ndo_open = gsm_mux_net_open,
.ndo_stop = gsm_mux_net_close,
.ndo_start_xmit = gsm_mux_net_start_xmit,
.ndo_tx_timeout = gsm_mux_net_tx_timeout,
};
net->netdev_ops = &gsm_netdev_ops;
/* fill in the other fields */
net->watchdog_timeo = GSM_NET_TX_TIMEOUT;
net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
net->type = ARPHRD_NONE;
net->tx_queue_len = 10;
}
/* caller holds the dlci mutex */
static void gsm_destroy_network(struct gsm_dlci *dlci)
{
struct gsm_mux_net *mux_net;
pr_debug("destroy network interface\n");
if (!dlci->net)
return;
mux_net = netdev_priv(dlci->net);
muxnet_put(mux_net);
}
/* caller holds the dlci mutex */
static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
{
char *netname;
int retval = 0;
struct net_device *net;
struct gsm_mux_net *mux_net;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
/* Already in a non tty mode */
if (dlci->adaption > 2)
return -EBUSY;
if (nc->protocol != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
if (nc->adaption != 3 && nc->adaption != 4)
return -EPROTONOSUPPORT;
pr_debug("create network interface\n");
netname = "gsm%d";
if (nc->if_name[0] != '\0')
netname = nc->if_name;
net = alloc_netdev(sizeof(struct gsm_mux_net), netname,
NET_NAME_UNKNOWN, gsm_mux_net_init);
if (!net) {
pr_err("alloc_netdev failed\n");
return -ENOMEM;
}
net->mtu = dlci->mtu;
net->min_mtu = MIN_MTU;
net->max_mtu = dlci->mtu;
mux_net = netdev_priv(net);
mux_net->dlci = dlci;
kref_init(&mux_net->ref);
strncpy(nc->if_name, net->name, IFNAMSIZ); /* return net name */
/* reconfigure dlci for network */
dlci->prev_adaption = dlci->adaption;
dlci->prev_data = dlci->data;
dlci->adaption = nc->adaption;
dlci->data = gsm_mux_rx_netchar;
dlci->net = net;
pr_debug("register netdev\n");
retval = register_netdev(net);
if (retval) {
pr_err("network register fail %d\n", retval);
dlci_net_free(dlci);
return retval;
}
return net->ifindex; /* return network index */
}
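/*
 * Illustrative sketch (not part of the driver): enabling the network
 * interface from userspace on an open DLCI tty "dlci_fd", assuming the
 * struct gsm_netconfig layout from <linux/gsmmux.h>.
 *
 *	struct gsm_netconfig nc = { 0 };
 *
 *	nc.adaption = 3;			(convergence layer type 3)
 *	nc.protocol = htons(ETH_P_IP);		(IP is the only protocol)
 *	if (ioctl(dlci_fd, GSMIOC_ENABLE_NET, &nc) >= 0)
 *		printf("created %s\n", nc.if_name);
 */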
/* Line discipline for real tty */
static struct tty_ldisc_ops tty_ldisc_packet = {
.owner = THIS_MODULE,
.num = N_GSM0710,
.name = "n_gsm",
.open = gsmld_open,
.close = gsmld_close,
.flush_buffer = gsmld_flush_buffer,
.read = gsmld_read,
.write = gsmld_write,
.ioctl = gsmld_ioctl,
.poll = gsmld_poll,
.receive_buf = gsmld_receive_buf,
.write_wakeup = gsmld_write_wakeup
};
/*
* Virtual tty side
*/
/**
* gsm_modem_upd_via_data - send modem bits via convergence layer
* @dlci: channel
* @brk: break signal
*
* Send an empty frame to signal mobile state changes and to transmit the
* break signal for adaption 2.
*/
static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk)
{
struct gsm_mux *gsm = dlci->gsm;
unsigned long flags;
if (dlci->state != DLCI_OPEN || dlci->adaption != 2)
return;
spin_lock_irqsave(&gsm->tx_lock, flags);
gsm_dlci_modem_output(gsm, dlci, brk);
spin_unlock_irqrestore(&gsm->tx_lock, flags);
}
/**
* gsm_modem_upd_via_msc - send modem bits via control frame
* @dlci: channel
* @brk: break signal
*/
static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
{
u8 modembits[3];
struct gsm_control *ctrl;
int len = 2;
if (dlci->gsm->encoding != GSM_BASIC_OPT)
return 0;
modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */
if (!brk) {
modembits[1] = (gsm_encode_modem(dlci) << 1) | EA;
} else {
modembits[1] = gsm_encode_modem(dlci) << 1;
modembits[2] = (brk << 4) | 2 | EA; /* Length, Break, EA */
len++;
}
ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len);
if (ctrl == NULL)
return -ENOMEM;
return gsm_control_wait(dlci->gsm, ctrl);
}
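/*
 * Worked example for the MSC payload above (an illustration, assuming
 * EA == 0x01 and DLCI address 1): modembits[0] = (1 << 2) | 2 | 1 =
 * 0x07, i.e. the address with the valid and EA bits set. Without a
 * break the message is two bytes long; with one, a third byte carries
 * the break length in its upper nibble plus the break and EA bits.
 */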
/**
* gsm_modem_update - send modem status line state
* @dlci: channel
* @brk: break signal
*/
static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
{
if (dlci->adaption == 2) {
/* Send convergence layer type 2 empty data frame. */
gsm_modem_upd_via_data(dlci, brk);
return 0;
} else if (dlci->gsm->encoding == GSM_BASIC_OPT) {
/* Send as MSC control message. */
return gsm_modem_upd_via_msc(dlci, brk);
}
/* Modem status lines are not supported. */
return -EPROTONOSUPPORT;
}
/**
* gsm_wait_modem_change - wait for modem status line change
* @dlci: channel
* @mask: modem status line bits
*
 * The function returns when:
* - any given modem status line bit changed
* - the wait event function got interrupted (e.g. by a signal)
* - the underlying DLCI was closed
* - the underlying ldisc device was removed
*/
static int gsm_wait_modem_change(struct gsm_dlci *dlci, u32 mask)
{
struct gsm_mux *gsm = dlci->gsm;
u32 old = dlci->modem_rx;
int ret;
ret = wait_event_interruptible(gsm->event, gsm->dead ||
dlci->state != DLCI_OPEN ||
(old ^ dlci->modem_rx) & mask);
if (gsm->dead)
return -ENODEV;
if (dlci->state != DLCI_OPEN)
return -EL2NSYNC;
return ret;
}
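/*
 * Illustrative sketch (not part of the driver): waiting for a modem
 * status line change from userspace through the TIOCMIWAIT path that
 * ends up here, assuming "fd" is an open gsmtty descriptor.
 *
 *	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_DSR) == 0)
 *		(at least one of the requested lines changed state)
 */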
static bool gsm_carrier_raised(struct tty_port *port)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
struct gsm_mux *gsm = dlci->gsm;
/* Not yet open so no carrier info */
if (dlci->state != DLCI_OPEN)
return false;
if (debug & DBG_CD_ON)
return true;
/*
* Basic mode with control channel in ADM mode may not respond
* to CMD_MSC at all and modem_rx is empty.
*/
if (gsm->encoding == GSM_BASIC_OPT &&
gsm->dlci[0]->mode == DLCI_MODE_ADM && !dlci->modem_rx)
return true;
return dlci->modem_rx & TIOCM_CD;
}
static void gsm_dtr_rts(struct tty_port *port, bool active)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
unsigned int modem_tx = dlci->modem_tx;
if (active)
modem_tx |= TIOCM_DTR | TIOCM_RTS;
else
modem_tx &= ~(TIOCM_DTR | TIOCM_RTS);
if (modem_tx != dlci->modem_tx) {
dlci->modem_tx = modem_tx;
gsm_modem_update(dlci, 0);
}
}
static const struct tty_port_operations gsm_port_ops = {
.carrier_raised = gsm_carrier_raised,
.dtr_rts = gsm_dtr_rts,
.destruct = gsm_dlci_free,
};
static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct gsm_mux *gsm;
struct gsm_dlci *dlci;
unsigned int line = tty->index;
unsigned int mux = mux_line_to_num(line);
bool alloc = false;
int ret;
line = line & 0x3F;
if (mux >= MAX_MUX)
return -ENXIO;
/* FIXME: we need to lock gsm_mux for lifetimes of ttys eventually */
if (gsm_mux[mux] == NULL)
return -EUNATCH;
if (line == 0 || line > 61) /* 62/63 reserved */
return -ECHRNG;
gsm = gsm_mux[mux];
if (gsm->dead)
return -EL2HLT;
	/*
	 * If DLCI 0 is not yet fully open return an error. This is ok from
	 * a locking perspective as we don't have to worry about this if
	 * DLCI 0 is lost.
	 */
mutex_lock(&gsm->mutex);
if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) {
mutex_unlock(&gsm->mutex);
return -EL2NSYNC;
}
dlci = gsm->dlci[line];
if (dlci == NULL) {
alloc = true;
dlci = gsm_dlci_alloc(gsm, line);
}
if (dlci == NULL) {
mutex_unlock(&gsm->mutex);
return -ENOMEM;
}
ret = tty_port_install(&dlci->port, driver, tty);
if (ret) {
if (alloc)
dlci_put(dlci);
mutex_unlock(&gsm->mutex);
return ret;
}
dlci_get(dlci);
dlci_get(gsm->dlci[0]);
mux_get(gsm);
tty->driver_data = dlci;
mutex_unlock(&gsm->mutex);
return 0;
}
static int gsmtty_open(struct tty_struct *tty, struct file *filp)
{
struct gsm_dlci *dlci = tty->driver_data;
struct tty_port *port = &dlci->port;
port->count++;
tty_port_tty_set(port, tty);
dlci->modem_rx = 0;
/* We could in theory open and close before we wait - eg if we get
a DM straight back. This is ok as that will have caused a hangup */
tty_port_set_initialized(port, true);
if (!dlci->gsm->wait_config) {
/* Start sending off SABM messages */
if (dlci->gsm->initiator)
gsm_dlci_begin_open(dlci);
else
gsm_dlci_set_opening(dlci);
} else {
gsm_dlci_set_wait_config(dlci);
}
/* And wait for virtual carrier */
return tty_port_block_til_ready(port, tty, filp);
}
static void gsmtty_close(struct tty_struct *tty, struct file *filp)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci == NULL)
return;
if (dlci->state == DLCI_CLOSED)
return;
mutex_lock(&dlci->mutex);
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
if (tty_port_close_start(&dlci->port, tty, filp) == 0)
return;
gsm_dlci_begin_close(dlci);
if (tty_port_initialized(&dlci->port) && C_HUPCL(tty))
tty_port_lower_dtr_rts(&dlci->port);
tty_port_close_end(&dlci->port, tty);
tty_port_tty_set(&dlci->port, NULL);
return;
}
static void gsmtty_hangup(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return;
tty_port_hangup(&dlci->port);
gsm_dlci_begin_close(dlci);
}
static ssize_t gsmtty_write(struct tty_struct *tty, const u8 *buf, size_t len)
{
int sent;
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return -EINVAL;
/* Stuff the bytes into the fifo queue */
sent = kfifo_in_locked(&dlci->fifo, buf, len, &dlci->lock);
/* Need to kick the channel */
gsm_dlci_data_kick(dlci);
return sent;
}
static unsigned int gsmtty_write_room(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return 0;
return kfifo_avail(&dlci->fifo);
}
static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return 0;
return kfifo_len(&dlci->fifo);
}
static void gsmtty_flush_buffer(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
unsigned long flags;
if (dlci->state == DLCI_CLOSED)
return;
/* Caution needed: If we implement reliable transport classes
then the data being transmitted can't simply be junked once
it has first hit the stack. Until then we can just blow it
away */
spin_lock_irqsave(&dlci->lock, flags);
kfifo_reset(&dlci->fifo);
spin_unlock_irqrestore(&dlci->lock, flags);
/* Need to unhook this DLCI from the transmit queue logic */
}
static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
{
/* The FIFO handles the queue so the kernel will do the right
thing waiting on chars_in_buffer before calling us. No work
to do here */
}
static int gsmtty_tiocmget(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return -EINVAL;
return dlci->modem_rx;
}
static int gsmtty_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct gsm_dlci *dlci = tty->driver_data;
unsigned int modem_tx = dlci->modem_tx;
if (dlci->state == DLCI_CLOSED)
return -EINVAL;
modem_tx &= ~clear;
modem_tx |= set;
if (modem_tx != dlci->modem_tx) {
dlci->modem_tx = modem_tx;
return gsm_modem_update(dlci, 0);
}
return 0;
}
static int gsmtty_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct gsm_dlci *dlci = tty->driver_data;
struct gsm_netconfig nc;
struct gsm_dlci_config dc;
int index;
if (dlci->state == DLCI_CLOSED)
return -EINVAL;
switch (cmd) {
case GSMIOC_ENABLE_NET:
if (copy_from_user(&nc, (void __user *)arg, sizeof(nc)))
return -EFAULT;
nc.if_name[IFNAMSIZ-1] = '\0';
/* return net interface index or error code */
mutex_lock(&dlci->mutex);
index = gsm_create_network(dlci, &nc);
mutex_unlock(&dlci->mutex);
if (copy_to_user((void __user *)arg, &nc, sizeof(nc)))
return -EFAULT;
return index;
case GSMIOC_DISABLE_NET:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
mutex_lock(&dlci->mutex);
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
return 0;
case GSMIOC_GETCONF_DLCI:
if (copy_from_user(&dc, (void __user *)arg, sizeof(dc)))
return -EFAULT;
if (dc.channel != dlci->addr)
return -EPERM;
gsm_dlci_copy_config_values(dlci, &dc);
if (copy_to_user((void __user *)arg, &dc, sizeof(dc)))
return -EFAULT;
return 0;
case GSMIOC_SETCONF_DLCI:
if (copy_from_user(&dc, (void __user *)arg, sizeof(dc)))
return -EFAULT;
if (dc.channel >= NUM_DLCI)
return -EINVAL;
if (dc.channel != 0 && dc.channel != dlci->addr)
return -EPERM;
return gsm_dlci_config(dlci, &dc, 1);
case TIOCMIWAIT:
return gsm_wait_modem_change(dlci, (u32)arg);
default:
return -ENOIOCTLCMD;
}
}
static void gsmtty_set_termios(struct tty_struct *tty,
const struct ktermios *old)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return;
	/* For the moment it's fixed. In actual fact the speed information
	   for the virtual channel can be propagated in both directions by
	   the RPN control message. This however rapidly gets nasty as we
	   then have to remap modem signals each way according to whether
	   our virtual cable is null modem etc .. */
tty_termios_copy_hw(&tty->termios, old);
}
static void gsmtty_throttle(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return;
if (C_CRTSCTS(tty))
dlci->modem_tx &= ~TIOCM_RTS;
dlci->throttled = true;
/* Send an MSC with RTS cleared */
gsm_modem_update(dlci, 0);
}
static void gsmtty_unthrottle(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return;
if (C_CRTSCTS(tty))
dlci->modem_tx |= TIOCM_RTS;
dlci->throttled = false;
/* Send an MSC with RTS set */
gsm_modem_update(dlci, 0);
}
static int gsmtty_break_ctl(struct tty_struct *tty, int state)
{
struct gsm_dlci *dlci = tty->driver_data;
int encode = 0; /* Off */
if (dlci->state == DLCI_CLOSED)
return -EINVAL;
if (state == -1) /* "On indefinitely" - we can't encode this
properly */
encode = 0x0F;
else if (state > 0) {
encode = state / 200; /* mS to encoding */
if (encode > 0x0F)
encode = 0x0F; /* Best effort */
}
return gsm_modem_update(dlci, encode);
}
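/*
 * Worked example for the break encoding above (an illustration): a
 * requested break of 500 ms gives encode = 500 / 200 = 2, i.e. two
 * 200 ms units, while anything above 3000 ms, and "on indefinitely",
 * is clamped to the maximum nibble value 0x0F.
 */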
static void gsmtty_cleanup(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
struct gsm_mux *gsm = dlci->gsm;
dlci_put(dlci);
dlci_put(gsm->dlci[0]);
mux_put(gsm);
}
/* Virtual ttys for the demux */
static const struct tty_operations gsmtty_ops = {
.install = gsmtty_install,
.open = gsmtty_open,
.close = gsmtty_close,
.write = gsmtty_write,
.write_room = gsmtty_write_room,
.chars_in_buffer = gsmtty_chars_in_buffer,
.flush_buffer = gsmtty_flush_buffer,
.ioctl = gsmtty_ioctl,
.throttle = gsmtty_throttle,
.unthrottle = gsmtty_unthrottle,
.set_termios = gsmtty_set_termios,
.hangup = gsmtty_hangup,
.wait_until_sent = gsmtty_wait_until_sent,
.tiocmget = gsmtty_tiocmget,
.tiocmset = gsmtty_tiocmset,
.break_ctl = gsmtty_break_ctl,
.cleanup = gsmtty_cleanup,
};
static int __init gsm_init(void)
{
/* Fill in our line protocol discipline, and register it */
int status = tty_register_ldisc(&tty_ldisc_packet);
if (status != 0) {
pr_err("n_gsm: can't register line discipline (err = %d)\n",
status);
return status;
}
gsm_tty_driver = tty_alloc_driver(GSM_TTY_MINORS, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
if (IS_ERR(gsm_tty_driver)) {
pr_err("gsm_init: tty allocation failed.\n");
status = PTR_ERR(gsm_tty_driver);
goto err_unreg_ldisc;
}
gsm_tty_driver->driver_name = "gsmtty";
gsm_tty_driver->name = "gsmtty";
gsm_tty_driver->major = 0; /* Dynamic */
gsm_tty_driver->minor_start = 0;
gsm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
gsm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
gsm_tty_driver->init_termios = tty_std_termios;
/* Fixme */
gsm_tty_driver->init_termios.c_lflag &= ~ECHO;
tty_set_operations(gsm_tty_driver, &gsmtty_ops);
if (tty_register_driver(gsm_tty_driver)) {
pr_err("gsm_init: tty registration failed.\n");
status = -EBUSY;
goto err_put_driver;
}
pr_debug("gsm_init: loaded as %d,%d.\n",
gsm_tty_driver->major, gsm_tty_driver->minor_start);
return 0;
err_put_driver:
tty_driver_kref_put(gsm_tty_driver);
err_unreg_ldisc:
tty_unregister_ldisc(&tty_ldisc_packet);
return status;
}
static void __exit gsm_exit(void)
{
tty_unregister_ldisc(&tty_ldisc_packet);
tty_unregister_driver(gsm_tty_driver);
tty_driver_kref_put(gsm_tty_driver);
}
module_init(gsm_init);
module_exit(gsm_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_GSM0710);
| linux-master | drivers/tty/n_gsm.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter
*
* Written by: Ulf Jakobsson,
* Jan Åkerfeldt,
* Stefan Thomasson,
*
* Maintained by: Paul Hardwick ([email protected])
*
* Patches:
* Locking code changes for Vodafone by Sphere Systems Ltd,
* Andrew Bird ([email protected] )
* & Phil Sanderson
*
* Source has been ported from an implementation made by Filip Aben @ Option
*
* --------------------------------------------------------------------------
*
* Copyright (c) 2005,2006 Option Wireless Sweden AB
* Copyright (c) 2006 Sphere Systems Ltd
* Copyright (c) 2006 Option Wireless n/v
* All rights Reserved.
*
* --------------------------------------------------------------------------
*/
/* Enable this to have a lot of debug printouts */
#define DEBUG
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/sched.h>
#include <linux/serial.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
/* Default debug printout level */
#define NOZOMI_DEBUG_LEVEL 0x00
static int debug = NOZOMI_DEBUG_LEVEL;
module_param(debug, int, S_IRUGO | S_IWUSR);
/* Macros definitions */
#define DBG_(lvl, fmt, args...) \
do { \
if (lvl & debug) \
pr_debug("[%d] %s(): " fmt "\n", \
__LINE__, __func__, ##args); \
} while (0)
#define DBG1(args...) DBG_(0x01, ##args)
#define DBG2(args...) DBG_(0x02, ##args)
#define DBG3(args...) DBG_(0x04, ##args)
#define DBG4(args...) DBG_(0x08, ##args)
/* TODO: rewrite to optimize macros... */
#define TMP_BUF_MAX 256
#define DUMP(buf__, len__) \
do { \
char tbuf[TMP_BUF_MAX] = {0}; \
if (len__ > 1) { \
u32 data_len = min_t(u32, len__, TMP_BUF_MAX); \
strscpy(tbuf, buf__, data_len); \
if (tbuf[data_len - 2] == '\r') \
tbuf[data_len - 2] = 'r'; \
DBG1("SENDING: '%s' (%d+n)", tbuf, len__); \
} else { \
DBG1("SENDING: '%s' (%d)", tbuf, len__); \
} \
} while (0)
/* Defines */
#define NOZOMI_NAME "nozomi"
#define NOZOMI_NAME_TTY "nozomi_tty"
#define NTTY_TTY_MAXMINORS 256
#define NTTY_FIFO_BUFFER_SIZE 8192
/* Must be power of 2 */
#define FIFO_BUFFER_SIZE_UL 8192
/* Size of tmp send buffer to card */
#define SEND_BUF_MAX 1024
#define RECEIVE_BUF_MAX 4
#define R_IIR 0x0000 /* Interrupt Identity Register */
#define R_FCR 0x0000 /* Flow Control Register */
#define R_IER 0x0004 /* Interrupt Enable Register */
#define NOZOMI_CONFIG_MAGIC 0xEFEFFEFE
#define TOGGLE_VALID 0x0000
/* Definition of interrupt tokens */
#define MDM_DL1 0x0001
#define MDM_UL1 0x0002
#define MDM_DL2 0x0004
#define MDM_UL2 0x0008
#define DIAG_DL1 0x0010
#define DIAG_DL2 0x0020
#define DIAG_UL 0x0040
#define APP1_DL 0x0080
#define APP1_UL 0x0100
#define APP2_DL 0x0200
#define APP2_UL 0x0400
#define CTRL_DL 0x0800
#define CTRL_UL 0x1000
#define RESET 0x8000
#define MDM_DL (MDM_DL1 | MDM_DL2)
#define MDM_UL (MDM_UL1 | MDM_UL2)
#define DIAG_DL (DIAG_DL1 | DIAG_DL2)
/* modem signal definition */
#define CTRL_DSR 0x0001
#define CTRL_DCD 0x0002
#define CTRL_RI 0x0004
#define CTRL_CTS 0x0008
#define CTRL_DTR 0x0001
#define CTRL_RTS 0x0002
#define MAX_PORT 4
#define NOZOMI_MAX_PORTS 5
#define NOZOMI_MAX_CARDS (NTTY_TTY_MAXMINORS / MAX_PORT)
/* Type definitions */
/*
* There are two types of nozomi cards,
 * one with 2048 bytes of memory and one with 8192 bytes of memory
*/
enum card_type {
F32_2 = 2048, /* 512 bytes downlink + uplink * 2 -> 2048 */
F32_8 = 8192, /* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */
};
/* Initialization states a card can be in */
enum card_state {
NOZOMI_STATE_UNKNOWN = 0,
NOZOMI_STATE_ENABLED = 1, /* pci device enabled */
NOZOMI_STATE_ALLOCATED = 2, /* config setup done */
NOZOMI_STATE_READY = 3, /* flowcontrols received */
};
/* Two different toggle channels exist */
enum channel_type {
CH_A = 0,
CH_B = 1,
};
/* Port definition for the card regarding flow control */
enum ctrl_port_type {
CTRL_CMD = 0,
CTRL_MDM = 1,
CTRL_DIAG = 2,
CTRL_APP1 = 3,
CTRL_APP2 = 4,
CTRL_ERROR = -1,
};
/* Ports that the nozomi has */
enum port_type {
PORT_MDM = 0,
PORT_DIAG = 1,
PORT_APP1 = 2,
PORT_APP2 = 3,
PORT_CTRL = 4,
PORT_ERROR = -1,
};
#ifdef __BIG_ENDIAN
/* Big endian */
struct toggles {
unsigned int enabled:5; /*
* Toggle fields are valid if enabled is 0,
* else A-channels must always be used.
*/
unsigned int diag_dl:1;
unsigned int mdm_dl:1;
unsigned int mdm_ul:1;
} __attribute__ ((packed));
/* Configuration table to read at startup of card */
/* Is for now only needed during initialization phase */
struct config_table {
u32 signature;
u16 product_information;
u16 version;
u8 pad3[3];
struct toggles toggle;
u8 pad1[4];
u16 dl_mdm_len1; /*
* If this is 64, it can hold
* 60 bytes + 4 that is length field
*/
u16 dl_start;
u16 dl_diag_len1;
u16 dl_mdm_len2; /*
* If this is 64, it can hold
* 60 bytes + 4 that is length field
*/
u16 dl_app1_len;
u16 dl_diag_len2;
u16 dl_ctrl_len;
u16 dl_app2_len;
u8 pad2[16];
u16 ul_mdm_len1;
u16 ul_start;
u16 ul_diag_len;
u16 ul_mdm_len2;
u16 ul_app1_len;
u16 ul_app2_len;
u16 ul_ctrl_len;
} __attribute__ ((packed));
/* This stores all control downlink flags */
struct ctrl_dl {
u8 port;
unsigned int reserved:4;
unsigned int CTS:1;
unsigned int RI:1;
unsigned int DCD:1;
unsigned int DSR:1;
} __attribute__ ((packed));
/* This stores all control uplink flags */
struct ctrl_ul {
u8 port;
unsigned int reserved:6;
unsigned int RTS:1;
unsigned int DTR:1;
} __attribute__ ((packed));
#else
/* Little endian */
/* This represents the toggle information */
struct toggles {
unsigned int mdm_ul:1;
unsigned int mdm_dl:1;
unsigned int diag_dl:1;
unsigned int enabled:5; /*
* Toggle fields are valid if enabled is 0,
* else A-channels must always be used.
*/
} __attribute__ ((packed));
/* Configuration table to read at startup of card */
struct config_table {
u32 signature;
u16 version;
u16 product_information;
struct toggles toggle;
u8 pad1[7];
u16 dl_start;
u16 dl_mdm_len1; /*
* If this is 64, it can hold
* 60 bytes + 4 that is length field
*/
u16 dl_mdm_len2;
u16 dl_diag_len1;
u16 dl_diag_len2;
u16 dl_app1_len;
u16 dl_app2_len;
u16 dl_ctrl_len;
u8 pad2[16];
u16 ul_start;
u16 ul_mdm_len2;
u16 ul_mdm_len1;
u16 ul_diag_len;
u16 ul_app1_len;
u16 ul_app2_len;
u16 ul_ctrl_len;
} __attribute__ ((packed));
/* This stores all control downlink flags */
struct ctrl_dl {
unsigned int DSR:1;
unsigned int DCD:1;
unsigned int RI:1;
unsigned int CTS:1;
unsigned int reserved:4;
u8 port;
} __attribute__ ((packed));
/* This stores all control uplink flags */
struct ctrl_ul {
unsigned int DTR:1;
unsigned int RTS:1;
unsigned int reserved:6;
u8 port;
} __attribute__ ((packed));
#endif
/* This holds all information that is needed regarding a port */
struct port {
struct tty_port port;
u8 update_flow_control;
struct ctrl_ul ctrl_ul;
struct ctrl_dl ctrl_dl;
struct kfifo fifo_ul;
void __iomem *dl_addr[2];
u32 dl_size[2];
u8 toggle_dl;
void __iomem *ul_addr[2];
u32 ul_size[2];
u8 toggle_ul;
u16 token_dl;
wait_queue_head_t tty_wait;
struct async_icount tty_icount;
struct nozomi *dc;
};
/* Private data one for each card in the system */
struct nozomi {
void __iomem *base_addr;
unsigned long flip;
/* Pointers to registers */
void __iomem *reg_iir;
void __iomem *reg_fcr;
void __iomem *reg_ier;
u16 last_ier;
enum card_type card_type;
struct config_table config_table; /* Configuration table */
struct pci_dev *pdev;
struct port port[NOZOMI_MAX_PORTS];
u8 *send_buf;
spinlock_t spin_mutex; /* secures access to registers and tty */
unsigned int index_start;
enum card_state state;
u32 open_ttys;
};
/* Global variables */
static const struct pci_device_id nozomi_pci_tbl[] = {
{PCI_DEVICE(0x1931, 0x000c)}, /* Nozomi HSDPA */
{},
};
MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl);
static struct nozomi *ndevs[NOZOMI_MAX_CARDS];
static struct tty_driver *ntty_driver;
static const struct tty_port_operations noz_tty_port_ops;
/*
* find card by tty_index
*/
static inline struct nozomi *get_dc_by_tty(const struct tty_struct *tty)
{
return tty ? ndevs[tty->index / MAX_PORT] : NULL;
}
static inline struct port *get_port_by_tty(const struct tty_struct *tty)
{
struct nozomi *ndev = get_dc_by_tty(tty);
return ndev ? &ndev->port[tty->index % MAX_PORT] : NULL;
}
/*
* TODO:
* -Optimize
* -Rewrite cleaner
*/
static void read_mem32(u32 *buf, const void __iomem *mem_addr_start,
u32 size_bytes)
{
u32 i = 0;
const u32 __iomem *ptr = mem_addr_start;
u16 *buf16;
if (unlikely(!ptr || !buf))
goto out;
/* shortcut for extremely often used cases */
switch (size_bytes) {
case 2: /* 2 bytes */
buf16 = (u16 *) buf;
*buf16 = __le16_to_cpu(readw(ptr));
goto out;
case 4: /* 4 bytes */
*(buf) = __le32_to_cpu(readl(ptr));
goto out;
}
while (i < size_bytes) {
if (size_bytes - i == 2) {
/* Handle 2 bytes in the end */
buf16 = (u16 *) buf;
*(buf16) = __le16_to_cpu(readw(ptr));
i += 2;
} else {
/* Read 4 bytes */
*(buf) = __le32_to_cpu(readl(ptr));
i += 4;
}
buf++;
ptr++;
}
out:
return;
}
/*
* TODO:
* -Optimize
* -Rewrite cleaner
*/
static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf,
u32 size_bytes)
{
u32 i = 0;
u32 __iomem *ptr = mem_addr_start;
const u16 *buf16;
if (unlikely(!ptr || !buf))
return 0;
/* shortcut for extremely often used cases */
switch (size_bytes) {
case 2: /* 2 bytes */
buf16 = (const u16 *)buf;
writew(__cpu_to_le16(*buf16), ptr);
return 2;
case 1: /*
* also needs to write 4 bytes in this case
* so falling through..
*/
fallthrough;
case 4: /* 4 bytes */
writel(__cpu_to_le32(*buf), ptr);
return 4;
}
while (i < size_bytes) {
if (size_bytes - i == 2) {
/* 2 bytes */
buf16 = (const u16 *)buf;
writew(__cpu_to_le16(*buf16), ptr);
i += 2;
} else {
/* 4 bytes */
writel(__cpu_to_le32(*buf), ptr);
i += 4;
}
buf++;
ptr++;
}
return i;
}
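/*
 * Usage note (an illustration): the control flags are two packed
 * bytes, so these helpers are routinely called with size 2, e.g.
 *
 *	struct ctrl_dl ctrl_dl;
 *
 *	read_mem32((u32 *)&ctrl_dl, dc->port[PORT_CTRL].dl_addr[CH_A], 2);
 *
 * which takes the readw() shortcut rather than the 32-bit loop.
 */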
/* Setup pointers to different channels and also setup buffer sizes. */
static void nozomi_setup_memory(struct nozomi *dc)
{
void __iomem *offset = dc->base_addr + dc->config_table.dl_start;
	/* The length reported includes the 4-byte length field itself,
	 * hence subtract 4.
*/
const u16 buff_offset = 4;
/* Modem port dl configuration */
dc->port[PORT_MDM].dl_addr[CH_A] = offset;
dc->port[PORT_MDM].dl_addr[CH_B] =
(offset += dc->config_table.dl_mdm_len1);
dc->port[PORT_MDM].dl_size[CH_A] =
dc->config_table.dl_mdm_len1 - buff_offset;
dc->port[PORT_MDM].dl_size[CH_B] =
dc->config_table.dl_mdm_len2 - buff_offset;
/* Diag port dl configuration */
dc->port[PORT_DIAG].dl_addr[CH_A] =
(offset += dc->config_table.dl_mdm_len2);
dc->port[PORT_DIAG].dl_size[CH_A] =
dc->config_table.dl_diag_len1 - buff_offset;
dc->port[PORT_DIAG].dl_addr[CH_B] =
(offset += dc->config_table.dl_diag_len1);
dc->port[PORT_DIAG].dl_size[CH_B] =
dc->config_table.dl_diag_len2 - buff_offset;
/* App1 port dl configuration */
dc->port[PORT_APP1].dl_addr[CH_A] =
(offset += dc->config_table.dl_diag_len2);
dc->port[PORT_APP1].dl_size[CH_A] =
dc->config_table.dl_app1_len - buff_offset;
/* App2 port dl configuration */
dc->port[PORT_APP2].dl_addr[CH_A] =
(offset += dc->config_table.dl_app1_len);
dc->port[PORT_APP2].dl_size[CH_A] =
dc->config_table.dl_app2_len - buff_offset;
/* Ctrl dl configuration */
dc->port[PORT_CTRL].dl_addr[CH_A] =
(offset += dc->config_table.dl_app2_len);
dc->port[PORT_CTRL].dl_size[CH_A] =
dc->config_table.dl_ctrl_len - buff_offset;
offset = dc->base_addr + dc->config_table.ul_start;
/* Modem Port ul configuration */
dc->port[PORT_MDM].ul_addr[CH_A] = offset;
dc->port[PORT_MDM].ul_size[CH_A] =
dc->config_table.ul_mdm_len1 - buff_offset;
dc->port[PORT_MDM].ul_addr[CH_B] =
(offset += dc->config_table.ul_mdm_len1);
dc->port[PORT_MDM].ul_size[CH_B] =
dc->config_table.ul_mdm_len2 - buff_offset;
/* Diag port ul configuration */
dc->port[PORT_DIAG].ul_addr[CH_A] =
(offset += dc->config_table.ul_mdm_len2);
dc->port[PORT_DIAG].ul_size[CH_A] =
dc->config_table.ul_diag_len - buff_offset;
/* App1 port ul configuration */
dc->port[PORT_APP1].ul_addr[CH_A] =
(offset += dc->config_table.ul_diag_len);
dc->port[PORT_APP1].ul_size[CH_A] =
dc->config_table.ul_app1_len - buff_offset;
/* App2 port ul configuration */
dc->port[PORT_APP2].ul_addr[CH_A] =
(offset += dc->config_table.ul_app1_len);
dc->port[PORT_APP2].ul_size[CH_A] =
dc->config_table.ul_app2_len - buff_offset;
/* Ctrl ul configuration */
dc->port[PORT_CTRL].ul_addr[CH_A] =
(offset += dc->config_table.ul_app2_len);
dc->port[PORT_CTRL].ul_size[CH_A] =
dc->config_table.ul_ctrl_len - buff_offset;
}
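/*
 * Worked example (an illustration): with dl_mdm_len1 == 64 the modem
 * CH_A downlink window holds 64 - 4 = 60 payload bytes, because the
 * first 4 bytes of every window are the length field, and CH_B then
 * starts 64 bytes after CH_A as computed by the running offset above.
 */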
/* Dump config table during the initialization phase */
#ifdef DEBUG
static void dump_table(const struct nozomi *dc)
{
DBG3("signature: 0x%08X", dc->config_table.signature);
DBG3("version: 0x%04X", dc->config_table.version);
DBG3("product_information: 0x%04X", \
dc->config_table.product_information);
DBG3("toggle enabled: %d", dc->config_table.toggle.enabled);
DBG3("toggle up_mdm: %d", dc->config_table.toggle.mdm_ul);
DBG3("toggle dl_mdm: %d", dc->config_table.toggle.mdm_dl);
DBG3("toggle dl_dbg: %d", dc->config_table.toggle.diag_dl);
DBG3("dl_start: 0x%04X", dc->config_table.dl_start);
DBG3("dl_mdm_len0: 0x%04X, %d", dc->config_table.dl_mdm_len1,
dc->config_table.dl_mdm_len1);
DBG3("dl_mdm_len1: 0x%04X, %d", dc->config_table.dl_mdm_len2,
dc->config_table.dl_mdm_len2);
DBG3("dl_diag_len0: 0x%04X, %d", dc->config_table.dl_diag_len1,
dc->config_table.dl_diag_len1);
DBG3("dl_diag_len1: 0x%04X, %d", dc->config_table.dl_diag_len2,
dc->config_table.dl_diag_len2);
DBG3("dl_app1_len: 0x%04X, %d", dc->config_table.dl_app1_len,
dc->config_table.dl_app1_len);
DBG3("dl_app2_len: 0x%04X, %d", dc->config_table.dl_app2_len,
dc->config_table.dl_app2_len);
DBG3("dl_ctrl_len: 0x%04X, %d", dc->config_table.dl_ctrl_len,
dc->config_table.dl_ctrl_len);
DBG3("ul_start: 0x%04X, %d", dc->config_table.ul_start,
dc->config_table.ul_start);
DBG3("ul_mdm_len[0]: 0x%04X, %d", dc->config_table.ul_mdm_len1,
dc->config_table.ul_mdm_len1);
DBG3("ul_mdm_len[1]: 0x%04X, %d", dc->config_table.ul_mdm_len2,
dc->config_table.ul_mdm_len2);
DBG3("ul_diag_len: 0x%04X, %d", dc->config_table.ul_diag_len,
dc->config_table.ul_diag_len);
DBG3("ul_app1_len: 0x%04X, %d", dc->config_table.ul_app1_len,
dc->config_table.ul_app1_len);
DBG3("ul_app2_len: 0x%04X, %d", dc->config_table.ul_app2_len,
dc->config_table.ul_app2_len);
DBG3("ul_ctrl_len: 0x%04X, %d", dc->config_table.ul_ctrl_len,
dc->config_table.ul_ctrl_len);
}
#else
static inline void dump_table(const struct nozomi *dc) { }
#endif
/*
 * Read configuration table from the card during the initialization phase
* Returns 1 if ok, else 0
*/
static int nozomi_read_config_table(struct nozomi *dc)
{
read_mem32((u32 *) &dc->config_table, dc->base_addr + 0,
sizeof(struct config_table));
if (dc->config_table.signature != NOZOMI_CONFIG_MAGIC) {
dev_err(&dc->pdev->dev, "ConfigTable Bad! 0x%08X != 0x%08X\n",
dc->config_table.signature, NOZOMI_CONFIG_MAGIC);
return 0;
}
if ((dc->config_table.version == 0)
|| (dc->config_table.toggle.enabled == TOGGLE_VALID)) {
int i;
DBG1("Second phase, configuring card");
nozomi_setup_memory(dc);
dc->port[PORT_MDM].toggle_ul = dc->config_table.toggle.mdm_ul;
dc->port[PORT_MDM].toggle_dl = dc->config_table.toggle.mdm_dl;
dc->port[PORT_DIAG].toggle_dl = dc->config_table.toggle.diag_dl;
DBG1("toggle ports: MDM UL:%d MDM DL:%d, DIAG DL:%d",
dc->port[PORT_MDM].toggle_ul,
dc->port[PORT_MDM].toggle_dl, dc->port[PORT_DIAG].toggle_dl);
dump_table(dc);
for (i = PORT_MDM; i < MAX_PORT; i++) {
memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl));
memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul));
}
/* Enable control channel */
dc->last_ier = dc->last_ier | CTRL_DL;
writew(dc->last_ier, dc->reg_ier);
dc->state = NOZOMI_STATE_ALLOCATED;
dev_info(&dc->pdev->dev, "Initialization OK!\n");
return 1;
}
if ((dc->config_table.version > 0)
&& (dc->config_table.toggle.enabled != TOGGLE_VALID)) {
u32 offset = 0;
DBG1("First phase: pushing upload buffers, clearing download");
dev_info(&dc->pdev->dev, "Version of card: %d\n",
dc->config_table.version);
/* Here we should disable all I/O over F32. */
nozomi_setup_memory(dc);
/*
* We should send ALL channel pair tokens back along
* with reset token
*/
/* push upload modem buffers */
write_mem32(dc->port[PORT_MDM].ul_addr[CH_A],
(u32 *) &offset, 4);
write_mem32(dc->port[PORT_MDM].ul_addr[CH_B],
(u32 *) &offset, 4);
writew(MDM_UL | DIAG_DL | MDM_DL, dc->reg_fcr);
DBG1("First phase done");
}
return 1;
}
/* Enable uplink interrupts */
static void enable_transmit_ul(enum port_type port, struct nozomi *dc)
{
static const u16 mask[] = {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier |= mask[port];
writew(dc->last_ier, dc->reg_ier);
} else {
dev_err(&dc->pdev->dev, "Called with wrong port?\n");
}
}
/* Disable uplink interrupts */
static void disable_transmit_ul(enum port_type port, struct nozomi *dc)
{
static const u16 mask[] =
{~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier &= mask[port];
writew(dc->last_ier, dc->reg_ier);
} else {
dev_err(&dc->pdev->dev, "Called with wrong port?\n");
}
}
/* Enable downlink interrupts */
static void enable_transmit_dl(enum port_type port, struct nozomi *dc)
{
static const u16 mask[] = {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier |= mask[port];
writew(dc->last_ier, dc->reg_ier);
} else {
dev_err(&dc->pdev->dev, "Called with wrong port?\n");
}
}
/* Disable downlink interrupts */
static void disable_transmit_dl(enum port_type port, struct nozomi *dc)
{
static const u16 mask[] =
{~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier &= mask[port];
writew(dc->last_ier, dc->reg_ier);
} else {
dev_err(&dc->pdev->dev, "Called with wrong port?\n");
}
}
/*
* Return 1 - send buffer to card and ack.
* Return 0 - don't ack, don't send buffer to card.
*/
static int send_data(enum port_type index, struct nozomi *dc)
{
u32 size = 0;
struct port *port = &dc->port[index];
const u8 toggle = port->toggle_ul;
void __iomem *addr = port->ul_addr[toggle];
const u32 ul_size = port->ul_size[toggle];
/* Get data from tty and place in buf for now */
size = kfifo_out(&port->fifo_ul, dc->send_buf,
ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX);
if (size == 0) {
DBG4("No more data to send, disable link:");
return 0;
}
/* DUMP(buf, size); */
/* Write length + data */
write_mem32(addr, (u32 *) &size, 4);
write_mem32(addr + 4, (u32 *) dc->send_buf, size);
tty_port_tty_wakeup(&port->port);
return 1;
}
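/*
 * Uplink frame layout written above (an illustration): a 32-bit
 * little-endian length word followed by the payload, so sending the
 * three bytes "AT\r" produces 03 00 00 00 41 54 0D in card memory.
 */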
/* If all data has been read, return 1, else 0 */
static int receive_data(enum port_type index, struct nozomi *dc)
{
u8 buf[RECEIVE_BUF_MAX] = { 0 };
int size;
u32 offset = 4;
struct port *port = &dc->port[index];
void __iomem *addr = port->dl_addr[port->toggle_dl];
struct tty_struct *tty = tty_port_tty_get(&port->port);
int i, ret;
size = __le32_to_cpu(readl(addr));
if (tty && tty_throttled(tty)) {
DBG1("No room in tty, don't read data, don't ack interrupt, "
"disable interrupt");
/* disable interrupt in downlink... */
disable_transmit_dl(index, dc);
ret = 0;
goto put;
}
if (unlikely(size == 0)) {
dev_err(&dc->pdev->dev, "size == 0?\n");
ret = 1;
goto put;
}
while (size > 0) {
read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
if (size == 1) {
tty_insert_flip_char(&port->port, buf[0], TTY_NORMAL);
size = 0;
} else if (size < RECEIVE_BUF_MAX) {
size -= tty_insert_flip_string(&port->port,
(char *)buf, size);
} else {
i = tty_insert_flip_string(&port->port,
(char *)buf, RECEIVE_BUF_MAX);
size -= i;
offset += i;
}
}
set_bit(index, &dc->flip);
ret = 1;
put:
tty_kref_put(tty);
return ret;
}
/* Debug for interrupts */
#ifdef DEBUG
static char *interrupt2str(u16 interrupt)
{
static char buf[TMP_BUF_MAX];
char *p = buf;
if (interrupt & MDM_DL1)
p += scnprintf(p, TMP_BUF_MAX, "MDM_DL1 ");
if (interrupt & MDM_DL2)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "MDM_DL2 ");
if (interrupt & MDM_UL1)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "MDM_UL1 ");
if (interrupt & MDM_UL2)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "MDM_UL2 ");
if (interrupt & DIAG_DL1)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "DIAG_DL1 ");
if (interrupt & DIAG_DL2)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "DIAG_DL2 ");
if (interrupt & DIAG_UL)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "DIAG_UL ");
if (interrupt & APP1_DL)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "APP1_DL ");
if (interrupt & APP2_DL)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "APP2_DL ");
if (interrupt & APP1_UL)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "APP1_UL ");
if (interrupt & APP2_UL)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "APP2_UL ");
if (interrupt & CTRL_DL)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "CTRL_DL ");
if (interrupt & CTRL_UL)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "CTRL_UL ");
if (interrupt & RESET)
p += scnprintf(p, TMP_BUF_MAX - (p - buf), "RESET ");
return buf;
}
#endif
/*
* Receive flow control
* Return 1 - If ok, else 0
*/
static int receive_flow_control(struct nozomi *dc)
{
enum port_type port = PORT_MDM;
struct ctrl_dl ctrl_dl;
struct ctrl_dl old_ctrl;
u16 enable_ier = 0;
read_mem32((u32 *) &ctrl_dl, dc->port[PORT_CTRL].dl_addr[CH_A], 2);
switch (ctrl_dl.port) {
case CTRL_CMD:
DBG1("The Base Band sends this value as a response to a "
"request for IMSI detach sent over the control "
"channel uplink (see section 7.6.1).");
break;
case CTRL_MDM:
port = PORT_MDM;
enable_ier = MDM_DL;
break;
case CTRL_DIAG:
port = PORT_DIAG;
enable_ier = DIAG_DL;
break;
case CTRL_APP1:
port = PORT_APP1;
enable_ier = APP1_DL;
break;
case CTRL_APP2:
port = PORT_APP2;
enable_ier = APP2_DL;
if (dc->state == NOZOMI_STATE_ALLOCATED) {
/*
* After card initialization the flow control
* received for APP2 is always the last
*/
dc->state = NOZOMI_STATE_READY;
dev_info(&dc->pdev->dev, "Device READY!\n");
}
break;
default:
dev_err(&dc->pdev->dev,
"ERROR: flow control received for non-existing port\n");
return 0;
}
DBG1("0x%04X->0x%04X", *((u16 *)&dc->port[port].ctrl_dl),
*((u16 *)&ctrl_dl));
old_ctrl = dc->port[port].ctrl_dl;
dc->port[port].ctrl_dl = ctrl_dl;
if (old_ctrl.CTS == 1 && ctrl_dl.CTS == 0) {
DBG1("Disable interrupt (0x%04X) on port: %d",
enable_ier, port);
disable_transmit_ul(port, dc);
} else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) {
if (kfifo_len(&dc->port[port].fifo_ul)) {
DBG1("Enable interrupt (0x%04X) on port: %d",
enable_ier, port);
DBG1("Data in buffer [%d], enable transmit! ",
kfifo_len(&dc->port[port].fifo_ul));
enable_transmit_ul(port, dc);
} else {
DBG1("No data in buffer...");
}
}
if (*(u16 *)&old_ctrl == *(u16 *)&ctrl_dl) {
DBG1(" No change in mctrl");
return 1;
}
/* Update statistics */
if (old_ctrl.CTS != ctrl_dl.CTS)
dc->port[port].tty_icount.cts++;
if (old_ctrl.DSR != ctrl_dl.DSR)
dc->port[port].tty_icount.dsr++;
if (old_ctrl.RI != ctrl_dl.RI)
dc->port[port].tty_icount.rng++;
if (old_ctrl.DCD != ctrl_dl.DCD)
dc->port[port].tty_icount.dcd++;
wake_up_interruptible(&dc->port[port].tty_wait);
DBG1("port: %d DCD(%d), CTS(%d), RI(%d), DSR(%d)",
port,
dc->port[port].tty_icount.dcd, dc->port[port].tty_icount.cts,
dc->port[port].tty_icount.rng, dc->port[port].tty_icount.dsr);
return 1;
}
static enum ctrl_port_type port2ctrl(enum port_type port,
const struct nozomi *dc)
{
switch (port) {
case PORT_MDM:
return CTRL_MDM;
case PORT_DIAG:
return CTRL_DIAG;
case PORT_APP1:
return CTRL_APP1;
case PORT_APP2:
return CTRL_APP2;
default:
dev_err(&dc->pdev->dev,
"ERROR: send flow control " \
"received for non-existing port\n");
}
return CTRL_ERROR;
}
/*
* Send flow control, can only update one channel at a time
* Return 0 - If we have updated all flow control
 * Return 1 - If more flow control updates are pending; ack the current one and enable more
*/
static int send_flow_control(struct nozomi *dc)
{
u32 i, more_flow_control_to_be_updated = 0;
u16 *ctrl;
for (i = PORT_MDM; i < MAX_PORT; i++) {
if (dc->port[i].update_flow_control) {
if (more_flow_control_to_be_updated) {
/* We have more flow control to be updated */
return 1;
}
dc->port[i].ctrl_ul.port = port2ctrl(i, dc);
ctrl = (u16 *)&dc->port[i].ctrl_ul;
write_mem32(dc->port[PORT_CTRL].ul_addr[0], \
(u32 *) ctrl, 2);
dc->port[i].update_flow_control = 0;
more_flow_control_to_be_updated = 1;
}
}
return 0;
}
/*
* Handle downlink data, ports that are handled are modem and diagnostics
* Return 1 - ok
* Return 0 - toggle fields are out of sync
*/
static int handle_data_dl(struct nozomi *dc, enum port_type port, u8 *toggle,
u16 read_iir, u16 mask1, u16 mask2)
{
if (*toggle == 0 && read_iir & mask1) {
if (receive_data(port, dc)) {
writew(mask1, dc->reg_fcr);
*toggle = !(*toggle);
}
if (read_iir & mask2) {
if (receive_data(port, dc)) {
writew(mask2, dc->reg_fcr);
*toggle = !(*toggle);
}
}
} else if (*toggle == 1 && read_iir & mask2) {
if (receive_data(port, dc)) {
writew(mask2, dc->reg_fcr);
*toggle = !(*toggle);
}
if (read_iir & mask1) {
if (receive_data(port, dc)) {
writew(mask1, dc->reg_fcr);
*toggle = !(*toggle);
}
}
} else {
dev_err(&dc->pdev->dev, "port out of sync!, toggle:%d\n",
*toggle);
return 0;
}
return 1;
}
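/*
 * Toggle protocol note (an illustration): the card alternates between
 * the A and B downlink windows, so a healthy sequence is toggle 0 ->
 * data in the mask1 window -> ack mask1 and flip to 1 -> data in the
 * mask2 window -> ack mask2, and so on. Any other ordering is reported
 * above as a port out of sync.
 */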
/*
* Handle uplink data, this is currently for the modem port
* Return 1 - ok
 * Return 0 - toggle fields are out of sync
*/
static int handle_data_ul(struct nozomi *dc, enum port_type port, u16 read_iir)
{
u8 *toggle = &(dc->port[port].toggle_ul);
if (*toggle == 0 && read_iir & MDM_UL1) {
dc->last_ier &= ~MDM_UL;
writew(dc->last_ier, dc->reg_ier);
if (send_data(port, dc)) {
writew(MDM_UL1, dc->reg_fcr);
dc->last_ier = dc->last_ier | MDM_UL;
writew(dc->last_ier, dc->reg_ier);
*toggle = !*toggle;
}
if (read_iir & MDM_UL2) {
dc->last_ier &= ~MDM_UL;
writew(dc->last_ier, dc->reg_ier);
if (send_data(port, dc)) {
writew(MDM_UL2, dc->reg_fcr);
dc->last_ier = dc->last_ier | MDM_UL;
writew(dc->last_ier, dc->reg_ier);
*toggle = !*toggle;
}
}
} else if (*toggle == 1 && read_iir & MDM_UL2) {
dc->last_ier &= ~MDM_UL;
writew(dc->last_ier, dc->reg_ier);
if (send_data(port, dc)) {
writew(MDM_UL2, dc->reg_fcr);
dc->last_ier = dc->last_ier | MDM_UL;
writew(dc->last_ier, dc->reg_ier);
*toggle = !*toggle;
}
if (read_iir & MDM_UL1) {
dc->last_ier &= ~MDM_UL;
writew(dc->last_ier, dc->reg_ier);
if (send_data(port, dc)) {
writew(MDM_UL1, dc->reg_fcr);
dc->last_ier = dc->last_ier | MDM_UL;
writew(dc->last_ier, dc->reg_ier);
*toggle = !*toggle;
}
}
} else {
writew(read_iir & MDM_UL, dc->reg_fcr);
dev_err(&dc->pdev->dev, "port out of sync!\n");
return 0;
}
return 1;
}
static irqreturn_t interrupt_handler(int irq, void *dev_id)
{
struct nozomi *dc = dev_id;
unsigned int a;
u16 read_iir;
if (!dc)
return IRQ_NONE;
spin_lock(&dc->spin_mutex);
read_iir = readw(dc->reg_iir);
/* Card removed */
if (read_iir == (u16)-1)
goto none;
/*
* Just handle interrupt enabled in IER
* (by masking with dc->last_ier)
*/
read_iir &= dc->last_ier;
if (read_iir == 0)
goto none;
DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir,
dc->last_ier);
if (read_iir & RESET) {
if (unlikely(!nozomi_read_config_table(dc))) {
dc->last_ier = 0x0;
writew(dc->last_ier, dc->reg_ier);
dev_err(&dc->pdev->dev, "Could not read status from "
"card, we should disable interface\n");
} else {
writew(RESET, dc->reg_fcr);
}
/* No more useful info if this was the reset interrupt. */
goto exit_handler;
}
if (read_iir & CTRL_UL) {
DBG1("CTRL_UL");
dc->last_ier &= ~CTRL_UL;
writew(dc->last_ier, dc->reg_ier);
if (send_flow_control(dc)) {
writew(CTRL_UL, dc->reg_fcr);
dc->last_ier = dc->last_ier | CTRL_UL;
writew(dc->last_ier, dc->reg_ier);
}
}
if (read_iir & CTRL_DL) {
receive_flow_control(dc);
writew(CTRL_DL, dc->reg_fcr);
}
if (read_iir & MDM_DL) {
if (!handle_data_dl(dc, PORT_MDM,
&(dc->port[PORT_MDM].toggle_dl), read_iir,
MDM_DL1, MDM_DL2)) {
dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n");
goto exit_handler;
}
}
if (read_iir & MDM_UL) {
if (!handle_data_ul(dc, PORT_MDM, read_iir)) {
dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n");
goto exit_handler;
}
}
if (read_iir & DIAG_DL) {
if (!handle_data_dl(dc, PORT_DIAG,
&(dc->port[PORT_DIAG].toggle_dl), read_iir,
DIAG_DL1, DIAG_DL2)) {
dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n");
goto exit_handler;
}
}
if (read_iir & DIAG_UL) {
dc->last_ier &= ~DIAG_UL;
writew(dc->last_ier, dc->reg_ier);
if (send_data(PORT_DIAG, dc)) {
writew(DIAG_UL, dc->reg_fcr);
dc->last_ier = dc->last_ier | DIAG_UL;
writew(dc->last_ier, dc->reg_ier);
}
}
if (read_iir & APP1_DL) {
if (receive_data(PORT_APP1, dc))
writew(APP1_DL, dc->reg_fcr);
}
if (read_iir & APP1_UL) {
dc->last_ier &= ~APP1_UL;
writew(dc->last_ier, dc->reg_ier);
if (send_data(PORT_APP1, dc)) {
writew(APP1_UL, dc->reg_fcr);
dc->last_ier = dc->last_ier | APP1_UL;
writew(dc->last_ier, dc->reg_ier);
}
}
if (read_iir & APP2_DL) {
if (receive_data(PORT_APP2, dc))
writew(APP2_DL, dc->reg_fcr);
}
if (read_iir & APP2_UL) {
dc->last_ier &= ~APP2_UL;
writew(dc->last_ier, dc->reg_ier);
if (send_data(PORT_APP2, dc)) {
writew(APP2_UL, dc->reg_fcr);
dc->last_ier = dc->last_ier | APP2_UL;
writew(dc->last_ier, dc->reg_ier);
}
}
exit_handler:
spin_unlock(&dc->spin_mutex);
for (a = 0; a < NOZOMI_MAX_PORTS; a++)
if (test_and_clear_bit(a, &dc->flip))
tty_flip_buffer_push(&dc->port[a].port);
return IRQ_HANDLED;
none:
spin_unlock(&dc->spin_mutex);
return IRQ_NONE;
}
static void nozomi_get_card_type(struct nozomi *dc)
{
int i;
u32 size = 0;
for (i = 0; i < 6; i++)
size += pci_resource_len(dc->pdev, i);
/* Assume card type F32_8 if no match */
dc->card_type = size == 2048 ? F32_2 : F32_8;
dev_info(&dc->pdev->dev, "Card type is: %d\n", dc->card_type);
}
static void nozomi_setup_private_data(struct nozomi *dc)
{
void __iomem *offset = dc->base_addr + dc->card_type / 2;
unsigned int i;
dc->reg_fcr = (void __iomem *)(offset + R_FCR);
dc->reg_iir = (void __iomem *)(offset + R_IIR);
dc->reg_ier = (void __iomem *)(offset + R_IER);
dc->last_ier = 0;
dc->flip = 0;
dc->port[PORT_MDM].token_dl = MDM_DL;
dc->port[PORT_DIAG].token_dl = DIAG_DL;
dc->port[PORT_APP1].token_dl = APP1_DL;
dc->port[PORT_APP2].token_dl = APP2_DL;
for (i = 0; i < MAX_PORT; i++)
init_waitqueue_head(&dc->port[i].tty_wait);
}
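/*
 * Worked example (an illustration): the register block sits halfway
 * into the mapped window, so on an F32_8 card the offset is
 * 8192 / 2 = 4096 and the IER ends up at base_addr + 4096 + 0x0004.
 */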
static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
const struct nozomi *dc = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", dc->card_type);
}
static DEVICE_ATTR_RO(card_type);
static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
const struct nozomi *dc = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", dc->open_ttys);
}
static DEVICE_ATTR_RO(open_ttys);
static void make_sysfs_files(struct nozomi *dc)
{
if (device_create_file(&dc->pdev->dev, &dev_attr_card_type))
dev_err(&dc->pdev->dev,
"Could not create sysfs file for card_type\n");
if (device_create_file(&dc->pdev->dev, &dev_attr_open_ttys))
dev_err(&dc->pdev->dev,
"Could not create sysfs file for open_ttys\n");
}
static void remove_sysfs_files(struct nozomi *dc)
{
device_remove_file(&dc->pdev->dev, &dev_attr_card_type);
device_remove_file(&dc->pdev->dev, &dev_attr_open_ttys);
}
/* Allocate memory for one device */
static int nozomi_card_init(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int ret;
struct nozomi *dc = NULL;
int ndev_idx;
int i;
for (ndev_idx = 0; ndev_idx < ARRAY_SIZE(ndevs); ndev_idx++)
if (!ndevs[ndev_idx])
break;
if (ndev_idx >= ARRAY_SIZE(ndevs)) {
dev_err(&pdev->dev, "no free tty range for this card left\n");
ret = -EIO;
goto err;
}
dc = kzalloc(sizeof(struct nozomi), GFP_KERNEL);
if (unlikely(!dc)) {
dev_err(&pdev->dev, "Could not allocate memory\n");
ret = -ENOMEM;
goto err_free;
}
dc->pdev = pdev;
ret = pci_enable_device(dc->pdev);
if (ret) {
dev_err(&pdev->dev, "Failed to enable PCI Device\n");
goto err_free;
}
ret = pci_request_regions(dc->pdev, NOZOMI_NAME);
if (ret) {
dev_err(&pdev->dev, "I/O address 0x%04x already in use\n",
(int) /* nozomi_private.io_addr */ 0);
goto err_disable_device;
}
/* Find out what card type it is */
nozomi_get_card_type(dc);
dc->base_addr = pci_iomap(dc->pdev, 0, dc->card_type);
if (!dc->base_addr) {
dev_err(&pdev->dev, "Unable to map card MMIO\n");
ret = -ENODEV;
goto err_rel_regs;
}
dc->send_buf = kmalloc(SEND_BUF_MAX, GFP_KERNEL);
if (!dc->send_buf) {
dev_err(&pdev->dev, "Could not allocate send buffer?\n");
ret = -ENOMEM;
goto err_free_sbuf;
}
for (i = PORT_MDM; i < MAX_PORT; i++) {
if (kfifo_alloc(&dc->port[i].fifo_ul, FIFO_BUFFER_SIZE_UL,
GFP_KERNEL)) {
dev_err(&pdev->dev,
"Could not allocate kfifo buffer\n");
ret = -ENOMEM;
goto err_free_kfifo;
}
}
spin_lock_init(&dc->spin_mutex);
nozomi_setup_private_data(dc);
/* Disable all interrupts */
dc->last_ier = 0;
writew(dc->last_ier, dc->reg_ier);
ret = request_irq(pdev->irq, &interrupt_handler, IRQF_SHARED,
NOZOMI_NAME, dc);
if (unlikely(ret)) {
dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
goto err_free_all_kfifo;
}
DBG1("base_addr: %p", dc->base_addr);
make_sysfs_files(dc);
dc->index_start = ndev_idx * MAX_PORT;
ndevs[ndev_idx] = dc;
pci_set_drvdata(pdev, dc);
/* Enable RESET interrupt */
dc->last_ier = RESET;
iowrite16(dc->last_ier, dc->reg_ier);
dc->state = NOZOMI_STATE_ENABLED;
for (i = 0; i < MAX_PORT; i++) {
struct device *tty_dev;
struct port *port = &dc->port[i];
port->dc = dc;
tty_port_init(&port->port);
port->port.ops = &noz_tty_port_ops;
tty_dev = tty_port_register_device(&port->port, ntty_driver,
dc->index_start + i, &pdev->dev);
if (IS_ERR(tty_dev)) {
ret = PTR_ERR(tty_dev);
dev_err(&pdev->dev, "Could not allocate tty?\n");
tty_port_destroy(&port->port);
goto err_free_tty;
}
}
return 0;
err_free_tty:
for (i--; i >= 0; i--) {
tty_unregister_device(ntty_driver, dc->index_start + i);
tty_port_destroy(&dc->port[i].port);
}
free_irq(pdev->irq, dc);
err_free_all_kfifo:
i = MAX_PORT;
err_free_kfifo:
for (i--; i >= PORT_MDM; i--)
kfifo_free(&dc->port[i].fifo_ul);
err_free_sbuf:
kfree(dc->send_buf);
iounmap(dc->base_addr);
err_rel_regs:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
err_free:
kfree(dc);
err:
return ret;
}
static void tty_exit(struct nozomi *dc)
{
unsigned int i;
for (i = 0; i < MAX_PORT; ++i)
tty_port_tty_hangup(&dc->port[i].port, false);
/* Racy below - surely should wait for scheduled work to be done or
complete off a hangup method ? */
while (dc->open_ttys)
msleep(1);
for (i = 0; i < MAX_PORT; ++i) {
tty_unregister_device(ntty_driver, dc->index_start + i);
tty_port_destroy(&dc->port[i].port);
}
}
/* Deallocate memory for one device */
static void nozomi_card_exit(struct pci_dev *pdev)
{
int i;
struct ctrl_ul ctrl;
struct nozomi *dc = pci_get_drvdata(pdev);
/* Disable all interrupts */
dc->last_ier = 0;
writew(dc->last_ier, dc->reg_ier);
tty_exit(dc);
/* Send 0x0001, command card to resend the reset token. */
/* This is to get the reset when the module is reloaded. */
ctrl.port = 0x00;
ctrl.reserved = 0;
ctrl.RTS = 0;
ctrl.DTR = 1;
DBG1("sending flow control 0x%04X", *((u16 *)&ctrl));
	/* Set up dc->reg addresses so we can use the defines here */
write_mem32(dc->port[PORT_CTRL].ul_addr[0], (u32 *)&ctrl, 2);
writew(CTRL_UL, dc->reg_fcr); /* push the token to the card. */
remove_sysfs_files(dc);
free_irq(pdev->irq, dc);
for (i = 0; i < MAX_PORT; i++)
kfifo_free(&dc->port[i].fifo_ul);
kfree(dc->send_buf);
iounmap(dc->base_addr);
pci_release_regions(pdev);
pci_disable_device(pdev);
ndevs[dc->index_start / MAX_PORT] = NULL;
kfree(dc);
}
static void set_rts(const struct tty_struct *tty, int rts)
{
struct port *port = get_port_by_tty(tty);
port->ctrl_ul.RTS = rts;
port->update_flow_control = 1;
enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
}
static void set_dtr(const struct tty_struct *tty, int dtr)
{
struct port *port = get_port_by_tty(tty);
DBG1("SETTING DTR index: %d, dtr: %d", tty->index, dtr);
port->ctrl_ul.DTR = dtr;
port->update_flow_control = 1;
enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
}
/*
* ----------------------------------------------------------------------------
* TTY code
* ----------------------------------------------------------------------------
*/
static int ntty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct port *port = get_port_by_tty(tty);
struct nozomi *dc = get_dc_by_tty(tty);
int ret;
if (!port || !dc || dc->state != NOZOMI_STATE_READY)
return -ENODEV;
ret = tty_standard_install(driver, tty);
if (ret == 0)
tty->driver_data = port;
return ret;
}
static void ntty_cleanup(struct tty_struct *tty)
{
tty->driver_data = NULL;
}
static int ntty_activate(struct tty_port *tport, struct tty_struct *tty)
{
struct port *port = container_of(tport, struct port, port);
struct nozomi *dc = port->dc;
unsigned long flags;
DBG1("open: %d", port->token_dl);
spin_lock_irqsave(&dc->spin_mutex, flags);
dc->last_ier = dc->last_ier | port->token_dl;
writew(dc->last_ier, dc->reg_ier);
dc->open_ttys++;
spin_unlock_irqrestore(&dc->spin_mutex, flags);
printk("noz: activated %d: %p\n", tty->index, tport);
return 0;
}
static int ntty_open(struct tty_struct *tty, struct file *filp)
{
struct port *port = tty->driver_data;
return tty_port_open(&port->port, tty, filp);
}
static void ntty_shutdown(struct tty_port *tport)
{
struct port *port = container_of(tport, struct port, port);
struct nozomi *dc = port->dc;
unsigned long flags;
DBG1("close: %d", port->token_dl);
spin_lock_irqsave(&dc->spin_mutex, flags);
dc->last_ier &= ~(port->token_dl);
writew(dc->last_ier, dc->reg_ier);
dc->open_ttys--;
spin_unlock_irqrestore(&dc->spin_mutex, flags);
printk("noz: shutdown %p\n", tport);
}
static void ntty_close(struct tty_struct *tty, struct file *filp)
{
struct port *port = tty->driver_data;
if (port)
tty_port_close(&port->port, tty, filp);
}
static void ntty_hangup(struct tty_struct *tty)
{
struct port *port = tty->driver_data;
tty_port_hangup(&port->port);
}
/*
 * Called when the userspace process writes to the tty (/dev/noz*).
* Data is inserted into a fifo, which is then read and transferred to the modem.
*/
static ssize_t ntty_write(struct tty_struct *tty, const u8 *buffer,
size_t count)
{
int rval = -EINVAL;
struct nozomi *dc = get_dc_by_tty(tty);
struct port *port = tty->driver_data;
unsigned long flags;
if (!dc || !port)
return -ENODEV;
rval = kfifo_in(&port->fifo_ul, buffer, count);
spin_lock_irqsave(&dc->spin_mutex, flags);
/* CTS is only valid on the modem channel */
if (port == &(dc->port[PORT_MDM])) {
if (port->ctrl_dl.CTS) {
DBG4("Enable interrupt");
enable_transmit_ul(tty->index % MAX_PORT, dc);
} else {
dev_err(&dc->pdev->dev,
"CTS not active on modem port?\n");
}
} else {
enable_transmit_ul(tty->index % MAX_PORT, dc);
}
spin_unlock_irqrestore(&dc->spin_mutex, flags);
return rval;
}
/*
 * Calculate how much room is left in the device.
 * This method is called by the upper tty layer.
 * According to the n_tty.c sources it expects a value >= 0 and
 * does not check for negative values.
 *
 * If the port is unplugged, report lots of room and let the bits
* dribble away so we don't block anything.
*/
static unsigned int ntty_write_room(struct tty_struct *tty)
{
struct port *port = tty->driver_data;
unsigned int room = 4096;
const struct nozomi *dc = get_dc_by_tty(tty);
if (dc)
room = kfifo_avail(&port->fifo_ul);
return room;
}
/* Gets io control parameters */
static int ntty_tiocmget(struct tty_struct *tty)
{
const struct port *port = tty->driver_data;
const struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
const struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
/* Note: these could change under us but it is not clear this
matters if so */
return (ctrl_ul->RTS ? TIOCM_RTS : 0)
| (ctrl_ul->DTR ? TIOCM_DTR : 0)
| (ctrl_dl->DCD ? TIOCM_CAR : 0)
| (ctrl_dl->RI ? TIOCM_RNG : 0)
| (ctrl_dl->DSR ? TIOCM_DSR : 0)
| (ctrl_dl->CTS ? TIOCM_CTS : 0);
}
/* Sets io controls parameters */
static int ntty_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct nozomi *dc = get_dc_by_tty(tty);
unsigned long flags;
spin_lock_irqsave(&dc->spin_mutex, flags);
if (set & TIOCM_RTS)
set_rts(tty, 1);
else if (clear & TIOCM_RTS)
set_rts(tty, 0);
if (set & TIOCM_DTR)
set_dtr(tty, 1);
else if (clear & TIOCM_DTR)
set_dtr(tty, 0);
spin_unlock_irqrestore(&dc->spin_mutex, flags);
return 0;
}
static int ntty_cflags_changed(struct port *port, unsigned long flags,
struct async_icount *cprev)
{
const struct async_icount cnow = port->tty_icount;
int ret;
ret = ((flags & TIOCM_RNG) && (cnow.rng != cprev->rng))
|| ((flags & TIOCM_DSR) && (cnow.dsr != cprev->dsr))
|| ((flags & TIOCM_CD) && (cnow.dcd != cprev->dcd))
|| ((flags & TIOCM_CTS) && (cnow.cts != cprev->cts));
*cprev = cnow;
return ret;
}
static int ntty_tiocgicount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct port *port = tty->driver_data;
const struct async_icount cnow = port->tty_icount;
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->rx = cnow.rx;
icount->tx = cnow.tx;
icount->frame = cnow.frame;
icount->overrun = cnow.overrun;
icount->parity = cnow.parity;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
static int ntty_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct port *port = tty->driver_data;
int rval = -ENOIOCTLCMD;
switch (cmd) {
case TIOCMIWAIT: {
struct async_icount cprev = port->tty_icount;
rval = wait_event_interruptible(port->tty_wait,
ntty_cflags_changed(port, arg, &cprev));
break;
}
default:
DBG1("ERR: 0x%08X, %d", cmd, cmd);
break;
}
return rval;
}
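/*
 * The only command handled here is TIOCMIWAIT, which blocks until one of
 * the requested modem lines changes. A hypothetical userspace sketch:
 *
 *	ioctl(fd, TIOCMIWAIT, TIOCM_CAR | TIOCM_RNG);
 *
 * returns once carrier detect or ring changes state (see
 * ntty_cflags_changed()); every other command falls through to the
 * default case and returns -ENOIOCTLCMD so the tty core can handle it.
 */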
/*
* Called by the upper tty layer when tty buffers are ready
* to receive data again after a call to throttle.
*/
static void ntty_unthrottle(struct tty_struct *tty)
{
struct nozomi *dc = get_dc_by_tty(tty);
unsigned long flags;
spin_lock_irqsave(&dc->spin_mutex, flags);
enable_transmit_dl(tty->index % MAX_PORT, dc);
set_rts(tty, 1);
spin_unlock_irqrestore(&dc->spin_mutex, flags);
}
/*
* Called by the upper tty layer when the tty buffers are almost full.
 * The driver should stop sending more data.
*/
static void ntty_throttle(struct tty_struct *tty)
{
struct nozomi *dc = get_dc_by_tty(tty);
unsigned long flags;
spin_lock_irqsave(&dc->spin_mutex, flags);
set_rts(tty, 0);
spin_unlock_irqrestore(&dc->spin_mutex, flags);
}
/* Returns number of chars in buffer, called by tty layer */
static unsigned int ntty_chars_in_buffer(struct tty_struct *tty)
{
struct port *port = tty->driver_data;
struct nozomi *dc = get_dc_by_tty(tty);
if (unlikely(!dc || !port))
return 0;
return kfifo_len(&port->fifo_ul);
}
static const struct tty_port_operations noz_tty_port_ops = {
.activate = ntty_activate,
.shutdown = ntty_shutdown,
};
static const struct tty_operations tty_ops = {
.ioctl = ntty_ioctl,
.open = ntty_open,
.close = ntty_close,
.hangup = ntty_hangup,
.write = ntty_write,
.write_room = ntty_write_room,
.unthrottle = ntty_unthrottle,
.throttle = ntty_throttle,
.chars_in_buffer = ntty_chars_in_buffer,
.tiocmget = ntty_tiocmget,
.tiocmset = ntty_tiocmset,
.get_icount = ntty_tiocgicount,
.install = ntty_install,
.cleanup = ntty_cleanup,
};
/* Module initialization */
static struct pci_driver nozomi_driver = {
.name = NOZOMI_NAME,
.id_table = nozomi_pci_tbl,
.probe = nozomi_card_init,
.remove = nozomi_card_exit,
};
static __init int nozomi_init(void)
{
int ret;
ntty_driver = tty_alloc_driver(NTTY_TTY_MAXMINORS, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(ntty_driver))
return PTR_ERR(ntty_driver);
ntty_driver->driver_name = NOZOMI_NAME_TTY;
ntty_driver->name = "noz";
ntty_driver->major = 0;
ntty_driver->type = TTY_DRIVER_TYPE_SERIAL;
ntty_driver->subtype = SERIAL_TYPE_NORMAL;
ntty_driver->init_termios = tty_std_termios;
	ntty_driver->init_termios.c_cflag = B115200 | CS8 | CREAD |
			HUPCL | CLOCAL;
ntty_driver->init_termios.c_ispeed = 115200;
ntty_driver->init_termios.c_ospeed = 115200;
tty_set_operations(ntty_driver, &tty_ops);
ret = tty_register_driver(ntty_driver);
if (ret) {
printk(KERN_ERR "Nozomi: failed to register ntty driver\n");
goto free_tty;
}
ret = pci_register_driver(&nozomi_driver);
if (ret) {
printk(KERN_ERR "Nozomi: can't register pci driver\n");
goto unr_tty;
}
return 0;
unr_tty:
tty_unregister_driver(ntty_driver);
free_tty:
tty_driver_kref_put(ntty_driver);
return ret;
}
static __exit void nozomi_exit(void)
{
pci_unregister_driver(&nozomi_driver);
tty_unregister_driver(ntty_driver);
tty_driver_kref_put(ntty_driver);
}
module_init(nozomi_init);
module_exit(nozomi_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Nozomi driver");
| linux-master | drivers/tty/nozomi.c |
// SPDX-License-Identifier: GPL-1.0+
/* generic HDLC line discipline for Linux
*
* Written by Paul Fulghum [email protected]
* for Microgate Corporation
*
* Microgate and SyncLink are registered trademarks of Microgate Corporation
*
* Adapted from ppp.c, written by Michael Callahan <[email protected]>,
* Al Longyear <[email protected]>,
* Paul Mackerras <[email protected]>
*
* Original release 01/11/99
*
* This module implements the tty line discipline N_HDLC for use with
* tty device drivers that support bit-synchronous HDLC communications.
*
* All HDLC data is frame oriented which means:
*
* 1. tty write calls represent one complete transmit frame of data
* The device driver should accept the complete frame or none of
* the frame (busy) in the write method. Each write call should have
* a byte count in the range of 2-65535 bytes (2 is min HDLC frame
* with 1 addr byte and 1 ctrl byte). The max byte count of 65535
* should include any crc bytes required. For example, when using
* CCITT CRC32, 4 crc bytes are required, so the maximum size frame
* the application may transmit is limited to 65531 bytes. For CCITT
* CRC16, the maximum application frame size would be 65533.
*
*
 * 2. receive callbacks from the device driver represent
* one received frame. The device driver should bypass
* the tty flip buffer and call the line discipline receive
* callback directly to avoid fragmenting or concatenating
* multiple frames into a single receive callback.
*
* The HDLC line discipline queues the receive frames in separate
* buffers so complete receive frames can be returned by the
* tty read calls.
*
 * 3. tty read calls return an entire frame of data or nothing.
*
* 4. all send and receive data is considered raw. No processing
* or translation is performed by the line discipline, regardless
* of the tty flags
*
* 5. When line discipline is queried for the amount of receive
 * data available (FIONREAD), 0 is returned if no data is available,
* otherwise the count of the next available frame is returned.
* (instead of the sum of all received frame counts).
*
* These conventions allow the standard tty programming interface
* to be used for synchronous HDLC applications when used with
* this line discipline (or another line discipline that is frame
* oriented such as N_PPP).
*
* The SyncLink driver (synclink.c) implements both asynchronous
* (using standard line discipline N_TTY) and synchronous HDLC
* (using N_HDLC) communications, with the latter using the above
* conventions.
*
* This implementation is very basic and does not maintain
* any statistics. The main point is to enforce the raw data
* and frame orientation of HDLC communications.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
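/*
 * A minimal, hypothetical userspace sketch of the conventions above
 * (the device path is illustrative only):
 *
 *	int ldisc = N_HDLC;
 *	int fd = open("/dev/ttyS0", O_RDWR);
 *	ioctl(fd, TIOCSETD, &ldisc);      /+ attach this line discipline +/
 *	write(fd, frame, frame_len);      /+ one write() == one HDLC frame +/
 *	n = read(fd, buf, sizeof(buf));   /+ one read() == one whole frame +/
 */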
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/poll.h>
#include <linux/in.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h> /* used in new tty drivers */
#include <linux/signal.h> /* used in new tty drivers */
#include <linux/if.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include "tty.h"
/*
* Buffers for individual HDLC frames
*/
#define MAX_HDLC_FRAME_SIZE 65535
#define DEFAULT_RX_BUF_COUNT 10
#define MAX_RX_BUF_COUNT 60
#define DEFAULT_TX_BUF_COUNT 3
struct n_hdlc_buf {
struct list_head list_item;
int count;
char buf[];
};
struct n_hdlc_buf_list {
struct list_head list;
int count;
spinlock_t spinlock;
};
/**
* struct n_hdlc - per device instance data structure
* @tbusy: reentrancy flag for tx wakeup code
* @woke_up: tx wakeup needs to be run again as it was called while @tbusy
* @tx_buf_list: list of pending transmit frame buffers
* @rx_buf_list: list of received frame buffers
 * @tx_free_buf_list: list of unused transmit frame buffers
 * @rx_free_buf_list: list of unused received frame buffers
 * @write_work: work item that runs the transmit wakeup in process context
 * @tty_for_write_work: tty on whose behalf @write_work was scheduled
 */
struct n_hdlc {
bool tbusy;
bool woke_up;
struct n_hdlc_buf_list tx_buf_list;
struct n_hdlc_buf_list rx_buf_list;
struct n_hdlc_buf_list tx_free_buf_list;
struct n_hdlc_buf_list rx_free_buf_list;
struct work_struct write_work;
struct tty_struct *tty_for_write_work;
};
/*
* HDLC buffer list manipulation functions
*/
static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
struct n_hdlc_buf *buf);
static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
struct n_hdlc_buf *buf);
static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
/* Local functions */
static struct n_hdlc *n_hdlc_alloc(void);
static void n_hdlc_tty_write_work(struct work_struct *work);
/* max frame size for memory allocations */
static int maxframe = 4096;
static void flush_rx_queue(struct tty_struct *tty)
{
struct n_hdlc *n_hdlc = tty->disc_data;
struct n_hdlc_buf *buf;
while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, buf);
}
static void flush_tx_queue(struct tty_struct *tty)
{
struct n_hdlc *n_hdlc = tty->disc_data;
struct n_hdlc_buf *buf;
while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
}
static void n_hdlc_free_buf_list(struct n_hdlc_buf_list *list)
{
struct n_hdlc_buf *buf;
do {
buf = n_hdlc_buf_get(list);
kfree(buf);
} while (buf);
}
/**
* n_hdlc_tty_close - line discipline close
* @tty: pointer to tty info structure
*
* Called when the line discipline is changed to something
* else, the tty is closed, or the tty detects a hangup.
*/
static void n_hdlc_tty_close(struct tty_struct *tty)
{
struct n_hdlc *n_hdlc = tty->disc_data;
#if defined(TTY_NO_WRITE_SPLIT)
clear_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
#endif
tty->disc_data = NULL;
/* Ensure that the n_hdlcd process is not hanging on select()/poll() */
wake_up_interruptible(&tty->read_wait);
wake_up_interruptible(&tty->write_wait);
cancel_work_sync(&n_hdlc->write_work);
n_hdlc_free_buf_list(&n_hdlc->rx_free_buf_list);
n_hdlc_free_buf_list(&n_hdlc->tx_free_buf_list);
n_hdlc_free_buf_list(&n_hdlc->rx_buf_list);
n_hdlc_free_buf_list(&n_hdlc->tx_buf_list);
kfree(n_hdlc);
} /* end of n_hdlc_tty_close() */
/**
* n_hdlc_tty_open - called when line discipline changed to n_hdlc
* @tty: pointer to tty info structure
*
* Returns 0 if success, otherwise error code
*/
static int n_hdlc_tty_open(struct tty_struct *tty)
{
struct n_hdlc *n_hdlc = tty->disc_data;
pr_debug("%s() called (device=%s)\n", __func__, tty->name);
/* There should not be an existing table for this slot. */
if (n_hdlc) {
pr_err("%s: tty already associated!\n", __func__);
return -EEXIST;
}
n_hdlc = n_hdlc_alloc();
if (!n_hdlc) {
pr_err("%s: n_hdlc_alloc failed\n", __func__);
return -ENFILE;
}
INIT_WORK(&n_hdlc->write_work, n_hdlc_tty_write_work);
n_hdlc->tty_for_write_work = tty;
tty->disc_data = n_hdlc;
tty->receive_room = 65536;
/* change tty_io write() to not split large writes into 8K chunks */
set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
/* flush receive data from driver */
tty_driver_flush_buffer(tty);
return 0;
} /* end of n_hdlc_tty_open() */
/**
* n_hdlc_send_frames - send frames on pending send buffer list
* @n_hdlc: pointer to ldisc instance data
* @tty: pointer to tty instance data
*
 * Send frames on the pending send buffer list until the driver does not
 * accept a frame (busy). This function is called after adding a frame to the
 * send buffer list and by the tty wakeup callback.
*/
static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
{
	int actual;
unsigned long flags;
struct n_hdlc_buf *tbuf;
check_again:
spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
if (n_hdlc->tbusy) {
n_hdlc->woke_up = true;
spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
return;
}
n_hdlc->tbusy = true;
n_hdlc->woke_up = false;
spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
while (tbuf) {
pr_debug("sending frame %p, count=%d\n", tbuf, tbuf->count);
/* Send the next block of data to device */
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
actual = tty->ops->write(tty, tbuf->buf, tbuf->count);
/* rollback was possible and has been done */
if (actual == -ERESTARTSYS) {
n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
break;
}
/* if transmit error, throw frame away by */
/* pretending it was accepted by driver */
if (actual < 0)
actual = tbuf->count;
if (actual == tbuf->count) {
pr_debug("frame %p completed\n", tbuf);
/* free current transmit buffer */
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
			/* wake up sleeping writers */
wake_up_interruptible(&tty->write_wait);
/* get next pending transmit buffer */
tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
} else {
pr_debug("frame %p pending\n", tbuf);
/*
* the buffer was not accepted by driver,
* return it back into tx queue
*/
n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
break;
}
}
if (!tbuf)
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
/* Clear the re-entry flag */
spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
n_hdlc->tbusy = false;
spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
if (n_hdlc->woke_up)
goto check_again;
} /* end of n_hdlc_send_frames() */
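/*
 * Note on the tbusy/woke_up handshake above: tbusy acts as a try-lock. A
 * wakeup that arrives while another context owns the send loop only sets
 * woke_up; the owner re-checks woke_up after clearing tbusy and jumps back
 * to check_again, so no transmit wakeup is ever lost.
 */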
/**
* n_hdlc_tty_write_work - Asynchronous callback for transmit wakeup
* @work: pointer to work_struct
*
* Called when low level device driver can accept more send data.
*/
static void n_hdlc_tty_write_work(struct work_struct *work)
{
struct n_hdlc *n_hdlc = container_of(work, struct n_hdlc, write_work);
struct tty_struct *tty = n_hdlc->tty_for_write_work;
n_hdlc_send_frames(n_hdlc, tty);
} /* end of n_hdlc_tty_write_work() */
/**
* n_hdlc_tty_wakeup - Callback for transmit wakeup
* @tty: pointer to associated tty instance data
*
* Called when low level device driver can accept more send data.
*/
static void n_hdlc_tty_wakeup(struct tty_struct *tty)
{
struct n_hdlc *n_hdlc = tty->disc_data;
schedule_work(&n_hdlc->write_work);
} /* end of n_hdlc_tty_wakeup() */
/**
* n_hdlc_tty_receive - Called by tty driver when receive data is available
* @tty: pointer to tty instance data
* @data: pointer to received data
* @flags: pointer to flags for data
* @count: count of received data in bytes
*
* Called by tty low level driver when receive data is available. Data is
* interpreted as one HDLC frame.
*/
static void n_hdlc_tty_receive(struct tty_struct *tty, const u8 *data,
const u8 *flags, size_t count)
{
	struct n_hdlc *n_hdlc = tty->disc_data;
	struct n_hdlc_buf *buf;
pr_debug("%s() called count=%zu\n", __func__, count);
if (count > maxframe) {
pr_debug("rx count>maxframesize, data discarded\n");
return;
}
/* get a free HDLC buffer */
buf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list);
if (!buf) {
/*
* no buffers in free list, attempt to allocate another rx
* buffer unless the maximum count has been reached
*/
if (n_hdlc->rx_buf_list.count < MAX_RX_BUF_COUNT)
buf = kmalloc(struct_size(buf, buf, maxframe),
GFP_ATOMIC);
}
if (!buf) {
pr_debug("no more rx buffers, data discarded\n");
return;
}
/* copy received data to HDLC buffer */
memcpy(buf->buf, data, count);
buf->count = count;
/* add HDLC buffer to list of received frames */
n_hdlc_buf_put(&n_hdlc->rx_buf_list, buf);
/* wake up any blocked reads and perform async signalling */
wake_up_interruptible(&tty->read_wait);
if (tty->fasync != NULL)
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
} /* end of n_hdlc_tty_receive() */
/**
* n_hdlc_tty_read - Called to retrieve one frame of data (if available)
* @tty: pointer to tty instance data
* @file: pointer to open file object
* @kbuf: pointer to returned data buffer
* @nr: size of returned data buffer
* @cookie: stored rbuf from previous run
* @offset: offset into the data buffer
*
* Returns the number of bytes returned or error code.
*/
static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
u8 *kbuf, size_t nr, void **cookie,
unsigned long offset)
{
struct n_hdlc *n_hdlc = tty->disc_data;
int ret = 0;
struct n_hdlc_buf *rbuf;
DECLARE_WAITQUEUE(wait, current);
/* Is this a repeated call for an rbuf we already found earlier? */
rbuf = *cookie;
if (rbuf)
goto have_rbuf;
add_wait_queue(&tty->read_wait, &wait);
for (;;) {
if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
ret = -EIO;
break;
}
if (tty_hung_up_p(file))
break;
set_current_state(TASK_INTERRUPTIBLE);
rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
if (rbuf)
break;
/* no data */
if (tty_io_nonblock(tty, file)) {
ret = -EAGAIN;
break;
}
schedule();
if (signal_pending(current)) {
ret = -EINTR;
break;
}
}
remove_wait_queue(&tty->read_wait, &wait);
__set_current_state(TASK_RUNNING);
if (!rbuf)
return ret;
*cookie = rbuf;
have_rbuf:
/* Have we used it up entirely? */
if (offset >= rbuf->count)
goto done_with_rbuf;
/* More data to go, but can't copy any more? EOVERFLOW */
ret = -EOVERFLOW;
if (!nr)
goto done_with_rbuf;
/* Copy as much data as possible */
ret = rbuf->count - offset;
if (ret > nr)
ret = nr;
memcpy(kbuf, rbuf->buf+offset, ret);
offset += ret;
/* If we still have data left, we leave the rbuf in the cookie */
if (offset < rbuf->count)
return ret;
done_with_rbuf:
*cookie = NULL;
if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
kfree(rbuf);
else
n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
return ret;
} /* end of n_hdlc_tty_read() */
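/*
 * Note on the read cookie above: if a frame does not fit into the caller's
 * buffer, the rbuf pointer is parked in *cookie and the tty core re-invokes
 * the read with an advancing @offset until the frame is consumed. The buffer
 * is then recycled onto the free list, or freed outright once the free list
 * already holds more than DEFAULT_RX_BUF_COUNT buffers.
 */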
/**
* n_hdlc_tty_write - write a single frame of data to device
* @tty: pointer to associated tty device instance data
* @file: pointer to file object data
* @data: pointer to transmit data (one frame)
* @count: size of transmit frame in bytes
*
* Returns the number of bytes written (or error code).
*/
static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
const u8 *data, size_t count)
{
struct n_hdlc *n_hdlc = tty->disc_data;
int error = 0;
DECLARE_WAITQUEUE(wait, current);
struct n_hdlc_buf *tbuf;
pr_debug("%s() called count=%zd\n", __func__, count);
/* verify frame size */
if (count > maxframe) {
pr_debug("%s: truncating user packet from %zu to %d\n",
__func__, count, maxframe);
count = maxframe;
}
add_wait_queue(&tty->write_wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
if (tbuf)
break;
if (tty_io_nonblock(tty, file)) {
error = -EAGAIN;
break;
}
schedule();
if (signal_pending(current)) {
error = -EINTR;
break;
}
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
if (!error) {
/* Retrieve the user's buffer */
memcpy(tbuf->buf, data, count);
/* Send the data */
tbuf->count = error = count;
n_hdlc_buf_put(&n_hdlc->tx_buf_list, tbuf);
n_hdlc_send_frames(n_hdlc, tty);
}
return error;
} /* end of n_hdlc_tty_write() */
/**
* n_hdlc_tty_ioctl - process IOCTL system call for the tty device.
* @tty: pointer to tty instance data
* @cmd: IOCTL command code
* @arg: argument for IOCTL call (cmd dependent)
*
* Returns command dependent result.
*/
static int n_hdlc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct n_hdlc *n_hdlc = tty->disc_data;
int error = 0;
int count;
unsigned long flags;
struct n_hdlc_buf *buf = NULL;
pr_debug("%s() called %d\n", __func__, cmd);
switch (cmd) {
case FIONREAD:
/* report count of read data available */
/* in next available frame (if any) */
spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock, flags);
buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
struct n_hdlc_buf, list_item);
if (buf)
count = buf->count;
else
count = 0;
spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock, flags);
error = put_user(count, (int __user *)arg);
break;
case TIOCOUTQ:
/* get the pending tx byte count in the driver */
count = tty_chars_in_buffer(tty);
/* add size of next output frame in queue */
spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
struct n_hdlc_buf, list_item);
if (buf)
count += buf->count;
spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
error = put_user(count, (int __user *)arg);
break;
case TCFLSH:
switch (arg) {
case TCIOFLUSH:
case TCOFLUSH:
flush_tx_queue(tty);
}
fallthrough; /* to default */
default:
error = n_tty_ioctl_helper(tty, cmd, arg);
break;
}
return error;
} /* end of n_hdlc_tty_ioctl() */
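/*
 * Example of the FIONREAD convention implemented above: with two queued
 * frames of 100 and 300 bytes, FIONREAD reports 100 (the size of the next
 * frame), not the 400-byte sum of all queued frames.
 */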
/**
* n_hdlc_tty_poll - TTY callback for poll system call
* @tty: pointer to tty instance data
* @filp: pointer to open file object for device
* @wait: wait queue for operations
*
* Determine which operations (read/write) will not block and return info
* to caller.
* Returns a bit mask containing info on which ops will not block.
*/
static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait)
{
struct n_hdlc *n_hdlc = tty->disc_data;
__poll_t mask = 0;
/*
* queue the current process into any wait queue that may awaken in the
* future (read and write)
*/
poll_wait(filp, &tty->read_wait, wait);
poll_wait(filp, &tty->write_wait, wait);
/* set bits for operations that won't block */
if (!list_empty(&n_hdlc->rx_buf_list.list))
mask |= EPOLLIN | EPOLLRDNORM; /* readable */
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
mask |= EPOLLHUP;
if (tty_hung_up_p(filp))
mask |= EPOLLHUP;
if (!tty_is_writelocked(tty) &&
!list_empty(&n_hdlc->tx_free_buf_list.list))
mask |= EPOLLOUT | EPOLLWRNORM; /* writable */
return mask;
} /* end of n_hdlc_tty_poll() */
static void n_hdlc_alloc_buf(struct n_hdlc_buf_list *list, unsigned int count,
const char *name)
{
struct n_hdlc_buf *buf;
unsigned int i;
for (i = 0; i < count; i++) {
buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
if (!buf) {
pr_debug("%s(), kmalloc() failed for %s buffer %u\n",
__func__, name, i);
return;
}
n_hdlc_buf_put(list, buf);
}
}
/**
* n_hdlc_alloc - allocate an n_hdlc instance data structure
*
* Returns a pointer to newly created structure if success, otherwise %NULL
*/
static struct n_hdlc *n_hdlc_alloc(void)
{
struct n_hdlc *n_hdlc = kzalloc(sizeof(*n_hdlc), GFP_KERNEL);
if (!n_hdlc)
return NULL;
spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
n_hdlc_alloc_buf(&n_hdlc->rx_free_buf_list, DEFAULT_RX_BUF_COUNT, "rx");
n_hdlc_alloc_buf(&n_hdlc->tx_free_buf_list, DEFAULT_TX_BUF_COUNT, "tx");
return n_hdlc;
} /* end of n_hdlc_alloc() */
/**
* n_hdlc_buf_return - put the HDLC buffer after the head of the specified list
* @buf_list: pointer to the buffer list
* @buf: pointer to the buffer
*/
static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
struct n_hdlc_buf *buf)
{
unsigned long flags;
spin_lock_irqsave(&buf_list->spinlock, flags);
list_add(&buf->list_item, &buf_list->list);
buf_list->count++;
spin_unlock_irqrestore(&buf_list->spinlock, flags);
}
/**
* n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
* @buf_list: pointer to buffer list
* @buf: pointer to buffer
*/
static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
struct n_hdlc_buf *buf)
{
unsigned long flags;
spin_lock_irqsave(&buf_list->spinlock, flags);
list_add_tail(&buf->list_item, &buf_list->list);
buf_list->count++;
spin_unlock_irqrestore(&buf_list->spinlock, flags);
} /* end of n_hdlc_buf_put() */
/**
* n_hdlc_buf_get - remove and return an HDLC buffer from list
* @buf_list: pointer to HDLC buffer list
*
* Remove and return an HDLC buffer from the head of the specified HDLC buffer
* list.
* Returns a pointer to HDLC buffer if available, otherwise %NULL.
*/
static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
{
unsigned long flags;
struct n_hdlc_buf *buf;
spin_lock_irqsave(&buf_list->spinlock, flags);
buf = list_first_entry_or_null(&buf_list->list,
struct n_hdlc_buf, list_item);
if (buf) {
list_del(&buf->list_item);
buf_list->count--;
}
spin_unlock_irqrestore(&buf_list->spinlock, flags);
return buf;
} /* end of n_hdlc_buf_get() */
static struct tty_ldisc_ops n_hdlc_ldisc = {
.owner = THIS_MODULE,
.num = N_HDLC,
.name = "hdlc",
.open = n_hdlc_tty_open,
.close = n_hdlc_tty_close,
.read = n_hdlc_tty_read,
.write = n_hdlc_tty_write,
.ioctl = n_hdlc_tty_ioctl,
.poll = n_hdlc_tty_poll,
.receive_buf = n_hdlc_tty_receive,
.write_wakeup = n_hdlc_tty_wakeup,
.flush_buffer = flush_rx_queue,
};
static int __init n_hdlc_init(void)
{
int status;
/* range check maxframe arg */
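	/* e.g. "maxframe=2048" on the module command line is raised to 4096 */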
maxframe = clamp(maxframe, 4096, MAX_HDLC_FRAME_SIZE);
status = tty_register_ldisc(&n_hdlc_ldisc);
if (!status)
pr_info("N_HDLC line discipline registered with maxframe=%d\n",
maxframe);
else
pr_err("N_HDLC: error registering line discipline: %d\n",
status);
return status;
} /* end of n_hdlc_init() */
static void __exit n_hdlc_exit(void)
{
tty_unregister_ldisc(&n_hdlc_ldisc);
}
module_init(n_hdlc_init);
module_exit(n_hdlc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Fulghum [email protected]");
module_param(maxframe, int, 0);
MODULE_ALIAS_LDISC(N_HDLC);
| linux-master | drivers/tty/n_hdlc.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* n_tty.c --- implements the N_TTY line discipline.
*
* This code used to be in tty_io.c, but things are getting hairy
* enough that it made sense to split things off. (The N_TTY
* processing has changed so much that it's hardly recognizable,
* anyway...)
*
* Note that the open routine for N_TTY is guaranteed never to return
* an error. This is because Linux will fall back to setting a line
* to N_TTY if it can not switch to any other line discipline.
*
* Written by Theodore Ts'o, Copyright 1994.
*
* This file also contains code originally written by Linus Torvalds,
* Copyright 1991, 1992, 1993, and by Julian Cowley, Copyright 1994.
*
* Reduced memory usage for older ARM systems - Russell King.
*
* 2000/01/20 Fixed SMP locking on put_tty_queue using bits of
* the patch by Andrew J. Kroll <[email protected]>
* who actually finally proved there really was a race.
*
* 2002/03/18 Implemented n_tty_wakeup to send SIGIO POLL_OUTs to
* waiting writing processes-Sapan Bhatia <[email protected]>.
* Also fixed a bug in BLOCKING mode where n_tty_write returns
* EAGAIN
*/
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/jiffies.h>
#include <linux/math.h>
#include <linux/poll.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include "tty.h"
/*
* Until this number of characters is queued in the xmit buffer, select will
* return "we have room for writes".
*/
#define WAKEUP_CHARS 256
/*
* This defines the low- and high-watermarks for throttling and
* unthrottling the TTY driver. These watermarks are used for
* controlling the space in the read buffer.
*/
#define TTY_THRESHOLD_THROTTLE 128 /* now based on remaining room */
#define TTY_THRESHOLD_UNTHROTTLE 128
/*
* Special byte codes used in the echo buffer to represent operations
* or special handling of characters. Bytes in the echo buffer that
* are not part of such special blocks are treated as normal character
* codes.
*/
#define ECHO_OP_START 0xff
#define ECHO_OP_MOVE_BACK_COL 0x80
#define ECHO_OP_SET_CANON_COL 0x81
#define ECHO_OP_ERASE_TAB 0x82
#define ECHO_COMMIT_WATERMARK 256
#define ECHO_BLOCK 256
#define ECHO_DISCARD_WATERMARK	(N_TTY_BUF_SIZE - (ECHO_BLOCK + 32))
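/*
 * Example of the encoding: with ECHOCTL set, echo_char() stores a control
 * character as the pair {ECHO_OP_START, c}; __process_echoes() later emits
 * '^' and c ^ 0100, so Ctrl-C (0x03) is echoed as "^C". A literal 0xff data
 * byte is escaped by doubling it ({ECHO_OP_START, ECHO_OP_START}).
 */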
#undef N_TTY_TRACE
#ifdef N_TTY_TRACE
# define n_tty_trace(f, args...) trace_printk(f, ##args)
#else
# define n_tty_trace(f, args...) no_printk(f, ##args)
#endif
struct n_tty_data {
/* producer-published */
size_t read_head;
size_t commit_head;
size_t canon_head;
size_t echo_head;
size_t echo_commit;
size_t echo_mark;
DECLARE_BITMAP(char_map, 256);
/* private to n_tty_receive_overrun (single-threaded) */
unsigned long overrun_time;
unsigned int num_overrun;
/* non-atomic */
bool no_room;
/* must hold exclusive termios_rwsem to reset these */
unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1;
unsigned char push:1;
/* shared by producer and consumer */
u8 read_buf[N_TTY_BUF_SIZE];
DECLARE_BITMAP(read_flags, N_TTY_BUF_SIZE);
u8 echo_buf[N_TTY_BUF_SIZE];
/* consumer-published */
size_t read_tail;
size_t line_start;
/* # of chars looked ahead (to find software flow control chars) */
size_t lookahead_count;
/* protected by output lock */
unsigned int column;
unsigned int canon_column;
size_t echo_tail;
struct mutex atomic_read_lock;
struct mutex output_lock;
};
#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
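/*
 * The heads and tails below are free-running size_t counters; only MASK()ed
 * values index the buffers. E.g. with the power-of-two N_TTY_BUF_SIZE of
 * 4096 bytes, MASK(4098) == 2, and differences such as read_cnt() stay
 * correct even when the counters eventually wrap.
 */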
static inline size_t read_cnt(struct n_tty_data *ldata)
{
return ldata->read_head - ldata->read_tail;
}
static inline u8 read_buf(struct n_tty_data *ldata, size_t i)
{
return ldata->read_buf[MASK(i)];
}
static inline u8 *read_buf_addr(struct n_tty_data *ldata, size_t i)
{
return &ldata->read_buf[MASK(i)];
}
static inline u8 echo_buf(struct n_tty_data *ldata, size_t i)
{
smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
return ldata->echo_buf[MASK(i)];
}
static inline u8 *echo_buf_addr(struct n_tty_data *ldata, size_t i)
{
return &ldata->echo_buf[MASK(i)];
}
/* If we are not echoing the data, perhaps this is a secret so erase it */
static void zero_buffer(const struct tty_struct *tty, u8 *buffer, size_t size)
{
if (L_ICANON(tty) && !L_ECHO(tty))
memset(buffer, 0, size);
}
static void tty_copy(const struct tty_struct *tty, void *to, size_t tail,
size_t n)
{
struct n_tty_data *ldata = tty->disc_data;
size_t size = N_TTY_BUF_SIZE - tail;
void *from = read_buf_addr(ldata, tail);
if (n > size) {
tty_audit_add_data(tty, from, size);
memcpy(to, from, size);
zero_buffer(tty, from, size);
to += size;
n -= size;
from = ldata->read_buf;
}
tty_audit_add_data(tty, from, n);
memcpy(to, from, n);
zero_buffer(tty, from, n);
}
/**
* n_tty_kick_worker - start input worker (if required)
* @tty: terminal
*
* Re-schedules the flip buffer work if it may have stopped.
*
* Locking:
* * Caller holds exclusive %termios_rwsem, or
* * n_tty_read()/consumer path:
* holds non-exclusive %termios_rwsem
*/
static void n_tty_kick_worker(const struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
/* Did the input worker stop? Restart it */
if (unlikely(READ_ONCE(ldata->no_room))) {
WRITE_ONCE(ldata->no_room, 0);
WARN_RATELIMIT(tty->port->itty == NULL,
"scheduling with invalid itty\n");
/* see if ldisc has been killed - if so, this means that
* even though the ldisc has been halted and ->buf.work
* cancelled, ->buf.work is about to be rescheduled
*/
WARN_RATELIMIT(test_bit(TTY_LDISC_HALTED, &tty->flags),
"scheduling buffer work for halted ldisc\n");
tty_buffer_restart_work(tty->port);
}
}
static ssize_t chars_in_buffer(const struct tty_struct *tty)
{
const struct n_tty_data *ldata = tty->disc_data;
size_t head = ldata->icanon ? ldata->canon_head : ldata->commit_head;
return head - ldata->read_tail;
}
/**
* n_tty_write_wakeup - asynchronous I/O notifier
* @tty: tty device
*
* Required for the ptys, serial driver etc. since processes that attach
* themselves to the master and rely on ASYNC IO must be woken up.
*/
static void n_tty_write_wakeup(struct tty_struct *tty)
{
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
}
static void n_tty_check_throttle(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
/*
* Check the remaining room for the input canonicalization
* mode. We don't want to throttle the driver if we're in
* canonical mode and don't have a newline yet!
*/
if (ldata->icanon && ldata->canon_head == ldata->read_tail)
return;
while (1) {
int throttled;
tty_set_flow_change(tty, TTY_THROTTLE_SAFE);
if (N_TTY_BUF_SIZE - read_cnt(ldata) >= TTY_THRESHOLD_THROTTLE)
break;
throttled = tty_throttle_safe(tty);
if (!throttled)
break;
}
__tty_set_flow_change(tty, 0);
}
static void n_tty_check_unthrottle(struct tty_struct *tty)
{
if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
return;
n_tty_kick_worker(tty);
tty_wakeup(tty->link);
return;
}
/* If there is enough space in the read buffer now, let the
* low-level driver know. We use chars_in_buffer() to
* check the buffer, as it now knows about canonical mode.
* Otherwise, if the driver is throttled and the line is
* longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
* we won't get any more characters.
*/
while (1) {
int unthrottled;
tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
break;
n_tty_kick_worker(tty);
unthrottled = tty_unthrottle_safe(tty);
if (!unthrottled)
break;
}
__tty_set_flow_change(tty, 0);
}
/**
* put_tty_queue - add character to tty
* @c: character
* @ldata: n_tty data
*
* Add a character to the tty read_buf queue.
*
* Locking:
* * n_tty_receive_buf()/producer path:
* caller holds non-exclusive %termios_rwsem
*/
static inline void put_tty_queue(u8 c, struct n_tty_data *ldata)
{
*read_buf_addr(ldata, ldata->read_head) = c;
ldata->read_head++;
}
/**
* reset_buffer_flags - reset buffer state
* @ldata: line disc data to reset
*
* Reset the read buffer counters and clear the flags. Called from
* n_tty_open() and n_tty_flush_buffer().
*
* Locking:
* * caller holds exclusive %termios_rwsem, or
* * (locking is not required)
*/
static void reset_buffer_flags(struct n_tty_data *ldata)
{
ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
ldata->commit_head = 0;
ldata->line_start = 0;
ldata->erasing = 0;
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->push = 0;
ldata->lookahead_count = 0;
}
static void n_tty_packet_mode_flush(struct tty_struct *tty)
{
unsigned long flags;
if (tty->link->ctrl.packet) {
spin_lock_irqsave(&tty->ctrl.lock, flags);
tty->ctrl.pktstatus |= TIOCPKT_FLUSHREAD;
spin_unlock_irqrestore(&tty->ctrl.lock, flags);
wake_up_interruptible(&tty->link->read_wait);
}
}
/**
* n_tty_flush_buffer - clean input queue
* @tty: terminal device
*
* Flush the input buffer. Called when the tty layer wants the buffer flushed
* (eg at hangup) or when the %N_TTY line discipline internally has to clean
* the pending queue (for example some signals).
*
* Holds %termios_rwsem to exclude producer/consumer while buffer indices are
* reset.
*
* Locking: %ctrl.lock, exclusive %termios_rwsem
*/
static void n_tty_flush_buffer(struct tty_struct *tty)
{
down_write(&tty->termios_rwsem);
reset_buffer_flags(tty->disc_data);
n_tty_kick_worker(tty);
if (tty->link)
n_tty_packet_mode_flush(tty);
up_write(&tty->termios_rwsem);
}
/**
* is_utf8_continuation - utf8 multibyte check
* @c: byte to check
*
* Returns: true if the utf8 character @c is a multibyte continuation
* character. We use this to correctly compute the on-screen size of the
* character when printing.
*/
static inline int is_utf8_continuation(u8 c)
{
return (c & 0xc0) == 0x80;
}
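/*
 * E.g. U+00E9 is encoded as 0xc3 0xa9; the trailing byte matches the
 * 10xxxxxx continuation pattern (0xa9 & 0xc0 == 0x80), so it does not
 * advance the on-screen column.
 */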
/**
* is_continuation - multibyte check
* @c: byte to check
* @tty: terminal device
*
* Returns: true if the utf8 character @c is a multibyte continuation character
* and the terminal is in unicode mode.
*/
static inline int is_continuation(u8 c, const struct tty_struct *tty)
{
return I_IUTF8(tty) && is_utf8_continuation(c);
}
/**
* do_output_char - output one character
* @c: character (or partial unicode symbol)
* @tty: terminal device
* @space: space available in tty driver write buffer
*
* This is a helper function that handles one output character (including
* special characters like TAB, CR, LF, etc.), doing OPOST processing and
* putting the results in the tty driver's write buffer.
*
* Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY and NLDLY.
* They simply aren't relevant in the world today. If you ever need them, add
* them here.
*
* Returns: the number of bytes of buffer space used or -1 if no space left.
*
* Locking: should be called under the %output_lock to protect the column state
* and space left in the buffer.
*/
static int do_output_char(u8 c, struct tty_struct *tty, int space)
{
struct n_tty_data *ldata = tty->disc_data;
int spaces;
if (!space)
return -1;
switch (c) {
case '\n':
if (O_ONLRET(tty))
ldata->column = 0;
if (O_ONLCR(tty)) {
if (space < 2)
return -1;
ldata->canon_column = ldata->column = 0;
tty->ops->write(tty, "\r\n", 2);
return 2;
}
ldata->canon_column = ldata->column;
break;
case '\r':
if (O_ONOCR(tty) && ldata->column == 0)
return 0;
if (O_OCRNL(tty)) {
c = '\n';
if (O_ONLRET(tty))
ldata->canon_column = ldata->column = 0;
break;
}
ldata->canon_column = ldata->column = 0;
break;
case '\t':
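		/* Advance to the next 8-column tab stop, e.g. column 3 -> 5 spaces. */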
spaces = 8 - (ldata->column & 7);
if (O_TABDLY(tty) == XTABS) {
if (space < spaces)
return -1;
ldata->column += spaces;
tty->ops->write(tty, " ", spaces);
return spaces;
}
ldata->column += spaces;
break;
case '\b':
if (ldata->column > 0)
ldata->column--;
break;
default:
if (!iscntrl(c)) {
if (O_OLCUC(tty))
c = toupper(c);
if (!is_continuation(c, tty))
ldata->column++;
}
break;
}
tty_put_char(tty, c);
return 1;
}
/**
* process_output - output post processor
* @c: character (or partial unicode symbol)
* @tty: terminal device
*
* Output one character with OPOST processing.
*
* Returns: -1 when the output device is full and the character must be
* retried.
*
* Locking: %output_lock to protect column state and space left (also, this is
*called from n_tty_write() under the tty layer write lock).
*/
static int process_output(u8 c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
int space, retval;
mutex_lock(&ldata->output_lock);
space = tty_write_room(tty);
retval = do_output_char(c, tty, space);
mutex_unlock(&ldata->output_lock);
if (retval < 0)
return -1;
else
return 0;
}
/**
* process_output_block - block post processor
* @tty: terminal device
* @buf: character buffer
* @nr: number of bytes to output
*
* Output a block of characters with OPOST processing.
*
* This path is used to speed up block console writes, among other things when
* processing blocks of output data. It handles only the simple cases normally
* found and helps to generate blocks of symbols for the console driver and
* thus improve performance.
*
* Returns: the number of characters output.
*
* Locking: %output_lock to protect column state and space left (also, this is
* called from n_tty_write() under the tty layer write lock).
*/
static ssize_t process_output_block(struct tty_struct *tty,
const u8 *buf, unsigned int nr)
{
struct n_tty_data *ldata = tty->disc_data;
int space;
int i;
const u8 *cp;
mutex_lock(&ldata->output_lock);
space = tty_write_room(tty);
if (space <= 0) {
mutex_unlock(&ldata->output_lock);
return space;
}
if (nr > space)
nr = space;
for (i = 0, cp = buf; i < nr; i++, cp++) {
u8 c = *cp;
switch (c) {
case '\n':
if (O_ONLRET(tty))
ldata->column = 0;
if (O_ONLCR(tty))
goto break_out;
ldata->canon_column = ldata->column;
break;
case '\r':
if (O_ONOCR(tty) && ldata->column == 0)
goto break_out;
if (O_OCRNL(tty))
goto break_out;
ldata->canon_column = ldata->column = 0;
break;
case '\t':
goto break_out;
case '\b':
if (ldata->column > 0)
ldata->column--;
break;
default:
if (!iscntrl(c)) {
if (O_OLCUC(tty))
goto break_out;
if (!is_continuation(c, tty))
ldata->column++;
}
break;
}
}
break_out:
i = tty->ops->write(tty, buf, i);
mutex_unlock(&ldata->output_lock);
return i;
}
static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail,
int space)
{
struct n_tty_data *ldata = tty->disc_data;
u8 op;
/*
* Since add_echo_byte() is called without holding output_lock, we
* might see only portion of multi-byte operation.
*/
if (MASK(ldata->echo_commit) == MASK(*tail + 1))
return -ENODATA;
/*
* If the buffer byte is the start of a multi-byte operation, get the
* next byte, which is either the op code or a control character value.
*/
op = echo_buf(ldata, *tail + 1);
switch (op) {
case ECHO_OP_ERASE_TAB: {
unsigned int num_chars, num_bs;
if (MASK(ldata->echo_commit) == MASK(*tail + 2))
return -ENODATA;
num_chars = echo_buf(ldata, *tail + 2);
/*
* Determine how many columns to go back in order to erase the
* tab. This depends on the number of columns used by other
* characters within the tab area. If this (modulo 8) count is
* from the start of input rather than from a previous tab, we
* offset by canon column. Otherwise, tab spacing is normal.
*/
if (!(num_chars & 0x80))
num_chars += ldata->canon_column;
num_bs = 8 - (num_chars & 7);
if (num_bs > space)
return -ENOSPC;
space -= num_bs;
while (num_bs--) {
tty_put_char(tty, '\b');
if (ldata->column > 0)
ldata->column--;
}
*tail += 3;
break;
}
case ECHO_OP_SET_CANON_COL:
ldata->canon_column = ldata->column;
*tail += 2;
break;
case ECHO_OP_MOVE_BACK_COL:
if (ldata->column > 0)
ldata->column--;
*tail += 2;
break;
case ECHO_OP_START:
/* This is an escaped echo op start code */
if (!space)
return -ENOSPC;
tty_put_char(tty, ECHO_OP_START);
ldata->column++;
space--;
*tail += 2;
break;
default:
/*
* If the op is not a special byte code, it is a ctrl char
* tagged to be echoed as "^X" (where X is the letter
* representing the control char). Note that we must ensure
* there is enough space for the whole ctrl pair.
*/
if (space < 2)
return -ENOSPC;
tty_put_char(tty, '^');
tty_put_char(tty, op ^ 0100);
ldata->column += 2;
space -= 2;
*tail += 2;
break;
}
return space;
}
/**
* __process_echoes - write pending echo characters
* @tty: terminal device
*
* Write previously buffered echo (and other ldisc-generated) characters to the
* tty.
*
* Characters generated by the ldisc (including echoes) need to be buffered
* because the driver's write buffer can fill during heavy program output.
* Echoing straight to the driver will often fail under these conditions,
* causing lost characters and resulting mismatches of ldisc state information.
*
* Since the ldisc state must represent the characters actually sent to the
* driver at the time of the write, operations like certain changes in column
* state are also saved in the buffer and executed here.
*
* A circular fifo buffer is used so that the most recent characters are
* prioritized. Also, when control characters are echoed with a prefixed "^",
* the pair is treated atomically and thus not separated.
*
* Locking: callers must hold %output_lock.
*/
static size_t __process_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
int space, old_space;
size_t tail;
u8 c;
old_space = space = tty_write_room(tty);
tail = ldata->echo_tail;
while (MASK(ldata->echo_commit) != MASK(tail)) {
c = echo_buf(ldata, tail);
if (c == ECHO_OP_START) {
int ret = n_tty_process_echo_ops(tty, &tail, space);
if (ret == -ENODATA)
goto not_yet_stored;
if (ret < 0)
break;
space = ret;
} else {
if (O_OPOST(tty)) {
int retval = do_output_char(c, tty, space);
if (retval < 0)
break;
space -= retval;
} else {
if (!space)
break;
tty_put_char(tty, c);
space -= 1;
}
tail += 1;
}
}
/* If the echo buffer is nearly full (so that the possibility exists
* of echo overrun before the next commit), then discard enough
* data at the tail to prevent a subsequent overrun */
while (ldata->echo_commit > tail &&
ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
if (echo_buf(ldata, tail) == ECHO_OP_START) {
if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
tail += 3;
else
tail += 2;
} else
tail++;
}
not_yet_stored:
ldata->echo_tail = tail;
return old_space - space;
}
static void commit_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
size_t nr, old, echoed;
size_t head;
mutex_lock(&ldata->output_lock);
head = ldata->echo_head;
ldata->echo_mark = head;
old = ldata->echo_commit - ldata->echo_tail;
/* Process committed echoes if the accumulated # of bytes
* is over the threshold (and try again each time another
* block is accumulated) */
nr = head - ldata->echo_tail;
if (nr < ECHO_COMMIT_WATERMARK ||
(nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
mutex_unlock(&ldata->output_lock);
return;
}
ldata->echo_commit = head;
echoed = __process_echoes(tty);
mutex_unlock(&ldata->output_lock);
if (echoed && tty->ops->flush_chars)
tty->ops->flush_chars(tty);
}
static void process_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
size_t echoed;
if (ldata->echo_mark == ldata->echo_tail)
return;
mutex_lock(&ldata->output_lock);
ldata->echo_commit = ldata->echo_mark;
echoed = __process_echoes(tty);
mutex_unlock(&ldata->output_lock);
if (echoed && tty->ops->flush_chars)
tty->ops->flush_chars(tty);
}
/* NB: echo_mark and echo_head should be equivalent here */
static void flush_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
ldata->echo_commit == ldata->echo_head)
return;
mutex_lock(&ldata->output_lock);
ldata->echo_commit = ldata->echo_head;
__process_echoes(tty);
mutex_unlock(&ldata->output_lock);
}
/**
* add_echo_byte - add a byte to the echo buffer
* @c: unicode byte to echo
* @ldata: n_tty data
*
* Add a character or operation byte to the echo buffer.
*/
static inline void add_echo_byte(u8 c, struct n_tty_data *ldata)
{
*echo_buf_addr(ldata, ldata->echo_head) = c;
smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
ldata->echo_head++;
}
/**
* echo_move_back_col - add operation to move back a column
* @ldata: n_tty data
*
* Add an operation to the echo buffer to move back one column.
*/
static void echo_move_back_col(struct n_tty_data *ldata)
{
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(ECHO_OP_MOVE_BACK_COL, ldata);
}
/**
* echo_set_canon_col - add operation to set the canon column
* @ldata: n_tty data
*
* Add an operation to the echo buffer to set the canon column to the current
* column.
*/
static void echo_set_canon_col(struct n_tty_data *ldata)
{
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(ECHO_OP_SET_CANON_COL, ldata);
}
/**
* echo_erase_tab - add operation to erase a tab
* @num_chars: number of character columns already used
* @after_tab: true if num_chars starts after a previous tab
* @ldata: n_tty data
*
* Add an operation to the echo buffer to erase a tab.
*
* Called by the eraser function, which knows how many character columns have
* been used since either a previous tab or the start of input. This
* information will be used later, along with canon column (if applicable), to
* go back the correct number of columns.
*/
static void echo_erase_tab(unsigned int num_chars, int after_tab,
struct n_tty_data *ldata)
{
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(ECHO_OP_ERASE_TAB, ldata);
/* We only need to know this modulo 8 (tab spacing) */
num_chars &= 7;
/* Set the high bit as a flag if num_chars is after a previous tab */
if (after_tab)
num_chars |= 0x80;
add_echo_byte(num_chars, ldata);
}
/**
* echo_char_raw - echo a character raw
* @c: unicode byte to echo
* @ldata: line disc data
*
* Echo user input back onto the screen. This must be called only when
* L_ECHO(tty) is true. Called from the &tty_driver.receive_buf() path.
*
* This variant does not treat control characters specially.
*/
static void echo_char_raw(u8 c, struct n_tty_data *ldata)
{
if (c == ECHO_OP_START) {
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(ECHO_OP_START, ldata);
} else {
add_echo_byte(c, ldata);
}
}
/**
* echo_char - echo a character
* @c: unicode byte to echo
* @tty: terminal device
*
* Echo user input back onto the screen. This must be called only when
* L_ECHO(tty) is true. Called from the &tty_driver.receive_buf() path.
*
* This variant tags control characters to be echoed as "^X" (where X is the
* letter representing the control char).
*/
static void echo_char(u8 c, const struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
if (c == ECHO_OP_START) {
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(ECHO_OP_START, ldata);
} else {
if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t')
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(c, ldata);
}
}
/**
* finish_erasing - complete erase
* @ldata: n_tty data
*/
static inline void finish_erasing(struct n_tty_data *ldata)
{
if (ldata->erasing) {
echo_char_raw('/', ldata);
ldata->erasing = 0;
}
}
/**
* eraser - handle erase function
* @c: character input
* @tty: terminal device
*
* Perform erase and necessary output when an erase character is present in the
* stream from the driver layer. Handles the complexities of UTF-8 multibyte
* symbols.
*
* Locking: n_tty_receive_buf()/producer path:
* caller holds non-exclusive %termios_rwsem
*/
static void eraser(u8 c, const struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
enum { ERASE, WERASE, KILL } kill_type;
size_t head;
size_t cnt;
int seen_alnums;
if (ldata->read_head == ldata->canon_head) {
/* process_output('\a', tty); */ /* what do you think? */
return;
}
if (c == ERASE_CHAR(tty))
kill_type = ERASE;
else if (c == WERASE_CHAR(tty))
kill_type = WERASE;
else {
if (!L_ECHO(tty)) {
ldata->read_head = ldata->canon_head;
return;
}
if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) {
ldata->read_head = ldata->canon_head;
finish_erasing(ldata);
echo_char(KILL_CHAR(tty), tty);
/* Add a newline if ECHOK is on and ECHOKE is off. */
if (L_ECHOK(tty))
echo_char_raw('\n', ldata);
return;
}
kill_type = KILL;
}
seen_alnums = 0;
while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
head = ldata->read_head;
/* erase a single possibly multibyte character */
do {
head--;
c = read_buf(ldata, head);
} while (is_continuation(c, tty) &&
MASK(head) != MASK(ldata->canon_head));
/* do not partially erase */
if (is_continuation(c, tty))
break;
if (kill_type == WERASE) {
/* Equivalent to BSD's ALTWERASE. */
if (isalnum(c) || c == '_')
seen_alnums++;
else if (seen_alnums)
break;
}
cnt = ldata->read_head - head;
ldata->read_head = head;
if (L_ECHO(tty)) {
if (L_ECHOPRT(tty)) {
if (!ldata->erasing) {
echo_char_raw('\\', ldata);
ldata->erasing = 1;
}
/* if cnt > 1, output a multi-byte character */
echo_char(c, tty);
while (--cnt > 0) {
head++;
echo_char_raw(read_buf(ldata, head), ldata);
echo_move_back_col(ldata);
}
} else if (kill_type == ERASE && !L_ECHOE(tty)) {
echo_char(ERASE_CHAR(tty), tty);
} else if (c == '\t') {
unsigned int num_chars = 0;
int after_tab = 0;
size_t tail = ldata->read_head;
/*
* Count the columns used for characters
* since the start of input or after a
* previous tab.
* This info is used to go back the correct
* number of columns.
*/
while (MASK(tail) != MASK(ldata->canon_head)) {
tail--;
c = read_buf(ldata, tail);
if (c == '\t') {
after_tab = 1;
break;
} else if (iscntrl(c)) {
if (L_ECHOCTL(tty))
num_chars += 2;
} else if (!is_continuation(c, tty)) {
num_chars++;
}
}
echo_erase_tab(num_chars, after_tab, ldata);
} else {
if (iscntrl(c) && L_ECHOCTL(tty)) {
echo_char_raw('\b', ldata);
echo_char_raw(' ', ldata);
echo_char_raw('\b', ldata);
}
if (!iscntrl(c) || L_ECHOCTL(tty)) {
echo_char_raw('\b', ldata);
echo_char_raw(' ', ldata);
echo_char_raw('\b', ldata);
}
}
}
if (kill_type == ERASE)
break;
}
if (ldata->read_head == ldata->canon_head && L_ECHO(tty))
finish_erasing(ldata);
}
static void __isig(int sig, struct tty_struct *tty)
{
struct pid *tty_pgrp = tty_get_pgrp(tty);
if (tty_pgrp) {
kill_pgrp(tty_pgrp, sig, 1);
put_pid(tty_pgrp);
}
}
/**
 * isig - handle the ISIG option
* @sig: signal
* @tty: terminal
*
* Called when a signal is being sent due to terminal input. Called from the
* &tty_driver.receive_buf() path, so serialized.
*
* Performs input and output flush if !NOFLSH. In this context, the echo
* buffer is 'output'. The signal is processed first to alert any current
* readers or writers to discontinue and exit their i/o loops.
*
* Locking: %ctrl.lock
*/
static void isig(int sig, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
if (L_NOFLSH(tty)) {
/* signal only */
__isig(sig, tty);
} else { /* signal and flush */
up_read(&tty->termios_rwsem);
down_write(&tty->termios_rwsem);
__isig(sig, tty);
/* clear echo buffer */
mutex_lock(&ldata->output_lock);
ldata->echo_head = ldata->echo_tail = 0;
ldata->echo_mark = ldata->echo_commit = 0;
mutex_unlock(&ldata->output_lock);
/* clear output buffer */
tty_driver_flush_buffer(tty);
/* clear input buffer */
reset_buffer_flags(tty->disc_data);
/* notify pty master of flush */
if (tty->link)
n_tty_packet_mode_flush(tty);
up_write(&tty->termios_rwsem);
down_read(&tty->termios_rwsem);
}
}
/**
* n_tty_receive_break - handle break
* @tty: terminal
*
* An RS232 break event has been hit in the incoming bitstream. This can cause
* a variety of events depending upon the termios settings.
*
* Locking: n_tty_receive_buf()/producer path:
* caller holds non-exclusive termios_rwsem
*
* Note: may get exclusive %termios_rwsem if flushing input buffer
*/
static void n_tty_receive_break(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
if (I_IGNBRK(tty))
return;
if (I_BRKINT(tty)) {
isig(SIGINT, tty);
return;
}
if (I_PARMRK(tty)) {
put_tty_queue('\377', ldata);
put_tty_queue('\0', ldata);
}
put_tty_queue('\0', ldata);
}
/**
* n_tty_receive_overrun - handle overrun reporting
* @tty: terminal
*
 * Data arrived faster than we could process it. While the tty driver has
 * flagged this, the bits that were missed are gone forever.
*
* Called from the receive_buf path so single threaded. Does not need locking
* as num_overrun and overrun_time are function private.
*/
static void n_tty_receive_overrun(const struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
ldata->num_overrun++;
if (time_is_before_jiffies(ldata->overrun_time + HZ)) {
tty_warn(tty, "%u input overrun(s)\n", ldata->num_overrun);
ldata->overrun_time = jiffies;
ldata->num_overrun = 0;
}
}
/**
* n_tty_receive_parity_error - error notifier
* @tty: terminal device
* @c: character
*
* Process a parity error and queue the right data to indicate the error case
* if necessary.
*
* Locking: n_tty_receive_buf()/producer path:
* caller holds non-exclusive %termios_rwsem
*/
static void n_tty_receive_parity_error(const struct tty_struct *tty,
u8 c)
{
struct n_tty_data *ldata = tty->disc_data;
if (I_INPCK(tty)) {
if (I_IGNPAR(tty))
return;
if (I_PARMRK(tty)) {
put_tty_queue('\377', ldata);
put_tty_queue('\0', ldata);
put_tty_queue(c, ldata);
} else
put_tty_queue('\0', ldata);
} else
put_tty_queue(c, ldata);
}
static void
n_tty_receive_signal_char(struct tty_struct *tty, int signal, u8 c)
{
isig(signal, tty);
if (I_IXON(tty))
start_tty(tty);
if (L_ECHO(tty)) {
echo_char(c, tty);
commit_echoes(tty);
} else
process_echoes(tty);
}
static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, u8 c)
{
return c == START_CHAR(tty) || c == STOP_CHAR(tty);
}
/**
* n_tty_receive_char_flow_ctrl - receive flow control chars
* @tty: terminal device
* @c: character
* @lookahead_done: lookahead has processed this character already
*
* Receive and process flow control character actions.
*
* In case lookahead for flow control chars already handled the character in
* advance to the normal receive, the actions are skipped during normal
* receive.
*
* Returns true if @c is consumed as flow-control character, the character
* must not be treated as normal character.
*/
static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, u8 c,
bool lookahead_done)
{
if (!n_tty_is_char_flow_ctrl(tty, c))
return false;
if (lookahead_done)
return true;
if (c == START_CHAR(tty)) {
start_tty(tty);
process_echoes(tty);
return true;
}
/* STOP_CHAR */
stop_tty(tty);
return true;
}
static void n_tty_receive_handle_newline(struct tty_struct *tty, u8 c)
{
struct n_tty_data *ldata = tty->disc_data;
set_bit(MASK(ldata->read_head), ldata->read_flags);
put_tty_queue(c, ldata);
smp_store_release(&ldata->canon_head, ldata->read_head);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
}
static bool n_tty_receive_char_canon(struct tty_struct *tty, u8 c)
{
struct n_tty_data *ldata = tty->disc_data;
if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) ||
(c == WERASE_CHAR(tty) && L_IEXTEN(tty))) {
eraser(c, tty);
commit_echoes(tty);
return true;
}
if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) {
ldata->lnext = 1;
if (L_ECHO(tty)) {
finish_erasing(ldata);
if (L_ECHOCTL(tty)) {
echo_char_raw('^', ldata);
echo_char_raw('\b', ldata);
commit_echoes(tty);
}
}
return true;
}
if (c == REPRINT_CHAR(tty) && L_ECHO(tty) && L_IEXTEN(tty)) {
size_t tail = ldata->canon_head;
finish_erasing(ldata);
echo_char(c, tty);
echo_char_raw('\n', ldata);
while (MASK(tail) != MASK(ldata->read_head)) {
echo_char(read_buf(ldata, tail), tty);
tail++;
}
commit_echoes(tty);
return true;
}
if (c == '\n') {
if (L_ECHO(tty) || L_ECHONL(tty)) {
echo_char_raw('\n', ldata);
commit_echoes(tty);
}
n_tty_receive_handle_newline(tty, c);
return true;
}
if (c == EOF_CHAR(tty)) {
c = __DISABLED_CHAR;
n_tty_receive_handle_newline(tty, c);
return true;
}
if ((c == EOL_CHAR(tty)) ||
(c == EOL2_CHAR(tty) && L_IEXTEN(tty))) {
/*
* XXX are EOL_CHAR and EOL2_CHAR echoed?!?
*/
if (L_ECHO(tty)) {
/* Record the column of first canon char. */
if (ldata->canon_head == ldata->read_head)
echo_set_canon_col(ldata);
echo_char(c, tty);
commit_echoes(tty);
}
/*
* XXX does PARMRK doubling happen for
* EOL_CHAR and EOL2_CHAR?
*/
if (c == '\377' && I_PARMRK(tty))
put_tty_queue(c, ldata);
n_tty_receive_handle_newline(tty, c);
return true;
}
return false;
}
static void n_tty_receive_char_special(struct tty_struct *tty, u8 c,
bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c, lookahead_done))
return;
if (L_ISIG(tty)) {
if (c == INTR_CHAR(tty)) {
n_tty_receive_signal_char(tty, SIGINT, c);
return;
} else if (c == QUIT_CHAR(tty)) {
n_tty_receive_signal_char(tty, SIGQUIT, c);
return;
} else if (c == SUSP_CHAR(tty)) {
n_tty_receive_signal_char(tty, SIGTSTP, c);
return;
}
}
if (tty->flow.stopped && !tty->flow.tco_stopped && I_IXON(tty) && I_IXANY(tty)) {
start_tty(tty);
process_echoes(tty);
}
if (c == '\r') {
if (I_IGNCR(tty))
return;
if (I_ICRNL(tty))
c = '\n';
} else if (c == '\n' && I_INLCR(tty))
c = '\r';
if (ldata->icanon && n_tty_receive_char_canon(tty, c))
return;
if (L_ECHO(tty)) {
finish_erasing(ldata);
if (c == '\n')
echo_char_raw('\n', ldata);
else {
/* Record the column of first canon char. */
if (ldata->canon_head == ldata->read_head)
echo_set_canon_col(ldata);
echo_char(c, tty);
}
commit_echoes(tty);
}
/* PARMRK doubling check */
if (c == '\377' && I_PARMRK(tty))
put_tty_queue(c, ldata);
put_tty_queue(c, ldata);
}
/**
* n_tty_receive_char - perform processing
* @tty: terminal device
* @c: character
*
* Process an individual character of input received from the driver. This is
* serialized with respect to itself by the rules for the driver above.
*
* Locking: n_tty_receive_buf()/producer path:
* caller holds non-exclusive %termios_rwsem
* publishes canon_head if canonical mode is active
*/
static void n_tty_receive_char(struct tty_struct *tty, u8 c)
{
struct n_tty_data *ldata = tty->disc_data;
if (tty->flow.stopped && !tty->flow.tco_stopped && I_IXON(tty) && I_IXANY(tty)) {
start_tty(tty);
process_echoes(tty);
}
if (L_ECHO(tty)) {
finish_erasing(ldata);
/* Record the column of first canon char. */
if (ldata->canon_head == ldata->read_head)
echo_set_canon_col(ldata);
echo_char(c, tty);
commit_echoes(tty);
}
/* PARMRK doubling check */
if (c == '\377' && I_PARMRK(tty))
put_tty_queue(c, ldata);
put_tty_queue(c, ldata);
}
static void n_tty_receive_char_closing(struct tty_struct *tty, u8 c,
bool lookahead_done)
{
if (I_ISTRIP(tty))
c &= 0x7f;
if (I_IUCLC(tty) && L_IEXTEN(tty))
c = tolower(c);
if (I_IXON(tty)) {
if (!n_tty_receive_char_flow_ctrl(tty, c, lookahead_done) &&
tty->flow.stopped && !tty->flow.tco_stopped && I_IXANY(tty) &&
c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
c != SUSP_CHAR(tty)) {
start_tty(tty);
process_echoes(tty);
}
}
}
static void
n_tty_receive_char_flagged(struct tty_struct *tty, u8 c, u8 flag)
{
switch (flag) {
case TTY_BREAK:
n_tty_receive_break(tty);
break;
case TTY_PARITY:
case TTY_FRAME:
n_tty_receive_parity_error(tty, c);
break;
case TTY_OVERRUN:
n_tty_receive_overrun(tty);
break;
default:
tty_err(tty, "unknown flag %u\n", flag);
break;
}
}
static void
n_tty_receive_char_lnext(struct tty_struct *tty, u8 c, u8 flag)
{
struct n_tty_data *ldata = tty->disc_data;
ldata->lnext = 0;
if (likely(flag == TTY_NORMAL)) {
if (I_ISTRIP(tty))
c &= 0x7f;
if (I_IUCLC(tty) && L_IEXTEN(tty))
c = tolower(c);
n_tty_receive_char(tty, c);
} else
n_tty_receive_char_flagged(tty, c, flag);
}
/* Caller must ensure count > 0 */
static void n_tty_lookahead_flow_ctrl(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct n_tty_data *ldata = tty->disc_data;
u8 flag = TTY_NORMAL;
ldata->lookahead_count += count;
if (!I_IXON(tty))
return;
while (count--) {
if (fp)
flag = *fp++;
if (likely(flag == TTY_NORMAL))
n_tty_receive_char_flow_ctrl(tty, *cp, false);
cp++;
}
}
static void
n_tty_receive_buf_real_raw(const struct tty_struct *tty, const u8 *cp,
size_t count)
{
struct n_tty_data *ldata = tty->disc_data;
/* handle buffer wrap-around by a loop */
for (unsigned int i = 0; i < 2; i++) {
size_t head = MASK(ldata->read_head);
size_t n = min(count, N_TTY_BUF_SIZE - head);
memcpy(read_buf_addr(ldata, head), cp, n);
ldata->read_head += n;
cp += n;
count -= n;
}
}
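/*
* Illustrative note (not in the original source): two loop iterations suffice
* because a single receive can wrap the ring at most once. For example, with
* N_TTY_BUF_SIZE == 4096, read_head at offset 4090 and count == 10, the first
* pass copies 6 bytes into offsets 4090..4095 and the second copies the
* remaining 4 bytes into offsets 0..3; a third pass would always see
* count == 0.
*/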
static void
n_tty_receive_buf_raw(struct tty_struct *tty, const u8 *cp, const u8 *fp,
size_t count)
{
struct n_tty_data *ldata = tty->disc_data;
u8 flag = TTY_NORMAL;
while (count--) {
if (fp)
flag = *fp++;
if (likely(flag == TTY_NORMAL))
put_tty_queue(*cp++, ldata);
else
n_tty_receive_char_flagged(tty, *cp++, flag);
}
}
static void
n_tty_receive_buf_closing(struct tty_struct *tty, const u8 *cp, const u8 *fp,
size_t count, bool lookahead_done)
{
u8 flag = TTY_NORMAL;
while (count--) {
if (fp)
flag = *fp++;
if (likely(flag == TTY_NORMAL))
n_tty_receive_char_closing(tty, *cp++, lookahead_done);
}
}
static void n_tty_receive_buf_standard(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count,
bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
u8 flag = TTY_NORMAL;
while (count--) {
u8 c = *cp++;
if (fp)
flag = *fp++;
if (ldata->lnext) {
n_tty_receive_char_lnext(tty, c, flag);
continue;
}
if (unlikely(flag != TTY_NORMAL)) {
n_tty_receive_char_flagged(tty, c, flag);
continue;
}
if (I_ISTRIP(tty))
c &= 0x7f;
if (I_IUCLC(tty) && L_IEXTEN(tty))
c = tolower(c);
if (L_EXTPROC(tty)) {
put_tty_queue(c, ldata);
continue;
}
if (test_bit(c, ldata->char_map))
n_tty_receive_char_special(tty, c, lookahead_done);
else
n_tty_receive_char(tty, c);
}
}
static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp,
size_t count)
{
struct n_tty_data *ldata = tty->disc_data;
bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty));
size_t la_count = min(ldata->lookahead_count, count);
if (ldata->real_raw)
n_tty_receive_buf_real_raw(tty, cp, count);
else if (ldata->raw || (L_EXTPROC(tty) && !preops))
n_tty_receive_buf_raw(tty, cp, fp, count);
else if (tty->closing && !L_EXTPROC(tty)) {
if (la_count > 0)
n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
if (count > la_count)
n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
} else {
if (la_count > 0)
n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
if (count > la_count)
n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
flush_echoes(tty);
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
}
ldata->lookahead_count -= la_count;
if (ldata->icanon && !L_EXTPROC(tty))
return;
/* publish read_head to consumer */
smp_store_release(&ldata->commit_head, ldata->read_head);
if (read_cnt(ldata)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
}
}
/**
* n_tty_receive_buf_common - process input
* @tty: device to receive input
* @cp: input chars
* @fp: flags for each char (if %NULL, all chars are %TTY_NORMAL)
* @count: number of input chars in @cp
* @flow: enable flow control
*
* Called by the terminal driver when a block of characters has been received.
* This function must be called from soft contexts, not from interrupt context.
* The driver is responsible for making calls one at a time and in order (or
* using flush_to_ldisc()).
*
* Returns: the # of input chars from @cp which were processed.
*
* In canonical mode, the maximum line length is 4096 chars (including the line
* termination char); lines longer than 4096 chars are truncated. After 4095
* chars, input data is still processed but not stored. Overflow processing
* ensures the tty can always receive more input until at least one line can be
* read.
*
* In non-canonical mode, the read buffer will only accept 4095 chars; this
* provides the necessary space for a newline char if the input mode is
* switched to canonical.
*
* Note it is possible for the read buffer to _contain_ 4096 chars in
* non-canonical mode: the read buffer could already contain the maximum canon
* line of 4096 chars when the mode is switched to non-canonical.
*
* Locking: n_tty_receive_buf()/producer path:
* claims non-exclusive %termios_rwsem
* publishes commit_head or canon_head
*/
static size_t
n_tty_receive_buf_common(struct tty_struct *tty, const u8 *cp, const u8 *fp,
size_t count, bool flow)
{
struct n_tty_data *ldata = tty->disc_data;
size_t n, rcvd = 0;
int room, overflow;
down_read(&tty->termios_rwsem);
do {
/*
* When PARMRK is set, each input char may take up to 3 chars
* in the read buf; reduce the buffer space avail by 3x
*
* If we are doing input canonicalization, and there are no
* pending newlines, let characters through without limit, so
* that erase characters will be handled. Other excess
* characters will be beeped.
*
* paired with store in *_copy_from_read_buf() -- guarantees
* the consumer has loaded the data in read_buf up to the new
* read_tail (so this producer will not overwrite unread data)
*/
size_t tail = smp_load_acquire(&ldata->read_tail);
room = N_TTY_BUF_SIZE - (ldata->read_head - tail);
if (I_PARMRK(tty))
room = DIV_ROUND_UP(room, 3);
room--;
if (room <= 0) {
overflow = ldata->icanon && ldata->canon_head == tail;
if (overflow && room < 0)
ldata->read_head--;
room = overflow;
WRITE_ONCE(ldata->no_room, flow && !room);
} else
overflow = 0;
n = min_t(size_t, count, room);
if (!n)
break;
/* ignore parity errors if handling overflow */
if (!overflow || !fp || *fp != TTY_PARITY)
__receive_buf(tty, cp, fp, n);
cp += n;
if (fp)
fp += n;
count -= n;
rcvd += n;
} while (!test_bit(TTY_LDISC_CHANGING, &tty->flags));
tty->receive_room = room;
/* Unthrottle if handling overflow on pty */
if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
if (overflow) {
tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
tty_unthrottle_safe(tty);
__tty_set_flow_change(tty, 0);
}
} else
n_tty_check_throttle(tty);
if (unlikely(ldata->no_room)) {
/*
* Barrier here is to ensure to read the latest read_tail in
* chars_in_buffer() and to make sure that read_tail is not loaded
* before ldata->no_room is set.
*/
smp_mb();
if (!chars_in_buffer(tty))
n_tty_kick_worker(tty);
}
up_read(&tty->termios_rwsem);
return rcvd;
}
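/*
* Illustrative note (not in the original source): a worked example of the
* PARMRK derating above. With N_TTY_BUF_SIZE == 4096 and
* read_head - read_tail == 4000, room starts at 96; DIV_ROUND_UP(96, 3)
* gives 32, and the decrement for the reserved slot leaves 31 bytes accepted
* from this call, since each marked byte may expand to three in the queue.
*/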
static void n_tty_receive_buf(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
n_tty_receive_buf_common(tty, cp, fp, count, false);
}
static size_t n_tty_receive_buf2(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
return n_tty_receive_buf_common(tty, cp, fp, count, true);
}
/**
* n_tty_set_termios - termios data changed
* @tty: terminal
* @old: previous data
*
* Called by the tty layer when the user changes termios flags so that the line
* discipline can plan ahead. This function cannot sleep and is protected from
* re-entry by the tty layer. The user is guaranteed that this function will
* not be re-entered or in progress when the ldisc is closed.
*
* Locking: Caller holds @tty->termios_rwsem
*/
static void n_tty_set_termios(struct tty_struct *tty, const struct ktermios *old)
{
struct n_tty_data *ldata = tty->disc_data;
if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->line_start = ldata->read_tail;
if (!L_ICANON(tty) || !read_cnt(ldata)) {
ldata->canon_head = ldata->read_tail;
ldata->push = 0;
} else {
set_bit(MASK(ldata->read_head - 1), ldata->read_flags);
ldata->canon_head = ldata->read_head;
ldata->push = 1;
}
ldata->commit_head = ldata->read_head;
ldata->erasing = 0;
ldata->lnext = 0;
}
ldata->icanon = (L_ICANON(tty) != 0);
if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
I_ICRNL(tty) || I_INLCR(tty) || L_ICANON(tty) ||
I_IXON(tty) || L_ISIG(tty) || L_ECHO(tty) ||
I_PARMRK(tty)) {
bitmap_zero(ldata->char_map, 256);
if (I_IGNCR(tty) || I_ICRNL(tty))
set_bit('\r', ldata->char_map);
if (I_INLCR(tty))
set_bit('\n', ldata->char_map);
if (L_ICANON(tty)) {
set_bit(ERASE_CHAR(tty), ldata->char_map);
set_bit(KILL_CHAR(tty), ldata->char_map);
set_bit(EOF_CHAR(tty), ldata->char_map);
set_bit('\n', ldata->char_map);
set_bit(EOL_CHAR(tty), ldata->char_map);
if (L_IEXTEN(tty)) {
set_bit(WERASE_CHAR(tty), ldata->char_map);
set_bit(LNEXT_CHAR(tty), ldata->char_map);
set_bit(EOL2_CHAR(tty), ldata->char_map);
if (L_ECHO(tty))
set_bit(REPRINT_CHAR(tty),
ldata->char_map);
}
}
if (I_IXON(tty)) {
set_bit(START_CHAR(tty), ldata->char_map);
set_bit(STOP_CHAR(tty), ldata->char_map);
}
if (L_ISIG(tty)) {
set_bit(INTR_CHAR(tty), ldata->char_map);
set_bit(QUIT_CHAR(tty), ldata->char_map);
set_bit(SUSP_CHAR(tty), ldata->char_map);
}
clear_bit(__DISABLED_CHAR, ldata->char_map);
ldata->raw = 0;
ldata->real_raw = 0;
} else {
ldata->raw = 1;
if ((I_IGNBRK(tty) || (!I_BRKINT(tty) && !I_PARMRK(tty))) &&
(I_IGNPAR(tty) || !I_INPCK(tty)) &&
(tty->driver->flags & TTY_DRIVER_REAL_RAW))
ldata->real_raw = 1;
else
ldata->real_raw = 0;
}
/*
	 * Fix tty hang when I_IXON(tty) is cleared, but the tty has
	 * been stopped by STOP_CHAR(tty) before it.
*/
if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow.tco_stopped) {
start_tty(tty);
process_echoes(tty);
}
/* The termios change makes the tty ready for I/O */
wake_up_interruptible(&tty->write_wait);
wake_up_interruptible(&tty->read_wait);
}
/**
* n_tty_close - close the ldisc for this tty
* @tty: device
*
* Called from the terminal layer when this line discipline is being shut down,
* either because of a close or because of a discipline change. The function
* will not be called while other ldisc methods are in progress.
*/
static void n_tty_close(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
if (tty->link)
n_tty_packet_mode_flush(tty);
down_write(&tty->termios_rwsem);
vfree(ldata);
tty->disc_data = NULL;
up_write(&tty->termios_rwsem);
}
/**
* n_tty_open - open an ldisc
* @tty: terminal to open
*
* Called when this line discipline is being attached to the terminal device.
* Can sleep. Called serialized so that no other events will occur in parallel.
* No further open will occur until a close.
*/
static int n_tty_open(struct tty_struct *tty)
{
struct n_tty_data *ldata;
/* Currently a malloc failure here can panic */
ldata = vzalloc(sizeof(*ldata));
if (!ldata)
return -ENOMEM;
ldata->overrun_time = jiffies;
mutex_init(&ldata->atomic_read_lock);
mutex_init(&ldata->output_lock);
tty->disc_data = ldata;
tty->closing = 0;
/* indicate buffer work may resume */
clear_bit(TTY_LDISC_HALTED, &tty->flags);
n_tty_set_termios(tty, NULL);
tty_unthrottle(tty);
return 0;
}
static inline int input_available_p(const struct tty_struct *tty, int poll)
{
const struct n_tty_data *ldata = tty->disc_data;
int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1;
if (ldata->icanon && !L_EXTPROC(tty))
return ldata->canon_head != ldata->read_tail;
else
return ldata->commit_head - ldata->read_tail >= amt;
}
/**
* copy_from_read_buf - copy read data directly
* @tty: terminal device
* @kbp: data
* @nr: size of data
*
* Helper function to speed up n_tty_read(). It is only called when %ICANON is
* off; it copies characters straight from the tty queue.
*
* Returns: true if it successfully copied data, but there is still more data
* to be had.
*
* Locking:
* * called under the @ldata->atomic_read_lock sem
* * n_tty_read()/consumer path:
* caller holds non-exclusive %termios_rwsem;
* read_tail published
*/
static bool copy_from_read_buf(const struct tty_struct *tty, u8 **kbp,
size_t *nr)
{
struct n_tty_data *ldata = tty->disc_data;
size_t n;
bool is_eof;
size_t head = smp_load_acquire(&ldata->commit_head);
size_t tail = MASK(ldata->read_tail);
n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail);
n = min(*nr, n);
if (n) {
u8 *from = read_buf_addr(ldata, tail);
memcpy(*kbp, from, n);
is_eof = n == 1 && *from == EOF_CHAR(tty);
tty_audit_add_data(tty, from, n);
zero_buffer(tty, from, n);
smp_store_release(&ldata->read_tail, ldata->read_tail + n);
/* Turn single EOF into zero-length read */
if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
(head == ldata->read_tail))
return false;
*kbp += n;
*nr -= n;
/* If we have more to copy, let the caller know */
return head != ldata->read_tail;
}
return false;
}
/**
* canon_copy_from_read_buf - copy read data in canonical mode
* @tty: terminal device
* @kbp: data
* @nr: size of data
*
* Helper function for n_tty_read(). It is only called when %ICANON is on; it
* copies one line of input up to and including the line-delimiting character
* into the result buffer.
*
* Note: When termios is changed from non-canonical to canonical mode and the
* read buffer contains data, n_tty_set_termios() simulates an EOF push (as if
* C-d were input) _without_ the %DISABLED_CHAR in the buffer. This causes data
* already processed as input to be immediately available as input although a
* newline has not been received.
*
* Locking:
* * called under the %atomic_read_lock mutex
* * n_tty_read()/consumer path:
* caller holds non-exclusive %termios_rwsem;
* read_tail published
*/
static bool canon_copy_from_read_buf(const struct tty_struct *tty, u8 **kbp,
size_t *nr)
{
struct n_tty_data *ldata = tty->disc_data;
size_t n, size, more, c;
size_t eol;
size_t tail, canon_head;
int found = 0;
/* N.B. avoid overrun if nr == 0 */
if (!*nr)
return false;
canon_head = smp_load_acquire(&ldata->canon_head);
n = min(*nr, canon_head - ldata->read_tail);
tail = MASK(ldata->read_tail);
size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
n_tty_trace("%s: nr:%zu tail:%zu n:%zu size:%zu\n",
__func__, *nr, tail, n, size);
eol = find_next_bit(ldata->read_flags, size, tail);
more = n - (size - tail);
if (eol == N_TTY_BUF_SIZE && more) {
/* scan wrapped without finding set bit */
eol = find_first_bit(ldata->read_flags, more);
found = eol != more;
} else
found = eol != size;
n = eol - tail;
if (n > N_TTY_BUF_SIZE)
n += N_TTY_BUF_SIZE;
c = n + found;
if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
n = c;
n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
__func__, eol, found, n, c, tail, more);
tty_copy(tty, *kbp, tail, n);
*kbp += n;
*nr -= n;
if (found)
clear_bit(eol, ldata->read_flags);
smp_store_release(&ldata->read_tail, ldata->read_tail + c);
if (found) {
if (!ldata->push)
ldata->line_start = ldata->read_tail;
else
ldata->push = 0;
tty_audit_push();
return false;
}
/* No EOL found - do a continuation retry if there is more data */
return ldata->read_tail != canon_head;
}
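/*
* Illustrative note (not in the original source): the EOL scan above may
* wrap. Assuming *nr >= 4, with read_tail masking to offset 4094 and
* canon_head four bytes ahead, size == 4096 and more == 2; if no EOL bit is
* set in offsets 4094..4095, find_next_bit() returns 4096 and the scan
* restarts from offset 0 over the remaining two bytes via find_first_bit().
*/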
/*
* If we finished a read at the exact location of an
* EOF (special EOL character that's a __DISABLED_CHAR)
* in the stream, silently eat the EOF.
*/
static void canon_skip_eof(struct n_tty_data *ldata)
{
size_t tail, canon_head;
canon_head = smp_load_acquire(&ldata->canon_head);
tail = ldata->read_tail;
// No data?
if (tail == canon_head)
return;
// See if the tail position is EOF in the circular buffer
tail &= (N_TTY_BUF_SIZE - 1);
if (!test_bit(tail, ldata->read_flags))
return;
if (read_buf(ldata, tail) != __DISABLED_CHAR)
return;
// Clear the EOL bit, skip the EOF char.
clear_bit(tail, ldata->read_flags);
smp_store_release(&ldata->read_tail, ldata->read_tail + 1);
}
/**
* job_control - check job control
* @tty: tty
* @file: file handle
*
* Perform job control management checks on this @file/@tty descriptor and if
* appropriate send any needed signals and return a negative error code if
* action should be taken.
*
* Locking:
* * redirected write test is safe
* * current->signal->tty check is safe
* * ctrl.lock to safely reference @tty->ctrl.pgrp
*/
static int job_control(struct tty_struct *tty, struct file *file)
{
/* Job control check -- must be done at start and after
every sleep (POSIX.1 7.1.1.4). */
/* NOTE: not yet done after every sleep pending a thorough
check of the logic of this change. -- jlc */
/* don't stop on /dev/console */
if (file->f_op->write_iter == redirected_tty_write)
return 0;
return __tty_check_change(tty, SIGTTIN);
}
/**
* n_tty_read - read function for tty
* @tty: tty device
* @file: file object
* @kbuf: kernelspace buffer pointer
* @nr: size of I/O
* @cookie: if non-%NULL, this is a continuation read
* @offset: where to continue reading from (unused in n_tty)
*
* Perform reads for the line discipline. We are guaranteed that the line
* discipline will not be closed under us but we may get multiple parallel
* readers and must handle this ourselves. We may also get a hangup. Always
* called in user context, may sleep.
*
* This code must be sure never to sleep through a hangup.
*
* Locking: n_tty_read()/consumer path:
* claims non-exclusive termios_rwsem;
* publishes read_tail
*/
static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, u8 *kbuf,
size_t nr, void **cookie, unsigned long offset)
{
struct n_tty_data *ldata = tty->disc_data;
u8 *kb = kbuf;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int c;
int minimum, time;
ssize_t retval = 0;
long timeout;
bool packet;
size_t old_tail;
/*
* Is this a continuation of a read started earlier?
*
* If so, we still hold the atomic_read_lock and the
* termios_rwsem, and can just continue to copy data.
*/
if (*cookie) {
if (ldata->icanon && !L_EXTPROC(tty)) {
/*
* If we have filled the user buffer, see
* if we should skip an EOF character before
* releasing the lock and returning done.
*/
if (!nr)
canon_skip_eof(ldata);
else if (canon_copy_from_read_buf(tty, &kb, &nr))
return kb - kbuf;
} else {
if (copy_from_read_buf(tty, &kb, &nr))
return kb - kbuf;
}
/* No more data - release locks and stop retries */
n_tty_kick_worker(tty);
n_tty_check_unthrottle(tty);
up_read(&tty->termios_rwsem);
mutex_unlock(&ldata->atomic_read_lock);
*cookie = NULL;
return kb - kbuf;
}
c = job_control(tty, file);
if (c < 0)
return c;
/*
* Internal serialization of reads.
*/
if (file->f_flags & O_NONBLOCK) {
if (!mutex_trylock(&ldata->atomic_read_lock))
return -EAGAIN;
} else {
if (mutex_lock_interruptible(&ldata->atomic_read_lock))
return -ERESTARTSYS;
}
down_read(&tty->termios_rwsem);
minimum = time = 0;
timeout = MAX_SCHEDULE_TIMEOUT;
if (!ldata->icanon) {
minimum = MIN_CHAR(tty);
if (minimum) {
time = (HZ / 10) * TIME_CHAR(tty);
} else {
timeout = (HZ / 10) * TIME_CHAR(tty);
minimum = 1;
}
}
packet = tty->ctrl.packet;
old_tail = ldata->read_tail;
add_wait_queue(&tty->read_wait, &wait);
while (nr) {
/* First test for status change. */
if (packet && tty->link->ctrl.pktstatus) {
u8 cs;
if (kb != kbuf)
break;
spin_lock_irq(&tty->link->ctrl.lock);
cs = tty->link->ctrl.pktstatus;
tty->link->ctrl.pktstatus = 0;
spin_unlock_irq(&tty->link->ctrl.lock);
*kb++ = cs;
nr--;
break;
}
if (!input_available_p(tty, 0)) {
up_read(&tty->termios_rwsem);
tty_buffer_flush_work(tty->port);
down_read(&tty->termios_rwsem);
if (!input_available_p(tty, 0)) {
if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
retval = -EIO;
break;
}
if (tty_hung_up_p(file))
break;
/*
* Abort readers for ttys which never actually
* get hung up. See __tty_hangup().
*/
if (test_bit(TTY_HUPPING, &tty->flags))
break;
if (!timeout)
break;
if (tty_io_nonblock(tty, file)) {
retval = -EAGAIN;
break;
}
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
up_read(&tty->termios_rwsem);
timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
timeout);
down_read(&tty->termios_rwsem);
continue;
}
}
if (ldata->icanon && !L_EXTPROC(tty)) {
if (canon_copy_from_read_buf(tty, &kb, &nr))
goto more_to_be_read;
} else {
/* Deal with packet mode. */
if (packet && kb == kbuf) {
*kb++ = TIOCPKT_DATA;
nr--;
}
/*
* Copy data, and if there is more to be had
* and we have nothing more to wait for, then
* let's mark us for retries.
*
			 * NOTE! We return here with both the termios_rwsem
			 * and atomic_read_lock still held; the retries
			 * will release them when done.
*/
if (copy_from_read_buf(tty, &kb, &nr) && kb - kbuf >= minimum) {
more_to_be_read:
remove_wait_queue(&tty->read_wait, &wait);
*cookie = cookie;
return kb - kbuf;
}
}
n_tty_check_unthrottle(tty);
if (kb - kbuf >= minimum)
break;
if (time)
timeout = time;
}
if (old_tail != ldata->read_tail) {
/*
* Make sure no_room is not read in n_tty_kick_worker()
* before setting ldata->read_tail in copy_from_read_buf().
*/
smp_mb();
n_tty_kick_worker(tty);
}
up_read(&tty->termios_rwsem);
remove_wait_queue(&tty->read_wait, &wait);
mutex_unlock(&ldata->atomic_read_lock);
if (kb - kbuf)
retval = kb - kbuf;
return retval;
}
/**
* n_tty_write - write function for tty
* @tty: tty device
* @file: file object
* @buf: userspace buffer pointer
* @nr: size of I/O
*
* Write function of the terminal device. This is serialized with respect to
* other write callers but not to termios changes, reads and other such events.
* Since the receive code will echo characters, thus calling driver write
* methods, the %output_lock is used in the output processing functions called
* here as well as in the echo processing function to protect the column state
* and space left in the buffer.
*
* This code must be sure never to sleep through a hangup.
*
* Locking: output_lock to protect column state and space left
* (note that the process_output*() functions take this lock themselves)
*/
static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
const u8 *buf, size_t nr)
{
const u8 *b = buf;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
ssize_t num, retval = 0;
/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) {
retval = tty_check_change(tty);
if (retval)
return retval;
}
down_read(&tty->termios_rwsem);
/* Write out any echoed characters that are still pending */
process_echoes(tty);
add_wait_queue(&tty->write_wait, &wait);
while (1) {
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
retval = -EIO;
break;
}
if (O_OPOST(tty)) {
while (nr > 0) {
num = process_output_block(tty, b, nr);
if (num < 0) {
if (num == -EAGAIN)
break;
retval = num;
goto break_out;
}
b += num;
nr -= num;
if (nr == 0)
break;
if (process_output(*b, tty) < 0)
break;
b++; nr--;
}
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
} else {
struct n_tty_data *ldata = tty->disc_data;
while (nr > 0) {
mutex_lock(&ldata->output_lock);
num = tty->ops->write(tty, b, nr);
mutex_unlock(&ldata->output_lock);
if (num < 0) {
retval = num;
goto break_out;
}
if (!num)
break;
b += num;
nr -= num;
}
}
if (!nr)
break;
if (tty_io_nonblock(tty, file)) {
retval = -EAGAIN;
break;
}
up_read(&tty->termios_rwsem);
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
down_read(&tty->termios_rwsem);
}
break_out:
remove_wait_queue(&tty->write_wait, &wait);
if (nr && tty->fasync)
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
up_read(&tty->termios_rwsem);
return (b - buf) ? b - buf : retval;
}
/**
* n_tty_poll - poll method for N_TTY
* @tty: terminal device
* @file: file accessing it
* @wait: poll table
*
* Called when the line discipline is asked to poll() for data or for special
* events. This code is not serialized with respect to other events save
* open/close.
*
* This code must be sure never to sleep through a hangup.
*
* Locking: called without the kernel lock held -- fine.
*/
static __poll_t n_tty_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
__poll_t mask = 0;
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
if (input_available_p(tty, 1))
mask |= EPOLLIN | EPOLLRDNORM;
else {
tty_buffer_flush_work(tty->port);
if (input_available_p(tty, 1))
mask |= EPOLLIN | EPOLLRDNORM;
}
if (tty->ctrl.packet && tty->link->ctrl.pktstatus)
mask |= EPOLLPRI | EPOLLIN | EPOLLRDNORM;
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
mask |= EPOLLHUP;
if (tty_hung_up_p(file))
mask |= EPOLLHUP;
if (tty->ops->write && !tty_is_writelocked(tty) &&
tty_chars_in_buffer(tty) < WAKEUP_CHARS &&
tty_write_room(tty) > 0)
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
static unsigned long inq_canon(struct n_tty_data *ldata)
{
size_t nr, head, tail;
if (ldata->canon_head == ldata->read_tail)
return 0;
head = ldata->canon_head;
tail = ldata->read_tail;
nr = head - tail;
/* Skip EOF-chars.. */
while (MASK(head) != MASK(tail)) {
if (test_bit(MASK(tail), ldata->read_flags) &&
read_buf(ldata, tail) == __DISABLED_CHAR)
nr--;
tail++;
}
return nr;
}
static int n_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct n_tty_data *ldata = tty->disc_data;
int retval;
switch (cmd) {
case TIOCOUTQ:
return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
case TIOCINQ:
down_write(&tty->termios_rwsem);
if (L_ICANON(tty) && !L_EXTPROC(tty))
retval = inq_canon(ldata);
else
retval = read_cnt(ldata);
up_write(&tty->termios_rwsem);
return put_user(retval, (unsigned int __user *) arg);
default:
return n_tty_ioctl_helper(tty, cmd, arg);
}
}
static struct tty_ldisc_ops n_tty_ops = {
.owner = THIS_MODULE,
.num = N_TTY,
.name = "n_tty",
.open = n_tty_open,
.close = n_tty_close,
.flush_buffer = n_tty_flush_buffer,
.read = n_tty_read,
.write = n_tty_write,
.ioctl = n_tty_ioctl,
.set_termios = n_tty_set_termios,
.poll = n_tty_poll,
.receive_buf = n_tty_receive_buf,
.write_wakeup = n_tty_write_wakeup,
.receive_buf2 = n_tty_receive_buf2,
.lookahead_buf = n_tty_lookahead_flow_ctrl,
};
/**
* n_tty_inherit_ops - inherit N_TTY methods
* @ops: struct tty_ldisc_ops where to save N_TTY methods
*
* Enables a 'subclass' line discipline to 'inherit' N_TTY methods.
*/
void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
{
*ops = n_tty_ops;
ops->owner = NULL;
}
EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
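/*
* Illustrative sketch (not part of the original file): a derived line
* discipline starts from the N_TTY methods and overrides selectively. All
* names below (example_*, N_EXAMPLE) are hypothetical.
*
*	static struct tty_ldisc_ops example_ops;
*	static int (*n_tty_open_fn)(struct tty_struct *);
*
*	static int example_open(struct tty_struct *tty)
*	{
*		int ret = n_tty_open_fn(tty);
*
*		if (ret == 0) {
*			... subclass-specific setup ...
*		}
*		return ret;
*	}
*
*	static int example_init(void)
*	{
*		n_tty_inherit_ops(&example_ops);
*		n_tty_open_fn = example_ops.open;
*		example_ops.num = N_EXAMPLE;
*		example_ops.name = "example";
*		example_ops.open = example_open;
*		return tty_register_ldisc(&example_ops);
*	}
*/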
void __init n_tty_init(void)
{
tty_register_ldisc(&n_tty_ops);
}
| linux-master | drivers/tty/n_tty.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#include <linux/sched.h>
#include "tty.h"
/* Legacy tty mutex glue */
/*
* Getting the big tty mutex.
*/
void tty_lock(struct tty_struct *tty)
{
tty_kref_get(tty);
mutex_lock(&tty->legacy_mutex);
}
EXPORT_SYMBOL(tty_lock);
int tty_lock_interruptible(struct tty_struct *tty)
{
int ret;
tty_kref_get(tty);
ret = mutex_lock_interruptible(&tty->legacy_mutex);
if (ret)
tty_kref_put(tty);
return ret;
}
void tty_unlock(struct tty_struct *tty)
{
mutex_unlock(&tty->legacy_mutex);
tty_kref_put(tty);
}
EXPORT_SYMBOL(tty_unlock);
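/*
* Illustrative sketch (not part of the original file): tty_lock() takes a
* tty reference so the structure cannot be freed while the legacy mutex is
* held, and tty_unlock() drops it again. Typical use:
*
*	if (tty_lock_interruptible(tty))
*		return -ERESTARTSYS;
*	... touch legacy state ...
*	tty_unlock(tty);
*
* On the interrupted path the kref has already been dropped, so no unlock
* or put is needed.
*/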
void tty_lock_slave(struct tty_struct *tty)
{
if (tty && tty != tty->link)
tty_lock(tty);
}
void tty_unlock_slave(struct tty_struct *tty)
{
if (tty && tty != tty->link)
tty_unlock(tty);
}
void tty_set_lock_subclass(struct tty_struct *tty)
{
lockdep_set_subclass(&tty->legacy_mutex, TTY_LOCK_SLAVE);
}
| linux-master | drivers/tty/tty_mutex.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Serial driver for the amiga builtin port.
*
* This code was created by taking serial.c version 4.30 from kernel
* release 2.3.22, replacing all hardware related stuff with the
* corresponding amiga hardware actions, and removing all irrelevant
* code. As a consequence, it uses many of the constants and names
* associated with the registers and bits of 16550 compatible UARTS -
* but only to keep track of status, etc in the state variables. It
* was done this way to make it easier to keep the code in line with
* (non hardware specific) changes to serial.c.
*
* The port is registered with the tty driver as minor device 64, and
* therefore other ports should only use 65 upwards.
*
* Richard Lucock 28/12/99
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997,
* 1998, 1999 Theodore Ts'o
*
*/
/* Set of debugging defines */
#undef SERIAL_DEBUG_INTR
#undef SERIAL_DEBUG_OPEN
#undef SERIAL_DEBUG_FLOW
#undef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
/*
* End of serial driver configuration section.
*/
#include <linux/bitops.h>
#include <linux/circ_buf.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/serial_core.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/tty_flip.h>
#include <linux/tty.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/irq.h>
#include <asm/setup.h>
struct serial_state {
struct tty_port tport;
struct circ_buf xmit;
struct async_icount icount;
unsigned long port;
int baud_base;
int custom_divisor;
int read_status_mask;
int ignore_status_mask;
int timeout;
int quot;
int IER; /* Interrupt Enable Register */
int MCR; /* Modem control register */
int x_char; /* xon/xoff character */
};
static struct tty_driver *serial_driver;
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
#define XMIT_FIFO_SIZE 1
static unsigned char current_ctl_bits;
static void change_speed(struct tty_struct *tty, struct serial_state *info,
const struct ktermios *old);
static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
static struct serial_state serial_state;
/* some serial hardware definitions */
#define SDR_OVRUN (1<<15)
#define SDR_RBF (1<<14)
#define SDR_TBE (1<<13)
#define SDR_TSRE (1<<12)
#define SERPER_PARENB (1<<15)
#define AC_SETCLR (1<<15)
#define AC_UARTBRK (1<<11)
#define SER_DTR (1<<7)
#define SER_RTS (1<<6)
#define SER_DCD (1<<5)
#define SER_CTS (1<<4)
#define SER_DSR (1<<3)
static __inline__ void rtsdtr_ctrl(int bits)
{
ciab.pra = ((bits & (SER_RTS | SER_DTR)) ^ (SER_RTS | SER_DTR)) | (ciab.pra & ~(SER_RTS | SER_DTR));
}
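/*
* Illustrative note (not in the original source): the CIA lines are active
* low, hence the XOR above. Passing bits == SER_RTS | SER_DTR XORs to 0 and
* clears both pra bits (both lines asserted); passing bits == 0 XORs to
* SER_RTS | SER_DTR and raises both pra bits (both lines dropped). All other
* pra bits are preserved by the final mask.
*/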
/*
* ------------------------------------------------------------
* rs_stop() and rs_start()
*
* These routines are called before setting or resetting tty->flow.stopped.
* They enable or disable transmitter interrupts, as necessary.
* ------------------------------------------------------------
*/
static void rs_stop(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
local_irq_save(flags);
if (info->IER & UART_IER_THRI) {
info->IER &= ~UART_IER_THRI;
/* disable Tx interrupt and remove any pending interrupts */
amiga_custom.intena = IF_TBE;
mb();
amiga_custom.intreq = IF_TBE;
mb();
}
local_irq_restore(flags);
}
static void rs_start(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
local_irq_save(flags);
if (info->xmit.head != info->xmit.tail
&& info->xmit.buf
&& !(info->IER & UART_IER_THRI)) {
info->IER |= UART_IER_THRI;
amiga_custom.intena = IF_SETCLR | IF_TBE;
mb();
/* set a pending Tx Interrupt, transmitter should restart now */
amiga_custom.intreq = IF_SETCLR | IF_TBE;
mb();
}
local_irq_restore(flags);
}
/*
* ----------------------------------------------------------------------
*
* Here start the interrupt handling routines.
*
* -----------------------------------------------------------------------
*/
static void receive_chars(struct serial_state *info)
{
int status;
int serdatr;
unsigned char ch, flag;
struct async_icount *icount;
int oe = 0;
icount = &info->icount;
status = UART_LSR_DR; /* We obviously have a character! */
serdatr = amiga_custom.serdatr;
mb();
amiga_custom.intreq = IF_RBF;
mb();
if((serdatr & 0x1ff) == 0)
status |= UART_LSR_BI;
if(serdatr & SDR_OVRUN)
status |= UART_LSR_OE;
ch = serdatr & 0xff;
icount->rx++;
#ifdef SERIAL_DEBUG_INTR
printk("DR%02x:%02x...", ch, status);
#endif
flag = TTY_NORMAL;
/*
* We don't handle parity or frame errors - but I have left
* the code in, since I'm not sure that the errors can't be
* detected.
*/
if (status & (UART_LSR_BI | UART_LSR_PE |
UART_LSR_FE | UART_LSR_OE)) {
/*
* For statistics only
*/
if (status & UART_LSR_BI) {
status &= ~(UART_LSR_FE | UART_LSR_PE);
icount->brk++;
} else if (status & UART_LSR_PE)
icount->parity++;
else if (status & UART_LSR_FE)
icount->frame++;
if (status & UART_LSR_OE)
icount->overrun++;
/*
* Now check to see if character should be
* ignored, and mask off conditions which
* should be ignored.
*/
if (status & info->ignore_status_mask)
goto out;
status &= info->read_status_mask;
if (status & (UART_LSR_BI)) {
#ifdef SERIAL_DEBUG_INTR
printk("handling break....");
#endif
flag = TTY_BREAK;
if (info->tport.flags & ASYNC_SAK)
do_SAK(info->tport.tty);
} else if (status & UART_LSR_PE)
flag = TTY_PARITY;
else if (status & UART_LSR_FE)
flag = TTY_FRAME;
if (status & UART_LSR_OE) {
/*
* Overrun is special, since it's
* reported immediately, and doesn't
* affect the current character
*/
oe = 1;
}
}
tty_insert_flip_char(&info->tport, ch, flag);
if (oe == 1)
tty_insert_flip_char(&info->tport, 0, TTY_OVERRUN);
tty_flip_buffer_push(&info->tport);
out:
return;
}
static void transmit_chars(struct serial_state *info)
{
amiga_custom.intreq = IF_TBE;
mb();
if (info->x_char) {
amiga_custom.serdat = info->x_char | 0x100;
mb();
info->icount.tx++;
info->x_char = 0;
return;
}
if (info->xmit.head == info->xmit.tail
|| info->tport.tty->flow.stopped
|| info->tport.tty->hw_stopped) {
info->IER &= ~UART_IER_THRI;
amiga_custom.intena = IF_TBE;
mb();
return;
}
amiga_custom.serdat = info->xmit.buf[info->xmit.tail++] | 0x100;
mb();
info->xmit.tail = info->xmit.tail & (UART_XMIT_SIZE - 1);
info->icount.tx++;
if (CIRC_CNT(info->xmit.head,
info->xmit.tail,
UART_XMIT_SIZE) < WAKEUP_CHARS)
tty_wakeup(info->tport.tty);
#ifdef SERIAL_DEBUG_INTR
printk("THRE...");
#endif
if (info->xmit.head == info->xmit.tail) {
amiga_custom.intena = IF_TBE;
mb();
info->IER &= ~UART_IER_THRI;
}
}
static void check_modem_status(struct serial_state *info)
{
struct tty_port *port = &info->tport;
unsigned char status = ciab.pra & (SER_DCD | SER_CTS | SER_DSR);
unsigned char dstatus;
struct async_icount *icount;
/* Determine bits that have changed */
dstatus = status ^ current_ctl_bits;
current_ctl_bits = status;
if (dstatus) {
icount = &info->icount;
/* update input line counters */
if (dstatus & SER_DSR)
icount->dsr++;
if (dstatus & SER_DCD) {
icount->dcd++;
}
if (dstatus & SER_CTS)
icount->cts++;
wake_up_interruptible(&port->delta_msr_wait);
}
if (tty_port_check_carrier(port) && (dstatus & SER_DCD)) {
#if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR))
printk("ttyS%d CD now %s...", info->line,
(!(status & SER_DCD)) ? "on" : "off");
#endif
if (!(status & SER_DCD))
wake_up_interruptible(&port->open_wait);
else {
#ifdef SERIAL_DEBUG_OPEN
printk("doing serial hangup...");
#endif
if (port->tty)
tty_hangup(port->tty);
}
}
if (tty_port_cts_enabled(port)) {
if (port->tty->hw_stopped) {
if (!(status & SER_CTS)) {
#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW))
printk("CTS tx start...");
#endif
port->tty->hw_stopped = false;
info->IER |= UART_IER_THRI;
amiga_custom.intena = IF_SETCLR | IF_TBE;
mb();
/* set a pending Tx Interrupt, transmitter should restart now */
amiga_custom.intreq = IF_SETCLR | IF_TBE;
mb();
tty_wakeup(port->tty);
return;
}
} else {
if ((status & SER_CTS)) {
#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW))
printk("CTS tx stop...");
#endif
port->tty->hw_stopped = true;
info->IER &= ~UART_IER_THRI;
/* disable Tx interrupt and remove any pending interrupts */
amiga_custom.intena = IF_TBE;
mb();
amiga_custom.intreq = IF_TBE;
mb();
}
}
}
}
static irqreturn_t ser_vbl_int( int irq, void *data)
{
/* vbl is just a periodic interrupt we tie into to update modem status */
struct serial_state *info = data;
/*
* TBD - is it better to unregister from this interrupt or to
* ignore it if MSI is clear ?
*/
if(info->IER & UART_IER_MSI)
check_modem_status(info);
return IRQ_HANDLED;
}
static irqreturn_t ser_rx_int(int irq, void *dev_id)
{
struct serial_state *info = dev_id;
#ifdef SERIAL_DEBUG_INTR
printk("ser_rx_int...");
#endif
if (!info->tport.tty)
return IRQ_NONE;
receive_chars(info);
#ifdef SERIAL_DEBUG_INTR
printk("end.\n");
#endif
return IRQ_HANDLED;
}
static irqreturn_t ser_tx_int(int irq, void *dev_id)
{
struct serial_state *info = dev_id;
if (amiga_custom.serdatr & SDR_TBE) {
#ifdef SERIAL_DEBUG_INTR
printk("ser_tx_int...");
#endif
if (!info->tport.tty)
return IRQ_NONE;
transmit_chars(info);
#ifdef SERIAL_DEBUG_INTR
printk("end.\n");
#endif
}
return IRQ_HANDLED;
}
/*
* -------------------------------------------------------------------
* Here ends the serial interrupt routines.
* -------------------------------------------------------------------
*/
/*
* ---------------------------------------------------------------
* Low level utility subroutines for the serial driver: routines to
* figure out the appropriate timeout for an interrupt chain, routines
* to initialize and startup a serial port, and routines to shutdown a
* serial port. Useful stuff like that.
* ---------------------------------------------------------------
*/
static int startup(struct tty_struct *tty, struct serial_state *info)
{
struct tty_port *port = &info->tport;
unsigned long flags;
int retval=0;
unsigned long page;
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
local_irq_save(flags);
if (tty_port_initialized(port)) {
free_page(page);
goto errout;
}
if (info->xmit.buf)
free_page(page);
else
info->xmit.buf = (unsigned char *) page;
#ifdef SERIAL_DEBUG_OPEN
printk("starting up ttys%d ...", info->line);
#endif
/* Clear anything in the input buffer */
amiga_custom.intreq = IF_RBF;
mb();
retval = request_irq(IRQ_AMIGA_VERTB, ser_vbl_int, 0, "serial status", info);
if (retval) {
if (capable(CAP_SYS_ADMIN)) {
set_bit(TTY_IO_ERROR, &tty->flags);
retval = 0;
}
goto errout;
}
/* enable both Rx and Tx interrupts */
amiga_custom.intena = IF_SETCLR | IF_RBF | IF_TBE;
mb();
info->IER = UART_IER_MSI;
/* remember current state of the DCD and CTS bits */
current_ctl_bits = ciab.pra & (SER_DCD | SER_CTS | SER_DSR);
info->MCR = 0;
if (C_BAUD(tty))
info->MCR = SER_DTR | SER_RTS;
rtsdtr_ctrl(info->MCR);
clear_bit(TTY_IO_ERROR, &tty->flags);
info->xmit.head = info->xmit.tail = 0;
/*
* and set the speed of the serial port
*/
change_speed(tty, info, NULL);
tty_port_set_initialized(port, true);
local_irq_restore(flags);
return 0;
errout:
local_irq_restore(flags);
return retval;
}
/*
* This routine will shutdown a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on.
*/
static void shutdown(struct tty_struct *tty, struct serial_state *info)
{
unsigned long flags;
if (!tty_port_initialized(&info->tport))
return;
#ifdef SERIAL_DEBUG_OPEN
printk("Shutting down serial port %d ....\n", info->line);
#endif
local_irq_save(flags); /* Disable interrupts */
/*
* clear delta_msr_wait queue to avoid mem leaks: we may free the irq
* here so the queue might never be woken up
*/
wake_up_interruptible(&info->tport.delta_msr_wait);
/*
* Free the IRQ, if necessary
*/
free_irq(IRQ_AMIGA_VERTB, info);
free_page((unsigned long)info->xmit.buf);
info->xmit.buf = NULL;
info->IER = 0;
amiga_custom.intena = IF_RBF | IF_TBE;
mb();
/* disable break condition */
amiga_custom.adkcon = AC_UARTBRK;
mb();
if (C_HUPCL(tty))
info->MCR &= ~(SER_DTR|SER_RTS);
rtsdtr_ctrl(info->MCR);
set_bit(TTY_IO_ERROR, &tty->flags);
tty_port_set_initialized(&info->tport, false);
local_irq_restore(flags);
}
/*
* This routine is called to set the UART divisor registers to match
* the specified baud rate for a serial port.
*/
static void change_speed(struct tty_struct *tty, struct serial_state *info,
const struct ktermios *old_termios)
{
struct tty_port *port = &info->tport;
int quot = 0, baud_base, baud;
unsigned cflag, cval = 0;
int bits;
unsigned long flags;
cflag = tty->termios.c_cflag;
/* Byte size is always 8 bits plus parity bit if requested */
cval = 3; bits = 10;
if (cflag & CSTOPB) {
cval |= 0x04;
bits++;
}
if (cflag & PARENB) {
cval |= UART_LCR_PARITY;
bits++;
}
if (!(cflag & PARODD))
cval |= UART_LCR_EPAR;
if (cflag & CMSPAR)
cval |= UART_LCR_SPAR;
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
if (!baud)
baud = 9600; /* B0 transition handled in rs_set_termios */
baud_base = info->baud_base;
if (baud == 38400 && (port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)
quot = info->custom_divisor;
else {
if (baud == 134)
/* Special case since 134 is really 134.5 */
quot = (2*baud_base / 269);
else if (baud)
quot = baud_base / baud;
}
/* If the quotient is zero refuse the change */
if (!quot && old_termios) {
/* FIXME: Will need updating for new tty in the end */
tty->termios.c_cflag &= ~CBAUD;
tty->termios.c_cflag |= (old_termios->c_cflag & CBAUD);
baud = tty_get_baud_rate(tty);
if (!baud)
baud = 9600;
if (baud == 38400 &&
(port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)
quot = info->custom_divisor;
else {
if (baud == 134)
/* Special case since 134 is really 134.5 */
quot = (2*baud_base / 269);
else if (baud)
quot = baud_base / baud;
}
}
/* As a last resort, if the quotient is zero, default to 9600 bps */
if (!quot)
quot = baud_base / 9600;
info->quot = quot;
info->timeout = (XMIT_FIFO_SIZE*HZ*bits*quot) / baud_base;
info->timeout += HZ/50; /* Add .02 seconds of slop */
/* CTS flow control flag and modem status interrupts */
info->IER &= ~UART_IER_MSI;
if (port->flags & ASYNC_HARDPPS_CD)
info->IER |= UART_IER_MSI;
tty_port_set_cts_flow(port, cflag & CRTSCTS);
if (cflag & CRTSCTS)
info->IER |= UART_IER_MSI;
tty_port_set_check_carrier(port, ~cflag & CLOCAL);
if (~cflag & CLOCAL)
info->IER |= UART_IER_MSI;
/* TBD:
* Does clearing IER_MSI imply that we should disable the VBL interrupt?
*/
/*
* Set up parity check flag
*/
info->read_status_mask = UART_LSR_OE | UART_LSR_DR;
if (I_INPCK(tty))
info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (I_BRKINT(tty) || I_PARMRK(tty))
info->read_status_mask |= UART_LSR_BI;
/*
* Characters to ignore
*/
info->ignore_status_mask = 0;
if (I_IGNPAR(tty))
info->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
if (I_IGNBRK(tty)) {
info->ignore_status_mask |= UART_LSR_BI;
/*
* If we're ignoring parity and break indicators, ignore
* overruns too (for real raw support).
*/
if (I_IGNPAR(tty))
info->ignore_status_mask |= UART_LSR_OE;
}
/*
* !!! ignore all characters if CREAD is not set
*/
if ((cflag & CREAD) == 0)
info->ignore_status_mask |= UART_LSR_DR;
local_irq_save(flags);
{
short serper;
/* Set up the baud rate */
serper = quot - 1;
/* Enable or disable parity bit */
if(cval & UART_LCR_PARITY)
serper |= (SERPER_PARENB);
amiga_custom.serper = serper;
mb();
}
local_irq_restore(flags);
}
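/*
* Illustrative note (not in the original source): assuming, purely for the
* arithmetic, baud_base == 31250, a requested 9600 bps gives
* quot = 31250 / 9600 = 3 and serper = quot - 1 = 2. The 134.5 bps special
* case computes quot = 2 * baud_base / 269, which is baud_base / 134.5
* without fractional arithmetic.
*/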
static int rs_put_char(struct tty_struct *tty, u8 ch)
{
struct serial_state *info;
unsigned long flags;
info = tty->driver_data;
if (!info->xmit.buf)
return 0;
local_irq_save(flags);
if (CIRC_SPACE(info->xmit.head,
info->xmit.tail,
UART_XMIT_SIZE) == 0) {
local_irq_restore(flags);
return 0;
}
info->xmit.buf[info->xmit.head++] = ch;
info->xmit.head &= UART_XMIT_SIZE - 1;
local_irq_restore(flags);
return 1;
}
static void rs_flush_chars(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (info->xmit.head == info->xmit.tail
|| tty->flow.stopped
|| tty->hw_stopped
|| !info->xmit.buf)
return;
local_irq_save(flags);
info->IER |= UART_IER_THRI;
amiga_custom.intena = IF_SETCLR | IF_TBE;
mb();
/* set a pending Tx Interrupt, transmitter should restart now */
amiga_custom.intreq = IF_SETCLR | IF_TBE;
mb();
local_irq_restore(flags);
}
static ssize_t rs_write(struct tty_struct * tty, const u8 *buf, size_t count)
{
int c, ret = 0;
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (!info->xmit.buf)
return 0;
local_irq_save(flags);
while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head,
info->xmit.tail,
UART_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0) {
break;
}
memcpy(info->xmit.buf + info->xmit.head, buf, c);
info->xmit.head = (info->xmit.head + c) & (UART_XMIT_SIZE - 1);
buf += c;
count -= c;
ret += c;
}
local_irq_restore(flags);
if (info->xmit.head != info->xmit.tail
&& !tty->flow.stopped
&& !tty->hw_stopped
&& !(info->IER & UART_IER_THRI)) {
info->IER |= UART_IER_THRI;
local_irq_disable();
amiga_custom.intena = IF_SETCLR | IF_TBE;
mb();
/* set a pending Tx Interrupt, transmitter should restart now */
amiga_custom.intreq = IF_SETCLR | IF_TBE;
mb();
local_irq_restore(flags);
}
return ret;
}
static unsigned int rs_write_room(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
return CIRC_SPACE(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE);
}
static unsigned int rs_chars_in_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
return CIRC_CNT(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE);
}
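/*
* Illustrative note (not in the original source): CIRC_CNT() and CIRC_SPACE()
* work on the power-of-two ring. With UART_XMIT_SIZE == 4096, head == 10 and
* tail == 4090, CIRC_CNT = (10 - 4090) & 4095 = 16 bytes queued, and
* CIRC_SPACE = 4096 - 16 - 1 = 4079 bytes free; one slot always stays unused
* so that a full ring can be told apart from an empty one.
*/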
static void rs_flush_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
local_irq_save(flags);
info->xmit.head = info->xmit.tail = 0;
local_irq_restore(flags);
tty_wakeup(tty);
}
/*
* This function is used to send a high-priority XON/XOFF character to
* the device
*/
static void rs_send_xchar(struct tty_struct *tty, char ch)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
info->x_char = ch;
if (ch) {
/* Make sure transmit interrupts are on */
/* Check this ! */
local_irq_save(flags);
if(!(amiga_custom.intenar & IF_TBE)) {
amiga_custom.intena = IF_SETCLR | IF_TBE;
mb();
/* set a pending Tx Interrupt, transmitter should restart now */
amiga_custom.intreq = IF_SETCLR | IF_TBE;
mb();
}
local_irq_restore(flags);
info->IER |= UART_IER_THRI;
}
}
/*
* ------------------------------------------------------------
* rs_throttle()
*
* This routine is called by the upper-layer tty layer to signal that
* incoming characters should be throttled.
* ------------------------------------------------------------
*/
static void rs_throttle(struct tty_struct * tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
#ifdef SERIAL_DEBUG_THROTTLE
printk("throttle %s ....\n", tty_name(tty));
#endif
if (I_IXOFF(tty))
rs_send_xchar(tty, STOP_CHAR(tty));
if (C_CRTSCTS(tty))
info->MCR &= ~SER_RTS;
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
}
static void rs_unthrottle(struct tty_struct * tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
#ifdef SERIAL_DEBUG_THROTTLE
printk("unthrottle %s ....\n", tty_name(tty));
#endif
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
else
rs_send_xchar(tty, START_CHAR(tty));
}
if (C_CRTSCTS(tty))
info->MCR |= SER_RTS;
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
}
/*
* ------------------------------------------------------------
* rs_ioctl() and friends
* ------------------------------------------------------------
*/
static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
struct serial_state *state = tty->driver_data;
unsigned int close_delay, closing_wait;
tty_lock(tty);
close_delay = jiffies_to_msecs(state->tport.close_delay) / 10;
closing_wait = state->tport.closing_wait;
if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
closing_wait = jiffies_to_msecs(closing_wait) / 10;
ss->line = tty->index;
ss->port = state->port;
ss->flags = state->tport.flags;
ss->xmit_fifo_size = XMIT_FIFO_SIZE;
ss->baud_base = state->baud_base;
ss->close_delay = close_delay;
ss->closing_wait = closing_wait;
ss->custom_divisor = state->custom_divisor;
tty_unlock(tty);
return 0;
}
static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
struct serial_state *state = tty->driver_data;
struct tty_port *port = &state->tport;
bool change_spd;
int retval = 0;
unsigned int close_delay, closing_wait;
tty_lock(tty);
change_spd = ((ss->flags ^ port->flags) & ASYNC_SPD_MASK) ||
ss->custom_divisor != state->custom_divisor;
if (ss->irq || ss->port != state->port ||
ss->xmit_fifo_size != XMIT_FIFO_SIZE) {
tty_unlock(tty);
return -EINVAL;
}
close_delay = msecs_to_jiffies(ss->close_delay * 10);
closing_wait = ss->closing_wait;
if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
closing_wait = msecs_to_jiffies(closing_wait * 10);
if (!capable(CAP_SYS_ADMIN)) {
if ((ss->baud_base != state->baud_base) ||
(close_delay != port->close_delay) ||
(closing_wait != port->closing_wait) ||
((ss->flags & ~ASYNC_USR_MASK) !=
(port->flags & ~ASYNC_USR_MASK))) {
tty_unlock(tty);
return -EPERM;
}
port->flags = ((port->flags & ~ASYNC_USR_MASK) |
(ss->flags & ASYNC_USR_MASK));
state->custom_divisor = ss->custom_divisor;
goto check_and_exit;
}
if (ss->baud_base < 9600) {
tty_unlock(tty);
return -EINVAL;
}
/*
* OK, past this point, all the error checking has been done.
* At this point, we start making changes.....
*/
state->baud_base = ss->baud_base;
port->flags = ((port->flags & ~ASYNC_FLAGS) |
(ss->flags & ASYNC_FLAGS));
state->custom_divisor = ss->custom_divisor;
port->close_delay = close_delay;
port->closing_wait = closing_wait;
check_and_exit:
if (tty_port_initialized(port)) {
if (change_spd) {
/* warn about deprecation unless clearing */
if (ss->flags & ASYNC_SPD_MASK)
dev_warn_ratelimited(tty->dev, "use of SPD flags is deprecated\n");
change_speed(tty, state, NULL);
}
} else
retval = startup(tty, state);
tty_unlock(tty);
return retval;
}
/*
* get_lsr_info - get line status register info
*
* Purpose: Let user call ioctl() to get info when the UART physically
* is emptied. On bus types like RS485, the transmitter must
* release the bus after transmitting. This must be done when
* the transmit shift register is empty, not when the
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
static int get_lsr_info(struct serial_state *info, unsigned int __user *value)
{
unsigned char status;
unsigned int result;
unsigned long flags;
local_irq_save(flags);
status = amiga_custom.serdatr;
mb();
local_irq_restore(flags);
result = ((status & SDR_TSRE) ? TIOCSER_TEMT : 0);
if (copy_to_user(value, &result, sizeof(int)))
return -EFAULT;
return 0;
}
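/*
* Illustrative sketch (not part of the original file): a user-space RS485
* driver polls this before releasing the bus. Assuming fd is open on the
* port:
*
*	unsigned int lsr;
*
*	if (ioctl(fd, TIOCSERGETLSR, &lsr) == 0 && (lsr & TIOCSER_TEMT))
*		drop_driver_enable();
*
* where drop_driver_enable() stands in for whatever releases the bus.
*/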
static int rs_tiocmget(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
unsigned char control, status;
unsigned long flags;
if (tty_io_error(tty))
return -EIO;
control = info->MCR;
local_irq_save(flags);
status = ciab.pra;
local_irq_restore(flags);
return ((control & SER_RTS) ? TIOCM_RTS : 0)
| ((control & SER_DTR) ? TIOCM_DTR : 0)
| (!(status & SER_DCD) ? TIOCM_CAR : 0)
| (!(status & SER_DSR) ? TIOCM_DSR : 0)
| (!(status & SER_CTS) ? TIOCM_CTS : 0);
}
static int rs_tiocmset(struct tty_struct *tty, unsigned int set,
unsigned int clear)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (tty_io_error(tty))
return -EIO;
local_irq_save(flags);
if (set & TIOCM_RTS)
info->MCR |= SER_RTS;
if (set & TIOCM_DTR)
info->MCR |= SER_DTR;
if (clear & TIOCM_RTS)
info->MCR &= ~SER_RTS;
if (clear & TIOCM_DTR)
info->MCR &= ~SER_DTR;
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
return 0;
}
/*
* rs_break() --- routine which turns the break handling on or off
*/
static int rs_break(struct tty_struct *tty, int break_state)
{
unsigned long flags;
local_irq_save(flags);
if (break_state == -1)
amiga_custom.adkcon = AC_SETCLR | AC_UARTBRK;
else
amiga_custom.adkcon = AC_UARTBRK;
mb();
local_irq_restore(flags);
return 0;
}
/*
* Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
* Return: write counters to the user passed counter struct
* NB: both 1->0 and 0->1 transitions are counted except for
* RI where only 0->1 is counted.
*/
static int rs_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct serial_state *info = tty->driver_data;
struct async_icount cnow;
unsigned long flags;
local_irq_save(flags);
cnow = info->icount;
local_irq_restore(flags);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->rx = cnow.rx;
icount->tx = cnow.tx;
icount->frame = cnow.frame;
icount->overrun = cnow.overrun;
icount->parity = cnow.parity;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
static int rs_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct serial_state *info = tty->driver_data;
struct async_icount cprev, cnow; /* kernel counter temps */
void __user *argp = (void __user *)arg;
unsigned long flags;
DEFINE_WAIT(wait);
int ret;
if ((cmd != TIOCSERCONFIG) &&
(cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
if (tty_io_error(tty))
return -EIO;
}
switch (cmd) {
case TIOCSERCONFIG:
return 0;
case TIOCSERGETLSR: /* Get line status register */
return get_lsr_info(info, argp);
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
* (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
* Caller should use TIOCGICOUNT to see which one it was
*/
case TIOCMIWAIT:
local_irq_save(flags);
/* note the counters on entry */
cprev = info->icount;
local_irq_restore(flags);
while (1) {
prepare_to_wait(&info->tport.delta_msr_wait,
&wait, TASK_INTERRUPTIBLE);
local_irq_save(flags);
cnow = info->icount; /* atomic copy */
local_irq_restore(flags);
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
ret = -EIO; /* no change => error */
break;
}
			if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
			    ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
			    ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
			    ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
ret = 0;
break;
}
schedule();
/* see if a signal did it */
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
cprev = cnow;
}
finish_wait(&info->tport.delta_msr_wait, &wait);
return ret;
default:
return -ENOIOCTLCMD;
}
return 0;
}
static void rs_set_termios(struct tty_struct *tty, const struct ktermios *old_termios)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
unsigned int cflag = tty->termios.c_cflag;
change_speed(tty, info, old_termios);
/* Handle transition to B0 status */
if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) {
info->MCR &= ~(SER_DTR|SER_RTS);
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
}
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
info->MCR |= SER_DTR;
if (!C_CRTSCTS(tty) || !tty_throttled(tty))
info->MCR |= SER_RTS;
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
}
/* Handle turning off CRTSCTS */
if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
tty->hw_stopped = false;
rs_start(tty);
}
#if 0
/*
* No need to wake up processes in open wait, since they
* sample the CLOCAL flag once, and don't recheck it.
* XXX It's not clear whether the current behavior is correct
* or not. Hence, this may change.....
*/
if (!(old_termios->c_cflag & CLOCAL) && C_CLOCAL(tty))
wake_up_interruptible(&info->open_wait);
#endif
}
/*
* ------------------------------------------------------------
* rs_close()
*
* This routine is called when the serial port gets closed. First, we
* wait for the last remaining data to be sent. Then, we unlink its
* async structure from the interrupt chain if necessary, and we free
* that IRQ if nothing is left in the chain.
* ------------------------------------------------------------
*/
static void rs_close(struct tty_struct *tty, struct file * filp)
{
struct serial_state *state = tty->driver_data;
struct tty_port *port = &state->tport;
if (tty_port_close_start(port, tty, filp) == 0)
return;
/*
* At this point we stop accepting input. To do this, we
* disable the receive line status interrupts, and tell the
* interrupt driver to stop checking the data ready bit in the
* line status register.
*/
state->read_status_mask &= ~UART_LSR_DR;
if (tty_port_initialized(port)) {
/* disable receive interrupts */
amiga_custom.intena = IF_RBF;
mb();
/* clear any pending receive interrupt */
amiga_custom.intreq = IF_RBF;
mb();
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially
* important if there is a transmit FIFO!
*/
rs_wait_until_sent(tty, state->timeout);
}
shutdown(tty, state);
rs_flush_buffer(tty);
tty_ldisc_flush(tty);
port->tty = NULL;
tty_port_close_end(port, tty);
}
/*
* rs_wait_until_sent() --- wait until the transmitter is empty
*/
static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct serial_state *info = tty->driver_data;
unsigned long orig_jiffies, char_time;
int lsr;
orig_jiffies = jiffies;
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
* interval should also be less than the timeout.
*
* Note: we have to use pretty tight timings here to satisfy
* the NIST-PCTS.
*/
char_time = (info->timeout - HZ/50) / XMIT_FIFO_SIZE;
char_time = char_time / 5;
if (char_time == 0)
char_time = 1;
if (timeout)
char_time = min_t(unsigned long, char_time, timeout);
/*
* If the transmitter hasn't cleared in twice the approximate
* amount of time to send the entire FIFO, it probably won't
* ever clear. This assumes the UART isn't doing flow
* control, which is currently the case. Hence, if it ever
* takes longer than info->timeout, this is probably due to a
* UART bug of some kind. So, we clamp the timeout parameter at
* 2*info->timeout.
*/
if (!timeout || timeout > 2*info->timeout)
timeout = 2*info->timeout;
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("In rs_wait_until_sent(%d) check=%lu...", timeout, char_time);
printk("jiff=%lu...", jiffies);
#endif
while(!((lsr = amiga_custom.serdatr) & SDR_TSRE)) {
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("serdatr = %d (jiff=%lu)...", lsr, jiffies);
#endif
msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
break;
}
__set_current_state(TASK_RUNNING);
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
#endif
}
/*
* rs_hangup() --- called by tty_hangup() when a hangup is signaled.
*/
static void rs_hangup(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
rs_flush_buffer(tty);
shutdown(tty, info);
info->tport.count = 0;
tty_port_set_active(&info->tport, false);
info->tport.tty = NULL;
wake_up_interruptible(&info->tport.open_wait);
}
/*
* This routine is called whenever a serial port is opened. It
* enables interrupts for a serial port, linking in its async structure into
* the IRQ chain. It also performs the serial-specific
* initialization for the tty structure.
*/
static int rs_open(struct tty_struct *tty, struct file * filp)
{
struct tty_port *port = tty->port;
struct serial_state *info = container_of(port, struct serial_state,
tport);
int retval;
port->count++;
port->tty = tty;
tty->driver_data = info;
retval = startup(tty, info);
if (retval) {
return retval;
}
return tty_port_block_til_ready(port, tty, filp);
}
/*
* /proc fs routines....
*/
static inline void line_info(struct seq_file *m, int line,
struct serial_state *state)
{
char stat_buf[30], control, status;
unsigned long flags;
seq_printf(m, "%d: uart:amiga_builtin", line);
local_irq_save(flags);
status = ciab.pra;
control = tty_port_initialized(&state->tport) ? state->MCR : status;
local_irq_restore(flags);
stat_buf[0] = 0;
stat_buf[1] = 0;
if(!(control & SER_RTS))
strcat(stat_buf, "|RTS");
if(!(status & SER_CTS))
strcat(stat_buf, "|CTS");
if(!(control & SER_DTR))
strcat(stat_buf, "|DTR");
if(!(status & SER_DSR))
strcat(stat_buf, "|DSR");
if(!(status & SER_DCD))
strcat(stat_buf, "|CD");
if (state->quot)
seq_printf(m, " baud:%d", state->baud_base / state->quot);
seq_printf(m, " tx:%d rx:%d", state->icount.tx, state->icount.rx);
if (state->icount.frame)
seq_printf(m, " fe:%d", state->icount.frame);
if (state->icount.parity)
seq_printf(m, " pe:%d", state->icount.parity);
if (state->icount.brk)
seq_printf(m, " brk:%d", state->icount.brk);
if (state->icount.overrun)
seq_printf(m, " oe:%d", state->icount.overrun);
/*
* Last thing is the RS-232 status lines
*/
seq_printf(m, " %s\n", stat_buf+1);
}
static int rs_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "serinfo:1.0 driver:4.30\n");
line_info(m, 0, &serial_state);
return 0;
}
/*
* ---------------------------------------------------------------------
* rs_init() and friends
*
* rs_init() is called at boot-time to initialize the serial driver.
* ---------------------------------------------------------------------
*/
static const struct tty_operations serial_ops = {
.open = rs_open,
.close = rs_close,
.write = rs_write,
.put_char = rs_put_char,
.flush_chars = rs_flush_chars,
.write_room = rs_write_room,
.chars_in_buffer = rs_chars_in_buffer,
.flush_buffer = rs_flush_buffer,
.ioctl = rs_ioctl,
.throttle = rs_throttle,
.unthrottle = rs_unthrottle,
.set_termios = rs_set_termios,
.stop = rs_stop,
.start = rs_start,
.hangup = rs_hangup,
.break_ctl = rs_break,
.send_xchar = rs_send_xchar,
.wait_until_sent = rs_wait_until_sent,
.tiocmget = rs_tiocmget,
.tiocmset = rs_tiocmset,
.get_icount = rs_get_icount,
.set_serial = set_serial_info,
.get_serial = get_serial_info,
.proc_show = rs_proc_show,
};
static bool amiga_carrier_raised(struct tty_port *port)
{
return !(ciab.pra & SER_DCD);
}
static void amiga_dtr_rts(struct tty_port *port, bool active)
{
struct serial_state *info = container_of(port, struct serial_state,
tport);
unsigned long flags;
if (active)
info->MCR |= SER_DTR|SER_RTS;
else
info->MCR &= ~(SER_DTR|SER_RTS);
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
}
static const struct tty_port_operations amiga_port_ops = {
.carrier_raised = amiga_carrier_raised,
.dtr_rts = amiga_dtr_rts,
};
/*
* The serial driver boot-time initialization code!
*/
static int __init amiga_serial_probe(struct platform_device *pdev)
{
struct serial_state *state = &serial_state;
struct tty_driver *driver;
unsigned long flags;
int error;
driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
if (IS_ERR(driver))
return PTR_ERR(driver);
/* Initialize the tty_driver structure */
driver->driver_name = "amiserial";
driver->name = "ttyS";
driver->major = TTY_MAJOR;
driver->minor_start = 64;
driver->type = TTY_DRIVER_TYPE_SERIAL;
driver->subtype = SERIAL_TYPE_NORMAL;
driver->init_termios = tty_std_termios;
driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
tty_set_operations(driver, &serial_ops);
memset(state, 0, sizeof(*state));
state->port = (int)&amiga_custom.serdatr; /* Just to give it a value */
tty_port_init(&state->tport);
state->tport.ops = &amiga_port_ops;
tty_port_link_device(&state->tport, driver, 0);
error = tty_register_driver(driver);
if (error)
goto fail_tty_driver_kref_put;
printk(KERN_INFO "ttyS0 is the amiga builtin serial port\n");
/* Hardware set up */
state->baud_base = amiga_colorclock;
/* set ISRs, and then disable the rx interrupts */
error = request_irq(IRQ_AMIGA_TBE, ser_tx_int, 0, "serial TX", state);
if (error)
goto fail_unregister;
error = request_irq(IRQ_AMIGA_RBF, ser_rx_int, 0,
"serial RX", state);
if (error)
goto fail_free_irq;
local_irq_save(flags);
/* turn off Rx and Tx interrupts */
amiga_custom.intena = IF_RBF | IF_TBE;
mb();
/* clear any pending interrupt */
amiga_custom.intreq = IF_RBF | IF_TBE;
mb();
local_irq_restore(flags);
/*
* set the appropriate directions for the modem control flags,
* and clear RTS and DTR
*/
ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */
ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */
platform_set_drvdata(pdev, state);
serial_driver = driver;
return 0;
fail_free_irq:
free_irq(IRQ_AMIGA_TBE, state);
fail_unregister:
tty_unregister_driver(driver);
fail_tty_driver_kref_put:
tty_port_destroy(&state->tport);
tty_driver_kref_put(driver);
return error;
}
static int __exit amiga_serial_remove(struct platform_device *pdev)
{
struct serial_state *state = platform_get_drvdata(pdev);
tty_unregister_driver(serial_driver);
tty_driver_kref_put(serial_driver);
tty_port_destroy(&state->tport);
free_irq(IRQ_AMIGA_TBE, state);
free_irq(IRQ_AMIGA_RBF, state);
return 0;
}
static struct platform_driver amiga_serial_driver = {
.remove = __exit_p(amiga_serial_remove),
.driver = {
.name = "amiga-serial",
},
};
module_platform_driver_probe(amiga_serial_driver, amiga_serial_probe);
#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE)
/*
* ------------------------------------------------------------
* Serial console driver
* ------------------------------------------------------------
*/
static void amiga_serial_putc(char c)
{
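	/* serdat: bits 0-7 are data, bit 8 the stop bit; serdatr bit 13 is TBE */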
amiga_custom.serdat = (unsigned char)c | 0x100;
while (!(amiga_custom.serdatr & 0x2000))
barrier();
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
* The console must be locked when we get here.
*/
static void serial_console_write(struct console *co, const char *s,
unsigned count)
{
unsigned short intena = amiga_custom.intenar;
amiga_custom.intena = IF_TBE;
while (count--) {
if (*s == '\n')
amiga_serial_putc('\r');
amiga_serial_putc(*s++);
}
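	/* re-enable the TX (TBE) interrupt only if it was enabled on entry */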
amiga_custom.intena = IF_SETCLR | (intena & IF_TBE);
}
static struct tty_driver *serial_console_device(struct console *c, int *index)
{
*index = 0;
return serial_driver;
}
static struct console sercons = {
.name = "ttyS",
.write = serial_console_write,
.device = serial_console_device,
.flags = CON_PRINTBUFFER,
.index = -1,
};
/*
* Register console.
*/
static int __init amiserial_console_init(void)
{
if (!MACH_IS_AMIGA)
return -ENODEV;
register_console(&sercons);
return 0;
}
console_initcall(amiserial_console_init);
#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:amiga-serial");
| linux-master | drivers/tty/amiserial.c |
// SPDX-License-Identifier: GPL-2.0
/*
* TTY driver for MIPS EJTAG Fast Debug Channels.
*
* Copyright (C) 2007-2015 Imagination Technologies Ltd
*/
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kgdb.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/uaccess.h>
#include <asm/cdmm.h>
#include <asm/irq.h>
/* Register offsets */
#define REG_FDACSR 0x00 /* FDC Access Control and Status Register */
#define REG_FDCFG 0x08 /* FDC Configuration Register */
#define REG_FDSTAT 0x10 /* FDC Status Register */
#define REG_FDRX 0x18 /* FDC Receive Register */
#define REG_FDTX(N) (0x20+0x8*(N)) /* FDC Transmit Register n (0..15) */
/* Register fields */
#define REG_FDCFG_TXINTTHRES_SHIFT 18
#define REG_FDCFG_TXINTTHRES (0x3 << REG_FDCFG_TXINTTHRES_SHIFT)
#define REG_FDCFG_TXINTTHRES_DISABLED (0x0 << REG_FDCFG_TXINTTHRES_SHIFT)
#define REG_FDCFG_TXINTTHRES_EMPTY (0x1 << REG_FDCFG_TXINTTHRES_SHIFT)
#define REG_FDCFG_TXINTTHRES_NOTFULL (0x2 << REG_FDCFG_TXINTTHRES_SHIFT)
#define REG_FDCFG_TXINTTHRES_NEAREMPTY (0x3 << REG_FDCFG_TXINTTHRES_SHIFT)
#define REG_FDCFG_RXINTTHRES_SHIFT 16
#define REG_FDCFG_RXINTTHRES (0x3 << REG_FDCFG_RXINTTHRES_SHIFT)
#define REG_FDCFG_RXINTTHRES_DISABLED (0x0 << REG_FDCFG_RXINTTHRES_SHIFT)
#define REG_FDCFG_RXINTTHRES_FULL (0x1 << REG_FDCFG_RXINTTHRES_SHIFT)
#define REG_FDCFG_RXINTTHRES_NOTEMPTY (0x2 << REG_FDCFG_RXINTTHRES_SHIFT)
#define REG_FDCFG_RXINTTHRES_NEARFULL (0x3 << REG_FDCFG_RXINTTHRES_SHIFT)
#define REG_FDCFG_TXFIFOSIZE_SHIFT 8
#define REG_FDCFG_TXFIFOSIZE (0xff << REG_FDCFG_TXFIFOSIZE_SHIFT)
#define REG_FDCFG_RXFIFOSIZE_SHIFT 0
#define REG_FDCFG_RXFIFOSIZE (0xff << REG_FDCFG_RXFIFOSIZE_SHIFT)
#define REG_FDSTAT_TXCOUNT_SHIFT 24
#define REG_FDSTAT_TXCOUNT (0xff << REG_FDSTAT_TXCOUNT_SHIFT)
#define REG_FDSTAT_RXCOUNT_SHIFT 16
#define REG_FDSTAT_RXCOUNT (0xff << REG_FDSTAT_RXCOUNT_SHIFT)
#define REG_FDSTAT_RXCHAN_SHIFT 4
#define REG_FDSTAT_RXCHAN (0xf << REG_FDSTAT_RXCHAN_SHIFT)
#define REG_FDSTAT_RXE BIT(3) /* Rx Empty */
#define REG_FDSTAT_RXF BIT(2) /* Rx Full */
#define REG_FDSTAT_TXE BIT(1) /* Tx Empty */
#define REG_FDSTAT_TXF BIT(0) /* Tx Full */
/* Default channel for the early console */
#define CONSOLE_CHANNEL 1
#define NUM_TTY_CHANNELS 16
#define RX_BUF_SIZE 1024
/*
* When the IRQ is unavailable, the FDC state must be polled for incoming data
* and space becoming available in TX FIFO.
*/
#define FDC_TTY_POLL (HZ / 50)
struct mips_ejtag_fdc_tty;
/**
* struct mips_ejtag_fdc_tty_port - Wrapper struct for FDC tty_port.
* @port: TTY port data
* @driver: TTY driver.
* @rx_lock: Lock for rx_buf.
* This protects between the hard interrupt and user
* context. It's also held during read SWITCH operations.
* @rx_buf: Read buffer.
* @xmit_lock: Lock for xmit_*, and port.xmit_buf.
* This protects between user context and kernel thread.
* It is used from chars_in_buffer()/write_room() TTY
* callbacks which are used during wait operations, so a
* mutex is unsuitable.
* @xmit_cnt: Size of xmit buffer contents.
* @xmit_head: Head of xmit buffer where data is written.
* @xmit_tail: Tail of xmit buffer where data is read.
* @xmit_empty: Completion for xmit buffer being empty.
*/
struct mips_ejtag_fdc_tty_port {
struct tty_port port;
struct mips_ejtag_fdc_tty *driver;
raw_spinlock_t rx_lock;
void *rx_buf;
spinlock_t xmit_lock;
unsigned int xmit_cnt;
unsigned int xmit_head;
unsigned int xmit_tail;
struct completion xmit_empty;
};
/**
* struct mips_ejtag_fdc_tty - Driver data for FDC as a whole.
* @dev: FDC device (for dev_*() logging).
* @driver: TTY driver.
* @cpu: CPU number for this FDC.
* @fdc_name: FDC name (not for base of channel names).
* @driver_name: Base of driver name.
* @ports: Per-channel data.
* @waitqueue: Wait queue for waiting for TX data, or for space in TX
* FIFO.
* @lock: Lock to protect FDCFG (interrupt enable).
* @thread: KThread for writing out data to FDC.
* @reg: FDC registers.
* @tx_fifo: TX FIFO size.
* @xmit_size: Size of each port's xmit buffer.
* @xmit_total: Total number of bytes (from all ports) to transmit.
* @xmit_next: Next port number to transmit from (round robin).
* @xmit_full: Indicates TX FIFO is full, we're waiting for space.
* @irq: IRQ number (negative if no IRQ).
* @removing: Indicates the device is being removed and @poll_timer
* should not be restarted.
* @poll_timer: Timer for polling for interrupt events when @irq < 0.
* @sysrq_pressed: Whether the magic sysrq key combination has been
* detected. See mips_ejtag_fdc_handle().
*/
struct mips_ejtag_fdc_tty {
struct device *dev;
struct tty_driver *driver;
unsigned int cpu;
char fdc_name[16];
char driver_name[16];
struct mips_ejtag_fdc_tty_port ports[NUM_TTY_CHANNELS];
wait_queue_head_t waitqueue;
raw_spinlock_t lock;
struct task_struct *thread;
void __iomem *reg;
u8 tx_fifo;
unsigned int xmit_size;
atomic_t xmit_total;
unsigned int xmit_next;
bool xmit_full;
int irq;
bool removing;
struct timer_list poll_timer;
#ifdef CONFIG_MAGIC_SYSRQ
bool sysrq_pressed;
#endif
};
/* Hardware access */
static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv,
unsigned int offs, unsigned int data)
{
__raw_writel(data, priv->reg + offs);
}
static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv,
unsigned int offs)
{
return __raw_readl(priv->reg + offs);
}
/* Encoding of byte stream in FDC words */
/**
* struct fdc_word - FDC word encoding some number of bytes of data.
* @word: Raw FDC word.
* @bytes: Number of bytes encoded by @word.
*/
struct fdc_word {
u32 word;
unsigned int bytes;
};
/*
* This is a compact encoding which allows every 1 byte, 2 byte, and 3 byte
* sequence to be encoded in a single word, while allowing the majority of 4
* byte sequences (including all ASCII and common binary data) to be encoded in
* a single word too.
* _______________________ _____________
* | FDC Word | |
* |31-24|23-16|15-8 | 7-0 | Bytes |
* |_____|_____|_____|_____|_____________|
* | | | | | |
* |0x80 |0x80 |0x80 | WW | WW |
* |0x81 |0x81 | XX | WW | WW XX |
* |0x82 | YY | XX | WW | WW XX YY |
* | ZZ | YY | XX | WW | WW XX YY ZZ |
* |_____|_____|_____|_____|_____________|
*
* Note that the 4-byte encoding can only be used where none of the other 3
* encodings match, otherwise it must fall back to the 3 byte encoding.
*/
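/*
 * Worked example (from the table above): the two byte sequence 0x41 0x42
 * ("AB") becomes the single word 0x81814241, and mips_ejtag_fdc_decode()
 * below recovers both bytes by matching the 0x8181 marker in bits 31-16.
 */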
/* ranges >= 1 && sizes[0] >= 1 */
static struct fdc_word mips_ejtag_fdc_encode(const char **ptrs,
unsigned int *sizes,
unsigned int ranges)
{
struct fdc_word word = { 0, 0 };
const char **ptrs_end = ptrs + ranges;
for (; ptrs < ptrs_end; ++ptrs) {
		const char *ptr = *ptrs;	/* the for loop advances ptrs */
const char *end = ptr + *(sizes++);
for (; ptr < end; ++ptr) {
word.word |= (u8)*ptr << (8*word.bytes);
++word.bytes;
if (word.bytes == 4)
goto done;
}
}
done:
/* Choose the appropriate encoding */
switch (word.bytes) {
case 4:
/* 4 byte encoding, but don't match the 1-3 byte encodings */
if ((word.word >> 8) != 0x808080 &&
(word.word >> 16) != 0x8181 &&
(word.word >> 24) != 0x82)
break;
/* Fall back to a 3 byte encoding */
word.bytes = 3;
word.word &= 0x00ffffff;
fallthrough;
case 3:
/* 3 byte encoding */
word.word |= 0x82000000;
break;
case 2:
/* 2 byte encoding */
word.word |= 0x81810000;
break;
case 1:
/* 1 byte encoding */
word.word |= 0x80808000;
break;
}
return word;
}
static unsigned int mips_ejtag_fdc_decode(u32 word, char *buf)
{
buf[0] = (u8)word;
word >>= 8;
if (word == 0x808080)
return 1;
buf[1] = (u8)word;
word >>= 8;
if (word == 0x8181)
return 2;
buf[2] = (u8)word;
word >>= 8;
if (word == 0x82)
return 3;
buf[3] = (u8)word;
return 4;
}
/* Console operations */
/**
* struct mips_ejtag_fdc_console - Wrapper struct for FDC consoles.
* @cons: Console object.
* @tty_drv: TTY driver associated with this console.
* @lock: Lock to protect concurrent access to other fields.
* This is raw because it may be used very early.
* @initialised: Whether the console is initialised.
* @regs: Registers base address for each CPU.
*/
struct mips_ejtag_fdc_console {
struct console cons;
struct tty_driver *tty_drv;
raw_spinlock_t lock;
bool initialised;
void __iomem *regs[NR_CPUS];
};
/* Low level console write shared by early console and normal console */
static void mips_ejtag_fdc_console_write(struct console *c, const char *s,
unsigned int count)
{
struct mips_ejtag_fdc_console *cons =
container_of(c, struct mips_ejtag_fdc_console, cons);
void __iomem *regs;
struct fdc_word word;
unsigned long flags;
unsigned int i, buf_len, cpu;
bool done_cr = false;
char buf[4];
const char *buf_ptr = buf;
/* Number of bytes of input data encoded up to each byte in buf */
u8 inc[4];
local_irq_save(flags);
cpu = smp_processor_id();
regs = cons->regs[cpu];
/* First console output on this CPU? */
if (!regs) {
regs = mips_cdmm_early_probe(0xfd);
cons->regs[cpu] = regs;
}
/* Already tried and failed to find FDC on this CPU? */
if (IS_ERR(regs))
goto out;
while (count) {
/*
* Copy the next few characters to a buffer so we can inject
* carriage returns before newlines.
*/
for (buf_len = 0, i = 0; buf_len < 4 && i < count; ++buf_len) {
if (s[i] == '\n' && !done_cr) {
buf[buf_len] = '\r';
done_cr = true;
} else {
buf[buf_len] = s[i];
done_cr = false;
++i;
}
inc[buf_len] = i;
}
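		/*
		 * word.bytes is how many buffered chars get encoded; inc[]
		 * maps that back to how far to advance in the input string.
		 */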
word = mips_ejtag_fdc_encode(&buf_ptr, &buf_len, 1);
count -= inc[word.bytes - 1];
s += inc[word.bytes - 1];
/* Busy wait until there's space in fifo */
while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
;
__raw_writel(word.word, regs + REG_FDTX(c->index));
}
out:
local_irq_restore(flags);
}
static struct tty_driver *mips_ejtag_fdc_console_device(struct console *c,
int *index)
{
struct mips_ejtag_fdc_console *cons =
container_of(c, struct mips_ejtag_fdc_console, cons);
*index = c->index;
return cons->tty_drv;
}
/* Initialise an FDC console (early or normal) */
static int __init mips_ejtag_fdc_console_init(struct mips_ejtag_fdc_console *c)
{
void __iomem *regs;
unsigned long flags;
int ret = 0;
raw_spin_lock_irqsave(&c->lock, flags);
/* Don't init twice */
if (c->initialised)
goto out;
/* Look for the FDC device */
regs = mips_cdmm_early_probe(0xfd);
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
goto out;
}
c->initialised = true;
c->regs[smp_processor_id()] = regs;
register_console(&c->cons);
out:
raw_spin_unlock_irqrestore(&c->lock, flags);
return ret;
}
static struct mips_ejtag_fdc_console mips_ejtag_fdc_con = {
.cons = {
.name = "fdc",
.write = mips_ejtag_fdc_console_write,
.device = mips_ejtag_fdc_console_device,
.flags = CON_PRINTBUFFER,
.index = -1,
},
.lock = __RAW_SPIN_LOCK_UNLOCKED(mips_ejtag_fdc_con.lock),
};
/* TTY RX/TX operations */
/**
* mips_ejtag_fdc_put_chan() - Write out a block of channel data.
* @priv: Pointer to driver private data.
* @chan: Channel number.
*
 * Write a single block of channel data out to the debug adapter. If the
 * circular buffer has wrapped, only the leading block up to the wrap point
 * is attempted in this call.
*
* Returns: The number of bytes that were written.
*/
static unsigned int mips_ejtag_fdc_put_chan(struct mips_ejtag_fdc_tty *priv,
unsigned int chan)
{
struct mips_ejtag_fdc_tty_port *dport;
struct tty_struct *tty;
const char *ptrs[2];
unsigned int sizes[2] = { 0 };
struct fdc_word word = { .bytes = 0 };
unsigned long flags;
dport = &priv->ports[chan];
spin_lock(&dport->xmit_lock);
if (dport->xmit_cnt) {
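		/* split the circular xmit buffer into up to two linear ranges */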
ptrs[0] = dport->port.xmit_buf + dport->xmit_tail;
sizes[0] = min_t(unsigned int,
priv->xmit_size - dport->xmit_tail,
dport->xmit_cnt);
ptrs[1] = dport->port.xmit_buf;
sizes[1] = dport->xmit_cnt - sizes[0];
word = mips_ejtag_fdc_encode(ptrs, sizes, 1 + !!sizes[1]);
dev_dbg(priv->dev, "%s%u: out %08x: \"%*pE%*pE\"\n",
priv->driver_name, chan, word.word,
min_t(int, word.bytes, sizes[0]), ptrs[0],
max_t(int, 0, word.bytes - sizes[0]), ptrs[1]);
local_irq_save(flags);
/* Maybe we raced with the console and TX FIFO is full */
if (mips_ejtag_fdc_read(priv, REG_FDSTAT) & REG_FDSTAT_TXF)
word.bytes = 0;
else
mips_ejtag_fdc_write(priv, REG_FDTX(chan), word.word);
local_irq_restore(flags);
dport->xmit_cnt -= word.bytes;
if (!dport->xmit_cnt) {
/* Reset pointers to avoid wraps */
dport->xmit_head = 0;
dport->xmit_tail = 0;
complete(&dport->xmit_empty);
} else {
dport->xmit_tail += word.bytes;
if (dport->xmit_tail >= priv->xmit_size)
dport->xmit_tail -= priv->xmit_size;
}
atomic_sub(word.bytes, &priv->xmit_total);
}
spin_unlock(&dport->xmit_lock);
/* If we've made more data available, wake up tty */
if (sizes[0] && word.bytes) {
tty = tty_port_tty_get(&dport->port);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
}
}
return word.bytes;
}
/**
* mips_ejtag_fdc_put() - Kernel thread to write out channel data to FDC.
* @arg: Driver pointer.
*
* This kernel thread runs while @priv->xmit_total != 0, and round robins the
* channels writing out blocks of buffered data to the FDC TX FIFO.
*/
static int mips_ejtag_fdc_put(void *arg)
{
struct mips_ejtag_fdc_tty *priv = arg;
struct mips_ejtag_fdc_tty_port *dport;
unsigned int ret;
u32 cfg;
__set_current_state(TASK_RUNNING);
while (!kthread_should_stop()) {
/* Wait for data to actually write */
wait_event_interruptible(priv->waitqueue,
atomic_read(&priv->xmit_total) ||
kthread_should_stop());
if (kthread_should_stop())
break;
/* Wait for TX FIFO space to write data */
raw_spin_lock_irq(&priv->lock);
if (mips_ejtag_fdc_read(priv, REG_FDSTAT) & REG_FDSTAT_TXF) {
priv->xmit_full = true;
if (priv->irq >= 0) {
/* Enable TX interrupt */
cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
cfg &= ~REG_FDCFG_TXINTTHRES;
cfg |= REG_FDCFG_TXINTTHRES_NOTFULL;
mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
}
}
raw_spin_unlock_irq(&priv->lock);
wait_event_interruptible(priv->waitqueue,
!(mips_ejtag_fdc_read(priv, REG_FDSTAT)
& REG_FDSTAT_TXF) ||
kthread_should_stop());
if (kthread_should_stop())
break;
/* Find next channel with data to output */
for (;;) {
dport = &priv->ports[priv->xmit_next];
spin_lock(&dport->xmit_lock);
ret = dport->xmit_cnt;
spin_unlock(&dport->xmit_lock);
if (ret)
break;
/* Round robin */
++priv->xmit_next;
if (priv->xmit_next >= NUM_TTY_CHANNELS)
priv->xmit_next = 0;
}
/* Try writing data to the chosen channel */
ret = mips_ejtag_fdc_put_chan(priv, priv->xmit_next);
/*
* If anything was output, move on to the next channel so as not
* to starve other channels.
*/
if (ret) {
++priv->xmit_next;
if (priv->xmit_next >= NUM_TTY_CHANNELS)
priv->xmit_next = 0;
}
}
return 0;
}
/**
* mips_ejtag_fdc_handle() - Handle FDC events.
* @priv: Pointer to driver private data.
*
* Handle FDC events, such as new incoming data which needs draining out of the
* RX FIFO and feeding into the appropriate TTY ports, and space becoming
* available in the TX FIFO which would allow more data to be written out.
*/
static void mips_ejtag_fdc_handle(struct mips_ejtag_fdc_tty *priv)
{
struct mips_ejtag_fdc_tty_port *dport;
unsigned int stat, channel, data, cfg, i, flipped;
int len;
char buf[4];
for (;;) {
/* Find which channel the next FDC word is destined for */
stat = mips_ejtag_fdc_read(priv, REG_FDSTAT);
if (stat & REG_FDSTAT_RXE)
break;
channel = (stat & REG_FDSTAT_RXCHAN) >> REG_FDSTAT_RXCHAN_SHIFT;
dport = &priv->ports[channel];
/* Read out the FDC word, decode it, and pass to tty layer */
raw_spin_lock(&dport->rx_lock);
data = mips_ejtag_fdc_read(priv, REG_FDRX);
len = mips_ejtag_fdc_decode(data, buf);
dev_dbg(priv->dev, "%s%u: in %08x: \"%*pE\"\n",
priv->driver_name, channel, data, len, buf);
flipped = 0;
for (i = 0; i < len; ++i) {
#ifdef CONFIG_MAGIC_SYSRQ
#ifdef CONFIG_MIPS_EJTAG_FDC_KGDB
/* Support just Ctrl+C with KGDB channel */
if (channel == CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN) {
if (buf[i] == '\x03') { /* ^C */
handle_sysrq('g');
continue;
}
}
#endif
/* Support Ctrl+O for console channel */
if (channel == mips_ejtag_fdc_con.cons.index) {
if (buf[i] == '\x0f') { /* ^O */
priv->sysrq_pressed =
!priv->sysrq_pressed;
if (priv->sysrq_pressed)
continue;
} else if (priv->sysrq_pressed) {
handle_sysrq(buf[i]);
priv->sysrq_pressed = false;
continue;
}
}
#endif /* CONFIG_MAGIC_SYSRQ */
/* Check the port isn't being shut down */
if (!dport->rx_buf)
continue;
flipped += tty_insert_flip_char(&dport->port, buf[i],
TTY_NORMAL);
}
if (flipped)
tty_flip_buffer_push(&dport->port);
raw_spin_unlock(&dport->rx_lock);
}
/* If TX FIFO no longer full we may be able to write more data */
raw_spin_lock(&priv->lock);
if (priv->xmit_full && !(stat & REG_FDSTAT_TXF)) {
priv->xmit_full = false;
/* Disable TX interrupt */
cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
cfg &= ~REG_FDCFG_TXINTTHRES;
cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
		/* Wake the kthread so it can try writing more data */
wake_up_interruptible(&priv->waitqueue);
}
raw_spin_unlock(&priv->lock);
}
/**
* mips_ejtag_fdc_isr() - Interrupt handler.
* @irq: IRQ number.
* @dev_id: Pointer to driver private data.
*
* This is the interrupt handler, used when interrupts are enabled.
*
* It simply triggers the common FDC handler code.
*
* Returns: IRQ_HANDLED if an FDC interrupt was pending.
* IRQ_NONE otherwise.
*/
static irqreturn_t mips_ejtag_fdc_isr(int irq, void *dev_id)
{
struct mips_ejtag_fdc_tty *priv = dev_id;
/*
* We're not using proper per-cpu IRQs, so we must be careful not to
* handle IRQs on CPUs we're not interested in.
*
* Ideally proper per-cpu IRQ handlers could be used, but that doesn't
* fit well with the whole sharing of the main CPU IRQ lines. When we
* have something with a GIC that routes the FDC IRQs (i.e. no sharing
* between handlers) then support could be added more easily.
*/
if (smp_processor_id() != priv->cpu)
return IRQ_NONE;
/* If no FDC interrupt pending, it wasn't for us */
if (!(read_c0_cause() & CAUSEF_FDCI))
return IRQ_NONE;
mips_ejtag_fdc_handle(priv);
return IRQ_HANDLED;
}
/**
* mips_ejtag_fdc_tty_timer() - Poll FDC for incoming data.
 * @t: Pointer to the poll timer embedded in the driver private data.
*
* This is the timer handler for when interrupts are disabled and polling the
* FDC state is required.
*
* It simply triggers the common FDC handler code and arranges for further
* polling.
*/
static void mips_ejtag_fdc_tty_timer(struct timer_list *t)
{
struct mips_ejtag_fdc_tty *priv = from_timer(priv, t, poll_timer);
mips_ejtag_fdc_handle(priv);
if (!priv->removing)
mod_timer(&priv->poll_timer, jiffies + FDC_TTY_POLL);
}
/* TTY Port operations */
static int mips_ejtag_fdc_tty_port_activate(struct tty_port *port,
struct tty_struct *tty)
{
struct mips_ejtag_fdc_tty_port *dport =
container_of(port, struct mips_ejtag_fdc_tty_port, port);
void *rx_buf;
/* Allocate the buffer we use for writing data */
if (tty_port_alloc_xmit_buf(port) < 0)
goto err;
/* Allocate the buffer we use for reading data */
rx_buf = kzalloc(RX_BUF_SIZE, GFP_KERNEL);
if (!rx_buf)
goto err_free_xmit;
raw_spin_lock_irq(&dport->rx_lock);
dport->rx_buf = rx_buf;
raw_spin_unlock_irq(&dport->rx_lock);
return 0;
err_free_xmit:
tty_port_free_xmit_buf(port);
err:
return -ENOMEM;
}
static void mips_ejtag_fdc_tty_port_shutdown(struct tty_port *port)
{
struct mips_ejtag_fdc_tty_port *dport =
container_of(port, struct mips_ejtag_fdc_tty_port, port);
struct mips_ejtag_fdc_tty *priv = dport->driver;
void *rx_buf;
unsigned int count;
spin_lock(&dport->xmit_lock);
count = dport->xmit_cnt;
spin_unlock(&dport->xmit_lock);
if (count) {
/*
* There's still data to write out, so wake and wait for the
* writer thread to drain the buffer.
*/
wake_up_interruptible(&priv->waitqueue);
wait_for_completion(&dport->xmit_empty);
}
/* Null the read buffer (timer could still be running!) */
raw_spin_lock_irq(&dport->rx_lock);
rx_buf = dport->rx_buf;
dport->rx_buf = NULL;
raw_spin_unlock_irq(&dport->rx_lock);
/* Free the read buffer */
kfree(rx_buf);
/* Free the write buffer */
tty_port_free_xmit_buf(port);
}
static const struct tty_port_operations mips_ejtag_fdc_tty_port_ops = {
.activate = mips_ejtag_fdc_tty_port_activate,
.shutdown = mips_ejtag_fdc_tty_port_shutdown,
};
/* TTY operations */
static int mips_ejtag_fdc_tty_install(struct tty_driver *driver,
struct tty_struct *tty)
{
struct mips_ejtag_fdc_tty *priv = driver->driver_state;
tty->driver_data = &priv->ports[tty->index];
return tty_port_install(&priv->ports[tty->index].port, driver, tty);
}
static int mips_ejtag_fdc_tty_open(struct tty_struct *tty, struct file *filp)
{
return tty_port_open(tty->port, tty, filp);
}
static void mips_ejtag_fdc_tty_close(struct tty_struct *tty, struct file *filp)
{
return tty_port_close(tty->port, tty, filp);
}
static void mips_ejtag_fdc_tty_hangup(struct tty_struct *tty)
{
struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
struct mips_ejtag_fdc_tty *priv = dport->driver;
/* Drop any data in the xmit buffer */
spin_lock(&dport->xmit_lock);
if (dport->xmit_cnt) {
atomic_sub(dport->xmit_cnt, &priv->xmit_total);
dport->xmit_cnt = 0;
dport->xmit_head = 0;
dport->xmit_tail = 0;
complete(&dport->xmit_empty);
}
spin_unlock(&dport->xmit_lock);
tty_port_hangup(tty->port);
}
static ssize_t mips_ejtag_fdc_tty_write(struct tty_struct *tty, const u8 *buf,
size_t total)
{
int count, block;
struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
struct mips_ejtag_fdc_tty *priv = dport->driver;
/*
* Write to output buffer.
*
* The reason that we asynchronously write the buffer is because if we
* were to write the buffer synchronously then because the channels are
* per-CPU the buffer would be written to the channel of whatever CPU
* we're running on.
*
* What we actually want to happen is have all input and output done on
* one CPU.
*/
spin_lock(&dport->xmit_lock);
/* Work out how many bytes we can write to the xmit buffer */
total = min_t(size_t, total, priv->xmit_size - dport->xmit_cnt);
atomic_add(total, &priv->xmit_total);
dport->xmit_cnt += total;
/* Write the actual bytes (may need splitting if it wraps) */
for (count = total; count; count -= block) {
block = min(count, (int)(priv->xmit_size - dport->xmit_head));
memcpy(dport->port.xmit_buf + dport->xmit_head, buf, block);
dport->xmit_head += block;
if (dport->xmit_head >= priv->xmit_size)
dport->xmit_head -= priv->xmit_size;
buf += block;
}
count = dport->xmit_cnt;
/* Xmit buffer no longer empty? */
if (count)
reinit_completion(&dport->xmit_empty);
spin_unlock(&dport->xmit_lock);
/* Wake up the kthread */
if (total)
wake_up_interruptible(&priv->waitqueue);
return total;
}
static unsigned int mips_ejtag_fdc_tty_write_room(struct tty_struct *tty)
{
struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
struct mips_ejtag_fdc_tty *priv = dport->driver;
unsigned int room;
/* Report the space in the xmit buffer */
spin_lock(&dport->xmit_lock);
room = priv->xmit_size - dport->xmit_cnt;
spin_unlock(&dport->xmit_lock);
return room;
}
static unsigned int mips_ejtag_fdc_tty_chars_in_buffer(struct tty_struct *tty)
{
struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
unsigned int chars;
/* Report the number of bytes in the xmit buffer */
spin_lock(&dport->xmit_lock);
chars = dport->xmit_cnt;
spin_unlock(&dport->xmit_lock);
return chars;
}
static const struct tty_operations mips_ejtag_fdc_tty_ops = {
.install = mips_ejtag_fdc_tty_install,
.open = mips_ejtag_fdc_tty_open,
.close = mips_ejtag_fdc_tty_close,
.hangup = mips_ejtag_fdc_tty_hangup,
.write = mips_ejtag_fdc_tty_write,
.write_room = mips_ejtag_fdc_tty_write_room,
.chars_in_buffer = mips_ejtag_fdc_tty_chars_in_buffer,
};
int __weak get_c0_fdc_int(void)
{
return -1;
}
static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
{
int ret, nport;
struct mips_ejtag_fdc_tty_port *dport;
struct mips_ejtag_fdc_tty *priv;
struct tty_driver *driver;
unsigned int cfg, tx_fifo;
priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->cpu = dev->cpu;
priv->dev = &dev->dev;
mips_cdmm_set_drvdata(dev, priv);
atomic_set(&priv->xmit_total, 0);
raw_spin_lock_init(&priv->lock);
priv->reg = devm_ioremap(priv->dev, dev->res.start,
resource_size(&dev->res));
if (!priv->reg) {
dev_err(priv->dev, "ioremap failed for resource %pR\n",
&dev->res);
return -ENOMEM;
}
cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
tx_fifo = (cfg & REG_FDCFG_TXFIFOSIZE) >> REG_FDCFG_TXFIFOSIZE_SHIFT;
/* Disable interrupts */
cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
/* Make each port's xmit FIFO big enough to fill FDC TX FIFO */
priv->xmit_size = min(tx_fifo * 4, (unsigned int)UART_XMIT_SIZE);
driver = tty_alloc_driver(NUM_TTY_CHANNELS, TTY_DRIVER_REAL_RAW);
if (IS_ERR(driver))
return PTR_ERR(driver);
priv->driver = driver;
driver->driver_name = "ejtag_fdc";
snprintf(priv->fdc_name, sizeof(priv->fdc_name), "ttyFDC%u", dev->cpu);
snprintf(priv->driver_name, sizeof(priv->driver_name), "%sc",
priv->fdc_name);
driver->name = priv->driver_name;
driver->major = 0; /* Auto-allocate */
driver->minor_start = 0;
driver->type = TTY_DRIVER_TYPE_SERIAL;
driver->subtype = SERIAL_TYPE_NORMAL;
driver->init_termios = tty_std_termios;
driver->init_termios.c_cflag |= CLOCAL;
driver->driver_state = priv;
tty_set_operations(driver, &mips_ejtag_fdc_tty_ops);
for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
dport = &priv->ports[nport];
dport->driver = priv;
tty_port_init(&dport->port);
dport->port.ops = &mips_ejtag_fdc_tty_port_ops;
raw_spin_lock_init(&dport->rx_lock);
spin_lock_init(&dport->xmit_lock);
/* The xmit buffer starts empty, i.e. completely written */
init_completion(&dport->xmit_empty);
complete(&dport->xmit_empty);
}
/* Set up the console */
mips_ejtag_fdc_con.regs[dev->cpu] = priv->reg;
if (dev->cpu == 0)
mips_ejtag_fdc_con.tty_drv = driver;
init_waitqueue_head(&priv->waitqueue);
/*
* Bind the writer thread to the right CPU so it can't migrate.
* The channels are per-CPU and we want all channel I/O to be on a
* single predictable CPU.
*/
priv->thread = kthread_run_on_cpu(mips_ejtag_fdc_put, priv,
dev->cpu, "ttyFDC/%u");
if (IS_ERR(priv->thread)) {
ret = PTR_ERR(priv->thread);
dev_err(priv->dev, "Couldn't create kthread (%d)\n", ret);
goto err_destroy_ports;
}
/* Look for an FDC IRQ */
priv->irq = get_c0_fdc_int();
/* Try requesting the IRQ */
if (priv->irq >= 0) {
/*
* IRQF_SHARED, IRQF_COND_SUSPEND: The FDC IRQ may be shared with
* other local interrupts such as the timer which sets
* IRQF_TIMER (including IRQF_NO_SUSPEND).
*
* IRQF_NO_THREAD: The FDC IRQ isn't individually maskable so it
* cannot be deferred and handled by a thread on RT kernels. For
* this reason any spinlocks used from the ISR are raw.
*/
ret = devm_request_irq(priv->dev, priv->irq, mips_ejtag_fdc_isr,
IRQF_PERCPU | IRQF_SHARED |
IRQF_NO_THREAD | IRQF_COND_SUSPEND,
priv->fdc_name, priv);
if (ret)
priv->irq = -1;
}
if (priv->irq >= 0) {
/* IRQ is usable, enable RX interrupt */
raw_spin_lock_irq(&priv->lock);
cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
cfg &= ~REG_FDCFG_RXINTTHRES;
cfg |= REG_FDCFG_RXINTTHRES_NOTEMPTY;
mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
raw_spin_unlock_irq(&priv->lock);
} else {
		/* If we didn't get a usable IRQ, poll instead */
timer_setup(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
TIMER_PINNED);
priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
/*
* Always attach the timer to the right CPU. The channels are
* per-CPU so all polling should be from a single CPU.
*/
add_timer_on(&priv->poll_timer, dev->cpu);
dev_info(priv->dev, "No usable IRQ, polling enabled\n");
}
ret = tty_register_driver(driver);
if (ret < 0) {
dev_err(priv->dev, "Couldn't install tty driver (%d)\n", ret);
goto err_stop_irq;
}
return 0;
err_stop_irq:
if (priv->irq >= 0) {
raw_spin_lock_irq(&priv->lock);
cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
/* Disable interrupts */
cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
raw_spin_unlock_irq(&priv->lock);
} else {
priv->removing = true;
del_timer_sync(&priv->poll_timer);
}
kthread_stop(priv->thread);
err_destroy_ports:
if (dev->cpu == 0)
mips_ejtag_fdc_con.tty_drv = NULL;
for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
dport = &priv->ports[nport];
tty_port_destroy(&dport->port);
}
tty_driver_kref_put(priv->driver);
return ret;
}
static int mips_ejtag_fdc_tty_cpu_down(struct mips_cdmm_device *dev)
{
struct mips_ejtag_fdc_tty *priv = mips_cdmm_get_drvdata(dev);
unsigned int cfg;
if (priv->irq >= 0) {
raw_spin_lock_irq(&priv->lock);
cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
/* Disable interrupts */
cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
raw_spin_unlock_irq(&priv->lock);
} else {
priv->removing = true;
del_timer_sync(&priv->poll_timer);
}
kthread_stop(priv->thread);
return 0;
}
static int mips_ejtag_fdc_tty_cpu_up(struct mips_cdmm_device *dev)
{
struct mips_ejtag_fdc_tty *priv = mips_cdmm_get_drvdata(dev);
unsigned int cfg;
int ret = 0;
if (priv->irq >= 0) {
/*
* IRQ is usable, enable RX interrupt
* This must be before kthread is restarted, as kthread may
* enable TX interrupt.
*/
raw_spin_lock_irq(&priv->lock);
cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
cfg |= REG_FDCFG_RXINTTHRES_NOTEMPTY;
mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
raw_spin_unlock_irq(&priv->lock);
} else {
/* Restart poll timer */
priv->removing = false;
add_timer_on(&priv->poll_timer, dev->cpu);
}
/* Restart the kthread */
/* Bind it back to the right CPU and set it off */
priv->thread = kthread_run_on_cpu(mips_ejtag_fdc_put, priv,
dev->cpu, "ttyFDC/%u");
if (IS_ERR(priv->thread)) {
ret = PTR_ERR(priv->thread);
dev_err(priv->dev, "Couldn't re-create kthread (%d)\n", ret);
goto out;
}
out:
return ret;
}
static const struct mips_cdmm_device_id mips_ejtag_fdc_tty_ids[] = {
{ .type = 0xfd },
{ }
};
static struct mips_cdmm_driver mips_ejtag_fdc_tty_driver = {
.drv = {
.name = "mips_ejtag_fdc",
},
.probe = mips_ejtag_fdc_tty_probe,
.cpu_down = mips_ejtag_fdc_tty_cpu_down,
.cpu_up = mips_ejtag_fdc_tty_cpu_up,
.id_table = mips_ejtag_fdc_tty_ids,
};
builtin_mips_cdmm_driver(mips_ejtag_fdc_tty_driver);
static int __init mips_ejtag_fdc_init_console(void)
{
return mips_ejtag_fdc_console_init(&mips_ejtag_fdc_con);
}
console_initcall(mips_ejtag_fdc_init_console);
#ifdef CONFIG_MIPS_EJTAG_FDC_EARLYCON
static struct mips_ejtag_fdc_console mips_ejtag_fdc_earlycon = {
.cons = {
.name = "early_fdc",
.write = mips_ejtag_fdc_console_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = CONSOLE_CHANNEL,
},
.lock = __RAW_SPIN_LOCK_UNLOCKED(mips_ejtag_fdc_earlycon.lock),
};
int __init setup_early_fdc_console(void)
{
return mips_ejtag_fdc_console_init(&mips_ejtag_fdc_earlycon);
}
#endif
#ifdef CONFIG_MIPS_EJTAG_FDC_KGDB
/* read buffer to allow decompaction */
static unsigned int kgdbfdc_rbuflen;
static unsigned int kgdbfdc_rpos;
static char kgdbfdc_rbuf[4];
/* write buffer to allow compaction */
static unsigned int kgdbfdc_wbuflen;
static char kgdbfdc_wbuf[4];
static void __iomem *kgdbfdc_setup(void)
{
void __iomem *regs;
unsigned int cpu;
/* Find address, piggy backing off console percpu regs */
cpu = smp_processor_id();
regs = mips_ejtag_fdc_con.regs[cpu];
/* First console output on this CPU? */
if (!regs) {
regs = mips_cdmm_early_probe(0xfd);
mips_ejtag_fdc_con.regs[cpu] = regs;
}
	return regs;
}
/* read a character from the read buffer, filling from FDC RX FIFO */
static int kgdbfdc_read_char(void)
{
unsigned int stat, channel, data;
void __iomem *regs;
/* No more data, try and read another FDC word from RX FIFO */
if (kgdbfdc_rpos >= kgdbfdc_rbuflen) {
kgdbfdc_rpos = 0;
kgdbfdc_rbuflen = 0;
regs = kgdbfdc_setup();
if (IS_ERR(regs))
return NO_POLL_CHAR;
/* Read next word from KGDB channel */
do {
stat = __raw_readl(regs + REG_FDSTAT);
/* No data waiting? */
if (stat & REG_FDSTAT_RXE)
return NO_POLL_CHAR;
/* Read next word */
channel = (stat & REG_FDSTAT_RXCHAN) >>
REG_FDSTAT_RXCHAN_SHIFT;
data = __raw_readl(regs + REG_FDRX);
} while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN);
/* Decode into rbuf */
kgdbfdc_rbuflen = mips_ejtag_fdc_decode(data, kgdbfdc_rbuf);
}
pr_devel("kgdbfdc r %c\n", kgdbfdc_rbuf[kgdbfdc_rpos]);
return kgdbfdc_rbuf[kgdbfdc_rpos++];
}
/* push an FDC word from write buffer to TX FIFO */
static void kgdbfdc_push_one(void)
{
const char *bufs[1] = { kgdbfdc_wbuf };
struct fdc_word word;
void __iomem *regs;
unsigned int i;
/* Construct a word from any data in buffer */
word = mips_ejtag_fdc_encode(bufs, &kgdbfdc_wbuflen, 1);
/* Relocate any remaining data to beginning of buffer */
kgdbfdc_wbuflen -= word.bytes;
for (i = 0; i < kgdbfdc_wbuflen; ++i)
kgdbfdc_wbuf[i] = kgdbfdc_wbuf[i + word.bytes];
regs = kgdbfdc_setup();
if (IS_ERR(regs))
return;
/* Busy wait until there's space in fifo */
while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
;
__raw_writel(word.word,
regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN));
}
/* flush the whole write buffer to the TX FIFO */
static void kgdbfdc_flush(void)
{
while (kgdbfdc_wbuflen)
kgdbfdc_push_one();
}
/* write a character into the write buffer, writing out if full */
static void kgdbfdc_write_char(u8 chr)
{
pr_devel("kgdbfdc w %c\n", chr);
kgdbfdc_wbuf[kgdbfdc_wbuflen++] = chr;
if (kgdbfdc_wbuflen >= sizeof(kgdbfdc_wbuf))
kgdbfdc_push_one();
}
static struct kgdb_io kgdbfdc_io_ops = {
.name = "kgdbfdc",
.read_char = kgdbfdc_read_char,
.write_char = kgdbfdc_write_char,
.flush = kgdbfdc_flush,
};
static int __init kgdbfdc_init(void)
{
kgdb_register_io_module(&kgdbfdc_io_ops);
return 0;
}
early_initcall(kgdbfdc_init);
#endif
| linux-master | drivers/tty/mips_ejtag_fdc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/export.h>
#include "tty.h"
/*
* Routine which returns the baud rate of the tty
*
* Note that the baud_table needs to be kept in sync with the
* include/asm/termbits.h file.
*/
static const speed_t baud_table[] = {
0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
4800, 9600, 19200, 38400, 57600, 115200, 230400, 460800,
#ifdef __sparc__
76800, 153600, 307200, 614400, 921600, 500000, 576000,
1000000, 1152000, 1500000, 2000000
#else
500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
2500000, 3000000, 3500000, 4000000
#endif
};
static const tcflag_t baud_bits[] = {
B0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400,
B4800, B9600, B19200, B38400, B57600, B115200, B230400, B460800,
#ifdef __sparc__
B76800, B153600, B307200, B614400, B921600, B500000, B576000,
B1000000, B1152000, B1500000, B2000000
#else
B500000, B576000, B921600, B1000000, B1152000, B1500000, B2000000,
B2500000, B3000000, B3500000, B4000000
#endif
};
static int n_baud_table = ARRAY_SIZE(baud_table);
/**
* tty_termios_baud_rate
* @termios: termios structure
*
* Convert termios baud rate data into a speed. This should be called
* with the termios lock held if this termios is a terminal termios
* structure. Device drivers can call this function but should use
* ->c_[io]speed directly as they are updated.
*
* Locking: none
*/
speed_t tty_termios_baud_rate(const struct ktermios *termios)
{
unsigned int cbaud;
cbaud = termios->c_cflag & CBAUD;
/* Magic token for arbitrary speed via c_ispeed/c_ospeed */
if (cbaud == BOTHER)
return termios->c_ospeed;
if (cbaud & CBAUDEX) {
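		/*
		 * Extended rates continue the table after the 16 classic
		 * entries, e.g. B57600, the first extended rate, maps to
		 * index 16, i.e. 57600 baud.
		 */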
cbaud &= ~CBAUDEX;
cbaud += 15;
}
return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}
EXPORT_SYMBOL(tty_termios_baud_rate);
/**
* tty_termios_input_baud_rate
* @termios: termios structure
*
* Convert termios baud rate data into a speed. This should be called
* with the termios lock held if this termios is a terminal termios
* structure. Device drivers can call this function but should use
* ->c_[io]speed directly as they are updated.
*
* Locking: none
*/
speed_t tty_termios_input_baud_rate(const struct ktermios *termios)
{
unsigned int cbaud = (termios->c_cflag >> IBSHIFT) & CBAUD;
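	/* an input field of B0 means "input rate = output rate" */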
if (cbaud == B0)
return tty_termios_baud_rate(termios);
/* Magic token for arbitrary speed via c_ispeed */
if (cbaud == BOTHER)
return termios->c_ispeed;
if (cbaud & CBAUDEX) {
cbaud &= ~CBAUDEX;
cbaud += 15;
}
return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}
EXPORT_SYMBOL(tty_termios_input_baud_rate);
/**
* tty_termios_encode_baud_rate
* @termios: ktermios structure holding user requested state
* @ibaud: input speed
* @obaud: output speed
*
* Encode the speeds set into the passed termios structure. This is
* used as a library helper for drivers so that they can report back
* the actual speed selected when it differs from the speed requested
*
* For maximal back compatibility with legacy SYS5/POSIX *nix behaviour
* we need to carefully set the bits when the user does not get the
 * desired speed. We allow small margins and preserve as much as possible
* of the input intent to keep compatibility.
*
* Locking: Caller should hold termios lock. This is already held
* when calling this function from the driver termios handler.
*
* The ifdefs deal with platforms whose owners have yet to update them
* and will all go away once this is done.
*/
void tty_termios_encode_baud_rate(struct ktermios *termios,
speed_t ibaud, speed_t obaud)
{
int i = 0;
int ifound = -1, ofound = -1;
int iclose = ibaud/50, oclose = obaud/50;
int ibinput = 0;
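	/* iclose/oclose: accept a table rate within 1/50th (2%) of the request */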
if (obaud == 0) /* CD dropped */
ibaud = 0; /* Clear ibaud to be sure */
termios->c_ispeed = ibaud;
termios->c_ospeed = obaud;
if (((termios->c_cflag >> IBSHIFT) & CBAUD) != B0)
ibinput = 1; /* An input speed was specified */
/* If the user asked for a precise weird speed give a precise weird
* answer. If they asked for a Bfoo speed they may have problems
* digesting non-exact replies so fuzz a bit.
*/
if ((termios->c_cflag & CBAUD) == BOTHER) {
oclose = 0;
if (!ibinput)
iclose = 0;
}
if (((termios->c_cflag >> IBSHIFT) & CBAUD) == BOTHER)
iclose = 0;
termios->c_cflag &= ~CBAUD;
termios->c_cflag &= ~(CBAUD << IBSHIFT);
/*
* Our goal is to find a close match to the standard baud rate
* returned. Walk the baud rate table and if we get a very close
* match then report back the speed as a POSIX Bxxxx value by
* preference
*/
do {
if (obaud - oclose <= baud_table[i] &&
obaud + oclose >= baud_table[i]) {
termios->c_cflag |= baud_bits[i];
ofound = i;
}
if (ibaud - iclose <= baud_table[i] &&
ibaud + iclose >= baud_table[i]) {
/* For the case input == output don't set IBAUD bits
* if the user didn't do so.
*/
if (ofound == i && !ibinput) {
ifound = i;
} else {
ifound = i;
termios->c_cflag |= (baud_bits[i] << IBSHIFT);
}
}
} while (++i < n_baud_table);
/* If we found no match then use BOTHER. */
if (ofound == -1)
termios->c_cflag |= BOTHER;
/* Set exact input bits only if the input and output differ or the
* user already did.
*/
if (ifound == -1 && (ibaud != obaud || ibinput))
termios->c_cflag |= (BOTHER << IBSHIFT);
}
EXPORT_SYMBOL_GPL(tty_termios_encode_baud_rate);
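/*
 * Typical driver usage -- an illustrative sketch with 8250-style divisor
 * math; "quot" and "uartclk" are assumed fields, not defined here:
 *
 *	baud = port->uartclk / (16 * quot);
 *	tty_termios_encode_baud_rate(termios, baud, baud);
 *
 * so the termios reports the rate actually achieved, not the one requested.
 */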
/**
* tty_encode_baud_rate - set baud rate of the tty
* @tty: terminal device
* @ibaud: input baud rate
* @obaud: output baud rate
*
* Update the current termios data for the tty with the new speed
* settings. The caller must hold the termios_rwsem for the tty in
* question.
*/
void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, speed_t obaud)
{
tty_termios_encode_baud_rate(&tty->termios, ibaud, obaud);
}
EXPORT_SYMBOL_GPL(tty_encode_baud_rate);
| linux-master | drivers/tty/tty_baudrate.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
* or rs-channels. It also implements echoing, cooked mode etc.
*
* Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
*
* Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
* tty_struct and tty_queue structures. Previously there was an array
* of 256 tty_struct's which was statically allocated, and the
* tty_queue structures were allocated at boot time. Both are now
* dynamically allocated only when the tty is open.
*
* Also restructured routines so that there is more of a separation
* between the high-level tty routines (tty_io.c and tty_ioctl.c) and
* the low-level tty routines (serial.c, pty.c, console.c). This
* makes for cleaner and more compact code. -TYT, 9/17/92
*
* Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
* which can be dynamically activated and de-activated by the line
* discipline handling modules (like SLIP).
*
* NOTE: pay no attention to the line discipline code (yet); its
* interface is still subject to change in this version...
* -- TYT, 1/31/92
*
* Added functionality to the OPOST tty handling. No delays, but all
* other bits should be there.
* -- Nick Holloway <[email protected]>, 27th May 1993.
*
* Rewrote canonical mode and added more termios flags.
* -- [email protected] (J. Cowley), 13Jan94
*
* Reorganized FASYNC support so mouse code can share it.
* -- [email protected], 9Sep95
*
* New TIOCLINUX variants added.
* -- [email protected], 19-Nov-95
*
* Restrict vt switching via ioctl()
* -- [email protected], 5-Dec-95
*
* Move console and virtual terminal code to more appropriate files,
* implement CONFIG_VT and generalize console device interface.
* -- Marko Kohtala <[email protected]>, March 97
*
* Rewrote tty_init_dev and tty_release_dev to eliminate races.
* -- Bill Hawes <[email protected]>, June 97
*
* Added devfs support.
* -- C. Scott Ananian <[email protected]>, 13-Jan-1998
*
* Added support for a Unix98-style ptmx device.
* -- C. Scott Ananian <[email protected]>, 14-Jan-1998
*
* Reduced memory usage for older ARM systems
* -- Russell King <[email protected]>
*
* Move do_SAK() into process context. Less stack use in devfs functions.
* alloc_tty_struct() always uses kmalloc()
* -- Andrew Morton <[email protected]> 17Mar01
*/
#include <linux/types.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/fcntl.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/devpts_fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/console.h>
#include <linux/timer.h>
#include <linux/ctype.h>
#include <linux/kd.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/ppp-ioctl.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/ratelimit.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/termios_internal.h>
#include <linux/fs.h>
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
#include <linux/selection.h>
#include <linux/kmod.h>
#include <linux/nsproxy.h>
#include "tty.h"
#undef TTY_DEBUG_HANGUP
#ifdef TTY_DEBUG_HANGUP
# define tty_debug_hangup(tty, f, args...) tty_debug(tty, f, ##args)
#else
# define tty_debug_hangup(tty, f, args...) do { } while (0)
#endif
#define TTY_PARANOIA_CHECK 1
#define CHECK_TTY_COUNT 1
struct ktermios tty_std_termios = { /* for the benefit of tty drivers */
.c_iflag = ICRNL | IXON,
.c_oflag = OPOST | ONLCR,
.c_cflag = B38400 | CS8 | CREAD | HUPCL,
.c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
ECHOCTL | ECHOKE | IEXTEN,
.c_cc = INIT_C_CC,
.c_ispeed = 38400,
.c_ospeed = 38400,
/* .c_line = N_TTY, */
};
EXPORT_SYMBOL(tty_std_termios);
/* This list gets poked at by procfs and various bits of boot up code. This
* could do with some rationalisation such as pulling the tty proc function
* into this file.
*/
LIST_HEAD(tty_drivers); /* linked list of tty drivers */
/* Mutex to protect creating and releasing a tty */
DEFINE_MUTEX(tty_mutex);
static ssize_t tty_read(struct kiocb *, struct iov_iter *);
static ssize_t tty_write(struct kiocb *, struct iov_iter *);
static __poll_t tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
#ifdef CONFIG_COMPAT
static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
#else
#define tty_compat_ioctl NULL
#endif
static int __tty_fasync(int fd, struct file *filp, int on);
static int tty_fasync(int fd, struct file *filp, int on);
static void release_tty(struct tty_struct *tty, int idx);
/**
* free_tty_struct - free a disused tty
* @tty: tty struct to free
*
* Free the write buffers, tty queue and tty memory itself.
*
* Locking: none. Must be called after tty is definitely unused
*/
static void free_tty_struct(struct tty_struct *tty)
{
tty_ldisc_deinit(tty);
put_device(tty->dev);
kvfree(tty->write_buf);
kfree(tty);
}
static inline struct tty_struct *file_tty(struct file *file)
{
return ((struct tty_file_private *)file->private_data)->tty;
}
int tty_alloc_file(struct file *file)
{
struct tty_file_private *priv;
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
file->private_data = priv;
return 0;
}
/* Associate a new file with the tty structure */
void tty_add_file(struct tty_struct *tty, struct file *file)
{
struct tty_file_private *priv = file->private_data;
priv->tty = tty;
priv->file = file;
spin_lock(&tty->files_lock);
list_add(&priv->list, &tty->tty_files);
spin_unlock(&tty->files_lock);
}
/**
* tty_free_file - free file->private_data
* @file: to free private_data of
*
* This shall be used only for fail path handling when tty_add_file was not
* called yet.
*/
void tty_free_file(struct file *file)
{
struct tty_file_private *priv = file->private_data;
file->private_data = NULL;
kfree(priv);
}
/* Delete file from its tty */
static void tty_del_file(struct file *file)
{
struct tty_file_private *priv = file->private_data;
struct tty_struct *tty = priv->tty;
spin_lock(&tty->files_lock);
list_del(&priv->list);
spin_unlock(&tty->files_lock);
tty_free_file(file);
}
/**
* tty_name - return tty naming
* @tty: tty structure
*
 * Convert a tty structure into a name. The name reflects the kernel naming
 * policy and, if udev is in use, may not match the names seen in user space.
*
* Locking: none
*/
const char *tty_name(const struct tty_struct *tty)
{
if (!tty) /* Hmm. NULL pointer. That's fun. */
return "NULL tty";
return tty->name;
}
EXPORT_SYMBOL(tty_name);
const char *tty_driver_name(const struct tty_struct *tty)
{
if (!tty || !tty->driver)
return "";
return tty->driver->name;
}
static int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
const char *routine)
{
#ifdef TTY_PARANOIA_CHECK
if (!tty) {
pr_warn("(%d:%d): %s: NULL tty\n",
imajor(inode), iminor(inode), routine);
return 1;
}
#endif
return 0;
}
/* Caller must hold tty_lock */
static void check_tty_count(struct tty_struct *tty, const char *routine)
{
#ifdef CHECK_TTY_COUNT
struct list_head *p;
int count = 0, kopen_count = 0;
spin_lock(&tty->files_lock);
list_for_each(p, &tty->tty_files) {
count++;
}
spin_unlock(&tty->files_lock);
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_SLAVE &&
tty->link && tty->link->count)
count++;
if (tty_port_kopened(tty->port))
kopen_count++;
if (tty->count != (count + kopen_count)) {
tty_warn(tty, "%s: tty->count(%d) != (#fd's(%d) + #kopen's(%d))\n",
routine, tty->count, count, kopen_count);
}
#endif
}
/**
* get_tty_driver - find device of a tty
* @device: device identifier
* @index: returns the index of the tty
*
 * This routine returns a tty driver structure, given a device number, and
 * also passes back the index number.
*
* Locking: caller must hold tty_mutex
*/
static struct tty_driver *get_tty_driver(dev_t device, int *index)
{
struct tty_driver *p;
list_for_each_entry(p, &tty_drivers, tty_drivers) {
dev_t base = MKDEV(p->major, p->minor_start);
if (device < base || device >= base + p->num)
continue;
*index = device - base;
return tty_driver_kref_get(p);
}
return NULL;
}
/**
* tty_dev_name_to_number - return dev_t for device name
* @name: user space name of device under /dev
* @number: pointer to dev_t that this function will populate
*
* This function converts device names like ttyS0 or ttyUSB1 into dev_t like
* (4, 64) or (188, 1). If no corresponding driver is registered then the
* function returns -%ENODEV.
*
* Locking: this acquires tty_mutex to protect the tty_drivers list from
* being modified while we are traversing it, and makes sure to
* release it before exiting.
*/
int tty_dev_name_to_number(const char *name, dev_t *number)
{
struct tty_driver *p;
int ret;
int index, prefix_length = 0;
const char *str;
for (str = name; *str && !isdigit(*str); str++)
;
if (!*str)
return -EINVAL;
ret = kstrtoint(str, 10, &index);
if (ret)
return ret;
prefix_length = str - name;
mutex_lock(&tty_mutex);
list_for_each_entry(p, &tty_drivers, tty_drivers)
if (prefix_length == strlen(p->name) && strncmp(name,
p->name, prefix_length) == 0) {
if (index < p->num) {
*number = MKDEV(p->major, p->minor_start + index);
goto out;
}
}
/* if here then driver wasn't found */
ret = -ENODEV;
out:
mutex_unlock(&tty_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(tty_dev_name_to_number);
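/*
 * Illustrative sketch: resolving a device name to a dev_t. The "ttyS0"
 * string is an assumption chosen for the example; -ENODEV comes back if no
 * matching driver is registered.
 */
static int __maybe_unused example_name_to_number(void)
{
	dev_t dev;
	int ret = tty_dev_name_to_number("ttyS0", &dev);

	if (ret)
		return ret;
	pr_info("ttyS0 is (%d, %d)\n", MAJOR(dev), MINOR(dev));
	return 0;
}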
#ifdef CONFIG_CONSOLE_POLL
/**
* tty_find_polling_driver - find device of a polled tty
* @name: name string to match
* @line: pointer to resulting tty line nr
*
* This routine returns a tty driver structure, given a name and the condition
* that the tty driver is capable of polled operation.
*/
struct tty_driver *tty_find_polling_driver(char *name, int *line)
{
struct tty_driver *p, *res = NULL;
int tty_line = 0;
int len;
char *str, *stp;
for (str = name; *str; str++)
if ((*str >= '0' && *str <= '9') || *str == ',')
break;
if (!*str)
return NULL;
len = str - name;
tty_line = simple_strtoul(str, &str, 10);
mutex_lock(&tty_mutex);
/* Search through the tty devices to look for a match */
list_for_each_entry(p, &tty_drivers, tty_drivers) {
if (!len || strncmp(name, p->name, len) != 0)
continue;
stp = str;
if (*stp == ',')
stp++;
if (*stp == '\0')
stp = NULL;
if (tty_line >= 0 && tty_line < p->num && p->ops &&
p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
res = tty_driver_kref_get(p);
*line = tty_line;
break;
}
}
mutex_unlock(&tty_mutex);
return res;
}
EXPORT_SYMBOL_GPL(tty_find_polling_driver);
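/*
 * Illustrative sketch: a kgdboc-style caller passes a mutable
 * "name,options" string; the line number is returned via the second
 * argument. The "ttyS0,115200" value is an assumption for the example.
 */
static struct tty_driver * __maybe_unused example_poll_lookup(int *line)
{
	char name[] = "ttyS0,115200";

	return tty_find_polling_driver(name, line);
}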
#endif
static ssize_t hung_up_tty_read(struct kiocb *iocb, struct iov_iter *to)
{
return 0;
}
static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
{
return -EIO;
}
/* No kernel lock held - none needed ;) */
static __poll_t hung_up_tty_poll(struct file *filp, poll_table *wait)
{
return EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLRDNORM | EPOLLWRNORM;
}
static long hung_up_tty_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
}
static long hung_up_tty_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
}
static int hung_up_tty_fasync(int fd, struct file *file, int on)
{
return -ENOTTY;
}
static void tty_show_fdinfo(struct seq_file *m, struct file *file)
{
struct tty_struct *tty = file_tty(file);
if (tty && tty->ops && tty->ops->show_fdinfo)
tty->ops->show_fdinfo(tty, m);
}
static const struct file_operations tty_fops = {
.llseek = no_llseek,
.read_iter = tty_read,
.write_iter = tty_write,
.splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
.open = tty_open,
.release = tty_release,
.fasync = tty_fasync,
.show_fdinfo = tty_show_fdinfo,
};
static const struct file_operations console_fops = {
.llseek = no_llseek,
.read_iter = tty_read,
.write_iter = redirected_tty_write,
.splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
.open = tty_open,
.release = tty_release,
.fasync = tty_fasync,
};
static const struct file_operations hung_up_tty_fops = {
.llseek = no_llseek,
.read_iter = hung_up_tty_read,
.write_iter = hung_up_tty_write,
.poll = hung_up_tty_poll,
.unlocked_ioctl = hung_up_tty_ioctl,
.compat_ioctl = hung_up_tty_compat_ioctl,
.release = tty_release,
.fasync = hung_up_tty_fasync,
};
static DEFINE_SPINLOCK(redirect_lock);
static struct file *redirect;
/**
* tty_wakeup - request more data
* @tty: terminal
*
* Internal and external helper for wakeups of tty. This function informs the
* line discipline if present that the driver is ready to receive more output
* data.
*/
void tty_wakeup(struct tty_struct *tty)
{
struct tty_ldisc *ld;
if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
ld = tty_ldisc_ref(tty);
if (ld) {
if (ld->ops->write_wakeup)
ld->ops->write_wakeup(tty);
tty_ldisc_deref(ld);
}
}
wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
}
EXPORT_SYMBOL_GPL(tty_wakeup);
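/*
 * Illustrative sketch: a low-level driver typically calls tty_wakeup()
 * from its transmit-done path once FIFO space is available again. Note
 * that the kref obtained by tty_port_tty_get() must be dropped.
 */
static void __maybe_unused example_tx_done(struct tty_port *port)
{
	struct tty_struct *tty = tty_port_tty_get(port);

	if (tty) {
		tty_wakeup(tty);
		tty_kref_put(tty);
	}
}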
/**
* tty_release_redirect - Release a redirect on a pty if present
* @tty: tty device
*
 * This is available to the pty code, so that when the master closes and the
 * slave is the current redirect, the redirect can be released.
*/
static struct file *tty_release_redirect(struct tty_struct *tty)
{
struct file *f = NULL;
spin_lock(&redirect_lock);
if (redirect && file_tty(redirect) == tty) {
f = redirect;
redirect = NULL;
}
spin_unlock(&redirect_lock);
return f;
}
/**
* __tty_hangup - actual handler for hangup events
* @tty: tty device
* @exit_session: if non-zero, signal all foreground group processes
*
* This can be called by a "kworker" kernel thread. That is process synchronous
* but doesn't hold any locks, so we need to make sure we have the appropriate
* locks for what we're doing.
*
* The hangup event clears any pending redirections onto the hung up device. It
* ensures future writes will error and it does the needed line discipline
* hangup and signal delivery. The tty object itself remains intact.
*
* Locking:
* * BTM
*
* * redirect lock for undoing redirection
* * file list lock for manipulating list of ttys
* * tty_ldiscs_lock from called functions
* * termios_rwsem resetting termios data
* * tasklist_lock to walk task list for hangup event
*
* * ->siglock to protect ->signal/->sighand
*
*/
static void __tty_hangup(struct tty_struct *tty, int exit_session)
{
struct file *cons_filp = NULL;
struct file *filp, *f;
struct tty_file_private *priv;
int closecount = 0, n;
int refs;
if (!tty)
return;
f = tty_release_redirect(tty);
tty_lock(tty);
if (test_bit(TTY_HUPPED, &tty->flags)) {
tty_unlock(tty);
return;
}
/*
* Some console devices aren't actually hung up for technical and
* historical reasons, which can lead to indefinite interruptible
* sleep in n_tty_read(). The following explicitly tells
* n_tty_read() to abort readers.
*/
set_bit(TTY_HUPPING, &tty->flags);
	/* inuse_filps is protected by the single tty lock; this really
	 * needs to change if we want to flush the workqueue with the
	 * lock held.
*/
check_tty_count(tty, "tty_hangup");
spin_lock(&tty->files_lock);
	/* This breaks for file handles being sent over AF_UNIX sockets? */
list_for_each_entry(priv, &tty->tty_files, list) {
filp = priv->file;
if (filp->f_op->write_iter == redirected_tty_write)
cons_filp = filp;
if (filp->f_op->write_iter != tty_write)
continue;
closecount++;
__tty_fasync(-1, filp, 0); /* can't block */
filp->f_op = &hung_up_tty_fops;
}
spin_unlock(&tty->files_lock);
refs = tty_signal_session_leader(tty, exit_session);
/* Account for the p->signal references we killed */
while (refs--)
tty_kref_put(tty);
tty_ldisc_hangup(tty, cons_filp != NULL);
spin_lock_irq(&tty->ctrl.lock);
clear_bit(TTY_THROTTLED, &tty->flags);
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
put_pid(tty->ctrl.session);
put_pid(tty->ctrl.pgrp);
tty->ctrl.session = NULL;
tty->ctrl.pgrp = NULL;
tty->ctrl.pktstatus = 0;
spin_unlock_irq(&tty->ctrl.lock);
/*
* If one of the devices matches a console pointer, we
* cannot just call hangup() because that will cause
* tty->count and state->count to go out of sync.
* So we just call close() the right number of times.
*/
if (cons_filp) {
if (tty->ops->close)
for (n = 0; n < closecount; n++)
tty->ops->close(tty, cons_filp);
} else if (tty->ops->hangup)
tty->ops->hangup(tty);
/*
* We don't want to have driver/ldisc interactions beyond the ones
* we did here. The driver layer expects no calls after ->hangup()
* from the ldisc side, which is now guaranteed.
*/
set_bit(TTY_HUPPED, &tty->flags);
clear_bit(TTY_HUPPING, &tty->flags);
tty_unlock(tty);
if (f)
fput(f);
}
static void do_tty_hangup(struct work_struct *work)
{
struct tty_struct *tty =
container_of(work, struct tty_struct, hangup_work);
__tty_hangup(tty, 0);
}
/**
* tty_hangup - trigger a hangup event
* @tty: tty to hangup
*
* A carrier loss (virtual or otherwise) has occurred on @tty. Schedule a
* hangup sequence to run after this event.
*/
void tty_hangup(struct tty_struct *tty)
{
tty_debug_hangup(tty, "hangup\n");
schedule_work(&tty->hangup_work);
}
EXPORT_SYMBOL(tty_hangup);
/**
* tty_vhangup - process vhangup
* @tty: tty to hangup
*
* The user has asked via system call for the terminal to be hung up. We do
* this synchronously so that when the syscall returns the process is complete.
* That guarantee is necessary for security reasons.
*/
void tty_vhangup(struct tty_struct *tty)
{
tty_debug_hangup(tty, "vhangup\n");
__tty_hangup(tty, 0);
}
EXPORT_SYMBOL(tty_vhangup);
/**
* tty_vhangup_self - process vhangup for own ctty
*
* Perform a vhangup on the current controlling tty
*/
void tty_vhangup_self(void)
{
struct tty_struct *tty;
tty = get_current_tty();
if (tty) {
tty_vhangup(tty);
tty_kref_put(tty);
}
}
/**
* tty_vhangup_session - hangup session leader exit
* @tty: tty to hangup
*
* The session leader is exiting and hanging up its controlling terminal.
* Every process in the foreground process group is signalled %SIGHUP.
*
* We do this synchronously so that when the syscall returns the process is
* complete. That guarantee is necessary for security reasons.
*/
void tty_vhangup_session(struct tty_struct *tty)
{
tty_debug_hangup(tty, "session hangup\n");
__tty_hangup(tty, 1);
}
/**
* tty_hung_up_p - was tty hung up
* @filp: file pointer of tty
*
* Return: true if the tty has been subject to a vhangup or a carrier loss
*/
int tty_hung_up_p(struct file *filp)
{
return (filp && filp->f_op == &hung_up_tty_fops);
}
EXPORT_SYMBOL(tty_hung_up_p);
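/*
 * Illustrative sketch: blocking open/wait paths re-check for a hangup after
 * every sleep, because __tty_hangup() swaps filp->f_op under sleepers;
 * tty_port_block_til_ready() follows the same pattern. The error code
 * choice here is caller policy, assumed for the example.
 */
static int __maybe_unused example_hangup_check(struct file *filp)
{
	if (tty_hung_up_p(filp))
		return -EAGAIN;
	return 0;
}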
void __stop_tty(struct tty_struct *tty)
{
if (tty->flow.stopped)
return;
tty->flow.stopped = true;
if (tty->ops->stop)
tty->ops->stop(tty);
}
/**
* stop_tty - propagate flow control
* @tty: tty to stop
*
* Perform flow control to the driver. May be called on an already stopped
* device and will not re-call the &tty_driver->stop() method.
*
* This functionality is used by both the line disciplines for halting incoming
 * flow and by the driver. It may therefore be called from any context and may
 * be under the tty %atomic_write_lock, but not always.
*
* Locking:
* flow.lock
*/
void stop_tty(struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tty->flow.lock, flags);
__stop_tty(tty);
spin_unlock_irqrestore(&tty->flow.lock, flags);
}
EXPORT_SYMBOL(stop_tty);
void __start_tty(struct tty_struct *tty)
{
if (!tty->flow.stopped || tty->flow.tco_stopped)
return;
tty->flow.stopped = false;
if (tty->ops->start)
tty->ops->start(tty);
tty_wakeup(tty);
}
/**
* start_tty - propagate flow control
* @tty: tty to start
*
* Start a tty that has been stopped if at all possible. If @tty was previously
* stopped and is now being started, the &tty_driver->start() method is invoked
* and the line discipline woken.
*
* Locking:
* flow.lock
*/
void start_tty(struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tty->flow.lock, flags);
__start_tty(tty);
spin_unlock_irqrestore(&tty->flow.lock, flags);
}
EXPORT_SYMBOL(start_tty);
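/*
 * Illustrative sketch: a line discipline reacting to received XOFF/XON
 * characters would pause and resume output with the stop_tty()/start_tty()
 * pair above; both take flow.lock internally. Termios locking is elided
 * here for brevity.
 */
static void __maybe_unused example_soft_flow(struct tty_struct *tty, u8 ch)
{
	if (ch == STOP_CHAR(tty))
		stop_tty(tty);
	else if (ch == START_CHAR(tty))
		start_tty(tty);
}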
static void tty_update_time(struct tty_struct *tty, bool mtime)
{
time64_t sec = ktime_get_real_seconds();
struct tty_file_private *priv;
spin_lock(&tty->files_lock);
list_for_each_entry(priv, &tty->tty_files, list) {
struct inode *inode = file_inode(priv->file);
struct timespec64 *time = mtime ? &inode->i_mtime : &inode->i_atime;
/*
* We only care if the two values differ in anything other than the
 * lower three bits (i.e. every 8 seconds). If so, then we can update
 * the time of the tty device; otherwise it could be construed as a
 * security leak to let userspace know the exact timing of the tty.
*/
if ((sec ^ time->tv_sec) & ~7)
time->tv_sec = sec;
}
spin_unlock(&tty->files_lock);
}
/*
* Iterate on the ldisc ->read() function until we've gotten all
* the data the ldisc has for us.
*
* The "cookie" is something that the ldisc read function can fill
* in to let us know that there is more data to be had.
*
* We promise to continue to call the ldisc until it stops returning
* data or clears the cookie. The cookie may be something that the
* ldisc maintains state for and needs to free.
*/
static ssize_t iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
struct file *file, struct iov_iter *to)
{
void *cookie = NULL;
unsigned long offset = 0;
char kernel_buf[64];
ssize_t retval = 0;
size_t copied, count = iov_iter_count(to);
do {
ssize_t size = min(count, sizeof(kernel_buf));
size = ld->ops->read(tty, file, kernel_buf, size, &cookie, offset);
if (!size)
break;
if (size < 0) {
/* Did we have an earlier error (ie -EFAULT)? */
if (retval)
break;
retval = size;
/*
* -EOVERFLOW means we didn't have enough space
* for a whole packet, and we shouldn't return
* a partial result.
*/
if (retval == -EOVERFLOW)
offset = 0;
break;
}
copied = copy_to_iter(kernel_buf, size, to);
offset += copied;
count -= copied;
/*
* If the user copy failed, we still need to do another ->read()
* call if we had a cookie to let the ldisc clear up.
*
* But make sure size is zeroed.
*/
if (unlikely(copied != size)) {
count = 0;
retval = -EFAULT;
}
} while (cookie);
/* We always clear tty buffer in case they contained passwords */
memzero_explicit(kernel_buf, sizeof(kernel_buf));
return offset ? offset : retval;
}
/**
* tty_read - read method for tty device files
* @iocb: kernel I/O control block
* @to: destination for the data read
*
* Perform the read system call function on this terminal device. Checks
* for hung up devices before calling the line discipline method.
*
* Locking:
* Locks the line discipline internally while needed. Multiple read calls
* may be outstanding in parallel.
*/
static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
ssize_t ret;
if (tty_paranoia_check(tty, inode, "tty_read"))
return -EIO;
if (!tty || tty_io_error(tty))
return -EIO;
/* We want to wait for the line discipline to sort out in this
* situation.
*/
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return hung_up_tty_read(iocb, to);
ret = -EIO;
if (ld->ops->read)
ret = iterate_tty_read(ld, tty, file, to);
tty_ldisc_deref(ld);
if (ret > 0)
tty_update_time(tty, false);
return ret;
}
void tty_write_unlock(struct tty_struct *tty)
{
mutex_unlock(&tty->atomic_write_lock);
wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
}
int tty_write_lock(struct tty_struct *tty, bool ndelay)
{
if (!mutex_trylock(&tty->atomic_write_lock)) {
if (ndelay)
return -EAGAIN;
if (mutex_lock_interruptible(&tty->atomic_write_lock))
return -ERESTARTSYS;
}
return 0;
}
/*
* Split writes up in sane blocksizes to avoid
* denial-of-service type attacks
*/
static ssize_t iterate_tty_write(struct tty_ldisc *ld, struct tty_struct *tty,
struct file *file, struct iov_iter *from)
{
size_t chunk, count = iov_iter_count(from);
ssize_t ret, written = 0;
ret = tty_write_lock(tty, file->f_flags & O_NDELAY);
if (ret < 0)
return ret;
/*
* We chunk up writes into a temporary buffer. This
* simplifies low-level drivers immensely, since they
* don't have locking issues and user mode accesses.
*
* But if TTY_NO_WRITE_SPLIT is set, we should use a
* big chunk-size..
*
* The default chunk-size is 2kB, because the NTTY
* layer has problems with bigger chunks. It will
* claim to be able to handle more characters than
* it actually does.
*/
chunk = 2048;
if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
chunk = 65536;
if (count < chunk)
chunk = count;
/* write_buf/write_cnt is protected by the atomic_write_lock mutex */
if (tty->write_cnt < chunk) {
unsigned char *buf_chunk;
if (chunk < 1024)
chunk = 1024;
buf_chunk = kvmalloc(chunk, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!buf_chunk) {
ret = -ENOMEM;
goto out;
}
kvfree(tty->write_buf);
tty->write_cnt = chunk;
tty->write_buf = buf_chunk;
}
/* Do the write .. */
for (;;) {
size_t size = min(chunk, count);
ret = -EFAULT;
if (copy_from_iter(tty->write_buf, size, from) != size)
break;
ret = ld->ops->write(tty, file, tty->write_buf, size);
if (ret <= 0)
break;
written += ret;
if (ret > size)
break;
/* FIXME! Have Al check this! */
if (ret != size)
iov_iter_revert(from, size-ret);
count -= ret;
if (!count)
break;
ret = -ERESTARTSYS;
if (signal_pending(current))
break;
cond_resched();
}
if (written) {
tty_update_time(tty, true);
ret = written;
}
out:
tty_write_unlock(tty);
return ret;
}
/**
* tty_write_message - write a message to a certain tty, not just the console.
* @tty: the destination tty_struct
* @msg: the message to write
*
 * This is used for messages that need to be redirected to a specific tty. We
 * don't put it into the syslog queue right now; maybe in the future, if it is
 * really needed.
*
* We must still hold the BTM and test the CLOSING flag for the moment.
*/
void tty_write_message(struct tty_struct *tty, char *msg)
{
if (tty) {
mutex_lock(&tty->atomic_write_lock);
tty_lock(tty);
if (tty->ops->write && tty->count > 0)
tty->ops->write(tty, msg, strlen(msg));
tty_unlock(tty);
tty_write_unlock(tty);
}
}
static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
{
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
ssize_t ret;
if (tty_paranoia_check(tty, file_inode(file), "tty_write"))
return -EIO;
if (!tty || !tty->ops->write || tty_io_error(tty))
return -EIO;
/* Short term debug to catch buggy drivers */
if (tty->ops->write_room == NULL)
tty_err(tty, "missing write_room method\n");
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return hung_up_tty_write(iocb, from);
if (!ld->ops->write)
ret = -EIO;
else
ret = iterate_tty_write(ld, tty, file, from);
tty_ldisc_deref(ld);
return ret;
}
/**
* tty_write - write method for tty device file
* @iocb: kernel I/O control block
* @from: iov_iter with data to write
*
* Write data to a tty device via the line discipline.
*
* Locking:
* Locks the line discipline as required
* Writes to the tty driver are serialized by the atomic_write_lock
* and are then processed in chunks to the device. The line
* discipline write method will not be invoked in parallel for
* each device.
*/
static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
{
return file_tty_write(iocb->ki_filp, iocb, from);
}
ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *p = NULL;
spin_lock(&redirect_lock);
if (redirect)
p = get_file(redirect);
spin_unlock(&redirect_lock);
/*
* We know the redirected tty is just another tty, we can
* call file_tty_write() directly with that file pointer.
*/
if (p) {
ssize_t res;
res = file_tty_write(p, iocb, iter);
fput(p);
return res;
}
return tty_write(iocb, iter);
}
/**
* tty_send_xchar - send priority character
* @tty: the tty to send to
* @ch: xchar to send
*
* Send a high priority character to the tty even if stopped.
*
* Locking: none for xchar method, write ordering for write method.
*/
int tty_send_xchar(struct tty_struct *tty, char ch)
{
bool was_stopped = tty->flow.stopped;
if (tty->ops->send_xchar) {
down_read(&tty->termios_rwsem);
tty->ops->send_xchar(tty, ch);
up_read(&tty->termios_rwsem);
return 0;
}
if (tty_write_lock(tty, false) < 0)
return -ERESTARTSYS;
down_read(&tty->termios_rwsem);
if (was_stopped)
start_tty(tty);
tty->ops->write(tty, &ch, 1);
if (was_stopped)
stop_tty(tty);
up_read(&tty->termios_rwsem);
tty_write_unlock(tty);
return 0;
}
/**
* pty_line_name - generate name for a pty
* @driver: the tty driver in use
* @index: the minor number
* @p: output buffer of at least 6 bytes
*
* Generate a name from a @driver reference and write it to the output buffer
* @p.
*
* Locking: None
*/
static void pty_line_name(struct tty_driver *driver, int index, char *p)
{
static const char ptychar[] = "pqrstuvwxyzabcde";
int i = index + driver->name_base;
/* ->name is initialized to "ttyp", but "tty" is expected */
sprintf(p, "%s%c%x",
driver->subtype == PTY_TYPE_SLAVE ? "tty" : driver->name,
ptychar[i >> 4 & 0xf], i & 0xf);
}
/**
* tty_line_name - generate name for a tty
* @driver: the tty driver in use
* @index: the minor number
* @p: output buffer of at least 7 bytes
*
* Generate a name from a @driver reference and write it to the output buffer
* @p.
*
* Locking: None
*/
static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
{
if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
return sprintf(p, "%s", driver->name);
else
return sprintf(p, "%s%d", driver->name,
index + driver->name_base);
}
/**
* tty_driver_lookup_tty() - find an existing tty, if any
* @driver: the driver for the tty
* @file: file object
* @idx: the minor number
*
* Return: the tty, if found. If not found, return %NULL or ERR_PTR() if the
* driver lookup() method returns an error.
*
* Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
*/
static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
struct file *file, int idx)
{
struct tty_struct *tty;
if (driver->ops->lookup) {
if (!file)
tty = ERR_PTR(-EIO);
else
tty = driver->ops->lookup(driver, file, idx);
} else {
if (idx >= driver->num)
return ERR_PTR(-EINVAL);
tty = driver->ttys[idx];
}
if (!IS_ERR(tty))
tty_kref_get(tty);
return tty;
}
/**
* tty_init_termios - helper for termios setup
* @tty: the tty to set up
*
* Initialise the termios structure for this tty. This runs under the
* %tty_mutex currently so we can be relaxed about ordering.
*/
void tty_init_termios(struct tty_struct *tty)
{
struct ktermios *tp;
int idx = tty->index;
if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
tty->termios = tty->driver->init_termios;
else {
/* Check for lazy saved data */
tp = tty->driver->termios[idx];
if (tp != NULL) {
tty->termios = *tp;
tty->termios.c_line = tty->driver->init_termios.c_line;
} else
tty->termios = tty->driver->init_termios;
}
/* Compatibility until drivers always set this */
tty->termios.c_ispeed = tty_termios_input_baud_rate(&tty->termios);
tty->termios.c_ospeed = tty_termios_baud_rate(&tty->termios);
}
EXPORT_SYMBOL_GPL(tty_init_termios);
/**
* tty_standard_install - usual tty->ops->install
* @driver: the driver for the tty
* @tty: the tty
*
* If the @driver overrides @tty->ops->install, it still can call this function
* to perform the standard install operations.
*/
int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty)
{
tty_init_termios(tty);
tty_driver_kref_get(driver);
tty->count++;
driver->ttys[tty->index] = tty;
return 0;
}
EXPORT_SYMBOL_GPL(tty_standard_install);
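/*
 * Illustrative sketch: a driver-specific ->install() can allocate its own
 * per-tty state and still delegate the common bookkeeping to
 * tty_standard_install() above. The private structure is hypothetical.
 */
struct example_state { unsigned int opens; };

static int __maybe_unused example_install(struct tty_driver *driver,
					  struct tty_struct *tty)
{
	struct example_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;
	tty->driver_data = st;
	return tty_standard_install(driver, tty);
}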
/**
* tty_driver_install_tty() - install a tty entry in the driver
* @driver: the driver for the tty
* @tty: the tty
*
* Install a tty object into the driver tables. The @tty->index field will be
 * set by the time this is called. This method is responsible for ensuring any
 * needed additional structures are allocated and configured.
*
* Locking: tty_mutex for now
*/
static int tty_driver_install_tty(struct tty_driver *driver,
struct tty_struct *tty)
{
return driver->ops->install ? driver->ops->install(driver, tty) :
tty_standard_install(driver, tty);
}
/**
* tty_driver_remove_tty() - remove a tty from the driver tables
* @driver: the driver for the tty
* @tty: tty to remove
*
* Remove a tty object from the driver tables. The tty->index field will be set
* by the time this is called.
*
* Locking: tty_mutex for now
*/
static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
{
if (driver->ops->remove)
driver->ops->remove(driver, tty);
else
driver->ttys[tty->index] = NULL;
}
/**
* tty_reopen() - fast re-open of an open tty
* @tty: the tty to open
*
* Re-opens on master ptys are not allowed and return -%EIO.
*
* Locking: Caller must hold tty_lock
* Return: 0 on success, -errno on error.
*/
static int tty_reopen(struct tty_struct *tty)
{
struct tty_driver *driver = tty->driver;
struct tty_ldisc *ld;
int retval = 0;
if (driver->type == TTY_DRIVER_TYPE_PTY &&
driver->subtype == PTY_TYPE_MASTER)
return -EIO;
if (!tty->count)
return -EAGAIN;
if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
return -EBUSY;
ld = tty_ldisc_ref_wait(tty);
if (ld) {
tty_ldisc_deref(ld);
} else {
retval = tty_ldisc_lock(tty, 5 * HZ);
if (retval)
return retval;
if (!tty->ldisc)
retval = tty_ldisc_reinit(tty, tty->termios.c_line);
tty_ldisc_unlock(tty);
}
if (retval == 0)
tty->count++;
return retval;
}
/**
* tty_init_dev - initialise a tty device
* @driver: tty driver we are opening a device on
* @idx: device index
*
* Prepare a tty device. This may not be a "new" clean device but could also be
* an active device. The pty drivers require special handling because of this.
*
* Locking:
* The function is called under the tty_mutex, which protects us from the
* tty struct or driver itself going away.
*
* On exit the tty device has the line discipline attached and a reference
* count of 1. If a pair was created for pty/tty use and the other was a pty
* master then it too has a reference count of 1.
*
* WSH 06/09/97: Rewritten to remove races and properly clean up after a failed
* open. The new code protects the open with a mutex, so it's really quite
* straightforward. The mutex locking can probably be relaxed for the (most
* common) case of reopening a tty.
*
* Return: new tty structure
*/
struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
{
struct tty_struct *tty;
int retval;
/*
* First time open is complex, especially for PTY devices.
* This code guarantees that either everything succeeds and the
* TTY is ready for operation, or else the table slots are vacated
* and the allocated memory released. (Except that the termios
* may be retained.)
*/
if (!try_module_get(driver->owner))
return ERR_PTR(-ENODEV);
tty = alloc_tty_struct(driver, idx);
if (!tty) {
retval = -ENOMEM;
goto err_module_put;
}
tty_lock(tty);
retval = tty_driver_install_tty(driver, tty);
if (retval < 0)
goto err_free_tty;
if (!tty->port)
tty->port = driver->ports[idx];
if (WARN_RATELIMIT(!tty->port,
"%s: %s driver does not set tty->port. This would crash the kernel. Fix the driver!\n",
__func__, tty->driver->name)) {
retval = -EINVAL;
goto err_release_lock;
}
retval = tty_ldisc_lock(tty, 5 * HZ);
if (retval)
goto err_release_lock;
tty->port->itty = tty;
/*
* Structures all installed ... call the ldisc open routines.
* If we fail here just call release_tty to clean up. No need
* to decrement the use counts, as release_tty doesn't care.
*/
retval = tty_ldisc_setup(tty, tty->link);
if (retval)
goto err_release_tty;
tty_ldisc_unlock(tty);
/* Return the tty locked so that it cannot vanish under the caller */
return tty;
err_free_tty:
tty_unlock(tty);
free_tty_struct(tty);
err_module_put:
module_put(driver->owner);
return ERR_PTR(retval);
/* call the tty release_tty routine to clean out this slot */
err_release_tty:
tty_ldisc_unlock(tty);
tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n",
retval, idx);
err_release_lock:
tty_unlock(tty);
release_tty(tty, idx);
return ERR_PTR(retval);
}
/**
* tty_save_termios() - save tty termios data in driver table
* @tty: tty whose termios data to save
*
* Locking: Caller guarantees serialisation with tty_init_termios().
*/
void tty_save_termios(struct tty_struct *tty)
{
struct ktermios *tp;
int idx = tty->index;
/* If the port is going to reset then it has no termios to save */
if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
return;
/* Stash the termios data */
tp = tty->driver->termios[idx];
if (tp == NULL) {
tp = kmalloc(sizeof(*tp), GFP_KERNEL);
if (tp == NULL)
return;
tty->driver->termios[idx] = tp;
}
*tp = tty->termios;
}
EXPORT_SYMBOL_GPL(tty_save_termios);
/**
* tty_flush_works - flush all works of a tty/pty pair
* @tty: tty device to flush works for (or either end of a pty pair)
*
* Sync flush all works belonging to @tty (and the 'other' tty).
*/
static void tty_flush_works(struct tty_struct *tty)
{
flush_work(&tty->SAK_work);
flush_work(&tty->hangup_work);
if (tty->link) {
flush_work(&tty->link->SAK_work);
flush_work(&tty->link->hangup_work);
}
}
/**
* release_one_tty - release tty structure memory
* @work: work of tty we are obliterating
*
* Releases memory associated with a tty structure, and clears out the
* driver table slots. This function is called when a device is no longer
* in use. It also gets called when setup of a device fails.
*
* Locking:
* takes the file list lock internally when working on the list of ttys
* that the driver keeps.
*
* This method gets called from a work queue so that the driver private
* cleanup ops can sleep (needed for USB at least)
*/
static void release_one_tty(struct work_struct *work)
{
struct tty_struct *tty =
container_of(work, struct tty_struct, hangup_work);
struct tty_driver *driver = tty->driver;
struct module *owner = driver->owner;
if (tty->ops->cleanup)
tty->ops->cleanup(tty);
tty_driver_kref_put(driver);
module_put(owner);
spin_lock(&tty->files_lock);
list_del_init(&tty->tty_files);
spin_unlock(&tty->files_lock);
put_pid(tty->ctrl.pgrp);
put_pid(tty->ctrl.session);
free_tty_struct(tty);
}
static void queue_release_one_tty(struct kref *kref)
{
struct tty_struct *tty = container_of(kref, struct tty_struct, kref);
/* The hangup queue is now free so we can reuse it rather than
* waste a chunk of memory for each port.
*/
INIT_WORK(&tty->hangup_work, release_one_tty);
schedule_work(&tty->hangup_work);
}
/**
* tty_kref_put - release a tty kref
* @tty: tty device
*
* Release a reference to the @tty device and if need be let the kref layer
* destruct the object for us.
*/
void tty_kref_put(struct tty_struct *tty)
{
if (tty)
kref_put(&tty->kref, queue_release_one_tty);
}
EXPORT_SYMBOL(tty_kref_put);
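/*
 * Illustrative sketch: code that needs a tty to stay alive across a sleep
 * takes its own reference and balances it; the final put above queues
 * release_one_tty().
 */
static void __maybe_unused example_borrow_tty(struct tty_struct *tty)
{
	struct tty_struct *ref = tty_kref_get(tty);

	if (ref) {
		/* ... safe to use ref here, even if the owner closes ... */
		tty_kref_put(ref);
	}
}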
/**
* release_tty - release tty structure memory
 * @tty: tty device to release
 * @idx: index of the tty device to release
*
* Release both @tty and a possible linked partner (think pty pair),
* and decrement the refcount of the backing module.
*
* Locking:
* tty_mutex
* takes the file list lock internally when working on the list of ttys
* that the driver keeps.
*/
static void release_tty(struct tty_struct *tty, int idx)
{
/* This should always be true but check for the moment */
WARN_ON(tty->index != idx);
WARN_ON(!mutex_is_locked(&tty_mutex));
if (tty->ops->shutdown)
tty->ops->shutdown(tty);
tty_save_termios(tty);
tty_driver_remove_tty(tty->driver, tty);
if (tty->port)
tty->port->itty = NULL;
if (tty->link)
tty->link->port->itty = NULL;
if (tty->port)
tty_buffer_cancel_work(tty->port);
if (tty->link)
tty_buffer_cancel_work(tty->link->port);
tty_kref_put(tty->link);
tty_kref_put(tty);
}
/**
* tty_release_checks - check a tty before real release
* @tty: tty to check
* @idx: index of the tty
*
* Performs some paranoid checking before true release of the @tty. This is a
* no-op unless %TTY_PARANOIA_CHECK is defined.
*/
static int tty_release_checks(struct tty_struct *tty, int idx)
{
#ifdef TTY_PARANOIA_CHECK
if (idx < 0 || idx >= tty->driver->num) {
tty_debug(tty, "bad idx %d\n", idx);
return -1;
}
/* not much to check for devpts */
if (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)
return 0;
if (tty != tty->driver->ttys[idx]) {
tty_debug(tty, "bad driver table[%d] = %p\n",
idx, tty->driver->ttys[idx]);
return -1;
}
if (tty->driver->other) {
struct tty_struct *o_tty = tty->link;
if (o_tty != tty->driver->other->ttys[idx]) {
tty_debug(tty, "bad other table[%d] = %p\n",
idx, tty->driver->other->ttys[idx]);
return -1;
}
if (o_tty->link != tty) {
tty_debug(tty, "bad link = %p\n", o_tty->link);
return -1;
}
}
#endif
return 0;
}
/**
* tty_kclose - closes tty opened by tty_kopen
* @tty: tty device
*
* Performs the final steps to release and free a tty device. It is the same as
* tty_release_struct() except that it also resets %TTY_PORT_KOPENED flag on
* @tty->port.
*/
void tty_kclose(struct tty_struct *tty)
{
/*
* Ask the line discipline code to release its structures
*/
tty_ldisc_release(tty);
/* Wait for pending work before tty destruction commences */
tty_flush_works(tty);
tty_debug_hangup(tty, "freeing structure\n");
/*
* The release_tty function takes care of the details of clearing
* the slots and preserving the termios structure.
*/
mutex_lock(&tty_mutex);
tty_port_set_kopened(tty->port, 0);
release_tty(tty, tty->index);
mutex_unlock(&tty_mutex);
}
EXPORT_SYMBOL_GPL(tty_kclose);
/**
* tty_release_struct - release a tty struct
* @tty: tty device
* @idx: index of the tty
*
* Performs the final steps to release and free a tty device. It is roughly the
* reverse of tty_init_dev().
*/
void tty_release_struct(struct tty_struct *tty, int idx)
{
/*
* Ask the line discipline code to release its structures
*/
tty_ldisc_release(tty);
	/* Wait for pending work before tty destruction commences */
tty_flush_works(tty);
tty_debug_hangup(tty, "freeing structure\n");
/*
* The release_tty function takes care of the details of clearing
* the slots and preserving the termios structure.
*/
mutex_lock(&tty_mutex);
release_tty(tty, idx);
mutex_unlock(&tty_mutex);
}
EXPORT_SYMBOL_GPL(tty_release_struct);
/**
* tty_release - vfs callback for close
* @inode: inode of tty
* @filp: file pointer for handle to tty
*
* Called the last time each file handle is closed that references this tty.
* There may however be several such references.
*
* Locking:
* Takes BKL. See tty_release_dev().
*
* Even releasing the tty structures is a tricky business. We have to be very
* careful that the structures are all released at the same time, as interrupts
* might otherwise get the wrong pointers.
*
* WSH 09/09/97: rewritten to avoid some nasty race conditions that could
* lead to double frees or releasing memory still in use.
*/
int tty_release(struct inode *inode, struct file *filp)
{
struct tty_struct *tty = file_tty(filp);
struct tty_struct *o_tty = NULL;
int do_sleep, final;
int idx;
long timeout = 0;
int once = 1;
if (tty_paranoia_check(tty, inode, __func__))
return 0;
tty_lock(tty);
check_tty_count(tty, __func__);
__tty_fasync(-1, filp, 0);
idx = tty->index;
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
o_tty = tty->link;
if (tty_release_checks(tty, idx)) {
tty_unlock(tty);
return 0;
}
tty_debug_hangup(tty, "releasing (count=%d)\n", tty->count);
if (tty->ops->close)
tty->ops->close(tty, filp);
/* If tty is pty master, lock the slave pty (stable lock order) */
tty_lock_slave(o_tty);
/*
* Sanity check: if tty->count is going to zero, there shouldn't be
* any waiters on tty->read_wait or tty->write_wait. We test the
* wait queues and kick everyone out _before_ actually starting to
* close. This ensures that we won't block while releasing the tty
* structure.
*
* The test for the o_tty closing is necessary, since the master and
* slave sides may close in any order. If the slave side closes out
* first, its count will be one, since the master side holds an open.
* Thus this test wouldn't be triggered at the time the slave closed,
* so we do it now.
*/
while (1) {
do_sleep = 0;
if (tty->count <= 1) {
if (waitqueue_active(&tty->read_wait)) {
wake_up_poll(&tty->read_wait, EPOLLIN);
do_sleep++;
}
if (waitqueue_active(&tty->write_wait)) {
wake_up_poll(&tty->write_wait, EPOLLOUT);
do_sleep++;
}
}
if (o_tty && o_tty->count <= 1) {
if (waitqueue_active(&o_tty->read_wait)) {
wake_up_poll(&o_tty->read_wait, EPOLLIN);
do_sleep++;
}
if (waitqueue_active(&o_tty->write_wait)) {
wake_up_poll(&o_tty->write_wait, EPOLLOUT);
do_sleep++;
}
}
if (!do_sleep)
break;
if (once) {
once = 0;
tty_warn(tty, "read/write wait queue active!\n");
}
schedule_timeout_killable(timeout);
if (timeout < 120 * HZ)
timeout = 2 * timeout + 1;
else
timeout = MAX_SCHEDULE_TIMEOUT;
}
if (o_tty) {
if (--o_tty->count < 0) {
tty_warn(tty, "bad slave count (%d)\n", o_tty->count);
o_tty->count = 0;
}
}
if (--tty->count < 0) {
tty_warn(tty, "bad tty->count (%d)\n", tty->count);
tty->count = 0;
}
/*
	 * We've decremented tty->count, so we need to remove this file
	 * descriptor from the tty->tty_files list; this serves two
* purposes:
* - check_tty_count sees the correct number of file descriptors
* associated with this tty.
* - do_tty_hangup no longer sees this file descriptor as
* something that needs to be handled for hangups.
*/
tty_del_file(filp);
/*
* Perform some housekeeping before deciding whether to return.
*
* If _either_ side is closing, make sure there aren't any
* processes that still think tty or o_tty is their controlling
* tty.
*/
if (!tty->count) {
read_lock(&tasklist_lock);
session_clear_tty(tty->ctrl.session);
if (o_tty)
session_clear_tty(o_tty->ctrl.session);
read_unlock(&tasklist_lock);
}
/* check whether both sides are closing ... */
final = !tty->count && !(o_tty && o_tty->count);
tty_unlock_slave(o_tty);
tty_unlock(tty);
/* At this point, the tty->count == 0 should ensure a dead tty
* cannot be re-opened by a racing opener.
*/
if (!final)
return 0;
tty_debug_hangup(tty, "final close\n");
tty_release_struct(tty, idx);
return 0;
}
/**
* tty_open_current_tty - get locked tty of current task
* @device: device number
* @filp: file pointer to tty
* @return: locked tty of the current task iff @device is /dev/tty
*
* Performs a re-open of the current task's controlling tty.
*
* We cannot return driver and index like for the other nodes because devpts
* will not work then. It expects inodes to be from devpts FS.
*/
static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
{
struct tty_struct *tty;
int retval;
if (device != MKDEV(TTYAUX_MAJOR, 0))
return NULL;
tty = get_current_tty();
if (!tty)
return ERR_PTR(-ENXIO);
filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
/* noctty = 1; */
tty_lock(tty);
tty_kref_put(tty); /* safe to drop the kref now */
retval = tty_reopen(tty);
if (retval < 0) {
tty_unlock(tty);
tty = ERR_PTR(retval);
}
return tty;
}
/**
* tty_lookup_driver - lookup a tty driver for a given device file
* @device: device number
* @filp: file pointer to tty
* @index: index for the device in the @return driver
*
 * If the returned value is not an error, the caller is responsible for
 * decrementing the refcount via tty_driver_kref_put().
*
* Locking: %tty_mutex protects get_tty_driver()
*
* Return: driver for this inode (with increased refcount)
*/
static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
int *index)
{
struct tty_driver *driver = NULL;
switch (device) {
#ifdef CONFIG_VT
case MKDEV(TTY_MAJOR, 0): {
extern struct tty_driver *console_driver;
driver = tty_driver_kref_get(console_driver);
*index = fg_console;
break;
}
#endif
case MKDEV(TTYAUX_MAJOR, 1): {
struct tty_driver *console_driver = console_device(index);
if (console_driver) {
driver = tty_driver_kref_get(console_driver);
if (driver && filp) {
/* Don't let /dev/console block */
filp->f_flags |= O_NONBLOCK;
break;
}
}
if (driver)
tty_driver_kref_put(driver);
return ERR_PTR(-ENODEV);
}
default:
driver = get_tty_driver(device, index);
if (!driver)
return ERR_PTR(-ENODEV);
break;
}
return driver;
}
static struct tty_struct *tty_kopen(dev_t device, int shared)
{
struct tty_struct *tty;
struct tty_driver *driver;
int index = -1;
mutex_lock(&tty_mutex);
driver = tty_lookup_driver(device, NULL, &index);
if (IS_ERR(driver)) {
mutex_unlock(&tty_mutex);
return ERR_CAST(driver);
}
/* check whether we're reopening an existing tty */
tty = tty_driver_lookup_tty(driver, NULL, index);
if (IS_ERR(tty) || shared)
goto out;
if (tty) {
/* drop kref from tty_driver_lookup_tty() */
tty_kref_put(tty);
tty = ERR_PTR(-EBUSY);
} else { /* tty_init_dev returns tty with the tty_lock held */
tty = tty_init_dev(driver, index);
if (IS_ERR(tty))
goto out;
tty_port_set_kopened(tty->port, 1);
}
out:
mutex_unlock(&tty_mutex);
tty_driver_kref_put(driver);
return tty;
}
/**
* tty_kopen_exclusive - open a tty device for kernel
* @device: dev_t of device to open
*
* Opens tty exclusively for kernel. Performs the driver lookup, makes sure
* it's not already opened and performs the first-time tty initialization.
*
* Claims the global %tty_mutex to serialize:
* * concurrent first-time tty initialization
* * concurrent tty driver removal w/ lookup
* * concurrent tty removal from driver table
*
* Return: the locked initialized &tty_struct
*/
struct tty_struct *tty_kopen_exclusive(dev_t device)
{
return tty_kopen(device, 0);
}
EXPORT_SYMBOL_GPL(tty_kopen_exclusive);
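/*
 * Illustrative sketch, modeled loosely on in-kernel users such as speakup:
 * the tty comes back locked, the driver open is invoked by hand, and
 * teardown goes through tty_kclose(). Error handling is deliberately
 * simplified for the example.
 */
static int __maybe_unused example_kopen(dev_t dev)
{
	struct tty_struct *tty = tty_kopen_exclusive(dev);
	int ret = -ENODEV;

	if (IS_ERR(tty))
		return PTR_ERR(tty);
	if (tty->ops->open)
		ret = tty->ops->open(tty, NULL);
	tty_unlock(tty);	/* tty_kopen() returned it locked */
	if (ret) {
		tty_kclose(tty);	/* simplified error path */
		return ret;
	}
	/* ... use the tty; when finished, call tty_kclose() ... */
	return 0;
}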
/**
* tty_kopen_shared - open a tty device for shared in-kernel use
* @device: dev_t of device to open
*
* Opens an already existing tty for in-kernel use. Compared to
 * tty_kopen_exclusive() above, it does not ensure that it is the only user.
*
* Locking: identical to tty_kopen() above.
*/
struct tty_struct *tty_kopen_shared(dev_t device)
{
return tty_kopen(device, 1);
}
EXPORT_SYMBOL_GPL(tty_kopen_shared);
/**
* tty_open_by_driver - open a tty device
* @device: dev_t of device to open
* @filp: file pointer to tty
*
* Performs the driver lookup, checks for a reopen, or otherwise performs the
* first-time tty initialization.
 *
* Claims the global tty_mutex to serialize:
* * concurrent first-time tty initialization
* * concurrent tty driver removal w/ lookup
* * concurrent tty removal from driver table
*
* Return: the locked initialized or re-opened &tty_struct
*/
static struct tty_struct *tty_open_by_driver(dev_t device,
struct file *filp)
{
struct tty_struct *tty;
struct tty_driver *driver = NULL;
int index = -1;
int retval;
mutex_lock(&tty_mutex);
driver = tty_lookup_driver(device, filp, &index);
if (IS_ERR(driver)) {
mutex_unlock(&tty_mutex);
return ERR_CAST(driver);
}
/* check whether we're reopening an existing tty */
tty = tty_driver_lookup_tty(driver, filp, index);
if (IS_ERR(tty)) {
mutex_unlock(&tty_mutex);
goto out;
}
if (tty) {
if (tty_port_kopened(tty->port)) {
tty_kref_put(tty);
mutex_unlock(&tty_mutex);
tty = ERR_PTR(-EBUSY);
goto out;
}
mutex_unlock(&tty_mutex);
retval = tty_lock_interruptible(tty);
tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
if (retval) {
if (retval == -EINTR)
retval = -ERESTARTSYS;
tty = ERR_PTR(retval);
goto out;
}
retval = tty_reopen(tty);
if (retval < 0) {
tty_unlock(tty);
tty = ERR_PTR(retval);
}
} else { /* Returns with the tty_lock held for now */
tty = tty_init_dev(driver, index);
mutex_unlock(&tty_mutex);
}
out:
tty_driver_kref_put(driver);
return tty;
}
/**
* tty_open - open a tty device
* @inode: inode of device file
* @filp: file pointer to tty
*
* tty_open() and tty_release() keep up the tty count that contains the number
* of opens done on a tty. We cannot use the inode-count, as different inodes
* might point to the same tty.
*
* Open-counting is needed for pty masters, as well as for keeping track of
* serial lines: DTR is dropped when the last close happens.
* (This is not done solely through tty->count, now. - Ted 1/27/92)
*
* The termios state of a pty is reset on the first open so that settings don't
* persist across reuse.
*
* Locking:
* * %tty_mutex protects tty, tty_lookup_driver() and tty_init_dev().
* * @tty->count should protect the rest.
* * ->siglock protects ->signal/->sighand
*
* Note: the tty_unlock/lock cases without a ref are only safe due to %tty_mutex
*/
static int tty_open(struct inode *inode, struct file *filp)
{
struct tty_struct *tty;
int noctty, retval;
dev_t device = inode->i_rdev;
unsigned saved_flags = filp->f_flags;
nonseekable_open(inode, filp);
retry_open:
retval = tty_alloc_file(filp);
if (retval)
return -ENOMEM;
tty = tty_open_current_tty(device, filp);
if (!tty)
tty = tty_open_by_driver(device, filp);
if (IS_ERR(tty)) {
tty_free_file(filp);
retval = PTR_ERR(tty);
if (retval != -EAGAIN || signal_pending(current))
return retval;
schedule();
goto retry_open;
}
tty_add_file(tty, filp);
check_tty_count(tty, __func__);
tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);
if (tty->ops->open)
retval = tty->ops->open(tty, filp);
else
retval = -ENODEV;
filp->f_flags = saved_flags;
if (retval) {
tty_debug_hangup(tty, "open error %d, releasing\n", retval);
tty_unlock(tty); /* need to call tty_release without BTM */
tty_release(inode, filp);
if (retval != -ERESTARTSYS)
return retval;
if (signal_pending(current))
return retval;
schedule();
/*
* Need to reset f_op in case a hangup happened.
*/
if (tty_hung_up_p(filp))
filp->f_op = &tty_fops;
goto retry_open;
}
clear_bit(TTY_HUPPED, &tty->flags);
noctty = (filp->f_flags & O_NOCTTY) ||
(IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
device == MKDEV(TTYAUX_MAJOR, 1) ||
(tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER);
if (!noctty)
tty_open_proc_set_tty(filp, tty);
tty_unlock(tty);
return 0;
}
/**
* tty_poll - check tty status
* @filp: file being polled
* @wait: poll wait structures to update
*
* Call the line discipline polling method to obtain the poll status of the
* device.
*
* Locking: locks called line discipline but ldisc poll method may be
* re-entered freely by other callers.
*/
static __poll_t tty_poll(struct file *filp, poll_table *wait)
{
struct tty_struct *tty = file_tty(filp);
struct tty_ldisc *ld;
__poll_t ret = 0;
if (tty_paranoia_check(tty, file_inode(filp), "tty_poll"))
return 0;
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return hung_up_tty_poll(filp, wait);
if (ld->ops->poll)
ret = ld->ops->poll(tty, filp, wait);
tty_ldisc_deref(ld);
return ret;
}
static int __tty_fasync(int fd, struct file *filp, int on)
{
struct tty_struct *tty = file_tty(filp);
unsigned long flags;
int retval = 0;
if (tty_paranoia_check(tty, file_inode(filp), "tty_fasync"))
goto out;
retval = fasync_helper(fd, filp, on, &tty->fasync);
if (retval <= 0)
goto out;
if (on) {
enum pid_type type;
struct pid *pid;
spin_lock_irqsave(&tty->ctrl.lock, flags);
if (tty->ctrl.pgrp) {
pid = tty->ctrl.pgrp;
type = PIDTYPE_PGID;
} else {
pid = task_pid(current);
type = PIDTYPE_TGID;
}
get_pid(pid);
spin_unlock_irqrestore(&tty->ctrl.lock, flags);
__f_setown(filp, pid, type, 0);
put_pid(pid);
retval = 0;
}
out:
return retval;
}
static int tty_fasync(int fd, struct file *filp, int on)
{
struct tty_struct *tty = file_tty(filp);
int retval = -ENOTTY;
tty_lock(tty);
if (!tty_hung_up_p(filp))
retval = __tty_fasync(fd, filp, on);
tty_unlock(tty);
return retval;
}
static bool tty_legacy_tiocsti __read_mostly = IS_ENABLED(CONFIG_LEGACY_TIOCSTI);
/**
* tiocsti - fake input character
* @tty: tty to fake input into
* @p: pointer to character
*
* Fake input to a tty device. Does the necessary locking and input management.
*
* FIXME: does not honour flow control ??
*
* Locking:
* * Called functions take tty_ldiscs_lock
* * current->signal->tty check is safe without locks
*/
static int tiocsti(struct tty_struct *tty, char __user *p)
{
char ch, mbz = 0;
struct tty_ldisc *ld;
if (!tty_legacy_tiocsti && !capable(CAP_SYS_ADMIN))
return -EIO;
if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(ch, p))
return -EFAULT;
tty_audit_tiocsti(tty, ch);
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return -EIO;
tty_buffer_lock_exclusive(tty->port);
if (ld->ops->receive_buf)
ld->ops->receive_buf(tty, &ch, &mbz, 1);
tty_buffer_unlock_exclusive(tty->port);
tty_ldisc_deref(ld);
return 0;
}
/**
* tiocgwinsz - implement window query ioctl
* @tty: tty
* @arg: user buffer for result
*
* Copies the kernel idea of the window size into the user buffer.
*
* Locking: @tty->winsize_mutex is taken to ensure the winsize data is
* consistent.
*/
static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
{
int err;
mutex_lock(&tty->winsize_mutex);
err = copy_to_user(arg, &tty->winsize, sizeof(*arg));
mutex_unlock(&tty->winsize_mutex);
return err ? -EFAULT : 0;
}
/**
* tty_do_resize - resize event
* @tty: tty being resized
* @ws: new dimensions
*
 * Update the termios variables and send the necessary signals to perform a
* terminal resize correctly.
*/
int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
{
struct pid *pgrp;
/* Lock the tty */
mutex_lock(&tty->winsize_mutex);
if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
goto done;
/* Signal the foreground process group */
pgrp = tty_get_pgrp(tty);
if (pgrp)
kill_pgrp(pgrp, SIGWINCH, 1);
put_pid(pgrp);
tty->winsize = *ws;
done:
mutex_unlock(&tty->winsize_mutex);
return 0;
}
EXPORT_SYMBOL(tty_do_resize);
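/*
 * Illustrative sketch: a driver-level ->resize() (dispatched from
 * tiocswinsz() below) pushes the new geometry to its hardware or host
 * protocol first, then falls back to tty_do_resize() so the winsize state
 * and SIGWINCH delivery stay consistent. The hardware hook is hypothetical.
 */
static int __maybe_unused example_resize(struct tty_struct *tty,
					 struct winsize *ws)
{
	/* ... notify the (hypothetical) hardware of ws->ws_col/ws_row ... */
	return tty_do_resize(tty, ws);
}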
/**
* tiocswinsz - implement window size set ioctl
* @tty: tty side of tty
* @arg: user buffer for result
*
* Copies the user idea of the window size to the kernel. Traditionally this is
* just advisory information but for the Linux console it actually has driver
* level meaning and triggers a VC resize.
*
* Locking:
* Driver dependent. The default do_resize method takes the tty termios
* mutex and ctrl.lock. The console takes its own lock then calls into the
* default method.
*/
static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
{
struct winsize tmp_ws;
if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
return -EFAULT;
if (tty->ops->resize)
return tty->ops->resize(tty, &tmp_ws);
else
return tty_do_resize(tty, &tmp_ws);
}
/**
* tioccons - allow admin to move logical console
* @file: the file to become console
*
* Allow the administrator to move the redirected console device.
*
* Locking: uses redirect_lock to guard the redirect information
*/
static int tioccons(struct file *file)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (file->f_op->write_iter == redirected_tty_write) {
struct file *f;
spin_lock(&redirect_lock);
f = redirect;
redirect = NULL;
spin_unlock(&redirect_lock);
if (f)
fput(f);
return 0;
}
if (file->f_op->write_iter != tty_write)
return -ENOTTY;
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
if (!(file->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
spin_lock(&redirect_lock);
if (redirect) {
spin_unlock(&redirect_lock);
return -EBUSY;
}
redirect = get_file(file);
spin_unlock(&redirect_lock);
return 0;
}
/**
* tiocsetd - set line discipline
* @tty: tty device
* @p: pointer to user data
*
* Set the line discipline according to user request.
*
* Locking: see tty_set_ldisc(), this function is just a helper
*/
static int tiocsetd(struct tty_struct *tty, int __user *p)
{
int disc;
int ret;
if (get_user(disc, p))
return -EFAULT;
ret = tty_set_ldisc(tty, disc);
return ret;
}
/**
* tiocgetd - get line discipline
* @tty: tty device
* @p: pointer to user data
*
* Retrieves the line discipline id directly from the ldisc.
*
* Locking: waits for ldisc reference (in case the line discipline is changing
* or the @tty is being hungup)
*/
static int tiocgetd(struct tty_struct *tty, int __user *p)
{
struct tty_ldisc *ld;
int ret;
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return -EIO;
ret = put_user(ld->ops->num, p);
tty_ldisc_deref(ld);
return ret;
}
/**
 * send_break - perform a timed break
 * @tty: device to break on
 * @duration: timeout in ms
*
* Perform a timed break on hardware that lacks its own driver level timed
* break functionality.
*
* Locking:
* @tty->atomic_write_lock serializes
*/
static int send_break(struct tty_struct *tty, unsigned int duration)
{
int retval;
if (tty->ops->break_ctl == NULL)
return 0;
if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
retval = tty->ops->break_ctl(tty, duration);
else {
/* Do the work ourselves */
if (tty_write_lock(tty, false) < 0)
return -EINTR;
retval = tty->ops->break_ctl(tty, -1);
if (retval)
goto out;
if (!signal_pending(current))
msleep_interruptible(duration);
retval = tty->ops->break_ctl(tty, 0);
out:
tty_write_unlock(tty);
if (signal_pending(current))
retval = -EINTR;
}
return retval;
}
/**
* tty_tiocmget - get modem status
* @tty: tty device
* @p: pointer to result
*
* Obtain the modem status bits from the tty driver if the feature is
* supported. Return -%ENOTTY if it is not available.
*
* Locking: none (up to the driver)
*/
static int tty_tiocmget(struct tty_struct *tty, int __user *p)
{
int retval = -ENOTTY;
if (tty->ops->tiocmget) {
retval = tty->ops->tiocmget(tty);
if (retval >= 0)
retval = put_user(retval, p);
}
return retval;
}
/**
* tty_tiocmset - set modem status
* @tty: tty device
* @cmd: command - clear bits, set bits or set all
* @p: pointer to desired bits
*
* Set the modem status bits from the tty driver if the feature
* is supported. Return -%ENOTTY if it is not available.
*
* Locking: none (up to the driver)
*/
static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
unsigned __user *p)
{
int retval;
unsigned int set, clear, val;
if (tty->ops->tiocmset == NULL)
return -ENOTTY;
retval = get_user(val, p);
if (retval)
return retval;
set = clear = 0;
switch (cmd) {
case TIOCMBIS:
set = val;
break;
case TIOCMBIC:
clear = val;
break;
case TIOCMSET:
set = val;
clear = ~val;
break;
}
set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
return tty->ops->tiocmset(tty, set, clear);
}
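/*
 * Illustrative userspace sketch, not part of this file: TIOCMBIS/TIOCMBIC
 * set and clear individual modem lines, while TIOCMSET rewrites the whole
 * set. Only DTR, RTS, OUT1, OUT2 and LOOP are honoured, per the masking
 * above.
 *
 * int bits = TIOCM_RTS;
 * ioctl(fd, TIOCMBIS, &bits); // assert RTS
 * ioctl(fd, TIOCMBIC, &bits); // drop RTS again
 */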
/**
* tty_get_icount - get tty statistics
* @tty: tty device
* @icount: output parameter
*
* Gets a copy of the @tty's icount statistics.
*
* Locking: none (up to the driver)
*/
int tty_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
memset(icount, 0, sizeof(*icount));
if (tty->ops->get_icount)
return tty->ops->get_icount(tty, icount);
else
return -ENOTTY;
}
EXPORT_SYMBOL_GPL(tty_get_icount);
static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
{
struct serial_icounter_struct icount;
int retval;
retval = tty_get_icount(tty, &icount);
if (retval != 0)
return retval;
if (copy_to_user(arg, &icount, sizeof(icount)))
return -EFAULT;
return 0;
}
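/*
 * Illustrative userspace sketch, not part of this file: drivers that
 * implement get_icount() expose per-line event counters via TIOCGICOUNT,
 * which is useful for monitoring serial error rates:
 *
 * struct serial_icounter_struct ic;
 * if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
 * printf("rx=%d frame=%d overrun=%d\n", ic.rx, ic.frame, ic.overrun);
 */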
static int tty_set_serial(struct tty_struct *tty, struct serial_struct *ss)
{
char comm[TASK_COMM_LEN];
int flags;
flags = ss->flags & ASYNC_DEPRECATED;
if (flags)
pr_warn_ratelimited("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
__func__, get_task_comm(comm, current), flags);
if (!tty->ops->set_serial)
return -ENOTTY;
return tty->ops->set_serial(tty, ss);
}
static int tty_tiocsserial(struct tty_struct *tty, struct serial_struct __user *ss)
{
struct serial_struct v;
if (copy_from_user(&v, ss, sizeof(*ss)))
return -EFAULT;
return tty_set_serial(tty, &v);
}
static int tty_tiocgserial(struct tty_struct *tty, struct serial_struct __user *ss)
{
struct serial_struct v;
int err;
memset(&v, 0, sizeof(v));
if (!tty->ops->get_serial)
return -ENOTTY;
err = tty->ops->get_serial(tty, &v);
if (!err && copy_to_user(ss, &v, sizeof(v)))
err = -EFAULT;
return err;
}
/*
* if pty, return the slave side (real_tty)
* otherwise, return self
*/
static struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
{
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
tty = tty->link;
return tty;
}
/*
* Split this up, as gcc can choke on it otherwise.
*/
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct tty_struct *tty = file_tty(file);
struct tty_struct *real_tty;
void __user *p = (void __user *)arg;
int retval;
struct tty_ldisc *ld;
if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl"))
return -EINVAL;
real_tty = tty_pair_get_tty(tty);
/*
* Factor out some common prep work
*/
switch (cmd) {
case TIOCSETD:
case TIOCSBRK:
case TIOCCBRK:
case TCSBRK:
case TCSBRKP:
retval = tty_check_change(tty);
if (retval)
return retval;
if (cmd != TIOCCBRK) {
tty_wait_until_sent(tty, 0);
if (signal_pending(current))
return -EINTR;
}
break;
}
/*
* Now do the stuff.
*/
switch (cmd) {
case TIOCSTI:
return tiocsti(tty, p);
case TIOCGWINSZ:
return tiocgwinsz(real_tty, p);
case TIOCSWINSZ:
return tiocswinsz(real_tty, p);
case TIOCCONS:
return real_tty != tty ? -EINVAL : tioccons(file);
case TIOCEXCL:
set_bit(TTY_EXCLUSIVE, &tty->flags);
return 0;
case TIOCNXCL:
clear_bit(TTY_EXCLUSIVE, &tty->flags);
return 0;
case TIOCGEXCL:
{
int excl = test_bit(TTY_EXCLUSIVE, &tty->flags);
return put_user(excl, (int __user *)p);
}
case TIOCGETD:
return tiocgetd(tty, p);
case TIOCSETD:
return tiocsetd(tty, p);
case TIOCVHANGUP:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
tty_vhangup(tty);
return 0;
case TIOCGDEV:
{
unsigned int ret = new_encode_dev(tty_devnum(real_tty));
return put_user(ret, (unsigned int __user *)p);
}
/*
* Break handling
*/
case TIOCSBRK: /* Turn break on, unconditionally */
if (tty->ops->break_ctl)
return tty->ops->break_ctl(tty, -1);
return 0;
case TIOCCBRK: /* Turn break off, unconditionally */
if (tty->ops->break_ctl)
return tty->ops->break_ctl(tty, 0);
return 0;
case TCSBRK: /* SVID version: non-zero arg --> no break */
/* non-zero arg means wait for all output data
* to be sent (performed above) but don't send break.
* This is used by the tcdrain() termios function.
*/
if (!arg)
return send_break(tty, 250);
return 0;
case TCSBRKP: /* support for POSIX tcsendbreak() */
return send_break(tty, arg ? arg*100 : 250);
case TIOCMGET:
return tty_tiocmget(tty, p);
case TIOCMSET:
case TIOCMBIC:
case TIOCMBIS:
return tty_tiocmset(tty, cmd, p);
case TIOCGICOUNT:
return tty_tiocgicount(tty, p);
case TCFLSH:
switch (arg) {
case TCIFLUSH:
case TCIOFLUSH:
/* flush tty buffer and allow ldisc to process ioctl */
tty_buffer_flush(tty, NULL);
break;
}
break;
case TIOCSSERIAL:
return tty_tiocsserial(tty, p);
case TIOCGSERIAL:
return tty_tiocgserial(tty, p);
case TIOCGPTPEER:
/* Special because the struct file is needed */
return ptm_open_peer(file, tty, (int)arg);
default:
retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg);
if (retval != -ENOIOCTLCMD)
return retval;
}
if (tty->ops->ioctl) {
retval = tty->ops->ioctl(tty, cmd, arg);
if (retval != -ENOIOCTLCMD)
return retval;
}
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return hung_up_tty_ioctl(file, cmd, arg);
retval = -EINVAL;
if (ld->ops->ioctl) {
retval = ld->ops->ioctl(tty, cmd, arg);
if (retval == -ENOIOCTLCMD)
retval = -ENOTTY;
}
tty_ldisc_deref(ld);
return retval;
}
#ifdef CONFIG_COMPAT
struct serial_struct32 {
compat_int_t type;
compat_int_t line;
compat_uint_t port;
compat_int_t irq;
compat_int_t flags;
compat_int_t xmit_fifo_size;
compat_int_t custom_divisor;
compat_int_t baud_base;
unsigned short close_delay;
char io_type;
char reserved_char;
compat_int_t hub6;
unsigned short closing_wait; /* time to wait before closing */
unsigned short closing_wait2; /* no longer used... */
compat_uint_t iomem_base;
unsigned short iomem_reg_shift;
unsigned int port_high;
/* compat_ulong_t iomap_base FIXME */
compat_int_t reserved;
};
static int compat_tty_tiocsserial(struct tty_struct *tty,
struct serial_struct32 __user *ss)
{
struct serial_struct32 v32;
struct serial_struct v;
if (copy_from_user(&v32, ss, sizeof(*ss)))
return -EFAULT;
memcpy(&v, &v32, offsetof(struct serial_struct32, iomem_base));
v.iomem_base = compat_ptr(v32.iomem_base);
v.iomem_reg_shift = v32.iomem_reg_shift;
v.port_high = v32.port_high;
v.iomap_base = 0;
return tty_set_serial(tty, &v);
}
static int compat_tty_tiocgserial(struct tty_struct *tty,
struct serial_struct32 __user *ss)
{
struct serial_struct32 v32;
struct serial_struct v;
int err;
memset(&v, 0, sizeof(v));
memset(&v32, 0, sizeof(v32));
if (!tty->ops->get_serial)
return -ENOTTY;
err = tty->ops->get_serial(tty, &v);
if (!err) {
memcpy(&v32, &v, offsetof(struct serial_struct32, iomem_base));
v32.iomem_base = (unsigned long)v.iomem_base >> 32 ?
0xfffffff : ptr_to_compat(v.iomem_base);
v32.iomem_reg_shift = v.iomem_reg_shift;
v32.port_high = v.port_high;
if (copy_to_user(ss, &v32, sizeof(v32)))
err = -EFAULT;
}
return err;
}
static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
int retval = -ENOIOCTLCMD;
switch (cmd) {
case TIOCOUTQ:
case TIOCSTI:
case TIOCGWINSZ:
case TIOCSWINSZ:
case TIOCGEXCL:
case TIOCGETD:
case TIOCSETD:
case TIOCGDEV:
case TIOCMGET:
case TIOCMSET:
case TIOCMBIC:
case TIOCMBIS:
case TIOCGICOUNT:
case TIOCGPGRP:
case TIOCSPGRP:
case TIOCGSID:
case TIOCSERGETLSR:
case TIOCGRS485:
case TIOCSRS485:
#ifdef TIOCGETP
case TIOCGETP:
case TIOCSETP:
case TIOCSETN:
#endif
#ifdef TIOCGETC
case TIOCGETC:
case TIOCSETC:
#endif
#ifdef TIOCGLTC
case TIOCGLTC:
case TIOCSLTC:
#endif
case TCSETSF:
case TCSETSW:
case TCSETS:
case TCGETS:
#ifdef TCGETS2
case TCGETS2:
case TCSETSF2:
case TCSETSW2:
case TCSETS2:
#endif
case TCGETA:
case TCSETAF:
case TCSETAW:
case TCSETA:
case TIOCGLCKTRMIOS:
case TIOCSLCKTRMIOS:
#ifdef TCGETX
case TCGETX:
case TCSETX:
case TCSETXW:
case TCSETXF:
#endif
case TIOCGSOFTCAR:
case TIOCSSOFTCAR:
case PPPIOCGCHAN:
case PPPIOCGUNIT:
return tty_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
case TIOCCONS:
case TIOCEXCL:
case TIOCNXCL:
case TIOCVHANGUP:
case TIOCSBRK:
case TIOCCBRK:
case TCSBRK:
case TCSBRKP:
case TCFLSH:
case TIOCGPTPEER:
case TIOCNOTTY:
case TIOCSCTTY:
case TCXONC:
case TIOCMIWAIT:
case TIOCSERCONFIG:
return tty_ioctl(file, cmd, arg);
}
if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl"))
return -EINVAL;
switch (cmd) {
case TIOCSSERIAL:
return compat_tty_tiocsserial(tty, compat_ptr(arg));
case TIOCGSERIAL:
return compat_tty_tiocgserial(tty, compat_ptr(arg));
}
if (tty->ops->compat_ioctl) {
retval = tty->ops->compat_ioctl(tty, cmd, arg);
if (retval != -ENOIOCTLCMD)
return retval;
}
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return hung_up_tty_compat_ioctl(file, cmd, arg);
if (ld->ops->compat_ioctl)
retval = ld->ops->compat_ioctl(tty, cmd, arg);
if (retval == -ENOIOCTLCMD && ld->ops->ioctl)
retval = ld->ops->ioctl(tty, (unsigned long)compat_ptr(cmd),
arg);
tty_ldisc_deref(ld);
return retval;
}
#endif
static int this_tty(const void *t, struct file *file, unsigned fd)
{
if (likely(file->f_op->read_iter != tty_read))
return 0;
return file_tty(file) != t ? 0 : fd + 1;
}
/*
* This implements the "Secure Attention Key" --- the idea is to
* prevent trojan horses by killing all processes associated with this
* tty when the user hits the "Secure Attention Key". Required for
* super-paranoid applications --- see the Orange Book for more details.
*
* This code could be nicer; ideally it should send a HUP, wait a few
* seconds, then send a INT, and then a KILL signal. But you then
* have to coordinate with the init process, since all processes associated
* with the current tty must be dead before the new getty is allowed
* to spawn.
*
* If only it were correct ;-/ The current code has a nasty hole -
* it doesn't catch files in flight. We may send the descriptor to ourselves
* via an AF_UNIX socket, close it, and later fetch it from the socket. FIXME.
*
* Nasty bug: do_SAK is being called in interrupt context. This can
* deadlock. We punt it up to process context. AKPM - 16Mar2001
*/
void __do_SAK(struct tty_struct *tty)
{
struct task_struct *g, *p;
struct pid *session;
int i;
unsigned long flags;
spin_lock_irqsave(&tty->ctrl.lock, flags);
session = get_pid(tty->ctrl.session);
spin_unlock_irqrestore(&tty->ctrl.lock, flags);
tty_ldisc_flush(tty);
tty_driver_flush_buffer(tty);
read_lock(&tasklist_lock);
/* Kill the entire session */
do_each_pid_task(session, PIDTYPE_SID, p) {
tty_notice(tty, "SAK: killed process %d (%s): by session\n",
task_pid_nr(p), p->comm);
group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
} while_each_pid_task(session, PIDTYPE_SID, p);
/* Now kill any processes that happen to have the tty open */
for_each_process_thread(g, p) {
if (p->signal->tty == tty) {
tty_notice(tty, "SAK: killed process %d (%s): by controlling tty\n",
task_pid_nr(p), p->comm);
group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p,
PIDTYPE_SID);
continue;
}
task_lock(p);
i = iterate_fd(p->files, 0, this_tty, tty);
if (i != 0) {
tty_notice(tty, "SAK: killed process %d (%s): by fd#%d\n",
task_pid_nr(p), p->comm, i - 1);
group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p,
PIDTYPE_SID);
}
task_unlock(p);
}
read_unlock(&tasklist_lock);
put_pid(session);
}
static void do_SAK_work(struct work_struct *work)
{
struct tty_struct *tty =
container_of(work, struct tty_struct, SAK_work);
__do_SAK(tty);
}
/*
* The tq handling here is a little racy - tty->SAK_work may already be queued.
* Fortunately we don't need to worry, because if ->SAK_work is already queued,
* the values which we write to it will be identical to the values which it
* already has. --akpm
*/
void do_SAK(struct tty_struct *tty)
{
if (!tty)
return;
schedule_work(&tty->SAK_work);
}
EXPORT_SYMBOL(do_SAK);
/* Must put_device() after it's unused! */
static struct device *tty_get_device(struct tty_struct *tty)
{
dev_t devt = tty_devnum(tty);
return class_find_device_by_devt(&tty_class, devt);
}
/**
* alloc_tty_struct - allocate a new tty
* @driver: driver which will handle the returned tty
* @idx: minor of the tty
*
* This subroutine allocates and initializes a tty structure.
*
* Locking: none - @tty in question is not exposed at this point
*/
struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
{
struct tty_struct *tty;
tty = kzalloc(sizeof(*tty), GFP_KERNEL_ACCOUNT);
if (!tty)
return NULL;
kref_init(&tty->kref);
if (tty_ldisc_init(tty)) {
kfree(tty);
return NULL;
}
tty->ctrl.session = NULL;
tty->ctrl.pgrp = NULL;
mutex_init(&tty->legacy_mutex);
mutex_init(&tty->throttle_mutex);
init_rwsem(&tty->termios_rwsem);
mutex_init(&tty->winsize_mutex);
init_ldsem(&tty->ldisc_sem);
init_waitqueue_head(&tty->write_wait);
init_waitqueue_head(&tty->read_wait);
INIT_WORK(&tty->hangup_work, do_tty_hangup);
mutex_init(&tty->atomic_write_lock);
spin_lock_init(&tty->ctrl.lock);
spin_lock_init(&tty->flow.lock);
spin_lock_init(&tty->files_lock);
INIT_LIST_HEAD(&tty->tty_files);
INIT_WORK(&tty->SAK_work, do_SAK_work);
tty->driver = driver;
tty->ops = driver->ops;
tty->index = idx;
tty_line_name(driver, idx, tty->name);
tty->dev = tty_get_device(tty);
return tty;
}
/**
* tty_put_char - write one character to a tty
* @tty: tty
* @ch: character to write
*
* Write one byte to the @tty using the provided @tty->ops->put_char() method
* if present.
*
* Note: the specific put_char operation in the driver layer may go
* away soon. Don't call it directly, use this method
*
* Return: the number of characters successfully output.
*/
int tty_put_char(struct tty_struct *tty, unsigned char ch)
{
if (tty->ops->put_char)
return tty->ops->put_char(tty, ch);
return tty->ops->write(tty, &ch, 1);
}
EXPORT_SYMBOL_GPL(tty_put_char);
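/*
 * Illustrative driver-side sketch, an assumption rather than code from this
 * file: a driver only needs put_char() when single-byte output is cheaper
 * than the generic write() fallback above, e.g. poking a TX FIFO directly:
 *
 * static int foo_put_char(struct tty_struct *tty, unsigned char ch)
 * {
 * struct foo_port *fp = tty->driver_data; // hypothetical port struct
 *
 * if (!foo_fifo_room(fp))
 * return 0; // nothing written
 * writeb(ch, fp->base + FOO_TX_FIFO);
 * return 1;
 * }
 */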
static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
unsigned int index, unsigned int count)
{
int err;
/* init here, since reused cdevs cause crashes */
driver->cdevs[index] = cdev_alloc();
if (!driver->cdevs[index])
return -ENOMEM;
driver->cdevs[index]->ops = &tty_fops;
driver->cdevs[index]->owner = driver->owner;
err = cdev_add(driver->cdevs[index], dev, count);
if (err)
kobject_put(&driver->cdevs[index]->kobj);
return err;
}
/**
* tty_register_device - register a tty device
* @driver: the tty driver that describes the tty device
* @index: the index in the tty driver for this tty device
* @device: a struct device that is associated with this tty device.
* This field is optional, if there is no known struct device
* for this tty device it can be set to NULL safely.
*
* This call is required to be made to register an individual tty device
* if the tty driver's flags have the %TTY_DRIVER_DYNAMIC_DEV bit set. If
* that bit is not set, this function should not be called by a tty
* driver.
*
* Locking: ??
*
* Return: A pointer to the struct device for this tty device (or
* ERR_PTR(-EFOO) on error).
*/
struct device *tty_register_device(struct tty_driver *driver, unsigned index,
struct device *device)
{
return tty_register_device_attr(driver, index, device, NULL, NULL);
}
EXPORT_SYMBOL(tty_register_device);
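/*
 * Illustrative driver-side sketch, an assumption rather than code from this
 * file: a hotplug-style driver sets %TTY_DRIVER_DYNAMIC_DEV and registers
 * each line as the hardware appears, not at tty_register_driver() time:
 *
 * struct device *d = tty_register_device(foo_driver, idx, &pdev->dev);
 * if (IS_ERR(d)) // foo_driver/idx are hypothetical
 * return PTR_ERR(d);
 * ...
 * tty_unregister_device(foo_driver, idx); // on device removal
 */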
static void tty_device_create_release(struct device *dev)
{
dev_dbg(dev, "releasing...\n");
kfree(dev);
}
/**
* tty_register_device_attr - register a tty device
* @driver: the tty driver that describes the tty device
* @index: the index in the tty driver for this tty device
* @device: a struct device that is associated with this tty device.
* This field is optional, if there is no known struct device
* for this tty device it can be set to %NULL safely.
* @drvdata: Driver data to be set to device.
* @attr_grp: Attribute group to be set on device.
*
* This call is required to be made to register an individual tty device if the
* tty driver's flags have the %TTY_DRIVER_DYNAMIC_DEV bit set. If that bit is
* not set, this function should not be called by a tty driver.
*
* Locking: ??
*
* Return: A pointer to the struct device for this tty device (or
* ERR_PTR(-EFOO) on error).
*/
struct device *tty_register_device_attr(struct tty_driver *driver,
unsigned index, struct device *device,
void *drvdata,
const struct attribute_group **attr_grp)
{
char name[64];
dev_t devt = MKDEV(driver->major, driver->minor_start) + index;
struct ktermios *tp;
struct device *dev;
int retval;
if (index >= driver->num) {
pr_err("%s: Attempt to register invalid tty line number (%d)\n",
driver->name, index);
return ERR_PTR(-EINVAL);
}
if (driver->type == TTY_DRIVER_TYPE_PTY)
pty_line_name(driver, index, name);
else
tty_line_name(driver, index, name);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
dev->devt = devt;
dev->class = &tty_class;
dev->parent = device;
dev->release = tty_device_create_release;
dev_set_name(dev, "%s", name);
dev->groups = attr_grp;
dev_set_drvdata(dev, drvdata);
dev_set_uevent_suppress(dev, 1);
retval = device_register(dev);
if (retval)
goto err_put;
if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
/*
* Free any saved termios data so that the termios state is
* reset when reusing a minor number.
*/
tp = driver->termios[index];
if (tp) {
driver->termios[index] = NULL;
kfree(tp);
}
retval = tty_cdev_add(driver, devt, index, 1);
if (retval)
goto err_del;
}
dev_set_uevent_suppress(dev, 0);
kobject_uevent(&dev->kobj, KOBJ_ADD);
return dev;
err_del:
device_del(dev);
err_put:
put_device(dev);
return ERR_PTR(retval);
}
EXPORT_SYMBOL_GPL(tty_register_device_attr);
/**
* tty_unregister_device - unregister a tty device
* @driver: the tty driver that describes the tty device
* @index: the index in the tty driver for this tty device
*
* If a tty device is registered with a call to tty_register_device() then
* this function must be called when the tty device is gone.
*
* Locking: ??
*/
void tty_unregister_device(struct tty_driver *driver, unsigned index)
{
device_destroy(&tty_class, MKDEV(driver->major, driver->minor_start) + index);
if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
cdev_del(driver->cdevs[index]);
driver->cdevs[index] = NULL;
}
}
EXPORT_SYMBOL(tty_unregister_device);
/**
* __tty_alloc_driver -- allocate tty driver
* @lines: count of lines this driver can handle at most
* @owner: module which is responsible for this driver
* @flags: some of %TTY_DRIVER_ flags, will be set in driver->flags
*
* This should not be called directly; use one of the provided macros
* instead. Use IS_ERR() and friends on the return value.
*/
struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
unsigned long flags)
{
struct tty_driver *driver;
unsigned int cdevs = 1;
int err;
if (!lines || (flags & TTY_DRIVER_UNNUMBERED_NODE && lines > 1))
return ERR_PTR(-EINVAL);
driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver)
return ERR_PTR(-ENOMEM);
kref_init(&driver->kref);
driver->num = lines;
driver->owner = owner;
driver->flags = flags;
if (!(flags & TTY_DRIVER_DEVPTS_MEM)) {
driver->ttys = kcalloc(lines, sizeof(*driver->ttys),
GFP_KERNEL);
driver->termios = kcalloc(lines, sizeof(*driver->termios),
GFP_KERNEL);
if (!driver->ttys || !driver->termios) {
err = -ENOMEM;
goto err_free_all;
}
}
if (!(flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
driver->ports = kcalloc(lines, sizeof(*driver->ports),
GFP_KERNEL);
if (!driver->ports) {
err = -ENOMEM;
goto err_free_all;
}
cdevs = lines;
}
driver->cdevs = kcalloc(cdevs, sizeof(*driver->cdevs), GFP_KERNEL);
if (!driver->cdevs) {
err = -ENOMEM;
goto err_free_all;
}
return driver;
err_free_all:
kfree(driver->ports);
kfree(driver->ttys);
kfree(driver->termios);
kfree(driver->cdevs);
kfree(driver);
return ERR_PTR(err);
}
EXPORT_SYMBOL(__tty_alloc_driver);
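/*
 * Illustrative driver-side sketch, an assumption rather than code from this
 * file: callers normally use the tty_alloc_driver() wrapper macro, which
 * supplies THIS_MODULE, instead of calling __tty_alloc_driver() directly:
 *
 * struct tty_driver *drv = tty_alloc_driver(4, // 4 lines
 * TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
 * if (IS_ERR(drv))
 * return PTR_ERR(drv);
 */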
static void destruct_tty_driver(struct kref *kref)
{
struct tty_driver *driver = container_of(kref, struct tty_driver, kref);
int i;
struct ktermios *tp;
if (driver->flags & TTY_DRIVER_INSTALLED) {
for (i = 0; i < driver->num; i++) {
tp = driver->termios[i];
if (tp) {
driver->termios[i] = NULL;
kfree(tp);
}
if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV))
tty_unregister_device(driver, i);
}
proc_tty_unregister_driver(driver);
if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)
cdev_del(driver->cdevs[0]);
}
kfree(driver->cdevs);
kfree(driver->ports);
kfree(driver->termios);
kfree(driver->ttys);
kfree(driver);
}
/**
* tty_driver_kref_put -- drop a reference to a tty driver
* @driver: driver of which to drop the reference
*
* The final put will destroy and free up the driver.
*/
void tty_driver_kref_put(struct tty_driver *driver)
{
kref_put(&driver->kref, destruct_tty_driver);
}
EXPORT_SYMBOL(tty_driver_kref_put);
/**
* tty_register_driver -- register a tty driver
* @driver: driver to register
*
* Called by a tty driver to register itself.
*/
int tty_register_driver(struct tty_driver *driver)
{
int error;
int i;
dev_t dev;
struct device *d;
if (!driver->major) {
error = alloc_chrdev_region(&dev, driver->minor_start,
driver->num, driver->name);
if (!error) {
driver->major = MAJOR(dev);
driver->minor_start = MINOR(dev);
}
} else {
dev = MKDEV(driver->major, driver->minor_start);
error = register_chrdev_region(dev, driver->num, driver->name);
}
if (error < 0)
goto err;
if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC) {
error = tty_cdev_add(driver, dev, 0, driver->num);
if (error)
goto err_unreg_char;
}
mutex_lock(&tty_mutex);
list_add(&driver->tty_drivers, &tty_drivers);
mutex_unlock(&tty_mutex);
if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) {
for (i = 0; i < driver->num; i++) {
d = tty_register_device(driver, i, NULL);
if (IS_ERR(d)) {
error = PTR_ERR(d);
goto err_unreg_devs;
}
}
}
proc_tty_register_driver(driver);
driver->flags |= TTY_DRIVER_INSTALLED;
return 0;
err_unreg_devs:
for (i--; i >= 0; i--)
tty_unregister_device(driver, i);
mutex_lock(&tty_mutex);
list_del(&driver->tty_drivers);
mutex_unlock(&tty_mutex);
err_unreg_char:
unregister_chrdev_region(dev, driver->num);
err:
return error;
}
EXPORT_SYMBOL(tty_register_driver);
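/*
 * Illustrative driver-side sketch, an assumption rather than code from this
 * file: a typical module init pairs allocation, field setup and
 * registration, unwinding with tty_driver_kref_put() on failure:
 *
 * drv->driver_name = "foo"; // hypothetical driver
 * drv->name = "ttyFOO";
 * drv->type = TTY_DRIVER_TYPE_SERIAL;
 * drv->init_termios = tty_std_termios;
 * tty_set_operations(drv, &foo_ops);
 * ret = tty_register_driver(drv);
 * if (ret)
 * tty_driver_kref_put(drv);
 */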
/**
* tty_unregister_driver -- unregister a tty driver
* @driver: driver to unregister
*
* Called by a tty driver to unregister itself.
*/
void tty_unregister_driver(struct tty_driver *driver)
{
unregister_chrdev_region(MKDEV(driver->major, driver->minor_start),
driver->num);
mutex_lock(&tty_mutex);
list_del(&driver->tty_drivers);
mutex_unlock(&tty_mutex);
}
EXPORT_SYMBOL(tty_unregister_driver);
dev_t tty_devnum(struct tty_struct *tty)
{
return MKDEV(tty->driver->major, tty->driver->minor_start) + tty->index;
}
EXPORT_SYMBOL(tty_devnum);
void tty_default_fops(struct file_operations *fops)
{
*fops = tty_fops;
}
static char *tty_devnode(const struct device *dev, umode_t *mode)
{
if (!mode)
return NULL;
if (dev->devt == MKDEV(TTYAUX_MAJOR, 0) ||
dev->devt == MKDEV(TTYAUX_MAJOR, 2))
*mode = 0666;
return NULL;
}
const struct class tty_class = {
.name = "tty",
.devnode = tty_devnode,
};
static int __init tty_class_init(void)
{
return class_register(&tty_class);
}
postcore_initcall(tty_class_init);
/* 3/2004 jmc: why do these devices exist? */
static struct cdev tty_cdev, console_cdev;
static ssize_t show_cons_active(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct console *cs[16];
int i = 0;
struct console *c;
ssize_t count = 0;
/*
* Hold the console_list_lock to guarantee that no consoles are
* unregistered until all console processing is complete.
* This also allows safe traversal of the console list and
* race-free reading of @flags.
*/
console_list_lock();
for_each_console(c) {
if (!c->device)
continue;
if (!c->write)
continue;
if ((c->flags & CON_ENABLED) == 0)
continue;
cs[i++] = c;
if (i >= ARRAY_SIZE(cs))
break;
}
/*
* Take console_lock to serialize device() callback with
* other console operations. For example, fg_console is
* modified under console_lock when switching vt.
*/
console_lock();
while (i--) {
int index = cs[i]->index;
struct tty_driver *drv = cs[i]->device(cs[i], &index);
/* don't resolve tty0 as some programs depend on it */
if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR))
count += tty_line_name(drv, index, buf + count);
else
count += sprintf(buf + count, "%s%d",
cs[i]->name, cs[i]->index);
count += sprintf(buf + count, "%c", i ? ' ':'\n');
}
console_unlock();
console_list_unlock();
return count;
}
static DEVICE_ATTR(active, S_IRUGO, show_cons_active, NULL);
static struct attribute *cons_dev_attrs[] = {
&dev_attr_active.attr,
NULL
};
ATTRIBUTE_GROUPS(cons_dev);
static struct device *consdev;
void console_sysfs_notify(void)
{
if (consdev)
sysfs_notify(&consdev->kobj, NULL, "active");
}
static struct ctl_table tty_table[] = {
{
.procname = "legacy_tiocsti",
.data = &tty_legacy_tiocsti,
.maxlen = sizeof(tty_legacy_tiocsti),
.mode = 0644,
.proc_handler = proc_dobool,
},
{
.procname = "ldisc_autoload",
.data = &tty_ldisc_autoload,
.maxlen = sizeof(tty_ldisc_autoload),
.mode = 0644,
.proc_handler = proc_dointvec,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{ }
};
/*
* Ok, now we can initialize the rest of the tty devices and can count
* on memory allocations, interrupts, etc.
*/
int __init tty_init(void)
{
register_sysctl_init("dev/tty", tty_table);
cdev_init(&tty_cdev, &tty_fops);
if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
panic("Couldn't register /dev/tty driver\n");
device_create(&tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
cdev_init(&console_cdev, &console_fops);
if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
panic("Couldn't register /dev/console driver\n");
consdev = device_create_with_groups(&tty_class, NULL,
MKDEV(TTYAUX_MAJOR, 1), NULL,
cons_dev_groups, "console");
if (IS_ERR(consdev))
consdev = NULL;
#ifdef CONFIG_VT
vty_init(&console_fops);
#endif
return 0;
}
| linux-master | drivers/tty/tty_io.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>
#include "tty.h"
#undef LDISC_DEBUG_HANGUP
#ifdef LDISC_DEBUG_HANGUP
#define tty_ldisc_debug(tty, f, args...) tty_debug(tty, f, ##args)
#else
#define tty_ldisc_debug(tty, f, args...)
#endif
/* lockdep nested classes for tty->ldisc_sem */
enum {
LDISC_SEM_NORMAL,
LDISC_SEM_OTHER,
};
/*
* This guards the refcounted line discipline lists. The lock
* must be taken with irqs off because there are hangup path
* callers who will do ldisc lookups and cannot sleep.
*/
static DEFINE_RAW_SPINLOCK(tty_ldiscs_lock);
/* Line disc dispatch table */
static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
/**
* tty_register_ldisc - install a line discipline
* @new_ldisc: pointer to the ldisc object
*
* Installs a new line discipline into the kernel. The discipline is set up as
* unreferenced and then made available to the kernel from this point onwards.
*
* Locking: takes %tty_ldiscs_lock to guard against ldisc races
*/
int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc)
{
unsigned long flags;
if (new_ldisc->num < N_TTY || new_ldisc->num >= NR_LDISCS)
return -EINVAL;
raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
tty_ldiscs[new_ldisc->num] = new_ldisc;
raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
return 0;
}
EXPORT_SYMBOL(tty_register_ldisc);
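/*
 * Illustrative sketch, an assumption rather than code from this file: a
 * line discipline module fills in a tty_ldisc_ops with a discipline number
 * from the uapi headers and registers it at module init:
 *
 * static struct tty_ldisc_ops foo_ldisc_ops = { // hypothetical ldisc
 * .owner = THIS_MODULE,
 * .num = N_MOUSE,
 * .name = "foo",
 * .open = foo_ldisc_open,
 * .close = foo_ldisc_close,
 * };
 * ...
 * err = tty_register_ldisc(&foo_ldisc_ops);
 */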
/**
* tty_unregister_ldisc - unload a line discipline
* @ldisc: ldisc number
*
* Remove a line discipline from the kernel providing it is not currently in
* use.
*
* Locking: takes %tty_ldiscs_lock to guard against ldisc races
*/
void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc)
{
unsigned long flags;
raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
tty_ldiscs[ldisc->num] = NULL;
raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
}
EXPORT_SYMBOL(tty_unregister_ldisc);
static struct tty_ldisc_ops *get_ldops(int disc)
{
unsigned long flags;
struct tty_ldisc_ops *ldops, *ret;
raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
ret = ERR_PTR(-EINVAL);
ldops = tty_ldiscs[disc];
if (ldops) {
ret = ERR_PTR(-EAGAIN);
if (try_module_get(ldops->owner))
ret = ldops;
}
raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
return ret;
}
static void put_ldops(struct tty_ldisc_ops *ldops)
{
unsigned long flags;
raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
module_put(ldops->owner);
raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
}
int tty_ldisc_autoload = IS_BUILTIN(CONFIG_LDISC_AUTOLOAD);
/**
* tty_ldisc_get - take a reference to an ldisc
* @tty: tty device
* @disc: ldisc number
*
* Takes a reference to a line discipline. Deals with refcounts and module
* locking counts. If the discipline is not available, its module is loaded, if
* possible.
*
* Returns:
* * -%EINVAL if the discipline index is not [%N_TTY .. %NR_LDISCS] or if the
* discipline is not registered
* * -%EAGAIN if request_module() failed to load or register the discipline
* * -%ENOMEM if allocation failure
* * Otherwise, returns a pointer to the discipline and bumps the ref count
*
* Locking: takes %tty_ldiscs_lock to guard against ldisc races
*/
static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
{
struct tty_ldisc *ld;
struct tty_ldisc_ops *ldops;
if (disc < N_TTY || disc >= NR_LDISCS)
return ERR_PTR(-EINVAL);
/*
* Get the ldisc ops - we may need to request them to be loaded
* dynamically and try again.
*/
ldops = get_ldops(disc);
if (IS_ERR(ldops)) {
if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
return ERR_PTR(-EPERM);
request_module("tty-ldisc-%d", disc);
ldops = get_ldops(disc);
if (IS_ERR(ldops))
return ERR_CAST(ldops);
}
/*
* There is no way to handle allocation failure of only 16 bytes.
* Let's simplify error handling and save more memory.
*/
ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
ld->ops = ldops;
ld->tty = tty;
return ld;
}
/**
* tty_ldisc_put - release the ldisc
* @ld: ldisc to release
*
* Complement of tty_ldisc_get().
*/
static void tty_ldisc_put(struct tty_ldisc *ld)
{
if (WARN_ON_ONCE(!ld))
return;
put_ldops(ld->ops);
kfree(ld);
}
static void *tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos)
{
return (*pos < NR_LDISCS) ? pos : NULL;
}
static void *tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return (*pos < NR_LDISCS) ? pos : NULL;
}
static void tty_ldiscs_seq_stop(struct seq_file *m, void *v)
{
}
static int tty_ldiscs_seq_show(struct seq_file *m, void *v)
{
int i = *(loff_t *)v;
struct tty_ldisc_ops *ldops;
ldops = get_ldops(i);
if (IS_ERR(ldops))
return 0;
seq_printf(m, "%-10s %2d\n", ldops->name ? ldops->name : "???", i);
put_ldops(ldops);
return 0;
}
const struct seq_operations tty_ldiscs_seq_ops = {
.start = tty_ldiscs_seq_start,
.next = tty_ldiscs_seq_next,
.stop = tty_ldiscs_seq_stop,
.show = tty_ldiscs_seq_show,
};
/**
* tty_ldisc_ref_wait - wait for the tty ldisc
* @tty: tty device
*
* Dereference the line discipline for the terminal and take a reference to it.
* If the line discipline is in flux then wait patiently until it changes.
*
* Returns: %NULL if the tty has been hungup and not re-opened with a new file
* descriptor, otherwise valid ldisc reference
*
* Note 1: Must not be called from an IRQ/timer context. The caller must also
* be careful not to hold other locks that will deadlock against a discipline
* change, such as an existing ldisc reference (which we check for).
*
* Note 2: a file_operations routine (read/poll/write) should use this function
* to wait for any ldisc lifetime events to finish.
*/
struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
{
struct tty_ldisc *ld;
ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
ld = tty->ldisc;
if (!ld)
ldsem_up_read(&tty->ldisc_sem);
return ld;
}
EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
/**
* tty_ldisc_ref - get the tty ldisc
* @tty: tty device
*
* Dereference the line discipline for the terminal and take a reference to it.
* If the line discipline is in flux then return %NULL. Can be called from IRQ
* and timer functions.
*/
struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
{
struct tty_ldisc *ld = NULL;
if (ldsem_down_read_trylock(&tty->ldisc_sem)) {
ld = tty->ldisc;
if (!ld)
ldsem_up_read(&tty->ldisc_sem);
}
return ld;
}
EXPORT_SYMBOL_GPL(tty_ldisc_ref);
/**
* tty_ldisc_deref - free a tty ldisc reference
* @ld: reference to free up
*
* Undoes the effect of tty_ldisc_ref() or tty_ldisc_ref_wait(). May be called
* in IRQ context.
*/
void tty_ldisc_deref(struct tty_ldisc *ld)
{
ldsem_up_read(&ld->tty->ldisc_sem);
}
EXPORT_SYMBOL_GPL(tty_ldisc_deref);
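/*
 * Illustrative sketch, an assumption rather than code from this file:
 * callers pair every reference with a deref, using the trylock variant
 * from atomic context since it may return NULL while the ldisc is in flux:
 *
 * struct tty_ldisc *ld = tty_ldisc_ref(tty);
 *
 * if (ld) {
 * if (ld->ops->write_wakeup)
 * ld->ops->write_wakeup(tty);
 * tty_ldisc_deref(ld);
 * }
 */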
static inline int
__tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
{
return ldsem_down_write(&tty->ldisc_sem, timeout);
}
static inline int
__tty_ldisc_lock_nested(struct tty_struct *tty, unsigned long timeout)
{
return ldsem_down_write_nested(&tty->ldisc_sem,
LDISC_SEM_OTHER, timeout);
}
static inline void __tty_ldisc_unlock(struct tty_struct *tty)
{
ldsem_up_write(&tty->ldisc_sem);
}
int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
{
int ret;
/* Kindly asking blocked readers to release the read side */
set_bit(TTY_LDISC_CHANGING, &tty->flags);
wake_up_interruptible_all(&tty->read_wait);
wake_up_interruptible_all(&tty->write_wait);
ret = __tty_ldisc_lock(tty, timeout);
if (!ret)
return -EBUSY;
set_bit(TTY_LDISC_HALTED, &tty->flags);
return 0;
}
void tty_ldisc_unlock(struct tty_struct *tty)
{
clear_bit(TTY_LDISC_HALTED, &tty->flags);
/* Can be cleared here - ldisc_unlock will wake up writers first */
clear_bit(TTY_LDISC_CHANGING, &tty->flags);
__tty_ldisc_unlock(tty);
}
static int
tty_ldisc_lock_pair_timeout(struct tty_struct *tty, struct tty_struct *tty2,
unsigned long timeout)
{
int ret;
if (tty < tty2) {
ret = __tty_ldisc_lock(tty, timeout);
if (ret) {
ret = __tty_ldisc_lock_nested(tty2, timeout);
if (!ret)
__tty_ldisc_unlock(tty);
}
} else {
/* if this is possible, it has lots of implications */
WARN_ON_ONCE(tty == tty2);
if (tty2 && tty != tty2) {
ret = __tty_ldisc_lock(tty2, timeout);
if (ret) {
ret = __tty_ldisc_lock_nested(tty, timeout);
if (!ret)
__tty_ldisc_unlock(tty2);
}
} else
ret = __tty_ldisc_lock(tty, timeout);
}
if (!ret)
return -EBUSY;
set_bit(TTY_LDISC_HALTED, &tty->flags);
if (tty2)
set_bit(TTY_LDISC_HALTED, &tty2->flags);
return 0;
}
static void tty_ldisc_lock_pair(struct tty_struct *tty, struct tty_struct *tty2)
{
tty_ldisc_lock_pair_timeout(tty, tty2, MAX_SCHEDULE_TIMEOUT);
}
static void tty_ldisc_unlock_pair(struct tty_struct *tty,
struct tty_struct *tty2)
{
__tty_ldisc_unlock(tty);
if (tty2)
__tty_ldisc_unlock(tty2);
}
/**
* tty_ldisc_flush - flush line discipline queue
* @tty: tty to flush ldisc for
*
* Flush the line discipline queue (if any) and the tty flip buffers for this
* @tty.
*/
void tty_ldisc_flush(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_ref(tty);
tty_buffer_flush(tty, ld);
if (ld)
tty_ldisc_deref(ld);
}
EXPORT_SYMBOL_GPL(tty_ldisc_flush);
/**
* tty_set_termios_ldisc - set ldisc field
* @tty: tty structure
* @disc: line discipline number
*
* This is probably overkill for real world processors but they are not on hot
* paths so a little discipline won't do any harm.
*
* The line discipline-related tty_struct fields are reset to prevent the ldisc
* driver from re-using stale information for the new ldisc instance.
*
* Locking: takes termios_rwsem
*/
static void tty_set_termios_ldisc(struct tty_struct *tty, int disc)
{
down_write(&tty->termios_rwsem);
tty->termios.c_line = disc;
up_write(&tty->termios_rwsem);
tty->disc_data = NULL;
tty->receive_room = 0;
}
/**
* tty_ldisc_open - open a line discipline
* @tty: tty we are opening the ldisc on
* @ld: discipline to open
*
* A helper opening method. Also a convenient debugging and check point.
*
* Locking: always called with BTM already held.
*/
static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
{
WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
if (ld->ops->open) {
int ret;
/* BTM here locks versus a hangup event */
ret = ld->ops->open(tty);
if (ret)
clear_bit(TTY_LDISC_OPEN, &tty->flags);
tty_ldisc_debug(tty, "%p: opened\n", ld);
return ret;
}
return 0;
}
/**
* tty_ldisc_close - close a line discipline
* @tty: tty we are opening the ldisc on
* @ld: discipline to close
*
* A helper close method. Also a convenient debugging and check point.
*/
static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
{
lockdep_assert_held_write(&tty->ldisc_sem);
WARN_ON(!test_bit(TTY_LDISC_OPEN, &tty->flags));
clear_bit(TTY_LDISC_OPEN, &tty->flags);
if (ld->ops->close)
ld->ops->close(tty);
tty_ldisc_debug(tty, "%p: closed\n", ld);
}
/**
* tty_ldisc_failto - helper for ldisc failback
* @tty: tty to open the ldisc on
* @ld: ldisc we are trying to fail back to
*
* Helper to try and recover a tty when switching back to the old ldisc fails
* and we need something attached.
*/
static int tty_ldisc_failto(struct tty_struct *tty, int ld)
{
struct tty_ldisc *disc = tty_ldisc_get(tty, ld);
int r;
lockdep_assert_held_write(&tty->ldisc_sem);
if (IS_ERR(disc))
return PTR_ERR(disc);
tty->ldisc = disc;
tty_set_termios_ldisc(tty, ld);
r = tty_ldisc_open(tty, disc);
if (r < 0)
tty_ldisc_put(disc);
return r;
}
/**
* tty_ldisc_restore - helper for tty ldisc change
* @tty: tty to recover
* @old: previous ldisc
*
* Restore the previous line discipline or %N_TTY when a line discipline change
* fails due to an open error.
*/
static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
{
/* There is an outstanding reference here so this is safe */
if (tty_ldisc_failto(tty, old->ops->num) < 0) {
const char *name = tty_name(tty);
pr_warn("Falling back ldisc for %s.\n", name);
/*
* The traditional behaviour is to fall back to N_TTY; we
* want to avoid falling back to N_NULL unless we have no
* choice, to avoid the risk of breaking anything.
*/
if (tty_ldisc_failto(tty, N_TTY) < 0 &&
tty_ldisc_failto(tty, N_NULL) < 0)
panic("Couldn't open N_NULL ldisc for %s.", name);
}
}
/**
* tty_set_ldisc - set line discipline
* @tty: the terminal to set
* @disc: the line discipline number
*
* Set the discipline of a tty line. Must be called from a process context. The
* ldisc change logic has to protect itself against any overlapping ldisc
* change (including on the other end of pty pairs), the close of one side of a
* tty/pty pair, and eventually hangup.
*/
int tty_set_ldisc(struct tty_struct *tty, int disc)
{
int retval;
struct tty_ldisc *old_ldisc, *new_ldisc;
new_ldisc = tty_ldisc_get(tty, disc);
if (IS_ERR(new_ldisc))
return PTR_ERR(new_ldisc);
tty_lock(tty);
retval = tty_ldisc_lock(tty, 5 * HZ);
if (retval)
goto err;
if (!tty->ldisc) {
retval = -EIO;
goto out;
}
/* Check the no-op case */
if (tty->ldisc->ops->num == disc)
goto out;
if (test_bit(TTY_HUPPED, &tty->flags)) {
/* We were raced by hangup */
retval = -EIO;
goto out;
}
old_ldisc = tty->ldisc;
/* Shutdown the old discipline. */
tty_ldisc_close(tty, old_ldisc);
/* Now set up the new line discipline. */
tty->ldisc = new_ldisc;
tty_set_termios_ldisc(tty, disc);
retval = tty_ldisc_open(tty, new_ldisc);
if (retval < 0) {
/* Back to the old one or N_TTY if we can't */
tty_ldisc_put(new_ldisc);
tty_ldisc_restore(tty, old_ldisc);
}
if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
down_read(&tty->termios_rwsem);
tty->ops->set_ldisc(tty);
up_read(&tty->termios_rwsem);
}
/*
* At this point we hold a reference to the new ldisc and a
* reference to the old ldisc, or we hold two references to
* the old ldisc (if it was restored as part of error cleanup
* above). In either case, releasing a single reference from
* the old ldisc is correct.
*/
new_ldisc = old_ldisc;
out:
tty_ldisc_unlock(tty);
/*
* Restart the work queue in case no characters kick it off. Safe if
* already running.
*/
tty_buffer_restart_work(tty->port);
err:
tty_ldisc_put(new_ldisc); /* drop the extra reference */
tty_unlock(tty);
return retval;
}
EXPORT_SYMBOL_GPL(tty_set_ldisc);
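/*
 * Illustrative userspace sketch, not part of this file: the TIOCSETD ioctl
 * handled in tty_io.c funnels into tty_set_ldisc(), e.g. to attach the PPP
 * discipline to a serial line:
 *
 * int disc = N_PPP;
 * if (ioctl(fd, TIOCSETD, &disc) < 0)
 * perror("TIOCSETD");
 */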
/**
* tty_ldisc_kill - teardown ldisc
* @tty: tty being released
*
* Perform final close of the ldisc and reset @tty->ldisc
*/
static void tty_ldisc_kill(struct tty_struct *tty)
{
lockdep_assert_held_write(&tty->ldisc_sem);
if (!tty->ldisc)
return;
/*
* Now kill off the ldisc
*/
tty_ldisc_close(tty, tty->ldisc);
tty_ldisc_put(tty->ldisc);
/* Force an oops if we mess this up */
tty->ldisc = NULL;
}
/**
* tty_reset_termios - reset terminal state
* @tty: tty to reset
*
* Restore a terminal to the driver default state.
*/
static void tty_reset_termios(struct tty_struct *tty)
{
down_write(&tty->termios_rwsem);
tty->termios = tty->driver->init_termios;
tty->termios.c_ispeed = tty_termios_input_baud_rate(&tty->termios);
tty->termios.c_ospeed = tty_termios_baud_rate(&tty->termios);
up_write(&tty->termios_rwsem);
}
/**
* tty_ldisc_reinit - reinitialise the tty ldisc
* @tty: tty to reinit
* @disc: line discipline to reinitialize
*
* Completely reinitialize the line discipline state, by closing the current
* instance, if there is one, and opening a new instance. If an error occurs
* opening the new non-%N_TTY instance, the instance is dropped and @tty->ldisc
* reset to %NULL. The caller can then retry with %N_TTY instead.
*
* Returns: 0 if successful, otherwise error code < 0
*/
int tty_ldisc_reinit(struct tty_struct *tty, int disc)
{
struct tty_ldisc *ld;
int retval;
lockdep_assert_held_write(&tty->ldisc_sem);
ld = tty_ldisc_get(tty, disc);
if (IS_ERR(ld)) {
BUG_ON(disc == N_TTY);
return PTR_ERR(ld);
}
if (tty->ldisc) {
tty_ldisc_close(tty, tty->ldisc);
tty_ldisc_put(tty->ldisc);
}
/* switch the line discipline */
tty->ldisc = ld;
tty_set_termios_ldisc(tty, disc);
retval = tty_ldisc_open(tty, tty->ldisc);
if (retval) {
tty_ldisc_put(tty->ldisc);
tty->ldisc = NULL;
}
return retval;
}
/**
* tty_ldisc_hangup - hangup ldisc reset
* @tty: tty being hung up
* @reinit: whether to re-initialise the tty
*
* Some tty devices reset their termios when they receive a hangup event. In
* that situation we must also switch back to %N_TTY properly before we reset
* the termios data.
*
* Locking: We can take the ldisc mutex as the rest of the code is careful to
* allow for this.
*
* In the pty pair case this occurs in the close() path of the tty itself so we
* must be careful about locking rules.
*/
void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
{
struct tty_ldisc *ld;
tty_ldisc_debug(tty, "%p: hangup\n", tty->ldisc);
ld = tty_ldisc_ref(tty);
if (ld != NULL) {
if (ld->ops->flush_buffer)
ld->ops->flush_buffer(tty);
tty_driver_flush_buffer(tty);
if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
ld->ops->write_wakeup)
ld->ops->write_wakeup(tty);
if (ld->ops->hangup)
ld->ops->hangup(tty);
tty_ldisc_deref(ld);
}
wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
/*
* Shutdown the current line discipline, and reset it to
* N_TTY if need be.
*
* Avoid racing set_ldisc or tty_ldisc_release
*/
tty_ldisc_lock(tty, MAX_SCHEDULE_TIMEOUT);
if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
tty_reset_termios(tty);
if (tty->ldisc) {
if (reinit) {
if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 &&
tty_ldisc_reinit(tty, N_TTY) < 0)
WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0);
} else
tty_ldisc_kill(tty);
}
tty_ldisc_unlock(tty);
}
/**
* tty_ldisc_setup - open line discipline
* @tty: tty being set up
* @o_tty: pair tty for pty/tty pairs
*
* Called during the initial open of a tty/pty pair in order to set up the line
* disciplines and bind them to the @tty. This has no locking issues as the
* device isn't yet active.
*/
int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
{
int retval = tty_ldisc_open(tty, tty->ldisc);
if (retval)
return retval;
if (o_tty) {
/*
* Called without o_tty->ldisc_sem held, as o_tty has just been
* allocated and no one has a reference to it.
*/
retval = tty_ldisc_open(o_tty, o_tty->ldisc);
if (retval) {
tty_ldisc_close(tty, tty->ldisc);
return retval;
}
}
return 0;
}
/**
* tty_ldisc_release - release line discipline
* @tty: tty being shut down (or one end of pty pair)
*
* Called during the final close of a tty or a pty pair in order to shut down
* the line discipline layer. On exit, each tty's ldisc is %NULL.
*/
void tty_ldisc_release(struct tty_struct *tty)
{
struct tty_struct *o_tty = tty->link;
/*
* Shutdown this line discipline. As this is the final close,
* it does not race with the set_ldisc code path.
*/
tty_ldisc_lock_pair(tty, o_tty);
tty_ldisc_kill(tty);
if (o_tty)
tty_ldisc_kill(o_tty);
tty_ldisc_unlock_pair(tty, o_tty);
/*
* And the memory resources remaining (buffers, termios) will be
* disposed of when the kref hits zero
*/
tty_ldisc_debug(tty, "released\n");
}
/**
* tty_ldisc_init - ldisc setup for new tty
* @tty: tty being allocated
*
* Set up the line discipline objects for a newly allocated tty. Note that the
* tty structure is not completely set up when this call is made.
*/
int tty_ldisc_init(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
if (IS_ERR(ld))
return PTR_ERR(ld);
tty->ldisc = ld;
return 0;
}
/**
* tty_ldisc_deinit - ldisc cleanup for new tty
* @tty: tty that was allocated recently
*
* The tty structure must not be completely set up (tty_ldisc_setup()) when
* this call is made.
*/
void tty_ldisc_deinit(struct tty_struct *tty)
{
/* no ldisc_sem, tty is being destroyed */
if (tty->ldisc)
tty_ldisc_put(tty->ldisc);
tty->ldisc = NULL;
}
| linux-master | drivers/tty/tty_ldisc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tty buffer allocation management
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include "tty.h"
#define MIN_TTYB_SIZE 256
#define TTYB_ALIGN_MASK 0xff
/*
* Byte threshold to limit memory consumption for flip buffers.
* The actual memory limit is > 2x this amount.
*/
#define TTYB_DEFAULT_MEM_LIMIT (640 * 1024UL)
/*
* We default to dicing tty buffer allocations to this many characters
* in order to avoid multiple page allocations. We know the size of
* tty_buffer itself but it must also be taken into account that the
* buffer is 256 byte aligned. See tty_buffer_find for the allocation
* logic this must match.
*/
#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~TTYB_ALIGN_MASK)
/**
* tty_buffer_lock_exclusive - gain exclusive access to buffer
* @port: tty port owning the flip buffer
*
* Guarantees safe use of the &tty_ldisc_ops.receive_buf() method by excluding
* the buffer work and any pending flush from using the flip buffer. Data can
* continue to be added concurrently to the flip buffer from the driver side.
*
* See also tty_buffer_unlock_exclusive().
*/
void tty_buffer_lock_exclusive(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
atomic_inc(&buf->priority);
mutex_lock(&buf->lock);
}
EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);
/**
* tty_buffer_unlock_exclusive - release exclusive access
* @port: tty port owning the flip buffer
*
* The buffer work is restarted if there is data in the flip buffer.
*
* See also tty_buffer_lock_exclusive().
*/
void tty_buffer_unlock_exclusive(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
int restart;
restart = buf->head->commit != buf->head->read;
atomic_dec(&buf->priority);
mutex_unlock(&buf->lock);
if (restart)
queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
/**
* tty_buffer_space_avail - return unused buffer space
* @port: tty port owning the flip buffer
*
* Returns: the # of bytes which can be written by the driver without reaching
* the buffer limit.
*
* Note: this does not guarantee that memory is available to write the returned
* # of bytes (use tty_prepare_flip_string() to pre-allocate if memory
* guarantee is required).
*/
unsigned int tty_buffer_space_avail(struct tty_port *port)
{
int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);
return max(space, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_space_avail);
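/*
 * Illustrative driver-side sketch, an assumption rather than code from this
 * file: the returned headroom can drive input flow control, e.g. dropping
 * RTS before the flip buffers hit their memory limit:
 *
 * if (tty_buffer_space_avail(port) < FOO_RX_WATERMARK) // hypothetical
 * foo_set_rts(fp, false); // stop the remote sender
 */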
static void tty_buffer_reset(struct tty_buffer *p, size_t size)
{
p->used = 0;
p->size = size;
p->next = NULL;
p->commit = 0;
p->lookahead = 0;
p->read = 0;
p->flags = true;
}
/**
* tty_buffer_free_all - free buffers used by a tty
* @port: tty port to free from
*
* Remove all the buffers pending on a tty whether queued with data or in the
* free ring. Must be called when the tty is no longer in use.
*/
void tty_buffer_free_all(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
struct tty_buffer *p, *next;
struct llist_node *llist;
unsigned int freed = 0;
int still_used;
while ((p = buf->head) != NULL) {
buf->head = p->next;
freed += p->size;
if (p->size > 0)
kfree(p);
}
llist = llist_del_all(&buf->free);
llist_for_each_entry_safe(p, next, llist, free)
kfree(p);
tty_buffer_reset(&buf->sentinel, 0);
buf->head = &buf->sentinel;
buf->tail = &buf->sentinel;
still_used = atomic_xchg(&buf->mem_used, 0);
WARN(still_used != freed, "we still have not freed %d bytes!",
still_used - freed);
}
/**
* tty_buffer_alloc - allocate a tty buffer
* @port: tty port
* @size: desired size (characters)
*
* Allocate a new tty buffer to hold the desired number of characters. We
* round our buffers off in 256 character chunks to get better allocation
* behaviour.
*
* Returns: %NULL if out of memory or the allocation would exceed the per
* device queue.
*/
static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
struct llist_node *free;
struct tty_buffer *p;
/* Round the buffer size out */
size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);
if (size <= MIN_TTYB_SIZE) {
free = llist_del_first(&port->buf.free);
if (free) {
p = llist_entry(free, struct tty_buffer, free);
goto found;
}
}
/* Should possibly check if this fails for the largest buffer we
* have queued and recycle that?
*/
if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
return NULL;
p = kmalloc(struct_size(p, data, 2 * size), GFP_ATOMIC | __GFP_NOWARN);
if (p == NULL)
return NULL;
found:
tty_buffer_reset(p, size);
atomic_add(size, &port->buf.mem_used);
return p;
}
/**
* tty_buffer_free - free a tty buffer
* @port: tty port owning the buffer
* @b: the buffer to free
*
* Free a tty buffer, or add it to the free list according to our internal
* strategy.
*/
static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
{
struct tty_bufhead *buf = &port->buf;
/* Dumb strategy for now - should keep some stats */
WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);
if (b->size > MIN_TTYB_SIZE)
kfree(b);
else if (b->size > 0)
llist_add(&b->free, &buf->free);
}
/**
* tty_buffer_flush - flush full tty buffers
* @tty: tty to flush
* @ld: optional ldisc ptr (must be referenced)
*
* Flush all the buffers containing receive data. If @ld != %NULL, flush the
* ldisc input buffer.
*
* Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'.
*/
void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
{
struct tty_port *port = tty->port;
struct tty_bufhead *buf = &port->buf;
struct tty_buffer *next;
atomic_inc(&buf->priority);
mutex_lock(&buf->lock);
/* paired w/ release in __tty_buffer_request_room; ensures there are
* no pending memory accesses to the freed buffer
*/
while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
tty_buffer_free(port, buf->head);
buf->head = next;
}
buf->head->read = buf->head->commit;
buf->head->lookahead = buf->head->read;
if (ld && ld->ops->flush_buffer)
ld->ops->flush_buffer(tty);
atomic_dec(&buf->priority);
mutex_unlock(&buf->lock);
}
/**
* __tty_buffer_request_room - grow tty buffer if needed
* @port: tty port
* @size: size desired
* @flags: buffer has to store flags along character data
*
* Make at least @size bytes of linear space available for the tty buffer.
*
* Will change over to a new buffer if the current buffer is encoded as
* %TTY_NORMAL (so has no flags buffer) and the new buffer requires a flags
* buffer.
*
* Returns: the size we managed to find.
*/
static int __tty_buffer_request_room(struct tty_port *port, size_t size,
bool flags)
{
struct tty_bufhead *buf = &port->buf;
struct tty_buffer *n, *b = buf->tail;
size_t left = (b->flags ? 1 : 2) * b->size - b->used;
bool change = !b->flags && flags;
if (!change && left >= size)
return size;
/* This is the slow path - looking for new buffers to use */
n = tty_buffer_alloc(port, size);
if (n == NULL)
return change ? 0 : left;
n->flags = flags;
buf->tail = n;
/*
* Paired w/ acquire in flush_to_ldisc() and lookahead_bufs()
* ensures they see all buffer data.
*/
smp_store_release(&b->commit, b->used);
/*
* Paired w/ acquire in flush_to_ldisc() and lookahead_bufs()
* ensures the latest commit value can be read before the head
* is advanced to the next buffer.
*/
smp_store_release(&b->next, n);
return size;
}
int tty_buffer_request_room(struct tty_port *port, size_t size)
{
return __tty_buffer_request_room(port, size, true);
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
size_t __tty_insert_flip_string_flags(struct tty_port *port, const u8 *chars,
const u8 *flags, bool mutable_flags,
size_t size)
{
bool need_flags = mutable_flags || flags[0] != TTY_NORMAL;
size_t copied = 0;
do {
size_t goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
size_t space = __tty_buffer_request_room(port, goal, need_flags);
struct tty_buffer *tb = port->buf.tail;
if (unlikely(space == 0))
break;
memcpy(char_buf_ptr(tb, tb->used), chars, space);
if (mutable_flags) {
memcpy(flag_buf_ptr(tb, tb->used), flags, space);
flags += space;
} else if (tb->flags) {
memset(flag_buf_ptr(tb, tb->used), flags[0], space);
} else {
/* tb->flags should be available once requested */
WARN_ON_ONCE(need_flags);
}
tb->used += space;
copied += space;
chars += space;
/* There is a small chance that we need to split the data over
* several buffers. If this is the case we must loop.
*/
} while (unlikely(size > copied));
return copied;
}
EXPORT_SYMBOL(__tty_insert_flip_string_flags);
/**
* tty_prepare_flip_string - make room for characters
* @port: tty port
* @chars: return pointer for character write area
* @size: desired size
*
* Prepare a block of space in the buffer for data.
*
* This is used for drivers that need their own block copy routines into the
* buffer. There is no guarantee the buffer is a DMA target!
*
* Returns: the length available and buffer pointer (@chars) to the space which
* is now allocated and accounted for as ready for normal characters.
*/
size_t tty_prepare_flip_string(struct tty_port *port, u8 **chars, size_t size)
{
size_t space = __tty_buffer_request_room(port, size, false);
if (likely(space)) {
struct tty_buffer *tb = port->buf.tail;
*chars = char_buf_ptr(tb, tb->used);
if (tb->flags)
memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
tb->used += space;
}
return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
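/*
 * Illustrative driver-side sketch, an assumption rather than code from this
 * file: block-copy drivers reserve space first and fill it in place,
 * avoiding an intermediate bounce buffer:
 *
 * u8 *buf;
 * size_t len = tty_prepare_flip_string(port, &buf, want);
 *
 * if (len)
 * memcpy(buf, hw_rx_data, len); // hypothetical data source
 * tty_flip_buffer_push(port);
 */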
/**
* tty_ldisc_receive_buf - forward data to line discipline
* @ld: line discipline to process input
* @p: char buffer
* @f: %TTY_NORMAL, %TTY_BREAK, etc. flags buffer
* @count: number of bytes to process
*
* Callers other than flush_to_ldisc() need to exclude the kworker from
* concurrent use of the line discipline, see paste_selection().
*
* Returns: the number of bytes processed.
*/
size_t tty_ldisc_receive_buf(struct tty_ldisc *ld, const u8 *p, const u8 *f,
size_t count)
{
if (ld->ops->receive_buf2)
count = ld->ops->receive_buf2(ld->tty, p, f, count);
else {
count = min_t(size_t, count, ld->tty->receive_room);
if (count && ld->ops->receive_buf)
ld->ops->receive_buf(ld->tty, p, f, count);
}
return count;
}
EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);
static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
{
head->lookahead = max(head->lookahead, head->read);
while (head) {
struct tty_buffer *next;
unsigned int count;
/*
* Paired w/ release in __tty_buffer_request_room();
* ensures commit value read is not stale if the head
* is advancing to the next buffer.
*/
next = smp_load_acquire(&head->next);
/*
* Paired w/ release in __tty_buffer_request_room() or in
* tty_buffer_flush(); ensures we see the committed buffer data.
*/
count = smp_load_acquire(&head->commit) - head->lookahead;
if (!count) {
head = next;
continue;
}
if (port->client_ops->lookahead_buf) {
u8 *p, *f = NULL;
p = char_buf_ptr(head, head->lookahead);
if (head->flags)
f = flag_buf_ptr(head, head->lookahead);
port->client_ops->lookahead_buf(port, p, f, count);
}
head->lookahead += count;
}
}
static size_t
receive_buf(struct tty_port *port, struct tty_buffer *head, size_t count)
{
u8 *p = char_buf_ptr(head, head->read);
const u8 *f = NULL;
size_t n;
if (head->flags)
f = flag_buf_ptr(head, head->read);
n = port->client_ops->receive_buf(port, p, f, count);
if (n > 0)
memset(p, 0, n);
return n;
}
/**
* flush_to_ldisc - flush data from buffer to ldisc
* @work: tty structure passed from work queue.
*
* This routine is called out of the software interrupt to flush data from the
* buffer chain to the line discipline.
*
* The receive_buf() method is single threaded for each tty instance.
*
* Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'.
*/
static void flush_to_ldisc(struct work_struct *work)
{
struct tty_port *port = container_of(work, struct tty_port, buf.work);
struct tty_bufhead *buf = &port->buf;
mutex_lock(&buf->lock);
while (1) {
struct tty_buffer *head = buf->head;
struct tty_buffer *next;
size_t count, rcvd;
/* Ldisc or user is trying to gain exclusive access */
if (atomic_read(&buf->priority))
break;
/* paired w/ release in __tty_buffer_request_room();
* ensures commit value read is not stale if the head
* is advancing to the next buffer
*/
next = smp_load_acquire(&head->next);
/* paired w/ release in __tty_buffer_request_room() or in
* tty_buffer_flush(); ensures we see the committed buffer data
*/
count = smp_load_acquire(&head->commit) - head->read;
if (!count) {
if (next == NULL)
break;
buf->head = next;
tty_buffer_free(port, head);
continue;
}
rcvd = receive_buf(port, head, count);
head->read += rcvd;
if (rcvd < count)
lookahead_bufs(port, head);
if (!rcvd)
break;
if (need_resched())
cond_resched();
}
mutex_unlock(&buf->lock);
}
static inline void tty_flip_buffer_commit(struct tty_buffer *tail)
{
/*
* Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
* buffer data.
*/
smp_store_release(&tail->commit, tail->used);
}
/**
* tty_flip_buffer_push - push terminal buffers
* @port: tty port to push
*
* Queue a push of the terminal flip buffers to the line discipline. Can be
* called from IRQ/atomic context.
*
 * If the queue is busy flipping, the work will be held off and retried later.
*/
void tty_flip_buffer_push(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
tty_flip_buffer_commit(buf->tail);
queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
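/*
 * Illustrative sketch (ex_rx_isr() is hypothetical): characters queued
 * with tty_insert_flip_string() stay invisible to the line discipline
 * until pushed, so a receive interrupt handler typically ends like this.
 */
static inline void ex_rx_isr(struct tty_port *port, const u8 *data,
			     size_t len)
{
	tty_insert_flip_string(port, data, len);	/* may accept < len */
	tty_flip_buffer_push(port);	/* schedules flush_to_ldisc() */
}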
/**
* tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and
* push
* @port: tty port
* @chars: characters
* @size: size
*
 * The function combines tty_insert_flip_string() and tty_flip_buffer_push(),
 * additionally holding the @port->lock around the insert and the commit.
*
* To be used only internally (by pty currently).
*
* Returns: the number added.
*/
int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
const u8 *chars, size_t size)
{
struct tty_bufhead *buf = &port->buf;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
size = tty_insert_flip_string(port, chars, size);
if (size)
tty_flip_buffer_commit(buf->tail);
spin_unlock_irqrestore(&port->lock, flags);
queue_work(system_unbound_wq, &buf->work);
return size;
}
/**
* tty_buffer_init - prepare a tty buffer structure
* @port: tty port to initialise
*
* Set up the initial state of the buffer management for a tty device. Must be
* called before the other tty buffer functions are used.
*/
void tty_buffer_init(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
mutex_init(&buf->lock);
tty_buffer_reset(&buf->sentinel, 0);
buf->head = &buf->sentinel;
buf->tail = &buf->sentinel;
init_llist_head(&buf->free);
atomic_set(&buf->mem_used, 0);
atomic_set(&buf->priority, 0);
INIT_WORK(&buf->work, flush_to_ldisc);
buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT;
}
/**
* tty_buffer_set_limit - change the tty buffer memory limit
* @port: tty port to change
* @limit: memory limit to set
*
* Change the tty buffer memory limit.
*
* Must be called before the other tty buffer functions are used.
*/
int tty_buffer_set_limit(struct tty_port *port, int limit)
{
if (limit < MIN_TTYB_SIZE)
return -EINVAL;
port->buf.mem_limit = limit;
return 0;
}
EXPORT_SYMBOL_GPL(tty_buffer_set_limit);
/* slave ptys can claim nested buffer lock when handling BRK and INTR */
void tty_buffer_set_lock_subclass(struct tty_port *port)
{
lockdep_set_subclass(&port->buf.lock, TTY_LOCK_SLAVE);
}
bool tty_buffer_restart_work(struct tty_port *port)
{
return queue_work(system_unbound_wq, &port->buf.work);
}
bool tty_buffer_cancel_work(struct tty_port *port)
{
return cancel_work_sync(&port->buf.work);
}
void tty_buffer_flush_work(struct tty_port *port)
{
flush_work(&port->buf.work);
}
| linux-master | drivers/tty/tty_buffer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tty port functions
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/serdev.h>
#include "tty.h"
static size_t tty_port_default_receive_buf(struct tty_port *port, const u8 *p,
const u8 *f, size_t count)
{
struct tty_struct *tty;
struct tty_ldisc *ld;
tty = READ_ONCE(port->itty);
if (!tty)
return 0;
ld = tty_ldisc_ref(tty);
if (!ld)
return 0;
count = tty_ldisc_receive_buf(ld, p, f, count);
tty_ldisc_deref(ld);
return count;
}
static void tty_port_default_lookahead_buf(struct tty_port *port, const u8 *p,
const u8 *f, size_t count)
{
struct tty_struct *tty;
struct tty_ldisc *ld;
tty = READ_ONCE(port->itty);
if (!tty)
return;
ld = tty_ldisc_ref(tty);
if (!ld)
return;
if (ld->ops->lookahead_buf)
ld->ops->lookahead_buf(ld->tty, p, f, count);
tty_ldisc_deref(ld);
}
static void tty_port_default_wakeup(struct tty_port *port)
{
struct tty_struct *tty = tty_port_tty_get(port);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
}
}
const struct tty_port_client_operations tty_port_default_client_ops = {
.receive_buf = tty_port_default_receive_buf,
.lookahead_buf = tty_port_default_lookahead_buf,
.write_wakeup = tty_port_default_wakeup,
};
EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
/**
* tty_port_init -- initialize tty_port
* @port: tty_port to initialize
*
* Initializes the state of struct tty_port. When a port was initialized using
 * this function, one has to destroy the port by tty_port_destroy(), either
 * indirectly by using &tty_port refcounting (tty_port_put()) or directly if
 * refcounting is not used.
*/
void tty_port_init(struct tty_port *port)
{
memset(port, 0, sizeof(*port));
tty_buffer_init(port);
init_waitqueue_head(&port->open_wait);
init_waitqueue_head(&port->delta_msr_wait);
mutex_init(&port->mutex);
mutex_init(&port->buf_mutex);
spin_lock_init(&port->lock);
port->close_delay = (50 * HZ) / 100;
port->closing_wait = (3000 * HZ) / 100;
port->client_ops = &tty_port_default_client_ops;
kref_init(&port->kref);
}
EXPORT_SYMBOL(tty_port_init);
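/*
 * Illustrative sketch of the usual setup order (ex_port_ops and
 * ex_port_setup() are hypothetical): initialise the port first, then
 * point it at the driver's operations before registering the device.
 */
static const struct tty_port_operations ex_port_ops = {
	/* .activate, .shutdown, .dtr_rts, .carrier_raised as needed */
};

static inline void ex_port_setup(struct tty_port *port)
{
	tty_port_init(port);
	port->ops = &ex_port_ops;
}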
/**
* tty_port_link_device - link tty and tty_port
* @port: tty_port of the device
* @driver: tty_driver for this device
* @index: index of the tty
*
* Provide the tty layer with a link from a tty (specified by @index) to a
* tty_port (@port). Use this only if neither tty_port_register_device() nor
* tty_port_install() is used in the driver. If used, this has to be called
* before tty_register_driver().
*/
void tty_port_link_device(struct tty_port *port,
struct tty_driver *driver, unsigned index)
{
if (WARN_ON(index >= driver->num))
return;
driver->ports[index] = port;
}
EXPORT_SYMBOL_GPL(tty_port_link_device);
/**
* tty_port_register_device - register tty device
* @port: tty_port of the device
* @driver: tty_driver for this device
* @index: index of the tty
* @device: parent if exists, otherwise NULL
*
* It is the same as tty_register_device() except the provided @port is linked
* to a concrete tty specified by @index. Use this or tty_port_install() (or
* both). Call tty_port_link_device() as a last resort.
*/
struct device *tty_port_register_device(struct tty_port *port,
struct tty_driver *driver, unsigned index,
struct device *device)
{
return tty_port_register_device_attr(port, driver, index, device, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tty_port_register_device);
/**
* tty_port_register_device_attr - register tty device
* @port: tty_port of the device
* @driver: tty_driver for this device
* @index: index of the tty
* @device: parent if exists, otherwise NULL
* @drvdata: Driver data to be set to device.
* @attr_grp: Attribute group to be set on device.
*
* It is the same as tty_register_device_attr() except the provided @port is
* linked to a concrete tty specified by @index. Use this or tty_port_install()
* (or both). Call tty_port_link_device() as a last resort.
*/
struct device *tty_port_register_device_attr(struct tty_port *port,
struct tty_driver *driver, unsigned index,
struct device *device, void *drvdata,
const struct attribute_group **attr_grp)
{
tty_port_link_device(port, driver, index);
return tty_register_device_attr(driver, index, device, drvdata,
attr_grp);
}
EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
/**
* tty_port_register_device_attr_serdev - register tty or serdev device
* @port: tty_port of the device
* @driver: tty_driver for this device
* @index: index of the tty
* @device: parent if exists, otherwise NULL
* @drvdata: driver data for the device
* @attr_grp: attribute group for the device
*
 * Register a serdev or tty device depending on whether the parent device has
 * any defined serdev clients.
*/
struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
struct tty_driver *driver, unsigned index,
struct device *device, void *drvdata,
const struct attribute_group **attr_grp)
{
struct device *dev;
tty_port_link_device(port, driver, index);
dev = serdev_tty_port_register(port, device, driver, index);
if (PTR_ERR(dev) != -ENODEV) {
/* Skip creating cdev if we registered a serdev device */
return dev;
}
return tty_register_device_attr(driver, index, device, drvdata,
attr_grp);
}
EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev);
/**
* tty_port_register_device_serdev - register tty or serdev device
* @port: tty_port of the device
* @driver: tty_driver for this device
* @index: index of the tty
* @device: parent if exists, otherwise NULL
*
 * Register a serdev or tty device depending on whether the parent device has
 * any defined serdev clients.
*/
struct device *tty_port_register_device_serdev(struct tty_port *port,
struct tty_driver *driver, unsigned index,
struct device *device)
{
return tty_port_register_device_attr_serdev(port, driver, index,
device, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tty_port_register_device_serdev);
/**
* tty_port_unregister_device - deregister a tty or serdev device
* @port: tty_port of the device
* @driver: tty_driver for this device
* @index: index of the tty
*
* If a tty or serdev device is registered with a call to
* tty_port_register_device_serdev() then this function must be called when
* the device is gone.
*/
void tty_port_unregister_device(struct tty_port *port,
struct tty_driver *driver, unsigned index)
{
int ret;
ret = serdev_tty_port_unregister(port);
if (ret == 0)
return;
tty_unregister_device(driver, index);
}
EXPORT_SYMBOL_GPL(tty_port_unregister_device);
int tty_port_alloc_xmit_buf(struct tty_port *port)
{
/* We may sleep in get_zeroed_page() */
mutex_lock(&port->buf_mutex);
if (port->xmit_buf == NULL) {
port->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
if (port->xmit_buf)
kfifo_init(&port->xmit_fifo, port->xmit_buf, PAGE_SIZE);
}
mutex_unlock(&port->buf_mutex);
if (port->xmit_buf == NULL)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(tty_port_alloc_xmit_buf);
void tty_port_free_xmit_buf(struct tty_port *port)
{
mutex_lock(&port->buf_mutex);
free_page((unsigned long)port->xmit_buf);
port->xmit_buf = NULL;
INIT_KFIFO(port->xmit_fifo);
mutex_unlock(&port->buf_mutex);
}
EXPORT_SYMBOL(tty_port_free_xmit_buf);
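/*
 * Illustrative sketch (ex_queue_tx() is hypothetical): once
 * tty_port_alloc_xmit_buf() succeeds, the page backs a kfifo that a
 * driver's write path can feed directly.
 */
static inline size_t ex_queue_tx(struct tty_port *port, const u8 *buf,
				 size_t count)
{
	return kfifo_in(&port->xmit_fifo, buf, count);	/* bytes queued */
}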
/**
* tty_port_destroy -- destroy inited port
* @port: tty port to be destroyed
*
* When a port was initialized using tty_port_init(), one has to destroy the
 * port by this function, either indirectly by using &tty_port refcounting
 * (tty_port_put()) or directly if refcounting is not used.
*/
void tty_port_destroy(struct tty_port *port)
{
tty_buffer_cancel_work(port);
tty_buffer_free_all(port);
}
EXPORT_SYMBOL(tty_port_destroy);
static void tty_port_destructor(struct kref *kref)
{
struct tty_port *port = container_of(kref, struct tty_port, kref);
/* check if last port ref was dropped before tty release */
if (WARN_ON(port->itty))
return;
free_page((unsigned long)port->xmit_buf);
tty_port_destroy(port);
if (port->ops && port->ops->destruct)
port->ops->destruct(port);
else
kfree(port);
}
/**
* tty_port_put -- drop a reference to tty_port
* @port: port to drop a reference of (can be NULL)
*
* The final put will destroy and free up the @port using
* @port->ops->destruct() hook, or using kfree() if not provided.
*/
void tty_port_put(struct tty_port *port)
{
if (port)
kref_put(&port->kref, tty_port_destructor);
}
EXPORT_SYMBOL(tty_port_put);
/**
* tty_port_tty_get - get a tty reference
* @port: tty port
*
* Return a refcount protected tty instance or %NULL if the port is not
* associated with a tty (eg due to close or hangup).
*/
struct tty_struct *tty_port_tty_get(struct tty_port *port)
{
unsigned long flags;
struct tty_struct *tty;
spin_lock_irqsave(&port->lock, flags);
tty = tty_kref_get(port->tty);
spin_unlock_irqrestore(&port->lock, flags);
return tty;
}
EXPORT_SYMBOL(tty_port_tty_get);
/**
* tty_port_tty_set - set the tty of a port
* @port: tty port
* @tty: the tty
*
* Associate the port and tty pair. Manages any internal refcounts. Pass %NULL
* to deassociate a port.
*/
void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
tty_kref_put(port->tty);
port->tty = tty_kref_get(tty);
spin_unlock_irqrestore(&port->lock, flags);
}
EXPORT_SYMBOL(tty_port_tty_set);
/**
 * tty_port_shutdown - internal helper to shut down the device
* @port: tty port to be shut down
* @tty: the associated tty
*
* It is used by tty_port_hangup() and tty_port_close(). Its task is to
 * shut down the device if it was initialized (note consoles remain
* functioning). It lowers DTR/RTS (if @tty has HUPCL set) and invokes
* @port->ops->shutdown().
*/
static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
{
mutex_lock(&port->mutex);
if (port->console)
goto out;
if (tty_port_initialized(port)) {
tty_port_set_initialized(port, false);
/*
* Drop DTR/RTS if HUPCL is set. This causes any attached
* modem to hang up the line.
*/
if (tty && C_HUPCL(tty))
tty_port_lower_dtr_rts(port);
if (port->ops->shutdown)
port->ops->shutdown(port);
}
out:
mutex_unlock(&port->mutex);
}
/**
* tty_port_hangup - hangup helper
* @port: tty port
*
* Perform port level tty hangup flag and count changes. Drop the tty
* reference.
*
* Caller holds tty lock.
*/
void tty_port_hangup(struct tty_port *port)
{
struct tty_struct *tty;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
port->count = 0;
tty = port->tty;
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
port->tty = NULL;
spin_unlock_irqrestore(&port->lock, flags);
tty_port_set_active(port, false);
tty_port_shutdown(port, tty);
tty_kref_put(tty);
wake_up_interruptible(&port->open_wait);
wake_up_interruptible(&port->delta_msr_wait);
}
EXPORT_SYMBOL(tty_port_hangup);
/**
* tty_port_tty_hangup - helper to hang up a tty
* @port: tty port
* @check_clocal: hang only ttys with %CLOCAL unset?
*/
void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
{
struct tty_struct *tty = tty_port_tty_get(port);
if (tty && (!check_clocal || !C_CLOCAL(tty)))
tty_hangup(tty);
tty_kref_put(tty);
}
EXPORT_SYMBOL_GPL(tty_port_tty_hangup);
/**
* tty_port_tty_wakeup - helper to wake up a tty
* @port: tty port
*/
void tty_port_tty_wakeup(struct tty_port *port)
{
port->client_ops->write_wakeup(port);
}
EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
/**
* tty_port_carrier_raised - carrier raised check
* @port: tty port
*
* Wrapper for the carrier detect logic. For the moment this is used
* to hide some internal details. This will eventually become entirely
* internal to the tty port.
*/
bool tty_port_carrier_raised(struct tty_port *port)
{
if (port->ops->carrier_raised == NULL)
return true;
return port->ops->carrier_raised(port);
}
EXPORT_SYMBOL(tty_port_carrier_raised);
/**
* tty_port_raise_dtr_rts - Raise DTR/RTS
* @port: tty port
*
* Wrapper for the DTR/RTS raise logic. For the moment this is used to hide
* some internal details. This will eventually become entirely internal to the
* tty port.
*/
void tty_port_raise_dtr_rts(struct tty_port *port)
{
if (port->ops->dtr_rts)
port->ops->dtr_rts(port, true);
}
EXPORT_SYMBOL(tty_port_raise_dtr_rts);
/**
* tty_port_lower_dtr_rts - Lower DTR/RTS
* @port: tty port
*
 * Wrapper for the DTR/RTS lowering logic. For the moment this is used to hide
* some internal details. This will eventually become entirely internal to the
* tty port.
*/
void tty_port_lower_dtr_rts(struct tty_port *port)
{
if (port->ops->dtr_rts)
port->ops->dtr_rts(port, false);
}
EXPORT_SYMBOL(tty_port_lower_dtr_rts);
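/*
 * Illustrative sketch of driver-side backing for the two wrappers above
 * (all ex_* names are hypothetical; ex_set_mctrl()/ex_get_mstatus()
 * stand in for device-specific register accessors).
 */
extern void ex_set_mctrl(struct tty_port *port, unsigned int mctrl);
extern unsigned int ex_get_mstatus(struct tty_port *port);

static void ex_dtr_rts(struct tty_port *port, bool active)
{
	ex_set_mctrl(port, active ? TIOCM_DTR | TIOCM_RTS : 0);
}

static bool ex_carrier_raised(struct tty_port *port)
{
	return ex_get_mstatus(port) & TIOCM_CAR;
}

static const struct tty_port_operations ex_uart_port_ops = {
	.dtr_rts	= ex_dtr_rts,
	.carrier_raised	= ex_carrier_raised,
};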
/**
* tty_port_block_til_ready - Waiting logic for tty open
* @port: the tty port being opened
* @tty: the tty device being bound
* @filp: the file pointer of the opener or %NULL
*
* Implement the core POSIX/SuS tty behaviour when opening a tty device.
* Handles:
*
* - hangup (both before and during)
* - non blocking open
* - rts/dtr/dcd
* - signals
* - port flags and counts
*
* The passed @port must implement the @port->ops->carrier_raised method if it
* can do carrier detect and the @port->ops->dtr_rts method if it supports
* software management of these lines. Note that the dtr/rts raise is done each
* iteration as a hangup may have previously dropped them while we wait.
*
* Caller holds tty lock.
*
* Note: May drop and reacquire tty lock when blocking, so @tty and @port may
* have changed state (eg., may have been hung up).
*/
int tty_port_block_til_ready(struct tty_port *port,
struct tty_struct *tty, struct file *filp)
{
int do_clocal = 0, retval;
unsigned long flags;
DEFINE_WAIT(wait);
/* if non-blocking mode is set we can pass directly to open unless
* the port has just hung up or is in another error state.
*/
if (tty_io_error(tty)) {
tty_port_set_active(port, true);
return 0;
}
if (filp == NULL || (filp->f_flags & O_NONBLOCK)) {
/* Indicate we are open */
if (C_BAUD(tty))
tty_port_raise_dtr_rts(port);
tty_port_set_active(port, true);
return 0;
}
if (C_CLOCAL(tty))
do_clocal = 1;
/* Block waiting until we can proceed. We may need to wait for the
* carrier, but we must also wait for any close that is in progress
* before the next open may complete.
*/
retval = 0;
/* The port lock protects the port counts */
spin_lock_irqsave(&port->lock, flags);
port->count--;
port->blocked_open++;
spin_unlock_irqrestore(&port->lock, flags);
while (1) {
/* Indicate we are open */
if (C_BAUD(tty) && tty_port_initialized(port))
tty_port_raise_dtr_rts(port);
prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
/* Check for a hangup or uninitialised port.
* Return accordingly.
*/
if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
if (port->flags & ASYNC_HUP_NOTIFY)
retval = -EAGAIN;
else
retval = -ERESTARTSYS;
break;
}
/*
* Probe the carrier. For devices with no carrier detect
* tty_port_carrier_raised will always return true.
* Never ask drivers if CLOCAL is set, this causes troubles
* on some hardware.
*/
if (do_clocal || tty_port_carrier_raised(port))
break;
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
tty_unlock(tty);
schedule();
tty_lock(tty);
}
finish_wait(&port->open_wait, &wait);
/* Update counts. A parallel hangup will have set count to zero and
* we must not mess that up further.
*/
spin_lock_irqsave(&port->lock, flags);
if (!tty_hung_up_p(filp))
port->count++;
port->blocked_open--;
spin_unlock_irqrestore(&port->lock, flags);
if (retval == 0)
tty_port_set_active(port, true);
return retval;
}
EXPORT_SYMBOL(tty_port_block_til_ready);
static void tty_port_drain_delay(struct tty_port *port, struct tty_struct *tty)
{
unsigned int bps = tty_get_baud_rate(tty);
long timeout;
if (bps > 1200) {
timeout = (HZ * 10 * port->drain_delay) / bps;
timeout = max_t(long, timeout, HZ / 10);
} else {
timeout = 2 * HZ;
}
schedule_timeout_interruptible(timeout);
}
/**
* tty_port_close_start - helper for tty->ops->close, part 1/2
* @port: tty_port of the device
* @tty: tty being closed
* @filp: passed file pointer
*
* Decrements and checks open count. Flushes the port if this is the last
 * close. That means dropping the data from the output buffer on the device and
* waiting for sending logic to finish. The rest of close handling is performed
* in tty_port_close_end().
*
* Locking: Caller holds tty lock.
*
* Return: 1 if this is the last close, otherwise 0
*/
int tty_port_close_start(struct tty_port *port,
struct tty_struct *tty, struct file *filp)
{
unsigned long flags;
if (tty_hung_up_p(filp))
return 0;
spin_lock_irqsave(&port->lock, flags);
if (tty->count == 1 && port->count != 1) {
tty_warn(tty, "%s: tty->count = 1 port count = %d\n", __func__,
port->count);
port->count = 1;
}
if (--port->count < 0) {
tty_warn(tty, "%s: bad port count (%d)\n", __func__,
port->count);
port->count = 0;
}
if (port->count) {
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
spin_unlock_irqrestore(&port->lock, flags);
tty->closing = 1;
if (tty_port_initialized(port)) {
/* Don't block on a stalled port, just pull the chain */
if (tty->flow.tco_stopped)
tty_driver_flush_buffer(tty);
if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
tty_wait_until_sent(tty, port->closing_wait);
if (port->drain_delay)
tty_port_drain_delay(port, tty);
}
/* Flush the ldisc buffering */
tty_ldisc_flush(tty);
/* Report to caller this is the last port reference */
return 1;
}
EXPORT_SYMBOL(tty_port_close_start);
/**
* tty_port_close_end - helper for tty->ops->close, part 2/2
* @port: tty_port of the device
* @tty: tty being closed
*
* This is a continuation of the first part: tty_port_close_start(). This
* should be called after turning off the device. It flushes the data from the
* line discipline and delays the close by @port->close_delay.
*
* Locking: Caller holds tty lock.
*/
void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
{
unsigned long flags;
tty_ldisc_flush(tty);
tty->closing = 0;
spin_lock_irqsave(&port->lock, flags);
if (port->blocked_open) {
spin_unlock_irqrestore(&port->lock, flags);
if (port->close_delay)
msleep_interruptible(jiffies_to_msecs(port->close_delay));
spin_lock_irqsave(&port->lock, flags);
wake_up_interruptible(&port->open_wait);
}
spin_unlock_irqrestore(&port->lock, flags);
tty_port_set_active(port, false);
}
EXPORT_SYMBOL(tty_port_close_end);
/**
* tty_port_close - generic tty->ops->close handler
* @port: tty_port of the device
* @tty: tty being closed
* @filp: passed file pointer
*
* It is a generic helper to be used in driver's @tty->ops->close. It wraps a
* sequence of tty_port_close_start(), tty_port_shutdown(), and
* tty_port_close_end(). The latter two are called only if this is the last
* close. See the respective functions for the details.
*
* Locking: Caller holds tty lock
*/
void tty_port_close(struct tty_port *port, struct tty_struct *tty,
struct file *filp)
{
if (tty_port_close_start(port, tty, filp) == 0)
return;
tty_port_shutdown(port, tty);
if (!port->console)
set_bit(TTY_IO_ERROR, &tty->flags);
tty_port_close_end(port, tty);
tty_port_tty_set(port, NULL);
}
EXPORT_SYMBOL(tty_port_close);
/**
* tty_port_install - generic tty->ops->install handler
* @port: tty_port of the device
* @driver: tty_driver for this device
* @tty: tty to be installed
*
* It is the same as tty_standard_install() except the provided @port is linked
* to a concrete tty specified by @tty. Use this or tty_port_register_device()
* (or both). Call tty_port_link_device() as a last resort.
*/
int tty_port_install(struct tty_port *port, struct tty_driver *driver,
struct tty_struct *tty)
{
tty->port = port;
return tty_standard_install(driver, tty);
}
EXPORT_SYMBOL_GPL(tty_port_install);
/**
* tty_port_open - generic tty->ops->open handler
* @port: tty_port of the device
* @tty: tty to be opened
* @filp: passed file pointer
*
 * It is a generic helper to be used in driver's @tty->ops->open. It activates
 * the device using @port->ops->activate if it is not active already, and
 * waits for the device to be ready using tty_port_block_til_ready() (e.g.
 * raises DTR/RTS and waits for carrier).
*
* Note that @port->ops->shutdown is not called when @port->ops->activate
* returns an error (on the contrary, @tty->ops->close is).
*
* Locking: Caller holds tty lock.
*
* Note: may drop and reacquire tty lock (in tty_port_block_til_ready()) so
* @tty and @port may have changed state (eg., may be hung up now).
*/
int tty_port_open(struct tty_port *port, struct tty_struct *tty,
struct file *filp)
{
spin_lock_irq(&port->lock);
++port->count;
spin_unlock_irq(&port->lock);
tty_port_tty_set(port, tty);
/*
* Do the device-specific open only if the hardware isn't
* already initialized. Serialize open and shutdown using the
* port mutex.
*/
mutex_lock(&port->mutex);
if (!tty_port_initialized(port)) {
clear_bit(TTY_IO_ERROR, &tty->flags);
if (port->ops->activate) {
int retval = port->ops->activate(port, tty);
if (retval) {
mutex_unlock(&port->mutex);
return retval;
}
}
tty_port_set_initialized(port, true);
}
mutex_unlock(&port->mutex);
return tty_port_block_til_ready(port, tty, filp);
}
EXPORT_SYMBOL(tty_port_open);
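/*
 * Illustrative sketch (ex_* names hypothetical): drivers commonly wire
 * their struct tty_operations open/close straight through to the port
 * helpers, letting the port layer handle counts, blocking and shutdown.
 */
static int ex_tty_open(struct tty_struct *tty, struct file *filp)
{
	return tty_port_open(tty->port, tty, filp);
}

static void ex_tty_close(struct tty_struct *tty, struct file *filp)
{
	tty_port_close(tty->port, tty, filp);
}

static const struct tty_operations ex_tty_ops = {
	.open	= ex_tty_open,
	.close	= ex_tty_close,
};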
| linux-master | drivers/tty/tty_port.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* conmakehash.c
*
* Create arrays for initializing the kernel folded tables (using a hash
 * table turned out to be too limiting...) Unfortunately we can't simply
* preinitialize the tables at compile time since kfree() cannot accept
* memory not allocated by kmalloc(), and doing our own memory management
* just for this seems like massive overkill.
*
* Copyright (C) 1995-1997 H. Peter Anvin
*/
#include <stdio.h>
#include <stdlib.h>
#include <sysexits.h>
#include <string.h>
#include <ctype.h>
#define MAX_FONTLEN 256
typedef unsigned short unicode;
static void usage(char *argv0)
{
fprintf(stderr, "Usage: \n"
" %s chartable [hashsize] [hashstep] [maxhashlevel]\n", argv0);
exit(EX_USAGE);
}
static int getunicode(char **p0)
{
char *p = *p0;
while (*p == ' ' || *p == '\t')
p++;
if (*p != 'U' || p[1] != '+' ||
!isxdigit(p[2]) || !isxdigit(p[3]) || !isxdigit(p[4]) ||
!isxdigit(p[5]) || isxdigit(p[6]))
return -1;
*p0 = p+6;
return strtol(p+2,0,16);
}
unicode unitable[MAX_FONTLEN][255];
/* Massive overkill, but who cares? */
int unicount[MAX_FONTLEN];
static void addpair(int fp, int un)
{
int i;
if ( un <= 0xfffe )
{
/* Check it isn't a duplicate */
for ( i = 0 ; i < unicount[fp] ; i++ )
if ( unitable[fp][i] == un )
return;
/* Add to list */
if ( unicount[fp] > 254 )
{
fprintf(stderr, "ERROR: Only 255 unicodes/glyph permitted!\n");
exit(EX_DATAERR);
}
unitable[fp][unicount[fp]] = un;
unicount[fp]++;
}
/* otherwise: ignore */
}
int main(int argc, char *argv[])
{
FILE *ctbl;
char *tblname;
char buffer[65536];
int fontlen;
int i, nuni, nent;
int fp0, fp1, un0, un1;
char *p, *p1;
if ( argc < 2 || argc > 5 )
usage(argv[0]);
if ( !strcmp(argv[1],"-") )
{
ctbl = stdin;
tblname = "stdin";
}
else
{
ctbl = fopen(tblname = argv[1], "r");
if ( !ctbl )
{
perror(tblname);
exit(EX_NOINPUT);
}
}
/* For now we assume the default font is always 256 characters. */
fontlen = 256;
/* Initialize table */
for ( i = 0 ; i < fontlen ; i++ )
unicount[i] = 0;
/* Now we come to the tricky part. Parse the input table. */
while ( fgets(buffer, sizeof(buffer), ctbl) != NULL )
{
if ( (p = strchr(buffer, '\n')) != NULL )
*p = '\0';
else
fprintf(stderr, "%s: Warning: line too long\n", tblname);
p = buffer;
/*
* Syntax accepted:
* <fontpos> <unicode> <unicode> ...
* <range> idem
* <range> <unicode range>
*
* where <range> ::= <fontpos>-<fontpos>
* and <unicode> ::= U+<h><h><h><h>
* and <h> ::= <hexadecimal digit>
*/
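	/*
	 * Example of accepted input (hypothetical chartable contents,
	 * shown only for illustration):
	 *
	 *	0x41 U+0041 U+0391	# glyph 0x41 is 'A' and Greek Alpha
	 *	0x20-0x7e idem		# ASCII range maps to itself
	 *	0xb0-0xb2 U+2591-U+2593	# three shade characters
	 */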
while (*p == ' ' || *p == '\t')
p++;
if (!*p || *p == '#')
continue; /* skip comment or blank line */
fp0 = strtol(p, &p1, 0);
if (p1 == p)
{
fprintf(stderr, "Bad input line: %s\n", buffer);
exit(EX_DATAERR);
}
p = p1;
while (*p == ' ' || *p == '\t')
p++;
if (*p == '-')
{
p++;
fp1 = strtol(p, &p1, 0);
if (p1 == p)
{
fprintf(stderr, "Bad input line: %s\n", buffer);
exit(EX_DATAERR);
}
p = p1;
}
else
fp1 = 0;
if ( fp0 < 0 || fp0 >= fontlen )
{
fprintf(stderr,
"%s: Glyph number (0x%x) larger than font length\n",
tblname, fp0);
exit(EX_DATAERR);
}
if ( fp1 && (fp1 < fp0 || fp1 >= fontlen) )
{
fprintf(stderr,
"%s: Bad end of range (0x%x)\n",
tblname, fp1);
exit(EX_DATAERR);
}
if (fp1)
{
/* we have a range; expect the word "idem" or a Unicode range of the
same length */
while (*p == ' ' || *p == '\t')
p++;
if (!strncmp(p, "idem", 4))
{
for (i=fp0; i<=fp1; i++)
addpair(i,i);
p += 4;
}
else
{
un0 = getunicode(&p);
while (*p == ' ' || *p == '\t')
p++;
if (*p != '-')
{
fprintf(stderr,
"%s: Corresponding to a range of font positions, there should be a Unicode range\n",
tblname);
exit(EX_DATAERR);
}
p++;
un1 = getunicode(&p);
if (un0 < 0 || un1 < 0)
{
fprintf(stderr,
"%s: Bad Unicode range corresponding to font position range 0x%x-0x%x\n",
tblname, fp0, fp1);
exit(EX_DATAERR);
}
if (un1 - un0 != fp1 - fp0)
{
fprintf(stderr,
"%s: Unicode range U+%x-U+%x not of the same length as font position range 0x%x-0x%x\n",
tblname, un0, un1, fp0, fp1);
exit(EX_DATAERR);
}
for(i=fp0; i<=fp1; i++)
addpair(i,un0-fp0+i);
}
}
else
{
/* no range; expect a list of unicode values for a single font position */
while ( (un0 = getunicode(&p)) >= 0 )
addpair(fp0, un0);
}
while (*p == ' ' || *p == '\t')
p++;
if (*p && *p != '#')
fprintf(stderr, "%s: trailing junk (%s) ignored\n", tblname, p);
}
/* Okay, we hit EOF, now output hash table */
fclose(ctbl);
/* Compute total size of Unicode list */
nuni = 0;
for ( i = 0 ; i < fontlen ; i++ )
nuni += unicount[i];
printf("\
/*\n\
* Do not edit this file; it was automatically generated by\n\
*\n\
* conmakehash %s > [this file]\n\
*\n\
*/\n\
\n\
#include <linux/types.h>\n\
\n\
u8 dfont_unicount[%d] = \n\
{\n\t", argv[1], fontlen);
for ( i = 0 ; i < fontlen ; i++ )
{
printf("%3d", unicount[i]);
if ( i == fontlen-1 )
printf("\n};\n");
else if ( i % 8 == 7 )
printf(",\n\t");
else
printf(", ");
}
printf("\nu16 dfont_unitable[%d] = \n{\n\t", nuni);
fp0 = 0;
nent = 0;
for ( i = 0 ; i < nuni ; i++ )
{
while ( nent >= unicount[fp0] )
{
fp0++;
nent = 0;
}
printf("0x%04x", unitable[fp0][nent++]);
if ( i == nuni-1 )
printf("\n};\n");
else if ( i % 8 == 7 )
printf(",\n\t");
else
printf(", ");
}
exit(EX_OK);
}
| linux-master | drivers/tty/vt/conmakehash.c |
// SPDX-License-Identifier: GPL-2.0
/*
* consolemap.c
*
* Mapping from internal code (such as Latin-1 or Unicode or IBM PC code)
* to font positions.
*
* aeb, 950210
*
* Support for multiple unimaps by Jakub Jelinek <[email protected]>, July 1998
*
* Fix bug in inverse translation. Stanislav Voronyi <[email protected]>, Dec 1998
*
* In order to prevent the following circular lock dependency:
* &mm->mmap_lock --> cpu_hotplug.lock --> console_lock --> &mm->mmap_lock
*
* We cannot allow page fault to happen while holding the console_lock.
* Therefore, all the userspace copy operations have to be done outside
* the console_lock critical sections.
*
* As all the affected functions are all called directly from vt_ioctl(), we
* can allocate some small buffers directly on stack without worrying about
* stack overflow.
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/module.h>
#include <linux/kd.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/uaccess.h>
#include <linux/console.h>
#include <linux/consolemap.h>
#include <linux/vt_kern.h>
#include <linux/string.h>
static unsigned short translations[][E_TABSZ] = {
/* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
[LAT1_MAP] = {
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7,
0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
},
/* VT100 graphics mapped to Unicode */
[GRAF_MAP] = {
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x2192, 0x2190, 0x2191, 0x2193, 0x002f,
0x2588, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x00a0,
0x25c6, 0x2592, 0x2409, 0x240c, 0x240d, 0x240a, 0x00b0, 0x00b1,
0x2591, 0x240b, 0x2518, 0x2510, 0x250c, 0x2514, 0x253c, 0x23ba,
0x23bb, 0x2500, 0x23bc, 0x23bd, 0x251c, 0x2524, 0x2534, 0x252c,
0x2502, 0x2264, 0x2265, 0x03c0, 0x2260, 0x00a3, 0x00b7, 0x007f,
0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7,
0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
},
/* IBM Codepage 437 mapped to Unicode */
[IBMPC_MAP] = {
0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
0x25d8, 0x25cb, 0x25d9, 0x2642, 0x2640, 0x266a, 0x266b, 0x263c,
0x25b6, 0x25c0, 0x2195, 0x203c, 0x00b6, 0x00a7, 0x25ac, 0x21a8,
0x2191, 0x2193, 0x2192, 0x2190, 0x221f, 0x2194, 0x25b2, 0x25bc,
0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x2302,
0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0, 0x00e5, 0x00e7,
0x00ea, 0x00eb, 0x00e8, 0x00ef, 0x00ee, 0x00ec, 0x00c4, 0x00c5,
0x00c9, 0x00e6, 0x00c6, 0x00f4, 0x00f6, 0x00f2, 0x00fb, 0x00f9,
0x00ff, 0x00d6, 0x00dc, 0x00a2, 0x00a3, 0x00a5, 0x20a7, 0x0192,
0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00f1, 0x00d1, 0x00aa, 0x00ba,
0x00bf, 0x2310, 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00bb,
0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556,
0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510,
0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f,
0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567,
0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b,
0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580,
0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3, 0x00b5, 0x03c4,
0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229,
0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248,
0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0
},
/* User mapping -- default to codes for direct font mapping */
[USER_MAP] = {
0xf000, 0xf001, 0xf002, 0xf003, 0xf004, 0xf005, 0xf006, 0xf007,
0xf008, 0xf009, 0xf00a, 0xf00b, 0xf00c, 0xf00d, 0xf00e, 0xf00f,
0xf010, 0xf011, 0xf012, 0xf013, 0xf014, 0xf015, 0xf016, 0xf017,
0xf018, 0xf019, 0xf01a, 0xf01b, 0xf01c, 0xf01d, 0xf01e, 0xf01f,
0xf020, 0xf021, 0xf022, 0xf023, 0xf024, 0xf025, 0xf026, 0xf027,
0xf028, 0xf029, 0xf02a, 0xf02b, 0xf02c, 0xf02d, 0xf02e, 0xf02f,
0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
0xf038, 0xf039, 0xf03a, 0xf03b, 0xf03c, 0xf03d, 0xf03e, 0xf03f,
0xf040, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047,
0xf048, 0xf049, 0xf04a, 0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f,
0xf050, 0xf051, 0xf052, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057,
0xf058, 0xf059, 0xf05a, 0xf05b, 0xf05c, 0xf05d, 0xf05e, 0xf05f,
0xf060, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067,
0xf068, 0xf069, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f,
0xf070, 0xf071, 0xf072, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077,
0xf078, 0xf079, 0xf07a, 0xf07b, 0xf07c, 0xf07d, 0xf07e, 0xf07f,
0xf080, 0xf081, 0xf082, 0xf083, 0xf084, 0xf085, 0xf086, 0xf087,
0xf088, 0xf089, 0xf08a, 0xf08b, 0xf08c, 0xf08d, 0xf08e, 0xf08f,
0xf090, 0xf091, 0xf092, 0xf093, 0xf094, 0xf095, 0xf096, 0xf097,
0xf098, 0xf099, 0xf09a, 0xf09b, 0xf09c, 0xf09d, 0xf09e, 0xf09f,
0xf0a0, 0xf0a1, 0xf0a2, 0xf0a3, 0xf0a4, 0xf0a5, 0xf0a6, 0xf0a7,
0xf0a8, 0xf0a9, 0xf0aa, 0xf0ab, 0xf0ac, 0xf0ad, 0xf0ae, 0xf0af,
0xf0b0, 0xf0b1, 0xf0b2, 0xf0b3, 0xf0b4, 0xf0b5, 0xf0b6, 0xf0b7,
0xf0b8, 0xf0b9, 0xf0ba, 0xf0bb, 0xf0bc, 0xf0bd, 0xf0be, 0xf0bf,
0xf0c0, 0xf0c1, 0xf0c2, 0xf0c3, 0xf0c4, 0xf0c5, 0xf0c6, 0xf0c7,
0xf0c8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0cc, 0xf0cd, 0xf0ce, 0xf0cf,
0xf0d0, 0xf0d1, 0xf0d2, 0xf0d3, 0xf0d4, 0xf0d5, 0xf0d6, 0xf0d7,
0xf0d8, 0xf0d9, 0xf0da, 0xf0db, 0xf0dc, 0xf0dd, 0xf0de, 0xf0df,
0xf0e0, 0xf0e1, 0xf0e2, 0xf0e3, 0xf0e4, 0xf0e5, 0xf0e6, 0xf0e7,
0xf0e8, 0xf0e9, 0xf0ea, 0xf0eb, 0xf0ec, 0xf0ed, 0xf0ee, 0xf0ef,
0xf0f0, 0xf0f1, 0xf0f2, 0xf0f3, 0xf0f4, 0xf0f5, 0xf0f6, 0xf0f7,
0xf0f8, 0xf0f9, 0xf0fa, 0xf0fb, 0xf0fc, 0xf0fd, 0xf0fe, 0xf0ff
}
};
/* The standard kernel character-to-font mappings are not invertible
-- this is just a best effort. */
#define MAX_GLYPH 512 /* Max possible glyph value */
static enum translation_map inv_translate[MAX_NR_CONSOLES];
#define UNI_DIRS 32U
#define UNI_DIR_ROWS 32U
#define UNI_ROW_GLYPHS 64U
#define UNI_DIR_BITS GENMASK(15, 11)
#define UNI_ROW_BITS GENMASK(10, 6)
#define UNI_GLYPH_BITS GENMASK( 5, 0)
#define UNI_DIR(uni) FIELD_GET(UNI_DIR_BITS, (uni))
#define UNI_ROW(uni) FIELD_GET(UNI_ROW_BITS, (uni))
#define UNI_GLYPH(uni) FIELD_GET(UNI_GLYPH_BITS, (uni))
#define UNI(dir, row, glyph) (FIELD_PREP(UNI_DIR_BITS, (dir)) | \
FIELD_PREP(UNI_ROW_BITS, (row)) | \
FIELD_PREP(UNI_GLYPH_BITS, (glyph)))
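/*
 * Worked example (for illustration): 0x0041 ('A') splits under the masks
 * above (bits 15-11, 10-6, 5-0) into UNI_DIR(0x0041) == 0,
 * UNI_ROW(0x0041) == 1 and UNI_GLYPH(0x0041) == 1; UNI(0, 1, 1)
 * reassembles 0x0041.
 */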
/**
* struct uni_pagedict -- unicode directory
*
* @uni_pgdir: 32*32*64 table with glyphs
* @refcount: reference count of this structure
* @sum: checksum
* @inverse_translations: best-effort inverse mapping
* @inverse_trans_unicode: best-effort inverse mapping to unicode
*/
struct uni_pagedict {
u16 **uni_pgdir[UNI_DIRS];
unsigned long refcount;
unsigned long sum;
unsigned char *inverse_translations[LAST_MAP + 1];
u16 *inverse_trans_unicode;
};
static struct uni_pagedict *dflt;
static void set_inverse_transl(struct vc_data *conp, struct uni_pagedict *dict,
enum translation_map m)
{
unsigned short *t = translations[m];
unsigned char *inv;
if (!dict)
return;
inv = dict->inverse_translations[m];
if (!inv) {
inv = dict->inverse_translations[m] = kmalloc(MAX_GLYPH,
GFP_KERNEL);
if (!inv)
return;
}
memset(inv, 0, MAX_GLYPH);
for (unsigned int ch = 0; ch < ARRAY_SIZE(translations[m]); ch++) {
int glyph = conv_uni_to_pc(conp, t[ch]);
if (glyph >= 0 && glyph < MAX_GLYPH && inv[glyph] < 32) {
/* prefer '-' above SHY etc. */
inv[glyph] = ch;
}
}
}
static void set_inverse_trans_unicode(struct uni_pagedict *dict)
{
unsigned int d, r, g;
u16 *inv;
if (!dict)
return;
inv = dict->inverse_trans_unicode;
if (!inv) {
inv = dict->inverse_trans_unicode = kmalloc_array(MAX_GLYPH,
sizeof(*inv), GFP_KERNEL);
if (!inv)
return;
}
memset(inv, 0, MAX_GLYPH * sizeof(*inv));
for (d = 0; d < UNI_DIRS; d++) {
u16 **dir = dict->uni_pgdir[d];
if (!dir)
continue;
for (r = 0; r < UNI_DIR_ROWS; r++) {
u16 *row = dir[r];
if (!row)
continue;
for (g = 0; g < UNI_ROW_GLYPHS; g++) {
u16 glyph = row[g];
if (glyph < MAX_GLYPH && inv[glyph] < 32)
inv[glyph] = UNI(d, r, g);
}
}
}
}
unsigned short *set_translate(enum translation_map m, struct vc_data *vc)
{
inv_translate[vc->vc_num] = m;
return translations[m];
}
/*
* Inverse translation is impossible for several reasons:
* 1. The font<->character maps are not 1-1.
* 2. The text may have been written while a different translation map
* was active.
 * Still, it is now possible to cut and paste non-ASCII text to a certain
 * extent.
*/
u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode)
{
struct uni_pagedict *p;
enum translation_map m;
if (glyph >= MAX_GLYPH)
return 0;
p = *conp->uni_pagedict_loc;
if (!p)
return glyph;
if (use_unicode) {
if (!p->inverse_trans_unicode)
return glyph;
return p->inverse_trans_unicode[glyph];
}
m = inv_translate[conp->vc_num];
if (!p->inverse_translations[m])
return glyph;
return p->inverse_translations[m][glyph];
}
EXPORT_SYMBOL_GPL(inverse_translate);
static void update_user_maps(void)
{
int i;
struct uni_pagedict *p, *q = NULL;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (!vc_cons_allocated(i))
continue;
p = *vc_cons[i].d->uni_pagedict_loc;
if (p && p != q) {
set_inverse_transl(vc_cons[i].d, p, USER_MAP);
set_inverse_trans_unicode(p);
q = p;
}
}
}
/*
* Load customizable translation table
* arg points to a 256 byte translation table.
*
* The "old" variants are for translation directly to font (using the
* 0xf000-0xf0ff "transparent" Unicodes) whereas the "new" variants set
* Unicodes explicitly.
*/
int con_set_trans_old(unsigned char __user * arg)
{
unsigned short inbuf[E_TABSZ];
unsigned int i;
unsigned char ch;
for (i = 0; i < ARRAY_SIZE(inbuf); i++) {
if (get_user(ch, &arg[i]))
return -EFAULT;
inbuf[i] = UNI_DIRECT_BASE | ch;
}
console_lock();
memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
update_user_maps();
console_unlock();
return 0;
}
int con_get_trans_old(unsigned char __user * arg)
{
int i, ch;
unsigned short *p = translations[USER_MAP];
unsigned char outbuf[E_TABSZ];
console_lock();
for (i = 0; i < ARRAY_SIZE(outbuf); i++)
{
ch = conv_uni_to_pc(vc_cons[fg_console].d, p[i]);
outbuf[i] = (ch & ~0xff) ? 0 : ch;
}
console_unlock();
return copy_to_user(arg, outbuf, sizeof(outbuf)) ? -EFAULT : 0;
}
int con_set_trans_new(ushort __user * arg)
{
unsigned short inbuf[E_TABSZ];
if (copy_from_user(inbuf, arg, sizeof(inbuf)))
return -EFAULT;
console_lock();
memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
update_user_maps();
console_unlock();
return 0;
}
int con_get_trans_new(ushort __user * arg)
{
unsigned short outbuf[E_TABSZ];
console_lock();
memcpy(outbuf, translations[USER_MAP], sizeof(outbuf));
console_unlock();
return copy_to_user(arg, outbuf, sizeof(outbuf)) ? -EFAULT : 0;
}
/*
* Unicode -> current font conversion
*
* A font has at most 512 chars, usually 256.
* But one font position may represent several Unicode chars.
* A hashtable is somewhat of a pain to deal with, so use a
* "paged table" instead. Simulation has shown the memory cost of
* this 3-level paged table scheme to be comparable to a hash table.
*/
extern u8 dfont_unicount[]; /* Defined in console_defmap.c */
extern u16 dfont_unitable[];
static void con_release_unimap(struct uni_pagedict *dict)
{
unsigned int d, r;
if (dict == dflt)
dflt = NULL;
for (d = 0; d < UNI_DIRS; d++) {
u16 **dir = dict->uni_pgdir[d];
if (dir != NULL) {
for (r = 0; r < UNI_DIR_ROWS; r++)
kfree(dir[r]);
kfree(dir);
}
dict->uni_pgdir[d] = NULL;
}
for (r = 0; r < ARRAY_SIZE(dict->inverse_translations); r++) {
kfree(dict->inverse_translations[r]);
dict->inverse_translations[r] = NULL;
}
kfree(dict->inverse_trans_unicode);
dict->inverse_trans_unicode = NULL;
}
/* Caller must hold the console lock */
void con_free_unimap(struct vc_data *vc)
{
struct uni_pagedict *p;
p = *vc->uni_pagedict_loc;
if (!p)
return;
*vc->uni_pagedict_loc = NULL;
if (--p->refcount)
return;
con_release_unimap(p);
kfree(p);
}
static int con_unify_unimap(struct vc_data *conp, struct uni_pagedict *dict1)
{
struct uni_pagedict *dict2;
unsigned int cons, d, r;
for (cons = 0; cons < MAX_NR_CONSOLES; cons++) {
if (!vc_cons_allocated(cons))
continue;
dict2 = *vc_cons[cons].d->uni_pagedict_loc;
if (!dict2 || dict2 == dict1 || dict2->sum != dict1->sum)
continue;
for (d = 0; d < UNI_DIRS; d++) {
u16 **dir1 = dict1->uni_pgdir[d];
u16 **dir2 = dict2->uni_pgdir[d];
if (!dir1 && !dir2)
continue;
if (!dir1 || !dir2)
break;
for (r = 0; r < UNI_DIR_ROWS; r++) {
if (!dir1[r] && !dir2[r])
continue;
if (!dir1[r] || !dir2[r])
break;
if (memcmp(dir1[r], dir2[r], UNI_ROW_GLYPHS *
sizeof(*dir1[r])))
break;
}
if (r < UNI_DIR_ROWS)
break;
}
if (d == UNI_DIRS) {
dict2->refcount++;
*conp->uni_pagedict_loc = dict2;
con_release_unimap(dict1);
kfree(dict1);
return 1;
}
}
return 0;
}
static int
con_insert_unipair(struct uni_pagedict *p, u_short unicode, u_short fontpos)
{
u16 **dir, *row;
unsigned int n;
n = UNI_DIR(unicode);
dir = p->uni_pgdir[n];
if (!dir) {
dir = p->uni_pgdir[n] = kcalloc(UNI_DIR_ROWS, sizeof(*dir),
GFP_KERNEL);
if (!dir)
return -ENOMEM;
}
n = UNI_ROW(unicode);
row = dir[n];
if (!row) {
row = dir[n] = kmalloc_array(UNI_ROW_GLYPHS, sizeof(*row),
GFP_KERNEL);
if (!row)
return -ENOMEM;
/* No glyphs for the characters (yet) */
memset(row, 0xff, UNI_ROW_GLYPHS * sizeof(*row));
}
row[UNI_GLYPH(unicode)] = fontpos;
p->sum += (fontpos << 20U) + unicode;
return 0;
}
static int con_allocate_new(struct vc_data *vc)
{
struct uni_pagedict *new, *old = *vc->uni_pagedict_loc;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return -ENOMEM;
new->refcount = 1;
*vc->uni_pagedict_loc = new;
if (old)
old->refcount--;
return 0;
}
/* Caller must hold the lock */
static int con_do_clear_unimap(struct vc_data *vc)
{
struct uni_pagedict *old = *vc->uni_pagedict_loc;
if (!old || old->refcount > 1)
return con_allocate_new(vc);
old->sum = 0;
con_release_unimap(old);
return 0;
}
int con_clear_unimap(struct vc_data *vc)
{
int ret;
console_lock();
ret = con_do_clear_unimap(vc);
console_unlock();
return ret;
}
static struct uni_pagedict *con_unshare_unimap(struct vc_data *vc,
struct uni_pagedict *old)
{
struct uni_pagedict *new;
unsigned int d, r, g;
int ret;
u16 uni = 0;
ret = con_allocate_new(vc);
if (ret)
return ERR_PTR(ret);
new = *vc->uni_pagedict_loc;
/*
* uni_pgdir is a 32*32*64 table with rows allocated when its first
* entry is added. The unicode value must still be incremented for
* empty rows. We are copying entries from "old" to "new".
*/
for (d = 0; d < UNI_DIRS; d++) {
u16 **dir = old->uni_pgdir[d];
if (!dir) {
/* Account for empty table */
uni += UNI_DIR_ROWS * UNI_ROW_GLYPHS;
continue;
}
for (r = 0; r < UNI_DIR_ROWS; r++) {
u16 *row = dir[r];
if (!row) {
/* Account for row of 64 empty entries */
uni += UNI_ROW_GLYPHS;
continue;
}
for (g = 0; g < UNI_ROW_GLYPHS; g++, uni++) {
if (row[g] == 0xffff)
continue;
/*
* Found one, copy entry for unicode uni with
* fontpos value row[g].
*/
ret = con_insert_unipair(new, uni, row[g]);
if (ret) {
old->refcount++;
*vc->uni_pagedict_loc = old;
con_release_unimap(new);
kfree(new);
return ERR_PTR(ret);
}
}
}
}
return new;
}
int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
{
int err = 0, err1;
struct uni_pagedict *dict;
struct unipair *unilist, *plist;
if (!ct)
return 0;
unilist = vmemdup_user(list, array_size(sizeof(*unilist), ct));
if (IS_ERR(unilist))
return PTR_ERR(unilist);
console_lock();
/* Save original vc_unipagdir_loc in case we allocate a new one */
dict = *vc->uni_pagedict_loc;
if (!dict) {
err = -EINVAL;
goto out_unlock;
}
if (dict->refcount > 1) {
dict = con_unshare_unimap(vc, dict);
if (IS_ERR(dict)) {
err = PTR_ERR(dict);
goto out_unlock;
}
} else if (dict == dflt) {
dflt = NULL;
}
/*
* Insert user specified unicode pairs into new table.
*/
for (plist = unilist; ct; ct--, plist++) {
err1 = con_insert_unipair(dict, plist->unicode, plist->fontpos);
if (err1)
err = err1;
}
/*
* Merge with fontmaps of any other virtual consoles.
*/
if (con_unify_unimap(vc, dict))
goto out_unlock;
for (enum translation_map m = FIRST_MAP; m <= LAST_MAP; m++)
set_inverse_transl(vc, dict, m);
set_inverse_trans_unicode(dict);
out_unlock:
console_unlock();
kvfree(unilist);
return err;
}
/**
* con_set_default_unimap - set default unicode map
* @vc: the console we are updating
*
* Loads the unimap for the hardware font, as defined in uni_hash.tbl.
* The representation used was the most compact I could come up
* with. This routine is executed at video setup, and when the
* PIO_FONTRESET ioctl is called.
*
* The caller must hold the console lock
*/
int con_set_default_unimap(struct vc_data *vc)
{
struct uni_pagedict *dict;
unsigned int fontpos, count;
int err = 0, err1;
u16 *dfont;
if (dflt) {
dict = *vc->uni_pagedict_loc;
if (dict == dflt)
return 0;
dflt->refcount++;
*vc->uni_pagedict_loc = dflt;
if (dict && !--dict->refcount) {
con_release_unimap(dict);
kfree(dict);
}
return 0;
}
/* The default font is always 256 characters */
err = con_do_clear_unimap(vc);
if (err)
return err;
dict = *vc->uni_pagedict_loc;
dfont = dfont_unitable;
for (fontpos = 0; fontpos < 256U; fontpos++)
for (count = dfont_unicount[fontpos]; count; count--) {
err1 = con_insert_unipair(dict, *(dfont++), fontpos);
if (err1)
err = err1;
}
if (con_unify_unimap(vc, dict)) {
dflt = *vc->uni_pagedict_loc;
return err;
}
for (enum translation_map m = FIRST_MAP; m <= LAST_MAP; m++)
set_inverse_transl(vc, dict, m);
set_inverse_trans_unicode(dict);
dflt = dict;
return err;
}
EXPORT_SYMBOL(con_set_default_unimap);
/**
* con_copy_unimap - copy unimap between two vts
* @dst_vc: target
* @src_vc: source
*
* The caller must hold the console lock when invoking this method
*/
int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc)
{
struct uni_pagedict *src;
if (!*src_vc->uni_pagedict_loc)
return -EINVAL;
if (*dst_vc->uni_pagedict_loc == *src_vc->uni_pagedict_loc)
return 0;
con_free_unimap(dst_vc);
src = *src_vc->uni_pagedict_loc;
src->refcount++;
*dst_vc->uni_pagedict_loc = src;
return 0;
}
EXPORT_SYMBOL(con_copy_unimap);
/*
* con_get_unimap - get the unicode map
*
* Read the console unicode data for this console. Called from the ioctl
* handlers.
*/
int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct,
struct unipair __user *list)
{
ushort ect;
struct uni_pagedict *dict;
struct unipair *unilist;
unsigned int d, r, g;
int ret = 0;
unilist = kvmalloc_array(ct, sizeof(*unilist), GFP_KERNEL);
if (!unilist)
return -ENOMEM;
console_lock();
ect = 0;
dict = *vc->uni_pagedict_loc;
if (!dict)
goto unlock;
for (d = 0; d < UNI_DIRS; d++) {
u16 **dir = dict->uni_pgdir[d];
if (!dir)
continue;
for (r = 0; r < UNI_DIR_ROWS; r++) {
u16 *row = dir[r];
if (!row)
continue;
for (g = 0; g < UNI_ROW_GLYPHS; g++, row++) {
if (*row >= MAX_GLYPH)
continue;
if (ect < ct) {
unilist[ect].unicode = UNI(d, r, g);
unilist[ect].fontpos = *row;
}
ect++;
}
}
}
unlock:
console_unlock();
if (copy_to_user(list, unilist, min(ect, ct) * sizeof(*unilist)))
ret = -EFAULT;
if (put_user(ect, uct))
ret = -EFAULT;
kvfree(unilist);
return ret ? ret : (ect <= ct) ? 0 : -ENOMEM;
}
/*
* Always use USER_MAP. These functions are used by the keyboard,
* which shouldn't be affected by G0/G1 switching, etc.
* If the user map still contains default values, i.e. the
 * direct-to-font mapping, then assume the user is using Latin1.
*
* FIXME: at some point we need to decide if we want to lock the table
* update element itself via the keyboard_event_lock for consistency with the
* keyboard driver as well as the consoles
*/
/* may be called during an interrupt */
u32 conv_8bit_to_uni(unsigned char c)
{
unsigned short uni = translations[USER_MAP][c];
return uni == (0xf000 | c) ? c : uni;
}
int conv_uni_to_8bit(u32 uni)
{
int c;
for (c = 0; c < ARRAY_SIZE(translations[USER_MAP]); c++)
if (translations[USER_MAP][c] == uni ||
(translations[USER_MAP][c] == (c | 0xf000) && uni == c))
return c;
return -1;
}
int conv_uni_to_pc(struct vc_data *conp, long ucs)
{
struct uni_pagedict *dict;
u16 **dir, *row, glyph;
/* Only 16-bit codes supported at this time */
if (ucs > 0xffff)
return -4; /* Not found */
else if (ucs < 0x20)
return -1; /* Not a printable character */
else if (ucs == 0xfeff || (ucs >= 0x200b && ucs <= 0x200f))
return -2; /* Zero-width space */
/*
* UNI_DIRECT_BASE indicates the start of the region in the User Zone
* which always has a 1:1 mapping to the currently loaded font. The
* UNI_DIRECT_MASK indicates the bit span of the region.
*/
else if ((ucs & ~UNI_DIRECT_MASK) == UNI_DIRECT_BASE)
return ucs & UNI_DIRECT_MASK;
dict = *conp->uni_pagedict_loc;
if (!dict)
return -3;
dir = dict->uni_pgdir[UNI_DIR(ucs)];
if (!dir)
return -4;
row = dir[UNI_ROW(ucs)];
if (!row)
return -4;
glyph = row[UNI_GLYPH(ucs)];
if (glyph >= MAX_GLYPH)
return -4;
return glyph;
}
/*
* This is called at sys_setup time, after memory and the console are
* initialized. It must be possible to call kmalloc(..., GFP_KERNEL)
* from this function, hence the call from sys_setup.
*/
void __init
console_map_init(void)
{
int i;
for (i = 0; i < MAX_NR_CONSOLES; i++)
if (vc_cons_allocated(i) && !*vc_cons[i].d->uni_pagedict_loc)
con_set_default_unimap(vc_cons[i].d);
}
| linux-master | drivers/tty/vt/consolemap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Written for linux by Johan Myreen as a translation from
* the assembly version by Linus (with diacriticals added)
*
* Some additional features added by Christoph Niemann (ChN), March 1993
*
* Loadable keymaps by Risto Kankkunen, May 1993
*
* Diacriticals redone & other small changes, [email protected], June 1993
* Added decr/incr_console, dynamic keymaps, Unicode support,
* dynamic function/string keys, led setting, Sept 1994
* `Sticky' modifier keys, 951006.
*
* 11-11-96: SAK should now work in the raw mode (Martin Mares)
*
* Modified to provide 'generic' keyboard support by Hamish Macdonald
* Merge with the m68k keyboard driver and split-off of the PC low-level
* parts by Geert Uytterhoeven, May 1997
*
* 27-05-97: Added support for the Magic SysRq Key (Martin Mares)
* 30-07-98: Dead keys redone, [email protected].
* 21-08-02: Converted to input API, major cleanup. (Vojtech Pavlik)
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/consolemap.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/jiffies.h>
#include <linux/kbd_diacr.h>
#include <linux/kbd_kern.h>
#include <linux/leds.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tty_flip.h>
#include <linux/tty.h>
#include <linux/uaccess.h>
#include <linux/vt_kern.h>
#include <asm/irq_regs.h>
/*
* Exported functions/variables
*/
#define KBD_DEFMODE (BIT(VC_REPEAT) | BIT(VC_META))
#if defined(CONFIG_X86) || defined(CONFIG_PARISC)
#include <asm/kbdleds.h>
#else
static inline int kbd_defleds(void)
{
return 0;
}
#endif
#define KBD_DEFLOCK 0
/*
* Handler Tables.
*/
#define K_HANDLERS\
k_self, k_fn, k_spec, k_pad,\
k_dead, k_cons, k_cur, k_shift,\
k_meta, k_ascii, k_lock, k_lowercase,\
k_slock, k_dead2, k_brl, k_ignore
typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value,
char up_flag);
static k_handler_fn K_HANDLERS;
static k_handler_fn *k_handler[16] = { K_HANDLERS };
#define FN_HANDLERS\
fn_null, fn_enter, fn_show_ptregs, fn_show_mem,\
fn_show_state, fn_send_intr, fn_lastcons, fn_caps_toggle,\
fn_num, fn_hold, fn_scroll_forw, fn_scroll_back,\
fn_boot_it, fn_caps_on, fn_compose, fn_SAK,\
fn_dec_console, fn_inc_console, fn_spawn_con, fn_bare_num
typedef void (fn_handler_fn)(struct vc_data *vc);
static fn_handler_fn FN_HANDLERS;
static fn_handler_fn *fn_handler[] = { FN_HANDLERS };
/*
* Variables exported for vt_ioctl.c
*/
struct vt_spawn_console vt_spawn_con = {
.lock = __SPIN_LOCK_UNLOCKED(vt_spawn_con.lock),
.pid = NULL,
.sig = 0,
};
/*
* Internal Data.
*/
static struct kbd_struct kbd_table[MAX_NR_CONSOLES];
static struct kbd_struct *kbd = kbd_table;
/* maximum values each key_handler can handle */
static const unsigned char max_vals[] = {
[ KT_LATIN ] = 255,
[ KT_FN ] = ARRAY_SIZE(func_table) - 1,
[ KT_SPEC ] = ARRAY_SIZE(fn_handler) - 1,
[ KT_PAD ] = NR_PAD - 1,
[ KT_DEAD ] = NR_DEAD - 1,
[ KT_CONS ] = 255,
[ KT_CUR ] = 3,
[ KT_SHIFT ] = NR_SHIFT - 1,
[ KT_META ] = 255,
[ KT_ASCII ] = NR_ASCII - 1,
[ KT_LOCK ] = NR_LOCK - 1,
[ KT_LETTER ] = 255,
[ KT_SLOCK ] = NR_LOCK - 1,
[ KT_DEAD2 ] = 255,
[ KT_BRL ] = NR_BRL - 1,
};
static const int NR_TYPES = ARRAY_SIZE(max_vals);
static void kbd_bh(struct tasklet_struct *unused);
static DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh);
static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock);
static DEFINE_SPINLOCK(led_lock);
static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
static DECLARE_BITMAP(key_down, KEY_CNT); /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
static bool dead_key_next;
/* Handles a number being assembled on the number pad */
static bool npadch_active;
static unsigned int npadch_value;
static unsigned int diacr;
static bool rep; /* flag telling character repeat */
static int shift_state = 0;
static unsigned int ledstate = -1U; /* undefined */
static unsigned char ledioctl;
static bool vt_switch;
/*
* Notifier list for console keyboard events
*/
static ATOMIC_NOTIFIER_HEAD(keyboard_notifier_list);
int register_keyboard_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&keyboard_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(register_keyboard_notifier);
int unregister_keyboard_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&keyboard_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_keyboard_notifier);
/*
* Translation of scancodes to keycodes. We set them on only the first
* keyboard in the list that accepts the scancode and keycode.
* Explanation for not choosing the first attached keyboard anymore:
* USB keyboards for example have two event devices: one for all "normal"
* keys and one for extra function keys (like "volume up", "make coffee",
* etc.). So this means that scancodes for the extra function keys won't
* be valid for the first event device, but will be for the second.
*/
struct getset_keycode_data {
struct input_keymap_entry ke;
int error;
};
static int getkeycode_helper(struct input_handle *handle, void *data)
{
struct getset_keycode_data *d = data;
d->error = input_get_keycode(handle->dev, &d->ke);
return d->error == 0; /* stop as soon as we successfully get one */
}
static int getkeycode(unsigned int scancode)
{
struct getset_keycode_data d = {
.ke = {
.flags = 0,
.len = sizeof(scancode),
.keycode = 0,
},
.error = -ENODEV,
};
memcpy(d.ke.scancode, &scancode, sizeof(scancode));
input_handler_for_each_handle(&kbd_handler, &d, getkeycode_helper);
return d.error ?: d.ke.keycode;
}
static int setkeycode_helper(struct input_handle *handle, void *data)
{
struct getset_keycode_data *d = data;
d->error = input_set_keycode(handle->dev, &d->ke);
return d->error == 0; /* stop as soon as we successfully set one */
}
static int setkeycode(unsigned int scancode, unsigned int keycode)
{
struct getset_keycode_data d = {
.ke = {
.flags = 0,
.len = sizeof(scancode),
.keycode = keycode,
},
.error = -ENODEV,
};
memcpy(d.ke.scancode, &scancode, sizeof(scancode));
input_handler_for_each_handle(&kbd_handler, &d, setkeycode_helper);
return d.error;
}
/*
* Making beeps and bells. Note that we prefer beeps to bells, but when
* shutting the sound off we do both.
*/
static int kd_sound_helper(struct input_handle *handle, void *data)
{
unsigned int *hz = data;
struct input_dev *dev = handle->dev;
if (test_bit(EV_SND, dev->evbit)) {
if (test_bit(SND_TONE, dev->sndbit)) {
input_inject_event(handle, EV_SND, SND_TONE, *hz);
if (*hz)
return 0;
}
if (test_bit(SND_BELL, dev->sndbit))
input_inject_event(handle, EV_SND, SND_BELL, *hz ? 1 : 0);
}
return 0;
}
static void kd_nosound(struct timer_list *unused)
{
static unsigned int zero;
input_handler_for_each_handle(&kbd_handler, &zero, kd_sound_helper);
}
static DEFINE_TIMER(kd_mksound_timer, kd_nosound);
void kd_mksound(unsigned int hz, unsigned int ticks)
{
del_timer_sync(&kd_mksound_timer);
input_handler_for_each_handle(&kbd_handler, &hz, kd_sound_helper);
if (hz && ticks)
mod_timer(&kd_mksound_timer, jiffies + ticks);
}
EXPORT_SYMBOL(kd_mksound);
/*
* Setting the keyboard rate.
*/
static int kbd_rate_helper(struct input_handle *handle, void *data)
{
struct input_dev *dev = handle->dev;
struct kbd_repeat *rpt = data;
if (test_bit(EV_REP, dev->evbit)) {
if (rpt[0].delay > 0)
input_inject_event(handle,
EV_REP, REP_DELAY, rpt[0].delay);
if (rpt[0].period > 0)
input_inject_event(handle,
EV_REP, REP_PERIOD, rpt[0].period);
rpt[1].delay = dev->rep[REP_DELAY];
rpt[1].period = dev->rep[REP_PERIOD];
}
return 0;
}
int kbd_rate(struct kbd_repeat *rpt)
{
struct kbd_repeat data[2] = { *rpt };
input_handler_for_each_handle(&kbd_handler, data, kbd_rate_helper);
*rpt = data[1]; /* Copy currently used settings */
return 0;
}
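/*
 * Illustrative use: a caller passing { .delay = 250, .period = 33 }
 * requests roughly 30 characters per second after a 250 ms delay; on
 * return, *rpt reports the repeat settings actually in effect.
 */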
/*
* Helper Functions.
*/
static void put_queue(struct vc_data *vc, int ch)
{
tty_insert_flip_char(&vc->port, ch, 0);
tty_flip_buffer_push(&vc->port);
}
static void puts_queue(struct vc_data *vc, const char *cp)
{
tty_insert_flip_string(&vc->port, cp, strlen(cp));
tty_flip_buffer_push(&vc->port);
}
static void applkey(struct vc_data *vc, int key, char mode)
{
static char buf[] = { 0x1b, 'O', 0x00, 0x00 };
buf[1] = (mode ? 'O' : '[');
buf[2] = key;
puts_queue(vc, buf);
}
/*
 * Many other routines do put_queue, but they either produce ASCII or
 * some user-assigned string; in both cases we may assume that it is
 * already in UTF-8.
*/
static void to_utf8(struct vc_data *vc, uint c)
{
if (c < 0x80)
/* 0******* */
put_queue(vc, c);
else if (c < 0x800) {
/* 110***** 10****** */
put_queue(vc, 0xc0 | (c >> 6));
put_queue(vc, 0x80 | (c & 0x3f));
} else if (c < 0x10000) {
if (c >= 0xD800 && c < 0xE000)
return;
if (c == 0xFFFF)
return;
/* 1110**** 10****** 10****** */
put_queue(vc, 0xe0 | (c >> 12));
put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
put_queue(vc, 0x80 | (c & 0x3f));
} else if (c < 0x110000) {
/* 11110*** 10****** 10****** 10****** */
put_queue(vc, 0xf0 | (c >> 18));
put_queue(vc, 0x80 | ((c >> 12) & 0x3f));
put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
put_queue(vc, 0x80 | (c & 0x3f));
}
}
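/*
 * Worked example: to_utf8(vc, 0x20ac) (EURO SIGN) queues the bytes
 * 0xe2 0x82 0xac. Surrogates (0xd800-0xdfff) and 0xffff are dropped,
 * and anything at or above 0x110000 is silently ignored.
 */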
/* FIXME: review locking for vt.c callers */
static void set_leds(void)
{
tasklet_schedule(&keyboard_tasklet);
}
/*
 * Called after returning from RAW mode or when changing consoles - recompute
 * shift_down[] and shift_state from key_down[]. May be called when the keymap
 * is undefined, so that a shift key release is still seen. The caller must
 * hold the kbd_event_lock.
*/
static void do_compute_shiftstate(void)
{
unsigned int k, sym, val;
shift_state = 0;
memset(shift_down, 0, sizeof(shift_down));
for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
sym = U(key_maps[0][k]);
if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
continue;
val = KVAL(sym);
if (val == KVAL(K_CAPSSHIFT))
val = KVAL(K_SHIFT);
shift_down[val]++;
shift_state |= BIT(val);
}
}
/* We still have to export this method to vt.c */
void vt_set_leds_compute_shiftstate(void)
{
unsigned long flags;
/*
* When VT is switched, the keyboard led needs to be set once.
* Ensure that after the switch is completed, the state of the
* keyboard LED is consistent with the state of the keyboard lock.
*/
vt_switch = true;
set_leds();
spin_lock_irqsave(&kbd_event_lock, flags);
do_compute_shiftstate();
spin_unlock_irqrestore(&kbd_event_lock, flags);
}
/*
* We have a combining character DIACR here, followed by the character CH.
* If the combination occurs in the table, return the corresponding value.
* Otherwise, if CH is a space or equals DIACR, return DIACR.
* Otherwise, conclude that DIACR was not combining after all,
* queue it and return CH.
*/
static unsigned int handle_diacr(struct vc_data *vc, unsigned int ch)
{
unsigned int d = diacr;
unsigned int i;
diacr = 0;
if ((d & ~0xff) == BRL_UC_ROW) {
if ((ch & ~0xff) == BRL_UC_ROW)
return d | ch;
} else {
for (i = 0; i < accent_table_size; i++)
if (accent_table[i].diacr == d && accent_table[i].base == ch)
return accent_table[i].result;
}
if (ch == ' ' || ch == (BRL_UC_ROW|0) || ch == d)
return d;
if (kbd->kbdmode == VC_UNICODE)
to_utf8(vc, d);
else {
int c = conv_uni_to_8bit(d);
if (c != -1)
put_queue(vc, c);
}
return ch;
}
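/*
 * Illustrative example (assuming the accent table carries the usual
 * entry combining an acute accent with 'e' into 0xe9): after a dead
 * acute, typing 'e' makes handle_diacr() return U+00E9, while typing
 * a space returns the bare accent itself.
 */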
/*
* Special function handlers
*/
static void fn_enter(struct vc_data *vc)
{
if (diacr) {
if (kbd->kbdmode == VC_UNICODE)
to_utf8(vc, diacr);
else {
int c = conv_uni_to_8bit(diacr);
if (c != -1)
put_queue(vc, c);
}
diacr = 0;
}
put_queue(vc, '\r');
if (vc_kbd_mode(kbd, VC_CRLF))
put_queue(vc, '\n');
}
static void fn_caps_toggle(struct vc_data *vc)
{
if (rep)
return;
chg_vc_kbd_led(kbd, VC_CAPSLOCK);
}
static void fn_caps_on(struct vc_data *vc)
{
if (rep)
return;
set_vc_kbd_led(kbd, VC_CAPSLOCK);
}
static void fn_show_ptregs(struct vc_data *vc)
{
struct pt_regs *regs = get_irq_regs();
if (regs)
show_regs(regs);
}
static void fn_hold(struct vc_data *vc)
{
struct tty_struct *tty = vc->port.tty;
if (rep || !tty)
return;
/*
* Note: SCROLLOCK will be set (cleared) by stop_tty (start_tty);
* these routines are also activated by ^S/^Q.
* (And SCROLLOCK can also be set by the ioctl KDSKBLED.)
*/
if (tty->flow.stopped)
start_tty(tty);
else
stop_tty(tty);
}
static void fn_num(struct vc_data *vc)
{
if (vc_kbd_mode(kbd, VC_APPLIC))
applkey(vc, 'P', 1);
else
fn_bare_num(vc);
}
/*
* Bind this to Shift-NumLock if you work in application keypad mode
* but want to be able to change the NumLock flag.
* Bind this to NumLock if you prefer that the NumLock key always
* changes the NumLock flag.
*/
static void fn_bare_num(struct vc_data *vc)
{
if (!rep)
chg_vc_kbd_led(kbd, VC_NUMLOCK);
}
static void fn_lastcons(struct vc_data *vc)
{
/* switch to the last used console, ChN */
set_console(last_console);
}
static void fn_dec_console(struct vc_data *vc)
{
int i, cur = fg_console;
/* Currently switching? Queue this next switch relative to that. */
if (want_console != -1)
cur = want_console;
for (i = cur - 1; i != cur; i--) {
if (i == -1)
i = MAX_NR_CONSOLES - 1;
if (vc_cons_allocated(i))
break;
}
set_console(i);
}
static void fn_inc_console(struct vc_data *vc)
{
int i, cur = fg_console;
/* Currently switching? Queue this next switch relative to that. */
if (want_console != -1)
cur = want_console;
for (i = cur+1; i != cur; i++) {
if (i == MAX_NR_CONSOLES)
i = 0;
if (vc_cons_allocated(i))
break;
}
set_console(i);
}
static void fn_send_intr(struct vc_data *vc)
{
tty_insert_flip_char(&vc->port, 0, TTY_BREAK);
tty_flip_buffer_push(&vc->port);
}
static void fn_scroll_forw(struct vc_data *vc)
{
scrollfront(vc, 0);
}
static void fn_scroll_back(struct vc_data *vc)
{
scrollback(vc);
}
static void fn_show_mem(struct vc_data *vc)
{
show_mem();
}
static void fn_show_state(struct vc_data *vc)
{
show_state();
}
static void fn_boot_it(struct vc_data *vc)
{
ctrl_alt_del();
}
static void fn_compose(struct vc_data *vc)
{
dead_key_next = true;
}
static void fn_spawn_con(struct vc_data *vc)
{
spin_lock(&vt_spawn_con.lock);
if (vt_spawn_con.pid)
if (kill_pid(vt_spawn_con.pid, vt_spawn_con.sig, 1)) {
put_pid(vt_spawn_con.pid);
vt_spawn_con.pid = NULL;
}
spin_unlock(&vt_spawn_con.lock);
}
static void fn_SAK(struct vc_data *vc)
{
struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
schedule_work(SAK_work);
}
static void fn_null(struct vc_data *vc)
{
do_compute_shiftstate();
}
/*
* Special key handlers
*/
static void k_ignore(struct vc_data *vc, unsigned char value, char up_flag)
{
}
static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
{
if (up_flag)
return;
if (value >= ARRAY_SIZE(fn_handler))
return;
if ((kbd->kbdmode == VC_RAW ||
kbd->kbdmode == VC_MEDIUMRAW ||
kbd->kbdmode == VC_OFF) &&
value != KVAL(K_SAK))
return; /* SAK is allowed even in raw mode */
fn_handler[value](vc);
}
static void k_lowercase(struct vc_data *vc, unsigned char value, char up_flag)
{
pr_err("k_lowercase was called - impossible\n");
}
static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
{
if (up_flag)
return; /* no action, if this is a key release */
if (diacr)
value = handle_diacr(vc, value);
if (dead_key_next) {
dead_key_next = false;
diacr = value;
return;
}
if (kbd->kbdmode == VC_UNICODE)
to_utf8(vc, value);
else {
int c = conv_uni_to_8bit(value);
if (c != -1)
put_queue(vc, c);
}
}
/*
* Handle dead key. Note that we now may have several
* dead keys modifying the same character. Very useful
* for Vietnamese.
*/
static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag)
{
if (up_flag)
return;
diacr = (diacr ? handle_diacr(vc, value) : value);
}
static void k_self(struct vc_data *vc, unsigned char value, char up_flag)
{
k_unicode(vc, conv_8bit_to_uni(value), up_flag);
}
static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag)
{
k_deadunicode(vc, value, up_flag);
}
/*
* Obsolete - for backwards compatibility only
*/
static void k_dead(struct vc_data *vc, unsigned char value, char up_flag)
{
static const unsigned char ret_diacr[NR_DEAD] = {
'`', /* dead_grave */
'\'', /* dead_acute */
'^', /* dead_circumflex */
		'~', /* dead_tilde */
'"', /* dead_diaeresis */
',', /* dead_cedilla */
'_', /* dead_macron */
'U', /* dead_breve */
'.', /* dead_abovedot */
'*', /* dead_abovering */
'=', /* dead_doubleacute */
'c', /* dead_caron */
'k', /* dead_ogonek */
'i', /* dead_iota */
'#', /* dead_voiced_sound */
'o', /* dead_semivoiced_sound */
'!', /* dead_belowdot */
'?', /* dead_hook */
'+', /* dead_horn */
'-', /* dead_stroke */
')', /* dead_abovecomma */
'(', /* dead_abovereversedcomma */
':', /* dead_doublegrave */
'n', /* dead_invertedbreve */
';', /* dead_belowcomma */
'$', /* dead_currency */
'@', /* dead_greek */
};
k_deadunicode(vc, ret_diacr[value], up_flag);
}
static void k_cons(struct vc_data *vc, unsigned char value, char up_flag)
{
if (up_flag)
return;
set_console(value);
}
static void k_fn(struct vc_data *vc, unsigned char value, char up_flag)
{
if (up_flag)
return;
if ((unsigned)value < ARRAY_SIZE(func_table)) {
unsigned long flags;
spin_lock_irqsave(&func_buf_lock, flags);
if (func_table[value])
puts_queue(vc, func_table[value]);
spin_unlock_irqrestore(&func_buf_lock, flags);
} else
pr_err("k_fn called with value=%d\n", value);
}
static void k_cur(struct vc_data *vc, unsigned char value, char up_flag)
{
static const char cur_chars[] = "BDCA";
if (up_flag)
return;
applkey(vc, cur_chars[value], vc_kbd_mode(kbd, VC_CKMODE));
}
static void k_pad(struct vc_data *vc, unsigned char value, char up_flag)
{
static const char pad_chars[] = "0123456789+-*/\015,.?()#";
static const char app_map[] = "pqrstuvwxylSRQMnnmPQS";
if (up_flag)
return; /* no action, if this is a key release */
/* kludge... shift forces cursor/number keys */
if (vc_kbd_mode(kbd, VC_APPLIC) && !shift_down[KG_SHIFT]) {
applkey(vc, app_map[value], 1);
return;
}
if (!vc_kbd_led(kbd, VC_NUMLOCK)) {
switch (value) {
case KVAL(K_PCOMMA):
case KVAL(K_PDOT):
k_fn(vc, KVAL(K_REMOVE), 0);
return;
case KVAL(K_P0):
k_fn(vc, KVAL(K_INSERT), 0);
return;
case KVAL(K_P1):
k_fn(vc, KVAL(K_SELECT), 0);
return;
case KVAL(K_P2):
k_cur(vc, KVAL(K_DOWN), 0);
return;
case KVAL(K_P3):
k_fn(vc, KVAL(K_PGDN), 0);
return;
case KVAL(K_P4):
k_cur(vc, KVAL(K_LEFT), 0);
return;
case KVAL(K_P6):
k_cur(vc, KVAL(K_RIGHT), 0);
return;
case KVAL(K_P7):
k_fn(vc, KVAL(K_FIND), 0);
return;
case KVAL(K_P8):
k_cur(vc, KVAL(K_UP), 0);
return;
case KVAL(K_P9):
k_fn(vc, KVAL(K_PGUP), 0);
return;
case KVAL(K_P5):
applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC));
return;
}
}
put_queue(vc, pad_chars[value]);
if (value == KVAL(K_PENTER) && vc_kbd_mode(kbd, VC_CRLF))
put_queue(vc, '\n');
}
static void k_shift(struct vc_data *vc, unsigned char value, char up_flag)
{
int old_state = shift_state;
if (rep)
return;
/*
* Mimic typewriter:
* a CapsShift key acts like Shift but undoes CapsLock
*/
if (value == KVAL(K_CAPSSHIFT)) {
value = KVAL(K_SHIFT);
if (!up_flag)
clr_vc_kbd_led(kbd, VC_CAPSLOCK);
}
if (up_flag) {
/*
* handle the case that two shift or control
* keys are depressed simultaneously
*/
if (shift_down[value])
shift_down[value]--;
} else
shift_down[value]++;
if (shift_down[value])
shift_state |= BIT(value);
else
shift_state &= ~BIT(value);
/* kludge */
if (up_flag && shift_state != old_state && npadch_active) {
if (kbd->kbdmode == VC_UNICODE)
to_utf8(vc, npadch_value);
else
put_queue(vc, npadch_value & 0xff);
npadch_active = false;
}
}
static void k_meta(struct vc_data *vc, unsigned char value, char up_flag)
{
if (up_flag)
return;
if (vc_kbd_mode(kbd, VC_META)) {
put_queue(vc, '\033');
put_queue(vc, value);
} else
put_queue(vc, value | BIT(7));
}
static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag)
{
unsigned int base;
if (up_flag)
return;
if (value < 10) {
/* decimal input of code, while Alt depressed */
base = 10;
} else {
/* hexadecimal input of code, while AltGr depressed */
value -= 10;
base = 16;
}
if (!npadch_active) {
npadch_value = 0;
npadch_active = true;
}
npadch_value = npadch_value * base + value;
}
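/*
 * Illustrative example: with Alt held, pressing keypad 6 then 4 gives
 * npadch_value = 6 * 10 + 4 = 64; releasing Alt changes shift_state,
 * and the kludge in k_shift() above then emits '@' (ASCII 64).
 */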
static void k_lock(struct vc_data *vc, unsigned char value, char up_flag)
{
if (up_flag || rep)
return;
chg_vc_kbd_lock(kbd, value);
}
static void k_slock(struct vc_data *vc, unsigned char value, char up_flag)
{
k_shift(vc, value, up_flag);
if (up_flag || rep)
return;
chg_vc_kbd_slock(kbd, value);
/* try to make Alt, oops, AltGr and such work */
if (!key_maps[kbd->lockstate ^ kbd->slockstate]) {
kbd->slockstate = 0;
chg_vc_kbd_slock(kbd, value);
}
}
/* by default, 300ms interval for combination release */
static unsigned brl_timeout = 300;
MODULE_PARM_DESC(brl_timeout, "Braille keys release delay in ms (0 for commit on first key release)");
module_param(brl_timeout, uint, 0644);
static unsigned brl_nbchords = 1;
MODULE_PARM_DESC(brl_nbchords, "Number of chords that produce a braille pattern (0 for dead chords)");
module_param(brl_nbchords, uint, 0644);
static void k_brlcommit(struct vc_data *vc, unsigned int pattern, char up_flag)
{
static unsigned long chords;
static unsigned committed;
if (!brl_nbchords)
k_deadunicode(vc, BRL_UC_ROW | pattern, up_flag);
else {
committed |= pattern;
chords++;
if (chords == brl_nbchords) {
k_unicode(vc, BRL_UC_ROW | committed, up_flag);
chords = 0;
committed = 0;
}
}
}
static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
{
static unsigned pressed, committing;
static unsigned long releasestart;
if (kbd->kbdmode != VC_UNICODE) {
if (!up_flag)
pr_warn("keyboard mode must be unicode for braille patterns\n");
return;
}
if (!value) {
k_unicode(vc, BRL_UC_ROW, up_flag);
return;
}
if (value > 8)
return;
if (!up_flag) {
pressed |= BIT(value - 1);
if (!brl_timeout)
committing = pressed;
} else if (brl_timeout) {
if (!committing ||
time_after(jiffies,
releasestart + msecs_to_jiffies(brl_timeout))) {
committing = pressed;
releasestart = jiffies;
}
pressed &= ~BIT(value - 1);
if (!pressed && committing) {
k_brlcommit(vc, committing, 0);
committing = 0;
}
} else {
if (committing) {
k_brlcommit(vc, committing, 0);
committing = 0;
}
pressed &= ~BIT(value - 1);
}
}
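/*
 * Illustrative timing: with the default brl_timeout of 300 ms, the
 * pattern commits once every dot is released, provided the releases
 * fall within 300 ms of the first one; slower releases re-snapshot
 * the pattern from the dots still held down.
 */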
#if IS_ENABLED(CONFIG_INPUT_LEDS) && IS_ENABLED(CONFIG_LEDS_TRIGGERS)
struct kbd_led_trigger {
struct led_trigger trigger;
unsigned int mask;
};
static int kbd_led_trigger_activate(struct led_classdev *cdev)
{
struct kbd_led_trigger *trigger =
container_of(cdev->trigger, struct kbd_led_trigger, trigger);
tasklet_disable(&keyboard_tasklet);
if (ledstate != -1U)
led_trigger_event(&trigger->trigger,
ledstate & trigger->mask ?
LED_FULL : LED_OFF);
tasklet_enable(&keyboard_tasklet);
return 0;
}
#define KBD_LED_TRIGGER(_led_bit, _name) { \
.trigger = { \
.name = _name, \
.activate = kbd_led_trigger_activate, \
}, \
.mask = BIT(_led_bit), \
}
#define KBD_LOCKSTATE_TRIGGER(_led_bit, _name) \
KBD_LED_TRIGGER((_led_bit) + 8, _name)
static struct kbd_led_trigger kbd_led_triggers[] = {
KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrolllock"),
KBD_LED_TRIGGER(VC_NUMLOCK, "kbd-numlock"),
KBD_LED_TRIGGER(VC_CAPSLOCK, "kbd-capslock"),
KBD_LED_TRIGGER(VC_KANALOCK, "kbd-kanalock"),
KBD_LOCKSTATE_TRIGGER(VC_SHIFTLOCK, "kbd-shiftlock"),
KBD_LOCKSTATE_TRIGGER(VC_ALTGRLOCK, "kbd-altgrlock"),
KBD_LOCKSTATE_TRIGGER(VC_CTRLLOCK, "kbd-ctrllock"),
KBD_LOCKSTATE_TRIGGER(VC_ALTLOCK, "kbd-altlock"),
KBD_LOCKSTATE_TRIGGER(VC_SHIFTLLOCK, "kbd-shiftllock"),
KBD_LOCKSTATE_TRIGGER(VC_SHIFTRLOCK, "kbd-shiftrlock"),
KBD_LOCKSTATE_TRIGGER(VC_CTRLLLOCK, "kbd-ctrlllock"),
KBD_LOCKSTATE_TRIGGER(VC_CTRLRLOCK, "kbd-ctrlrlock"),
};
static void kbd_propagate_led_state(unsigned int old_state,
unsigned int new_state)
{
struct kbd_led_trigger *trigger;
unsigned int changed = old_state ^ new_state;
int i;
for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); i++) {
trigger = &kbd_led_triggers[i];
if (changed & trigger->mask)
led_trigger_event(&trigger->trigger,
new_state & trigger->mask ?
LED_FULL : LED_OFF);
}
}
static int kbd_update_leds_helper(struct input_handle *handle, void *data)
{
unsigned int led_state = *(unsigned int *)data;
if (test_bit(EV_LED, handle->dev->evbit))
kbd_propagate_led_state(~led_state, led_state);
return 0;
}
static void kbd_init_leds(void)
{
int error;
int i;
for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); i++) {
error = led_trigger_register(&kbd_led_triggers[i].trigger);
if (error)
pr_err("error %d while registering trigger %s\n",
error, kbd_led_triggers[i].trigger.name);
}
}
#else
static int kbd_update_leds_helper(struct input_handle *handle, void *data)
{
unsigned int leds = *(unsigned int *)data;
if (test_bit(EV_LED, handle->dev->evbit)) {
input_inject_event(handle, EV_LED, LED_SCROLLL, !!(leds & BIT(0)));
input_inject_event(handle, EV_LED, LED_NUML, !!(leds & BIT(1)));
input_inject_event(handle, EV_LED, LED_CAPSL, !!(leds & BIT(2)));
input_inject_event(handle, EV_SYN, SYN_REPORT, 0);
}
return 0;
}
static void kbd_propagate_led_state(unsigned int old_state,
unsigned int new_state)
{
input_handler_for_each_handle(&kbd_handler, &new_state,
kbd_update_leds_helper);
}
static void kbd_init_leds(void)
{
}
#endif
/*
* The leds display either (i) the status of NumLock, CapsLock, ScrollLock,
* or (ii) whatever pattern of lights people want to show using KDSETLED,
* or (iii) specified bits of specified words in kernel memory.
*/
static unsigned char getledstate(void)
{
return ledstate & 0xff;
}
void setledstate(struct kbd_struct *kb, unsigned int led)
{
unsigned long flags;
spin_lock_irqsave(&led_lock, flags);
if (!(led & ~7)) {
ledioctl = led;
kb->ledmode = LED_SHOW_IOCTL;
} else
kb->ledmode = LED_SHOW_FLAGS;
set_leds();
spin_unlock_irqrestore(&led_lock, flags);
}
static inline unsigned char getleds(void)
{
struct kbd_struct *kb = kbd_table + fg_console;
if (kb->ledmode == LED_SHOW_IOCTL)
return ledioctl;
return kb->ledflagstate;
}
/**
* vt_get_leds - helper for braille console
* @console: console to read
* @flag: flag we want to check
*
* Check the status of a keyboard led flag and report it back
*/
int vt_get_leds(unsigned int console, int flag)
{
struct kbd_struct *kb = &kbd_table[console];
int ret;
unsigned long flags;
spin_lock_irqsave(&led_lock, flags);
ret = vc_kbd_led(kb, flag);
spin_unlock_irqrestore(&led_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(vt_get_leds);
/**
* vt_set_led_state - set LED state of a console
* @console: console to set
* @leds: LED bits
*
* Set the LEDs on a console. This is a wrapper for the VT layer
* so that we can keep kbd knowledge internal
*/
void vt_set_led_state(unsigned int console, int leds)
{
struct kbd_struct *kb = &kbd_table[console];
setledstate(kb, leds);
}
/**
* vt_kbd_con_start - Keyboard side of console start
* @console: console
*
* Handle console start. This is a wrapper for the VT layer
* so that we can keep kbd knowledge internal
*
* FIXME: We eventually need to hold the kbd lock here to protect
* the LED updating. We can't do it yet because fn_hold calls stop_tty
* and start_tty under the kbd_event_lock, while normal tty paths
* don't hold the lock. We probably need to split out an LED lock
* but not during an -rc release!
*/
void vt_kbd_con_start(unsigned int console)
{
struct kbd_struct *kb = &kbd_table[console];
unsigned long flags;
spin_lock_irqsave(&led_lock, flags);
clr_vc_kbd_led(kb, VC_SCROLLOCK);
set_leds();
spin_unlock_irqrestore(&led_lock, flags);
}
/**
* vt_kbd_con_stop - Keyboard side of console stop
* @console: console
*
* Handle console stop. This is a wrapper for the VT layer
* so that we can keep kbd knowledge internal
*/
void vt_kbd_con_stop(unsigned int console)
{
struct kbd_struct *kb = &kbd_table[console];
unsigned long flags;
spin_lock_irqsave(&led_lock, flags);
set_vc_kbd_led(kb, VC_SCROLLOCK);
set_leds();
spin_unlock_irqrestore(&led_lock, flags);
}
/*
 * This is the tasklet that updates the state of LEDs using standard
 * keyboard triggers. The reason we use a tasklet is that we need to
 * handle the case where the keyboard handler is not yet registered
 * but we are already getting updates from the VT to update LED state.
*/
static void kbd_bh(struct tasklet_struct *unused)
{
unsigned int leds;
unsigned long flags;
spin_lock_irqsave(&led_lock, flags);
leds = getleds();
leds |= (unsigned int)kbd->lockstate << 8;
spin_unlock_irqrestore(&led_lock, flags);
if (vt_switch) {
ledstate = ~leds;
vt_switch = false;
}
if (leds != ledstate) {
kbd_propagate_led_state(ledstate, leds);
ledstate = leds;
}
}
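/*
 * Illustrative layout of the value computed above: the low 8 bits of
 * leds carry the LED flags from getleds(), while bits 8 and up carry
 * kbd->lockstate - the bits the KBD_LOCKSTATE_TRIGGER() entries match.
 */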
#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_ALPHA) ||\
defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\
defined(CONFIG_PARISC) || defined(CONFIG_SUPERH) ||\
(defined(CONFIG_ARM) && defined(CONFIG_KEYBOARD_ATKBD) && !defined(CONFIG_ARCH_RPC))
static inline bool kbd_is_hw_raw(const struct input_dev *dev)
{
if (!test_bit(EV_MSC, dev->evbit) || !test_bit(MSC_RAW, dev->mscbit))
return false;
return dev->id.bustype == BUS_I8042 &&
dev->id.vendor == 0x0001 && dev->id.product == 0x0001;
}
static const unsigned short x86_keycodes[256] =
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84,118, 86, 87, 88,115,120,119,121,112,123, 92,
284,285,309, 0,312, 91,327,328,329,331,333,335,336,337,338,339,
367,288,302,304,350, 89,334,326,267,126,268,269,125,347,348,349,
360,261,262,263,268,376,100,101,321,316,373,286,289,102,351,355,
103,104,105,275,287,279,258,106,274,107,294,364,358,363,362,361,
291,108,381,281,290,272,292,305,280, 99,112,257,306,359,113,114,
264,117,271,374,379,265,266, 93, 94, 95, 85,259,375,260, 90,116,
377,109,111,277,278,282,283,295,296,297,299,300,301,293,303,307,
308,310,313,314,315,317,318,319,320,357,322,323,324,325,276,330,
332,340,365,342,343,344,345,346,356,270,341,368,369,370,371,372 };
#ifdef CONFIG_SPARC
static int sparc_l1_a_state;
extern void sun_do_break(void);
#endif
static int emulate_raw(struct vc_data *vc, unsigned int keycode,
unsigned char up_flag)
{
int code;
switch (keycode) {
case KEY_PAUSE:
put_queue(vc, 0xe1);
put_queue(vc, 0x1d | up_flag);
put_queue(vc, 0x45 | up_flag);
break;
case KEY_HANGEUL:
if (!up_flag)
put_queue(vc, 0xf2);
break;
case KEY_HANJA:
if (!up_flag)
put_queue(vc, 0xf1);
break;
case KEY_SYSRQ:
/*
* Real AT keyboards (that's what we're trying
* to emulate here) emit 0xe0 0x2a 0xe0 0x37 when
* pressing PrtSc/SysRq alone, but simply 0x54
* when pressing Alt+PrtSc/SysRq.
*/
if (test_bit(KEY_LEFTALT, key_down) ||
test_bit(KEY_RIGHTALT, key_down)) {
put_queue(vc, 0x54 | up_flag);
} else {
put_queue(vc, 0xe0);
put_queue(vc, 0x2a | up_flag);
put_queue(vc, 0xe0);
put_queue(vc, 0x37 | up_flag);
}
break;
default:
if (keycode > 255)
return -1;
code = x86_keycodes[keycode];
if (!code)
return -1;
if (code & 0x100)
put_queue(vc, 0xe0);
put_queue(vc, (code & 0x7f) | up_flag);
break;
}
return 0;
}
#else
static inline bool kbd_is_hw_raw(const struct input_dev *dev)
{
return false;
}
static int emulate_raw(struct vc_data *vc, unsigned int keycode, unsigned char up_flag)
{
if (keycode > 127)
return -1;
put_queue(vc, keycode | up_flag);
return 0;
}
#endif
static void kbd_rawcode(unsigned char data)
{
struct vc_data *vc = vc_cons[fg_console].d;
kbd = &kbd_table[vc->vc_num];
if (kbd->kbdmode == VC_RAW)
put_queue(vc, data);
}
static void kbd_keycode(unsigned int keycode, int down, bool hw_raw)
{
struct vc_data *vc = vc_cons[fg_console].d;
unsigned short keysym, *key_map;
unsigned char type;
bool raw_mode;
struct tty_struct *tty;
int shift_final;
struct keyboard_notifier_param param = { .vc = vc, .value = keycode, .down = down };
int rc;
tty = vc->port.tty;
if (tty && (!tty->driver_data)) {
		/* No driver data? Strange. Okay, we fix it then. */
tty->driver_data = vc;
}
kbd = &kbd_table[vc->vc_num];
#ifdef CONFIG_SPARC
if (keycode == KEY_STOP)
sparc_l1_a_state = down;
#endif
rep = (down == 2);
raw_mode = (kbd->kbdmode == VC_RAW);
if (raw_mode && !hw_raw)
if (emulate_raw(vc, keycode, !down << 7))
if (keycode < BTN_MISC && printk_ratelimit())
pr_warn("can't emulate rawmode for keycode %d\n",
keycode);
#ifdef CONFIG_SPARC
if (keycode == KEY_A && sparc_l1_a_state) {
sparc_l1_a_state = false;
sun_do_break();
}
#endif
if (kbd->kbdmode == VC_MEDIUMRAW) {
/*
* This is extended medium raw mode, with keys above 127
* encoded as 0, high 7 bits, low 7 bits, with the 0 bearing
* the 'up' flag if needed. 0 is reserved, so this shouldn't
* interfere with anything else. The two bytes after 0 will
* always have the up flag set not to interfere with older
* applications. This allows for 16384 different keycodes,
* which should be enough.
*/
if (keycode < 128) {
put_queue(vc, keycode | (!down << 7));
} else {
put_queue(vc, !down << 7);
put_queue(vc, (keycode >> 7) | BIT(7));
put_queue(vc, keycode | BIT(7));
}
raw_mode = true;
}
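	/*
	 * Illustrative encoding: a press of keycode 240 in this mode is
	 * queued as the three bytes 0x00 (up flag clear),
	 * 0x81 ((240 >> 7) | 0x80) and 0xf0 (240 | 0x80).
	 */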
assign_bit(keycode, key_down, down);
if (rep &&
(!vc_kbd_mode(kbd, VC_REPEAT) ||
(tty && !L_ECHO(tty) && tty_chars_in_buffer(tty)))) {
/*
		 * Don't repeat a key if the input buffers are not empty and the
		 * characters aren't echoed locally. This makes key repeat
* usable with slow applications and under heavy loads.
*/
return;
}
param.shift = shift_final = (shift_state | kbd->slockstate) ^ kbd->lockstate;
param.ledstate = kbd->ledflagstate;
key_map = key_maps[shift_final];
rc = atomic_notifier_call_chain(&keyboard_notifier_list,
					KBD_KEYCODE, &param);
if (rc == NOTIFY_STOP || !key_map) {
atomic_notifier_call_chain(&keyboard_notifier_list,
					KBD_UNBOUND_KEYCODE, &param);
do_compute_shiftstate();
kbd->slockstate = 0;
return;
}
if (keycode < NR_KEYS)
keysym = key_map[keycode];
else if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
else
return;
type = KTYP(keysym);
if (type < 0xf0) {
param.value = keysym;
rc = atomic_notifier_call_chain(&keyboard_notifier_list,
						KBD_UNICODE, &param);
if (rc != NOTIFY_STOP)
if (down && !raw_mode)
k_unicode(vc, keysym, !down);
return;
}
type -= 0xf0;
if (type == KT_LETTER) {
type = KT_LATIN;
if (vc_kbd_led(kbd, VC_CAPSLOCK)) {
key_map = key_maps[shift_final ^ BIT(KG_SHIFT)];
if (key_map)
keysym = key_map[keycode];
}
}
param.value = keysym;
rc = atomic_notifier_call_chain(&keyboard_notifier_list,
					KBD_KEYSYM, &param);
if (rc == NOTIFY_STOP)
return;
if ((raw_mode || kbd->kbdmode == VC_OFF) && type != KT_SPEC && type != KT_SHIFT)
return;
(*k_handler[type])(vc, keysym & 0xff, !down);
param.ledstate = kbd->ledflagstate;
	atomic_notifier_call_chain(&keyboard_notifier_list, KBD_POST_KEYSYM, &param);
if (type != KT_SLOCK)
kbd->slockstate = 0;
}
static void kbd_event(struct input_handle *handle, unsigned int event_type,
unsigned int event_code, int value)
{
/* We are called with interrupts disabled, just take the lock */
spin_lock(&kbd_event_lock);
if (event_type == EV_MSC && event_code == MSC_RAW &&
kbd_is_hw_raw(handle->dev))
kbd_rawcode(value);
if (event_type == EV_KEY && event_code <= KEY_MAX)
kbd_keycode(event_code, value, kbd_is_hw_raw(handle->dev));
spin_unlock(&kbd_event_lock);
tasklet_schedule(&keyboard_tasklet);
do_poke_blanked_console = 1;
schedule_console_callback();
}
static bool kbd_match(struct input_handler *handler, struct input_dev *dev)
{
if (test_bit(EV_SND, dev->evbit))
return true;
if (test_bit(EV_KEY, dev->evbit)) {
if (find_next_bit(dev->keybit, BTN_MISC, KEY_RESERVED) <
BTN_MISC)
return true;
if (find_next_bit(dev->keybit, KEY_BRL_DOT10 + 1,
KEY_BRL_DOT1) <= KEY_BRL_DOT10)
return true;
}
return false;
}
/*
* When a keyboard (or other input device) is found, the kbd_connect
* function is called. The function then looks at the device, and if it
* likes it, it can open it and get events from it. In this (kbd_connect)
* function, we should decide which VT to bind that keyboard to initially.
*/
static int kbd_connect(struct input_handler *handler, struct input_dev *dev,
const struct input_device_id *id)
{
struct input_handle *handle;
int error;
handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
handle->dev = dev;
handle->handler = handler;
handle->name = "kbd";
error = input_register_handle(handle);
if (error)
goto err_free_handle;
error = input_open_device(handle);
if (error)
goto err_unregister_handle;
return 0;
err_unregister_handle:
input_unregister_handle(handle);
err_free_handle:
kfree(handle);
return error;
}
static void kbd_disconnect(struct input_handle *handle)
{
input_close_device(handle);
input_unregister_handle(handle);
kfree(handle);
}
/*
* Start keyboard handler on the new keyboard by refreshing LED state to
* match the rest of the system.
*/
static void kbd_start(struct input_handle *handle)
{
tasklet_disable(&keyboard_tasklet);
if (ledstate != -1U)
kbd_update_leds_helper(handle, &ledstate);
tasklet_enable(&keyboard_tasklet);
}
static const struct input_device_id kbd_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
.evbit = { BIT_MASK(EV_KEY) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
.evbit = { BIT_MASK(EV_SND) },
},
{ }, /* Terminating entry */
};
MODULE_DEVICE_TABLE(input, kbd_ids);
static struct input_handler kbd_handler = {
.event = kbd_event,
.match = kbd_match,
.connect = kbd_connect,
.disconnect = kbd_disconnect,
.start = kbd_start,
.name = "kbd",
.id_table = kbd_ids,
};
int __init kbd_init(void)
{
int i;
int error;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
kbd_table[i].ledflagstate = kbd_defleds();
kbd_table[i].default_ledflagstate = kbd_defleds();
kbd_table[i].ledmode = LED_SHOW_FLAGS;
kbd_table[i].lockstate = KBD_DEFLOCK;
kbd_table[i].slockstate = 0;
kbd_table[i].modeflags = KBD_DEFMODE;
kbd_table[i].kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE;
}
kbd_init_leds();
error = input_register_handler(&kbd_handler);
if (error)
return error;
tasklet_enable(&keyboard_tasklet);
tasklet_schedule(&keyboard_tasklet);
return 0;
}
/* Ioctl support code */
/**
* vt_do_diacrit - diacritical table updates
* @cmd: ioctl request
* @udp: pointer to user data for ioctl
* @perm: permissions check computed by caller
*
* Update the diacritical tables atomically and safely. Lock them
* against simultaneous keypresses
*/
int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm)
{
unsigned long flags;
int asize;
int ret = 0;
switch (cmd) {
case KDGKBDIACR:
{
struct kbdiacrs __user *a = udp;
struct kbdiacr *dia;
int i;
dia = kmalloc_array(MAX_DIACR, sizeof(struct kbdiacr),
GFP_KERNEL);
if (!dia)
return -ENOMEM;
/* Lock the diacriticals table, make a copy and then
copy it after we unlock */
spin_lock_irqsave(&kbd_event_lock, flags);
asize = accent_table_size;
for (i = 0; i < asize; i++) {
dia[i].diacr = conv_uni_to_8bit(
accent_table[i].diacr);
dia[i].base = conv_uni_to_8bit(
accent_table[i].base);
dia[i].result = conv_uni_to_8bit(
accent_table[i].result);
}
spin_unlock_irqrestore(&kbd_event_lock, flags);
if (put_user(asize, &a->kb_cnt))
ret = -EFAULT;
else if (copy_to_user(a->kbdiacr, dia,
asize * sizeof(struct kbdiacr)))
ret = -EFAULT;
kfree(dia);
return ret;
}
case KDGKBDIACRUC:
{
struct kbdiacrsuc __user *a = udp;
void *buf;
buf = kmalloc_array(MAX_DIACR, sizeof(struct kbdiacruc),
GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
/* Lock the diacriticals table, make a copy and then
copy it after we unlock */
spin_lock_irqsave(&kbd_event_lock, flags);
asize = accent_table_size;
memcpy(buf, accent_table, asize * sizeof(struct kbdiacruc));
spin_unlock_irqrestore(&kbd_event_lock, flags);
if (put_user(asize, &a->kb_cnt))
ret = -EFAULT;
else if (copy_to_user(a->kbdiacruc, buf,
asize*sizeof(struct kbdiacruc)))
ret = -EFAULT;
kfree(buf);
return ret;
}
case KDSKBDIACR:
{
struct kbdiacrs __user *a = udp;
struct kbdiacr *dia = NULL;
unsigned int ct;
int i;
if (!perm)
return -EPERM;
if (get_user(ct, &a->kb_cnt))
return -EFAULT;
if (ct >= MAX_DIACR)
return -EINVAL;
if (ct) {
dia = memdup_user(a->kbdiacr,
sizeof(struct kbdiacr) * ct);
if (IS_ERR(dia))
return PTR_ERR(dia);
}
spin_lock_irqsave(&kbd_event_lock, flags);
accent_table_size = ct;
for (i = 0; i < ct; i++) {
accent_table[i].diacr =
conv_8bit_to_uni(dia[i].diacr);
accent_table[i].base =
conv_8bit_to_uni(dia[i].base);
accent_table[i].result =
conv_8bit_to_uni(dia[i].result);
}
spin_unlock_irqrestore(&kbd_event_lock, flags);
kfree(dia);
return 0;
}
case KDSKBDIACRUC:
{
struct kbdiacrsuc __user *a = udp;
unsigned int ct;
void *buf = NULL;
if (!perm)
return -EPERM;
if (get_user(ct, &a->kb_cnt))
return -EFAULT;
if (ct >= MAX_DIACR)
return -EINVAL;
if (ct) {
buf = memdup_user(a->kbdiacruc,
ct * sizeof(struct kbdiacruc));
if (IS_ERR(buf))
return PTR_ERR(buf);
}
spin_lock_irqsave(&kbd_event_lock, flags);
if (ct)
memcpy(accent_table, buf,
ct * sizeof(struct kbdiacruc));
accent_table_size = ct;
spin_unlock_irqrestore(&kbd_event_lock, flags);
kfree(buf);
return 0;
}
}
return ret;
}
/**
* vt_do_kdskbmode - set keyboard mode ioctl
* @console: the console to use
* @arg: the requested mode
*
* Update the keyboard mode bits while holding the correct locks.
* Return 0 for success or an error code.
*/
int vt_do_kdskbmode(unsigned int console, unsigned int arg)
{
struct kbd_struct *kb = &kbd_table[console];
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&kbd_event_lock, flags);
switch(arg) {
case K_RAW:
kb->kbdmode = VC_RAW;
break;
case K_MEDIUMRAW:
kb->kbdmode = VC_MEDIUMRAW;
break;
case K_XLATE:
kb->kbdmode = VC_XLATE;
do_compute_shiftstate();
break;
case K_UNICODE:
kb->kbdmode = VC_UNICODE;
do_compute_shiftstate();
break;
case K_OFF:
kb->kbdmode = VC_OFF;
break;
default:
ret = -EINVAL;
}
spin_unlock_irqrestore(&kbd_event_lock, flags);
return ret;
}
/**
* vt_do_kdskbmeta - set keyboard meta state
* @console: the console to use
* @arg: the requested meta state
*
* Update the keyboard meta bits while holding the correct locks.
* Return 0 for success or an error code.
*/
int vt_do_kdskbmeta(unsigned int console, unsigned int arg)
{
struct kbd_struct *kb = &kbd_table[console];
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&kbd_event_lock, flags);
switch(arg) {
case K_METABIT:
clr_vc_kbd_mode(kb, VC_META);
break;
case K_ESCPREFIX:
set_vc_kbd_mode(kb, VC_META);
break;
default:
ret = -EINVAL;
}
spin_unlock_irqrestore(&kbd_event_lock, flags);
return ret;
}
int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc,
int perm)
{
struct kbkeycode tmp;
int kc = 0;
if (copy_from_user(&tmp, user_kbkc, sizeof(struct kbkeycode)))
return -EFAULT;
switch (cmd) {
case KDGETKEYCODE:
kc = getkeycode(tmp.scancode);
if (kc >= 0)
kc = put_user(kc, &user_kbkc->keycode);
break;
case KDSETKEYCODE:
if (!perm)
return -EPERM;
kc = setkeycode(tmp.scancode, tmp.keycode);
break;
}
return kc;
}
static unsigned short vt_kdgkbent(unsigned char kbdmode, unsigned char idx,
unsigned char map)
{
unsigned short *key_map, val;
unsigned long flags;
/* Ensure another thread doesn't free it under us */
spin_lock_irqsave(&kbd_event_lock, flags);
key_map = key_maps[map];
if (key_map) {
val = U(key_map[idx]);
if (kbdmode != VC_UNICODE && KTYP(val) >= NR_TYPES)
val = K_HOLE;
} else
val = idx ? K_HOLE : K_NOSUCHMAP;
spin_unlock_irqrestore(&kbd_event_lock, flags);
return val;
}
static int vt_kdskbent(unsigned char kbdmode, unsigned char idx,
unsigned char map, unsigned short val)
{
unsigned long flags;
unsigned short *key_map, *new_map, oldval;
if (!idx && val == K_NOSUCHMAP) {
spin_lock_irqsave(&kbd_event_lock, flags);
/* deallocate map */
key_map = key_maps[map];
if (map && key_map) {
key_maps[map] = NULL;
if (key_map[0] == U(K_ALLOCATED)) {
kfree(key_map);
keymap_count--;
}
}
spin_unlock_irqrestore(&kbd_event_lock, flags);
return 0;
}
if (KTYP(val) < NR_TYPES) {
if (KVAL(val) > max_vals[KTYP(val)])
return -EINVAL;
} else if (kbdmode != VC_UNICODE)
return -EINVAL;
/* ++Geert: non-PC keyboards may generate keycode zero */
#if !defined(__mc68000__) && !defined(__powerpc__)
/* assignment to entry 0 only tests validity of args */
if (!idx)
return 0;
#endif
new_map = kmalloc(sizeof(plain_map), GFP_KERNEL);
if (!new_map)
return -ENOMEM;
spin_lock_irqsave(&kbd_event_lock, flags);
key_map = key_maps[map];
if (key_map == NULL) {
int j;
if (keymap_count >= MAX_NR_OF_USER_KEYMAPS &&
!capable(CAP_SYS_RESOURCE)) {
spin_unlock_irqrestore(&kbd_event_lock, flags);
kfree(new_map);
return -EPERM;
}
key_maps[map] = new_map;
key_map = new_map;
key_map[0] = U(K_ALLOCATED);
for (j = 1; j < NR_KEYS; j++)
key_map[j] = U(K_HOLE);
keymap_count++;
} else
kfree(new_map);
oldval = U(key_map[idx]);
if (val == oldval)
goto out;
/* Attention Key */
if ((oldval == K_SAK || val == K_SAK) && !capable(CAP_SYS_ADMIN)) {
spin_unlock_irqrestore(&kbd_event_lock, flags);
return -EPERM;
}
key_map[idx] = U(val);
if (!map && (KTYP(oldval) == KT_SHIFT || KTYP(val) == KT_SHIFT))
do_compute_shiftstate();
out:
spin_unlock_irqrestore(&kbd_event_lock, flags);
return 0;
}
int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
unsigned int console)
{
struct kbd_struct *kb = &kbd_table[console];
struct kbentry kbe;
if (copy_from_user(&kbe, user_kbe, sizeof(struct kbentry)))
return -EFAULT;
switch (cmd) {
case KDGKBENT:
return put_user(vt_kdgkbent(kb->kbdmode, kbe.kb_index,
kbe.kb_table),
&user_kbe->kb_value);
case KDSKBENT:
if (!perm || !capable(CAP_SYS_TTY_CONFIG))
return -EPERM;
return vt_kdskbent(kb->kbdmode, kbe.kb_index, kbe.kb_table,
kbe.kb_value);
}
return 0;
}
static char *vt_kdskbsent(char *kbs, unsigned char cur)
{
static DECLARE_BITMAP(is_kmalloc, MAX_NR_FUNC);
char *cur_f = func_table[cur];
if (cur_f && strlen(cur_f) >= strlen(kbs)) {
strcpy(cur_f, kbs);
return kbs;
}
func_table[cur] = kbs;
return __test_and_set_bit(cur, is_kmalloc) ? cur_f : NULL;
}
int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
{
unsigned char kb_func;
unsigned long flags;
char *kbs;
int ret;
if (get_user(kb_func, &user_kdgkb->kb_func))
return -EFAULT;
kb_func = array_index_nospec(kb_func, MAX_NR_FUNC);
switch (cmd) {
case KDGKBSENT: {
/* size should have been a struct member */
ssize_t len = sizeof(user_kdgkb->kb_string);
kbs = kmalloc(len, GFP_KERNEL);
if (!kbs)
return -ENOMEM;
spin_lock_irqsave(&func_buf_lock, flags);
len = strlcpy(kbs, func_table[kb_func] ? : "", len);
spin_unlock_irqrestore(&func_buf_lock, flags);
ret = copy_to_user(user_kdgkb->kb_string, kbs, len + 1) ?
-EFAULT : 0;
break;
}
case KDSKBSENT:
if (!perm || !capable(CAP_SYS_TTY_CONFIG))
return -EPERM;
kbs = strndup_user(user_kdgkb->kb_string,
sizeof(user_kdgkb->kb_string));
if (IS_ERR(kbs))
return PTR_ERR(kbs);
spin_lock_irqsave(&func_buf_lock, flags);
kbs = vt_kdskbsent(kbs, kb_func);
spin_unlock_irqrestore(&func_buf_lock, flags);
ret = 0;
break;
}
kfree(kbs);
return ret;
}
int vt_do_kdskled(unsigned int console, int cmd, unsigned long arg, int perm)
{
struct kbd_struct *kb = &kbd_table[console];
unsigned long flags;
unsigned char ucval;
switch(cmd) {
/* the ioctls below read/set the flags usually shown in the leds */
/* don't use them - they will go away without warning */
case KDGKBLED:
spin_lock_irqsave(&kbd_event_lock, flags);
ucval = kb->ledflagstate | (kb->default_ledflagstate << 4);
spin_unlock_irqrestore(&kbd_event_lock, flags);
return put_user(ucval, (char __user *)arg);
case KDSKBLED:
if (!perm)
return -EPERM;
if (arg & ~0x77)
return -EINVAL;
spin_lock_irqsave(&led_lock, flags);
kb->ledflagstate = (arg & 7);
kb->default_ledflagstate = ((arg >> 4) & 7);
set_leds();
spin_unlock_irqrestore(&led_lock, flags);
return 0;
/* the ioctls below only set the lights, not the functions */
/* for those, see KDGKBLED and KDSKBLED above */
case KDGETLED:
ucval = getledstate();
return put_user(ucval, (char __user *)arg);
case KDSETLED:
if (!perm)
return -EPERM;
setledstate(kb, arg);
return 0;
}
return -ENOIOCTLCMD;
}
int vt_do_kdgkbmode(unsigned int console)
{
struct kbd_struct *kb = &kbd_table[console];
/* This is a spot read so needs no locking */
switch (kb->kbdmode) {
case VC_RAW:
return K_RAW;
case VC_MEDIUMRAW:
return K_MEDIUMRAW;
case VC_UNICODE:
return K_UNICODE;
case VC_OFF:
return K_OFF;
default:
return K_XLATE;
}
}
/**
* vt_do_kdgkbmeta - report meta status
* @console: console to report
*
* Report the meta flag status of this console
*/
int vt_do_kdgkbmeta(unsigned int console)
{
struct kbd_struct *kb = &kbd_table[console];
/* Again a spot read so no locking */
return vc_kbd_mode(kb, VC_META) ? K_ESCPREFIX : K_METABIT;
}
/**
* vt_reset_unicode - reset the unicode status
* @console: console being reset
*
* Restore the unicode console state to its default
*/
void vt_reset_unicode(unsigned int console)
{
unsigned long flags;
spin_lock_irqsave(&kbd_event_lock, flags);
kbd_table[console].kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE;
spin_unlock_irqrestore(&kbd_event_lock, flags);
}
/**
* vt_get_shift_state - shift bit state
*
* Report the shift bits from the keyboard state. We have to export
* this to support some oddities in the vt layer.
*/
int vt_get_shift_state(void)
{
/* Don't lock as this is a transient report */
return shift_state;
}
/**
* vt_reset_keyboard - reset keyboard state
* @console: console to reset
*
* Reset the keyboard bits for a console as part of a general console
* reset event
*/
void vt_reset_keyboard(unsigned int console)
{
struct kbd_struct *kb = &kbd_table[console];
unsigned long flags;
spin_lock_irqsave(&kbd_event_lock, flags);
set_vc_kbd_mode(kb, VC_REPEAT);
clr_vc_kbd_mode(kb, VC_CKMODE);
clr_vc_kbd_mode(kb, VC_APPLIC);
clr_vc_kbd_mode(kb, VC_CRLF);
kb->lockstate = 0;
kb->slockstate = 0;
spin_lock(&led_lock);
kb->ledmode = LED_SHOW_FLAGS;
kb->ledflagstate = kb->default_ledflagstate;
spin_unlock(&led_lock);
/* do not do set_leds here because this causes an endless tasklet loop
when the keyboard hasn't been initialized yet */
spin_unlock_irqrestore(&kbd_event_lock, flags);
}
/**
* vt_get_kbd_mode_bit - read keyboard status bits
* @console: console to read from
* @bit: mode bit to read
*
* Report back a vt mode bit. We do this without locking so the
* caller must be sure that there are no synchronization needs
*/
int vt_get_kbd_mode_bit(unsigned int console, int bit)
{
struct kbd_struct *kb = &kbd_table[console];
return vc_kbd_mode(kb, bit);
}
/**
 * vt_set_kbd_mode_bit - set keyboard mode bit
 * @console: console to set the bit on
 * @bit: mode bit to set
 *
 * Set a vt mode bit. The bit is updated under the kbd_event_lock.
*/
void vt_set_kbd_mode_bit(unsigned int console, int bit)
{
struct kbd_struct *kb = &kbd_table[console];
unsigned long flags;
spin_lock_irqsave(&kbd_event_lock, flags);
set_vc_kbd_mode(kb, bit);
spin_unlock_irqrestore(&kbd_event_lock, flags);
}
/**
 * vt_clr_kbd_mode_bit - clear keyboard mode bit
 * @console: console to clear the bit on
 * @bit: mode bit to clear
 *
 * Clear a vt mode bit. The bit is updated under the kbd_event_lock.
*/
void vt_clr_kbd_mode_bit(unsigned int console, int bit)
{
struct kbd_struct *kb = &kbd_table[console];
unsigned long flags;
spin_lock_irqsave(&kbd_event_lock, flags);
clr_vc_kbd_mode(kb, bit);
spin_unlock_irqrestore(&kbd_event_lock, flags);
}
| linux-master | drivers/tty/vt/keyboard.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This module exports the functions:
*
* 'int set_selection_user(struct tiocl_selection __user *,
* struct tty_struct *)'
* 'int set_selection_kernel(struct tiocl_selection *, struct tty_struct *)'
* 'void clear_selection(void)'
* 'int paste_selection(struct tty_struct *)'
* 'int sel_loadlut(char __user *)'
*
* Now that /dev/vcs exists, most of this can disappear again.
*/
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
#include <linux/consolemap.h>
#include <linux/selection.h>
#include <linux/tiocl.h>
#include <linux/console.h>
#include <linux/tty_flip.h>
#include <linux/sched/signal.h>
/* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
#define is_space_on_vt(c) ((c) == ' ')
/* FIXME: all this needs locking */
static struct vc_selection {
struct mutex lock;
struct vc_data *cons; /* must not be deallocated */
char *buffer;
unsigned int buf_len;
volatile int start; /* cleared by clear_selection */
int end;
} vc_sel = {
.lock = __MUTEX_INITIALIZER(vc_sel.lock),
.start = -1,
};
/* clear_selection, highlight and highlight_pointer can be called
from interrupt (via scrollback/front) */
/* set reverse video on characters s-e of console with selection. */
static inline void highlight(const int s, const int e)
{
invert_screen(vc_sel.cons, s, e-s+2, true);
}
/* use complementary color to show the pointer */
static inline void highlight_pointer(const int where)
{
complement_pos(vc_sel.cons, where);
}
static u32
sel_pos(int n, bool unicode)
{
if (unicode)
return screen_glyph_unicode(vc_sel.cons, n / 2);
return inverse_translate(vc_sel.cons, screen_glyph(vc_sel.cons, n),
false);
}
/**
* clear_selection - remove current selection
*
 * Remove the current selection highlight, if any, from the console
 * holding the selection. The caller must hold the console lock.
*/
void clear_selection(void)
{
highlight_pointer(-1); /* hide the pointer */
if (vc_sel.start != -1) {
highlight(vc_sel.start, vc_sel.end);
vc_sel.start = -1;
}
}
EXPORT_SYMBOL_GPL(clear_selection);
bool vc_is_sel(struct vc_data *vc)
{
return vc == vc_sel.cons;
}
/*
* User settable table: what characters are to be considered alphabetic?
* 128 bits. Locked by the console lock.
*/
static u32 inwordLut[]={
0x00000000, /* control chars */
0x03FFE000, /* digits and "-./" */
0x87FFFFFE, /* uppercase and '_' */
0x07FFFFFE, /* lowercase */
};
static inline int inword(const u32 c)
{
return c > 0x7f || (( inwordLut[c>>5] >> (c & 0x1F) ) & 1);
}
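/*
 * Illustrative lookup: for c == '1' (0x31), inword() tests bit 17 of
 * inwordLut[1] (0x03FFE000), which is set, so digits are treated as
 * word characters by the word-selection code below.
 */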
/**
* sel_loadlut() - load the LUT table
* @p: user table
*
* Load the LUT table from user space. The caller must hold the console
* lock. Make a temporary copy so a partial update doesn't make a mess.
*/
int sel_loadlut(char __user *p)
{
u32 tmplut[ARRAY_SIZE(inwordLut)];
if (copy_from_user(tmplut, (u32 __user *)(p+4), sizeof(inwordLut)))
return -EFAULT;
memcpy(inwordLut, tmplut, sizeof(inwordLut));
return 0;
}
/* does screen address p correspond to character at LH/RH edge of screen? */
static inline int atedge(const int p, int size_row)
{
return (!(p % size_row) || !((p + 2) % size_row));
}
/* stores the char in UTF8 and returns the number of bytes used (1-4) */
static int store_utf8(u32 c, char *p)
{
if (c < 0x80) {
/* 0******* */
p[0] = c;
return 1;
} else if (c < 0x800) {
/* 110***** 10****** */
p[0] = 0xc0 | (c >> 6);
p[1] = 0x80 | (c & 0x3f);
return 2;
} else if (c < 0x10000) {
/* 1110**** 10****** 10****** */
p[0] = 0xe0 | (c >> 12);
p[1] = 0x80 | ((c >> 6) & 0x3f);
p[2] = 0x80 | (c & 0x3f);
return 3;
} else if (c < 0x110000) {
/* 11110*** 10****** 10****** 10****** */
p[0] = 0xf0 | (c >> 18);
p[1] = 0x80 | ((c >> 12) & 0x3f);
p[2] = 0x80 | ((c >> 6) & 0x3f);
p[3] = 0x80 | (c & 0x3f);
return 4;
} else {
/* outside Unicode, replace with U+FFFD */
p[0] = 0xef;
p[1] = 0xbf;
p[2] = 0xbd;
return 3;
}
}
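/*
 * Worked example: store_utf8(0x1f600, p) writes f0 9f 98 80 and
 * returns 4, while any code point at or above 0x110000 is replaced by
 * the three-byte U+FFFD sequence ef bf bd.
 */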
/**
* set_selection_user - set the current selection.
* @sel: user selection info
* @tty: the console tty
*
 * Invoked by the ioctl handler for the vt layer.
 *
 * The entire selection process is managed under the console_lock. It's
 * a lot under the lock but it's hardly a performance path.
*/
int set_selection_user(const struct tiocl_selection __user *sel,
struct tty_struct *tty)
{
struct tiocl_selection v;
if (copy_from_user(&v, sel, sizeof(*sel)))
return -EFAULT;
return set_selection_kernel(&v, tty);
}
static int vc_selection_store_chars(struct vc_data *vc, bool unicode)
{
char *bp, *obp;
unsigned int i;
/* Allocate a new buffer before freeing the old one ... */
/* chars can take up to 4 bytes with unicode */
bp = kmalloc_array((vc_sel.end - vc_sel.start) / 2 + 1, unicode ? 4 : 1,
GFP_KERNEL | __GFP_NOWARN);
if (!bp) {
printk(KERN_WARNING "selection: kmalloc() failed\n");
clear_selection();
return -ENOMEM;
}
kfree(vc_sel.buffer);
vc_sel.buffer = bp;
obp = bp;
for (i = vc_sel.start; i <= vc_sel.end; i += 2) {
u32 c = sel_pos(i, unicode);
if (unicode)
bp += store_utf8(c, bp);
else
*bp++ = c;
if (!is_space_on_vt(c))
obp = bp;
if (!((i + 2) % vc->vc_size_row)) {
/* strip trailing blanks from line and add newline,
unless non-space at end of line. */
if (obp != bp) {
bp = obp;
*bp++ = '\r';
}
obp = bp;
}
}
vc_sel.buf_len = bp - vc_sel.buffer;
return 0;
}
static int vc_do_selection(struct vc_data *vc, unsigned short mode, int ps,
int pe)
{
int new_sel_start, new_sel_end, spc;
bool unicode = vt_do_kdgkbmode(fg_console) == K_UNICODE;
switch (mode) {
case TIOCL_SELCHAR: /* character-by-character selection */
new_sel_start = ps;
new_sel_end = pe;
break;
case TIOCL_SELWORD: /* word-by-word selection */
spc = is_space_on_vt(sel_pos(ps, unicode));
for (new_sel_start = ps; ; ps -= 2) {
if ((spc && !is_space_on_vt(sel_pos(ps, unicode))) ||
(!spc && !inword(sel_pos(ps, unicode))))
break;
new_sel_start = ps;
if (!(ps % vc->vc_size_row))
break;
}
spc = is_space_on_vt(sel_pos(pe, unicode));
for (new_sel_end = pe; ; pe += 2) {
if ((spc && !is_space_on_vt(sel_pos(pe, unicode))) ||
(!spc && !inword(sel_pos(pe, unicode))))
break;
new_sel_end = pe;
if (!((pe + 2) % vc->vc_size_row))
break;
}
break;
case TIOCL_SELLINE: /* line-by-line selection */
new_sel_start = rounddown(ps, vc->vc_size_row);
new_sel_end = rounddown(pe, vc->vc_size_row) +
vc->vc_size_row - 2;
break;
case TIOCL_SELPOINTER:
highlight_pointer(pe);
return 0;
default:
return -EINVAL;
}
/* remove the pointer */
highlight_pointer(-1);
/* select to end of line if on trailing space */
if (new_sel_end > new_sel_start &&
!atedge(new_sel_end, vc->vc_size_row) &&
is_space_on_vt(sel_pos(new_sel_end, unicode))) {
for (pe = new_sel_end + 2; ; pe += 2)
if (!is_space_on_vt(sel_pos(pe, unicode)) ||
atedge(pe, vc->vc_size_row))
break;
if (is_space_on_vt(sel_pos(pe, unicode)))
new_sel_end = pe;
}
if (vc_sel.start == -1) /* no current selection */
highlight(new_sel_start, new_sel_end);
else if (new_sel_start == vc_sel.start)
{
if (new_sel_end == vc_sel.end) /* no action required */
return 0;
else if (new_sel_end > vc_sel.end) /* extend to right */
highlight(vc_sel.end + 2, new_sel_end);
else /* contract from right */
highlight(new_sel_end + 2, vc_sel.end);
}
else if (new_sel_end == vc_sel.end)
{
if (new_sel_start < vc_sel.start) /* extend to left */
highlight(new_sel_start, vc_sel.start - 2);
else /* contract from left */
highlight(vc_sel.start, new_sel_start - 2);
}
else /* some other case; start selection from scratch */
{
clear_selection();
highlight(new_sel_start, new_sel_end);
}
vc_sel.start = new_sel_start;
vc_sel.end = new_sel_end;
return vc_selection_store_chars(vc, unicode);
}
static int vc_selection(struct vc_data *vc, struct tiocl_selection *v,
struct tty_struct *tty)
{
int ps, pe;
poke_blanked_console();
if (v->sel_mode == TIOCL_SELCLEAR) {
/* useful for screendump without selection highlights */
clear_selection();
return 0;
}
v->xs = min_t(u16, v->xs - 1, vc->vc_cols - 1);
v->ys = min_t(u16, v->ys - 1, vc->vc_rows - 1);
v->xe = min_t(u16, v->xe - 1, vc->vc_cols - 1);
v->ye = min_t(u16, v->ye - 1, vc->vc_rows - 1);
if (mouse_reporting() && (v->sel_mode & TIOCL_SELMOUSEREPORT)) {
mouse_report(tty, v->sel_mode & TIOCL_SELBUTTONMASK, v->xs,
v->ys);
return 0;
}
ps = v->ys * vc->vc_size_row + (v->xs << 1);
pe = v->ye * vc->vc_size_row + (v->xe << 1);
if (ps > pe) /* make vc_sel.start <= vc_sel.end */
swap(ps, pe);
if (vc_sel.cons != vc) {
clear_selection();
vc_sel.cons = vc;
}
return vc_do_selection(vc, v->sel_mode, ps, pe);
}
int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
{
int ret;
mutex_lock(&vc_sel.lock);
console_lock();
ret = vc_selection(vc_cons[fg_console].d, v, tty);
console_unlock();
mutex_unlock(&vc_sel.lock);
return ret;
}
EXPORT_SYMBOL_GPL(set_selection_kernel);
/* Insert the contents of the selection buffer into the
* queue of the tty associated with the current console.
* Invoked by ioctl().
*
 * Locking: called without locks. Calls the ldisc wrongly, with
 * unsafe methods.
*/
int paste_selection(struct tty_struct *tty)
{
struct vc_data *vc = tty->driver_data;
int pasted = 0;
size_t count;
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
console_lock();
poke_blanked_console();
console_unlock();
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return -EIO; /* ldisc was hung up */
tty_buffer_lock_exclusive(&vc->port);
add_wait_queue(&vc->paste_wait, &wait);
mutex_lock(&vc_sel.lock);
while (vc_sel.buffer && vc_sel.buf_len > pasted) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
ret = -EINTR;
break;
}
if (tty_throttled(tty)) {
mutex_unlock(&vc_sel.lock);
schedule();
mutex_lock(&vc_sel.lock);
continue;
}
__set_current_state(TASK_RUNNING);
count = vc_sel.buf_len - pasted;
count = tty_ldisc_receive_buf(ld, vc_sel.buffer + pasted, NULL,
count);
pasted += count;
}
mutex_unlock(&vc_sel.lock);
remove_wait_queue(&vc->paste_wait, &wait);
__set_current_state(TASK_RUNNING);
tty_buffer_unlock_exclusive(&vc->port);
tty_ldisc_deref(ld);
return ret;
}
EXPORT_SYMBOL_GPL(paste_selection);
| linux-master | drivers/tty/vt/selection.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Provide access to virtual console memory.
* /dev/vcs: the screen as it is being viewed right now (possibly scrolled)
* /dev/vcsN: the screen of /dev/ttyN (1 <= N <= 63)
* [minor: N]
*
* /dev/vcsaN: idem, but including attributes, and prefixed with
* the 4 bytes lines,columns,x,y (as screendump used to give).
 * Each attribute/character pair is in native endianness.
* [minor: N+128]
*
* /dev/vcsuN: similar to /dev/vcsaN but using 4-byte unicode values
* instead of 1-byte screen glyph values.
* [minor: N+64]
*
* /dev/vcsuaN: same idea as /dev/vcsaN for unicode (not yet implemented).
*
* This replaces screendump and part of selection, so that the system
* administrator can control access using file system permissions.
*
 * [email protected] - after Frieda's funeral - 950211
*
* [email protected] - modified not to send characters to wrong console
* - fixed some fatal off-by-one bugs (0-- no longer == -1 -> looping and looping and looping...)
* - making it shorter - scr_readw are macros which expand in PRETTY long code
*/
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/tty.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/vt_kern.h>
#include <linux/selection.h>
#include <linux/kbd_kern.h>
#include <linux/console.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#define HEADER_SIZE 4u
#define CON_BUF_SIZE (CONFIG_BASE_SMALL ? 256 : PAGE_SIZE)
/*
* Our minor space:
*
* 0 ... 63 glyph mode without attributes
* 64 ... 127 unicode mode without attributes
* 128 ... 191 glyph mode with attributes
* 192 ... 255 unused (reserved for unicode with attributes)
*
* This relies on MAX_NR_CONSOLES being <= 63, meaning 63 actual consoles
* with minors 0, 64, 128 and 192 being proxies for the foreground console.
*/
#if MAX_NR_CONSOLES > 63
#warning "/dev/vcs* devices may not accommodate more than 63 consoles"
#endif
#define console(inode) (iminor(inode) & 63)
#define use_unicode(inode) (iminor(inode) & 64)
#define use_attributes(inode) (iminor(inode) & 128)
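/*
 * Worked example (illustrative): minor 130 is 128 + 2, i.e. /dev/vcsa2:
 * console() yields 2, use_attributes() is non-zero and use_unicode() is
 * zero. Minor 66 (/dev/vcsu2) selects the same console in unicode mode.
 */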
struct vcs_poll_data {
struct notifier_block notifier;
unsigned int cons_num;
int event;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
};
static int
vcs_notifier(struct notifier_block *nb, unsigned long code, void *_param)
{
struct vt_notifier_param *param = _param;
struct vc_data *vc = param->vc;
struct vcs_poll_data *poll =
container_of(nb, struct vcs_poll_data, notifier);
int currcons = poll->cons_num;
int fa_band;
switch (code) {
case VT_UPDATE:
fa_band = POLL_PRI;
break;
case VT_DEALLOCATE:
fa_band = POLL_HUP;
break;
default:
return NOTIFY_DONE;
}
if (currcons == 0)
currcons = fg_console;
else
currcons--;
if (currcons != vc->vc_num)
return NOTIFY_DONE;
poll->event = code;
wake_up_interruptible(&poll->waitq);
kill_fasync(&poll->fasync, SIGIO, fa_band);
return NOTIFY_OK;
}
static void
vcs_poll_data_free(struct vcs_poll_data *poll)
{
unregister_vt_notifier(&poll->notifier);
kfree(poll);
}
static struct vcs_poll_data *
vcs_poll_data_get(struct file *file)
{
struct vcs_poll_data *poll = file->private_data, *kill = NULL;
if (poll)
return poll;
poll = kzalloc(sizeof(*poll), GFP_KERNEL);
if (!poll)
return NULL;
poll->cons_num = console(file_inode(file));
init_waitqueue_head(&poll->waitq);
poll->notifier.notifier_call = vcs_notifier;
/*
* In order not to lose any update event, we must pretend one might
* have occurred before we have a chance to register our notifier.
* This is also how user space has come to detect which kernels
 * support POLLPRI on /dev/vcs* devices, i.e. using poll() with
* POLLPRI and a zero timeout.
*/
poll->event = VT_UPDATE;
if (register_vt_notifier(&poll->notifier) != 0) {
kfree(poll);
return NULL;
}
/*
* This code may be called either through ->poll() or ->fasync().
* If we have two threads using the same file descriptor, they could
* both enter this function, both notice that the structure hasn't
* been allocated yet and go ahead allocating it in parallel, but
* only one of them must survive and be shared otherwise we'd leak
* memory with a dangling notifier callback.
*/
spin_lock(&file->f_lock);
if (!file->private_data) {
file->private_data = poll;
} else {
/* someone else raced ahead of us */
kill = poll;
poll = file->private_data;
}
spin_unlock(&file->f_lock);
if (kill)
vcs_poll_data_free(kill);
return poll;
}
/**
* vcs_vc -- return VC for @inode
* @inode: inode for which to return a VC
* @viewed: returns whether this console is currently foreground (viewed)
*
* Must be called with console_lock.
*/
static struct vc_data *vcs_vc(struct inode *inode, bool *viewed)
{
unsigned int currcons = console(inode);
WARN_CONSOLE_UNLOCKED();
if (currcons == 0) {
currcons = fg_console;
if (viewed)
*viewed = true;
} else {
currcons--;
if (viewed)
*viewed = false;
}
return vc_cons[currcons].d;
}
/**
* vcs_size -- return size for a VC in @vc
* @vc: which VC
* @attr: does it use attributes?
* @unicode: is it unicode?
*
* Must be called with console_lock.
*/
static int vcs_size(const struct vc_data *vc, bool attr, bool unicode)
{
int size;
WARN_CONSOLE_UNLOCKED();
size = vc->vc_rows * vc->vc_cols;
if (attr) {
if (unicode)
return -EOPNOTSUPP;
size = 2 * size + HEADER_SIZE;
} else if (unicode)
size *= 4;
return size;
}
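/*
 * Worked example (illustrative): on an 80x25 console this yields 2000
 * bytes in plain glyph mode, 2 * 2000 + HEADER_SIZE = 4004 bytes with
 * attributes (/dev/vcsa*), and 4 * 2000 = 8000 bytes in unicode mode
 * (/dev/vcsu*).
 */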
static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
{
struct inode *inode = file_inode(file);
struct vc_data *vc;
int size;
console_lock();
vc = vcs_vc(inode, NULL);
if (!vc) {
console_unlock();
return -ENXIO;
}
size = vcs_size(vc, use_attributes(inode), use_unicode(inode));
console_unlock();
if (size < 0)
return size;
return fixed_size_llseek(file, offset, orig, size);
}
static int vcs_read_buf_uni(struct vc_data *vc, char *con_buf,
unsigned int pos, unsigned int count, bool viewed)
{
unsigned int nr, row, col, maxcol = vc->vc_cols;
int ret;
ret = vc_uniscr_check(vc);
if (ret)
return ret;
pos /= 4;
row = pos / maxcol;
col = pos % maxcol;
nr = maxcol - col;
do {
if (nr > count / 4)
nr = count / 4;
vc_uniscr_copy_line(vc, con_buf, viewed, row, col, nr);
con_buf += nr * 4;
count -= nr * 4;
row++;
col = 0;
nr = maxcol;
} while (count);
return 0;
}
static void vcs_read_buf_noattr(const struct vc_data *vc, char *con_buf,
unsigned int pos, unsigned int count, bool viewed)
{
u16 *org;
unsigned int col, maxcol = vc->vc_cols;
org = screen_pos(vc, pos, viewed);
col = pos % maxcol;
pos += maxcol - col;
while (count-- > 0) {
*con_buf++ = (vcs_scr_readw(vc, org++) & 0xff);
if (++col == maxcol) {
org = screen_pos(vc, pos, viewed);
col = 0;
pos += maxcol;
}
}
}
static unsigned int vcs_read_buf(const struct vc_data *vc, char *con_buf,
unsigned int pos, unsigned int count, bool viewed,
unsigned int *skip)
{
u16 *org, *con_buf16;
unsigned int col, maxcol = vc->vc_cols;
unsigned int filled = count;
if (pos < HEADER_SIZE) {
/* clamp header values if they don't fit */
con_buf[0] = min(vc->vc_rows, 0xFFu);
con_buf[1] = min(vc->vc_cols, 0xFFu);
getconsxy(vc, con_buf + 2);
*skip += pos;
count += pos;
if (count > CON_BUF_SIZE) {
count = CON_BUF_SIZE;
filled = count - pos;
}
/* Advance state pointers and move on. */
count -= min(HEADER_SIZE, count);
pos = HEADER_SIZE;
con_buf += HEADER_SIZE;
/* If count >= 0, then pos is even... */
} else if (pos & 1) {
/*
* Skip first byte for output if start address is odd. Update
* region sizes up/down depending on free space in buffer.
*/
(*skip)++;
if (count < CON_BUF_SIZE)
count++;
else
filled--;
}
if (!count)
return filled;
pos -= HEADER_SIZE;
pos /= 2;
col = pos % maxcol;
org = screen_pos(vc, pos, viewed);
pos += maxcol - col;
/*
* Buffer has even length, so we can always copy character + attribute.
* We do not copy last byte to userspace if count is odd.
*/
count = (count + 1) / 2;
con_buf16 = (u16 *)con_buf;
while (count) {
*con_buf16++ = vcs_scr_readw(vc, org++);
count--;
if (++col == maxcol) {
org = screen_pos(vc, pos, viewed);
col = 0;
pos += maxcol;
}
}
return filled;
}
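/*
 * Illustrative only: an 8-byte read at pos 0 through the helper above
 * returns the four header bytes {rows, cols, cursor x, cursor y} followed
 * by the first two character/attribute cells of the screen.
 */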
static ssize_t
vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct inode *inode = file_inode(file);
struct vc_data *vc;
struct vcs_poll_data *poll;
unsigned int read;
ssize_t ret;
char *con_buf;
loff_t pos;
bool viewed, attr, uni_mode;
con_buf = (char *) __get_free_page(GFP_KERNEL);
if (!con_buf)
return -ENOMEM;
pos = *ppos;
/* Select the proper current console and verify
* sanity of the situation under the console lock.
*/
console_lock();
uni_mode = use_unicode(inode);
attr = use_attributes(inode);
ret = -EINVAL;
if (pos < 0)
goto unlock_out;
/* we enforce 32-bit alignment for pos and count in unicode mode */
if (uni_mode && (pos | count) & 3)
goto unlock_out;
poll = file->private_data;
if (count && poll)
poll->event = 0;
read = 0;
ret = 0;
while (count) {
unsigned int this_round, skip = 0;
int size;
vc = vcs_vc(inode, &viewed);
if (!vc) {
ret = -ENXIO;
break;
}
/* Check whether we are above size each round,
* as copy_to_user at the end of this loop
* could sleep.
*/
size = vcs_size(vc, attr, uni_mode);
if (size < 0) {
ret = size;
break;
}
if (pos >= size)
break;
if (count > size - pos)
count = size - pos;
this_round = count;
if (this_round > CON_BUF_SIZE)
this_round = CON_BUF_SIZE;
/* Perform the whole read into the local con_buf.
* Then we can drop the console spinlock and safely
* attempt to move it to userspace.
*/
if (uni_mode) {
ret = vcs_read_buf_uni(vc, con_buf, pos, this_round,
viewed);
if (ret)
break;
} else if (!attr) {
vcs_read_buf_noattr(vc, con_buf, pos, this_round,
viewed);
} else {
this_round = vcs_read_buf(vc, con_buf, pos, this_round,
viewed, &skip);
}
/* Finally, release the console semaphore while we push
* all the data to userspace from our temporary buffer.
*
* AKPM: Even though it's a semaphore, we should drop it because
* the pagefault handling code may want to call printk().
*/
console_unlock();
ret = copy_to_user(buf, con_buf + skip, this_round);
console_lock();
if (ret) {
read += this_round - ret;
ret = -EFAULT;
break;
}
buf += this_round;
pos += this_round;
read += this_round;
count -= this_round;
}
*ppos += read;
if (read)
ret = read;
unlock_out:
console_unlock();
free_page((unsigned long) con_buf);
return ret;
}
static u16 *vcs_write_buf_noattr(struct vc_data *vc, const char *con_buf,
unsigned int pos, unsigned int count, bool viewed, u16 **org0)
{
u16 *org;
unsigned int col, maxcol = vc->vc_cols;
*org0 = org = screen_pos(vc, pos, viewed);
col = pos % maxcol;
pos += maxcol - col;
while (count > 0) {
unsigned char c = *con_buf++;
count--;
vcs_scr_writew(vc,
(vcs_scr_readw(vc, org) & 0xff00) | c, org);
org++;
if (++col == maxcol) {
org = screen_pos(vc, pos, viewed);
col = 0;
pos += maxcol;
}
}
return org;
}
/*
 * Compilers (gcc 10) are unable to optimize the swap in cpu_to_le16, so do it
 * the poor man's way.
*/
static inline u16 vc_compile_le16(u8 hi, u8 lo)
{
#ifdef __BIG_ENDIAN
return (lo << 8u) | hi;
#else
return (hi << 8u) | lo;
#endif
}
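/*
 * Illustrative only: vc_compile_le16(0x07, 0x41) builds the cell for 'A'
 * with attribute 0x07 such that it lands in screen memory as the
 * little-endian byte sequence 0x41, 0x07 on either host endianness.
 */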
static u16 *vcs_write_buf(struct vc_data *vc, const char *con_buf,
unsigned int pos, unsigned int count, bool viewed, u16 **org0)
{
u16 *org;
unsigned int col, maxcol = vc->vc_cols;
unsigned char c;
/* header */
if (pos < HEADER_SIZE) {
char header[HEADER_SIZE];
getconsxy(vc, header + 2);
while (pos < HEADER_SIZE && count > 0) {
count--;
header[pos++] = *con_buf++;
}
if (!viewed)
putconsxy(vc, header + 2);
}
if (!count)
return NULL;
pos -= HEADER_SIZE;
col = (pos/2) % maxcol;
*org0 = org = screen_pos(vc, pos/2, viewed);
/* odd pos -- the first single character */
if (pos & 1) {
count--;
c = *con_buf++;
vcs_scr_writew(vc, vc_compile_le16(c, vcs_scr_readw(vc, org)),
org);
org++;
pos++;
if (++col == maxcol) {
org = screen_pos(vc, pos/2, viewed);
col = 0;
}
}
pos /= 2;
pos += maxcol - col;
/* even pos -- handle attr+character pairs */
while (count > 1) {
unsigned short w;
w = get_unaligned(((unsigned short *)con_buf));
vcs_scr_writew(vc, w, org++);
con_buf += 2;
count -= 2;
if (++col == maxcol) {
org = screen_pos(vc, pos, viewed);
col = 0;
pos += maxcol;
}
}
if (!count)
return org;
/* odd pos -- the remaining character */
c = *con_buf++;
vcs_scr_writew(vc, vc_compile_le16(vcs_scr_readw(vc, org) >> 8, c),
org);
return org;
}
static ssize_t
vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct inode *inode = file_inode(file);
struct vc_data *vc;
char *con_buf;
u16 *org0, *org;
unsigned int written;
int size;
ssize_t ret;
loff_t pos;
bool viewed, attr;
if (use_unicode(inode))
return -EOPNOTSUPP;
con_buf = (char *) __get_free_page(GFP_KERNEL);
if (!con_buf)
return -ENOMEM;
pos = *ppos;
/* Select the proper current console and verify
* sanity of the situation under the console lock.
*/
console_lock();
attr = use_attributes(inode);
ret = -ENXIO;
vc = vcs_vc(inode, &viewed);
if (!vc)
goto unlock_out;
size = vcs_size(vc, attr, false);
if (size < 0) {
ret = size;
goto unlock_out;
}
ret = -EINVAL;
if (pos < 0 || pos > size)
goto unlock_out;
if (count > size - pos)
count = size - pos;
written = 0;
while (count) {
unsigned int this_round = count;
if (this_round > CON_BUF_SIZE)
this_round = CON_BUF_SIZE;
/* Temporarily drop the console lock so that we can read
* in the write data from userspace safely.
*/
console_unlock();
ret = copy_from_user(con_buf, buf, this_round);
console_lock();
if (ret) {
this_round -= ret;
if (!this_round) {
/* Abort loop if no data were copied. Otherwise
* fail with -EFAULT.
*/
if (written)
break;
ret = -EFAULT;
goto unlock_out;
}
}
/* The vc might have been freed or vcs_size might have changed
* while we slept to grab the user buffer, so recheck.
* Return data written up to now on failure.
*/
vc = vcs_vc(inode, &viewed);
if (!vc) {
if (written)
break;
ret = -ENXIO;
goto unlock_out;
}
size = vcs_size(vc, attr, false);
if (size < 0) {
if (written)
break;
ret = size;
goto unlock_out;
}
if (pos >= size)
break;
if (this_round > size - pos)
this_round = size - pos;
/* OK, now actually push the write to the console
* under the lock using the local kernel buffer.
*/
if (attr)
org = vcs_write_buf(vc, con_buf, pos, this_round,
viewed, &org0);
else
org = vcs_write_buf_noattr(vc, con_buf, pos, this_round,
viewed, &org0);
count -= this_round;
written += this_round;
buf += this_round;
pos += this_round;
if (org)
update_region(vc, (unsigned long)(org0), org - org0);
}
*ppos += written;
ret = written;
if (written)
vcs_scr_updated(vc);
unlock_out:
console_unlock();
free_page((unsigned long) con_buf);
return ret;
}
static __poll_t
vcs_poll(struct file *file, poll_table *wait)
{
struct vcs_poll_data *poll = vcs_poll_data_get(file);
__poll_t ret = DEFAULT_POLLMASK|EPOLLERR;
if (poll) {
poll_wait(file, &poll->waitq, wait);
switch (poll->event) {
case VT_UPDATE:
ret = DEFAULT_POLLMASK|EPOLLPRI;
break;
case VT_DEALLOCATE:
ret = DEFAULT_POLLMASK|EPOLLHUP|EPOLLERR;
break;
case 0:
ret = DEFAULT_POLLMASK;
break;
}
}
return ret;
}
static int
vcs_fasync(int fd, struct file *file, int on)
{
struct vcs_poll_data *poll = file->private_data;
if (!poll) {
/* don't allocate anything if all we want is disable fasync */
if (!on)
return 0;
poll = vcs_poll_data_get(file);
if (!poll)
return -ENOMEM;
}
return fasync_helper(fd, file, on, &poll->fasync);
}
static int
vcs_open(struct inode *inode, struct file *filp)
{
unsigned int currcons = console(inode);
bool attr = use_attributes(inode);
bool uni_mode = use_unicode(inode);
int ret = 0;
/* we currently don't support attributes in unicode mode */
if (attr && uni_mode)
return -EOPNOTSUPP;
console_lock();
	if (currcons && !vc_cons_allocated(currcons - 1))
ret = -ENXIO;
console_unlock();
return ret;
}
static int vcs_release(struct inode *inode, struct file *file)
{
struct vcs_poll_data *poll = file->private_data;
if (poll)
vcs_poll_data_free(poll);
return 0;
}
static const struct file_operations vcs_fops = {
.llseek = vcs_lseek,
.read = vcs_read,
.write = vcs_write,
.poll = vcs_poll,
.fasync = vcs_fasync,
.open = vcs_open,
.release = vcs_release,
};
static struct class *vc_class;
void vcs_make_sysfs(int index)
{
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 1), NULL,
"vcs%u", index + 1);
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 65), NULL,
"vcsu%u", index + 1);
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 129), NULL,
"vcsa%u", index + 1);
}
void vcs_remove_sysfs(int index)
{
device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 1));
device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 65));
device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 129));
}
int __init vcs_init(void)
{
unsigned int i;
if (register_chrdev(VCS_MAJOR, "vcs", &vcs_fops))
panic("unable to get major %d for vcs device", VCS_MAJOR);
vc_class = class_create("vc");
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 0), NULL, "vcs");
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 64), NULL, "vcsu");
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 128), NULL, "vcsa");
for (i = 0; i < MIN_NR_CONSOLES; i++)
vcs_make_sysfs(i);
return 0;
}
| linux-master | drivers/tty/vt/vc_screen.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992 obz under the linux copyright
*
* Dynamic diacritical handling - [email protected] - Dec 1993
* Dynamic keymap and string allocation - [email protected] - May 1994
* Restrict VT switching via ioctl() - [email protected] - Dec 1995
* Some code moved for less code duplication - Andi Kleen - Mar 1997
* Check put/get_user, cleanups - [email protected] - Jun 2001
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/tty.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kd.h>
#include <linux/vt.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/console.h>
#include <linux/consolemap.h>
#include <linux/signal.h>
#include <linux/suspend.h>
#include <linux/timex.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
#include <linux/kbd_diacr.h>
#include <linux/selection.h>
bool vt_dont_switch;
static inline bool vt_in_use(unsigned int i)
{
const struct vc_data *vc = vc_cons[i].d;
/*
* console_lock must be held to prevent the vc from being deallocated
* while we're checking whether it's in-use.
*/
WARN_CONSOLE_UNLOCKED();
return vc && kref_read(&vc->port.kref) > 1;
}
static inline bool vt_busy(int i)
{
if (vt_in_use(i))
return true;
if (i == fg_console)
return true;
if (vc_is_sel(vc_cons[i].d))
return true;
return false;
}
/*
* Console (vt and kd) routines, as defined by USL SVR4 manual, and by
* experimentation and study of X386 SYSV handling.
*
 * One point of difference: SYSV vt's are /dev/vtX, with X >= 0, and
 * /dev/console is a separate tty. Under Linux, /dev/tty0 is /dev/console,
 * and the vcs start at /dev/ttyX, X >= 1. We maintain that here, so we will
* always treat our set of vt as numbered 1..MAX_NR_CONSOLES (corresponding to
* ttys 0..MAX_NR_CONSOLES-1). Explicitly naming VT 0 is illegal, but using
* /dev/tty0 (fg_console) as a target is legal, since an implicit aliasing
* to the current console is done by the main ioctl code.
*/
#ifdef CONFIG_X86
#include <asm/syscalls.h>
#endif
static void complete_change_console(struct vc_data *vc);
/*
* User space VT_EVENT handlers
*/
struct vt_event_wait {
struct list_head list;
struct vt_event event;
int done;
};
static LIST_HEAD(vt_events);
static DEFINE_SPINLOCK(vt_event_lock);
static DECLARE_WAIT_QUEUE_HEAD(vt_event_waitqueue);
/**
* vt_event_post
* @event: the event that occurred
* @old: old console
* @new: new console
*
 * Post a VT event to interested VT handlers
*/
void vt_event_post(unsigned int event, unsigned int old, unsigned int new)
{
struct list_head *pos, *head;
unsigned long flags;
int wake = 0;
spin_lock_irqsave(&vt_event_lock, flags);
head = &vt_events;
list_for_each(pos, head) {
struct vt_event_wait *ve = list_entry(pos,
struct vt_event_wait, list);
if (!(ve->event.event & event))
continue;
ve->event.event = event;
/* kernel view is consoles 0..n-1, user space view is
console 1..n with 0 meaning current, so we must bias */
ve->event.oldev = old + 1;
ve->event.newev = new + 1;
wake = 1;
ve->done = 1;
}
spin_unlock_irqrestore(&vt_event_lock, flags);
if (wake)
wake_up_interruptible(&vt_event_waitqueue);
}
static void __vt_event_queue(struct vt_event_wait *vw)
{
unsigned long flags;
/* Prepare the event */
INIT_LIST_HEAD(&vw->list);
vw->done = 0;
/* Queue our event */
spin_lock_irqsave(&vt_event_lock, flags);
list_add(&vw->list, &vt_events);
spin_unlock_irqrestore(&vt_event_lock, flags);
}
static void __vt_event_wait(struct vt_event_wait *vw)
{
/* Wait for it to pass */
wait_event_interruptible(vt_event_waitqueue, vw->done);
}
static void __vt_event_dequeue(struct vt_event_wait *vw)
{
unsigned long flags;
/* Dequeue it */
spin_lock_irqsave(&vt_event_lock, flags);
list_del(&vw->list);
spin_unlock_irqrestore(&vt_event_lock, flags);
}
/**
* vt_event_wait - wait for an event
* @vw: our event
*
* Waits for an event to occur which completes our vt_event_wait
 * structure. On return the structure has vw->done set to 1 for success
* or 0 if some event such as a signal ended the wait.
*/
static void vt_event_wait(struct vt_event_wait *vw)
{
__vt_event_queue(vw);
__vt_event_wait(vw);
__vt_event_dequeue(vw);
}
/**
* vt_event_wait_ioctl - event ioctl handler
* @event: argument to ioctl (the event)
*
* Implement the VT_WAITEVENT ioctl using the VT event interface
*/
static int vt_event_wait_ioctl(struct vt_event __user *event)
{
struct vt_event_wait vw;
if (copy_from_user(&vw.event, event, sizeof(struct vt_event)))
return -EFAULT;
/* Highest supported event for now */
if (vw.event.event & ~VT_MAX_EVENT)
return -EINVAL;
vt_event_wait(&vw);
/* If it occurred report it */
if (vw.done) {
if (copy_to_user(event, &vw.event, sizeof(struct vt_event)))
return -EFAULT;
return 0;
}
return -EINTR;
}
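/*
 * Hedged user-space sketch (assumes an fd on a VT device): wait for the
 * next console switch and report it:
 *
 *	struct vt_event ev = { .event = VT_EVENT_SWITCH };
 *	if (ioctl(fd, VT_WAITEVENT, &ev) == 0)
 *		printf("switched from %u to %u\n", ev.oldev, ev.newev);
 */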
/**
* vt_waitactive - active console wait
* @n: new console
*
* Helper for event waits. Used to implement the legacy
* event waiting ioctls in terms of events
*/
int vt_waitactive(int n)
{
struct vt_event_wait vw;
do {
vw.event.event = VT_EVENT_SWITCH;
__vt_event_queue(&vw);
if (n == fg_console + 1) {
__vt_event_dequeue(&vw);
break;
}
__vt_event_wait(&vw);
__vt_event_dequeue(&vw);
if (vw.done == 0)
return -EINTR;
} while (vw.event.newev != n);
return 0;
}
/*
* these are the valid i/o ports we're allowed to change. they map all the
* video ports
*/
#define GPFIRST 0x3b4
#define GPLAST 0x3df
#define GPNUM (GPLAST - GPFIRST + 1)
/*
* currently, setting the mode from KD_TEXT to KD_GRAPHICS doesn't do a whole
* lot. i'm not sure if it should do any restoration of modes or what...
*
* XXX It should at least call into the driver, fbdev's definitely need to
* restore their engine state. --BenH
*
* Called with the console lock held.
*/
static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
{
switch (mode) {
case KD_GRAPHICS:
break;
case KD_TEXT0:
case KD_TEXT1:
mode = KD_TEXT;
fallthrough;
case KD_TEXT:
break;
default:
return -EINVAL;
}
if (vc->vc_mode == mode)
return 0;
vc->vc_mode = mode;
if (vc->vc_num != fg_console)
return 0;
/* explicitly blank/unblank the screen if switching modes */
if (mode == KD_TEXT)
do_unblank_screen(1);
else
do_blank_screen(1);
return 0;
}
static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg, bool perm)
{
struct vc_data *vc = tty->driver_data;
void __user *up = (void __user *)arg;
unsigned int console = vc->vc_num;
int ret;
switch (cmd) {
case KIOCSOUND:
if (!perm)
return -EPERM;
/*
* The use of PIT_TICK_RATE is historic, it used to be
* the platform-dependent CLOCK_TICK_RATE between 2.6.12
* and 2.6.36, which was a minor but unfortunate ABI
* change. kd_mksound is locked by the input layer.
*/
if (arg)
arg = PIT_TICK_RATE / arg;
kd_mksound(arg, 0);
break;
case KDMKTONE:
if (!perm)
return -EPERM;
{
unsigned int ticks, count;
/*
* Generate the tone for the appropriate number of ticks.
* If the time is zero, turn off sound ourselves.
*/
ticks = msecs_to_jiffies((arg >> 16) & 0xffff);
count = ticks ? (arg & 0xffff) : 0;
if (count)
count = PIT_TICK_RATE / count;
kd_mksound(count, ticks);
break;
}
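	/*
	 * Illustrative only, assuming a PIT_TICK_RATE of 1193182: a 125 ms
	 * beep at roughly 750 Hz would be requested with
	 *
	 *	ioctl(fd, KDMKTONE, (125 << 16) | (1193182 / 750));
	 *
	 * i.e. duration in ms in the high word and the tone period in PIT
	 * ticks in the low word.
	 */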
case KDGKBTYPE:
/*
* this is naïve.
*/
return put_user(KB_101, (char __user *)arg);
/*
* These cannot be implemented on any machine that implements
* ioperm() in user level (such as Alpha PCs) or not at all.
*
* XXX: you should never use these, just call ioperm directly..
*/
#ifdef CONFIG_X86
case KDADDIO:
case KDDELIO:
/*
* KDADDIO and KDDELIO may be able to add ports beyond what
* we reject here, but to be safe...
*
* These are locked internally via sys_ioperm
*/
if (arg < GPFIRST || arg > GPLAST)
return -EINVAL;
return ksys_ioperm(arg, 1, (cmd == KDADDIO)) ? -ENXIO : 0;
case KDENABIO:
case KDDISABIO:
return ksys_ioperm(GPFIRST, GPNUM,
(cmd == KDENABIO)) ? -ENXIO : 0;
#endif
/* Linux m68k/i386 interface for setting the keyboard delay/repeat rate */
case KDKBDREP:
{
struct kbd_repeat kbrep;
if (!capable(CAP_SYS_TTY_CONFIG))
return -EPERM;
if (copy_from_user(&kbrep, up, sizeof(struct kbd_repeat)))
return -EFAULT;
ret = kbd_rate(&kbrep);
if (ret)
return ret;
if (copy_to_user(up, &kbrep, sizeof(struct kbd_repeat)))
return -EFAULT;
break;
}
case KDSETMODE:
if (!perm)
return -EPERM;
console_lock();
ret = vt_kdsetmode(vc, arg);
console_unlock();
return ret;
case KDGETMODE:
return put_user(vc->vc_mode, (int __user *)arg);
case KDMAPDISP:
case KDUNMAPDISP:
/*
* these work like a combination of mmap and KDENABIO.
* this could be easily finished.
*/
return -EINVAL;
case KDSKBMODE:
if (!perm)
return -EPERM;
ret = vt_do_kdskbmode(console, arg);
if (ret)
return ret;
tty_ldisc_flush(tty);
break;
case KDGKBMODE:
return put_user(vt_do_kdgkbmode(console), (int __user *)arg);
/* this could be folded into KDSKBMODE, but for compatibility
reasons it is not so easy to fold KDGKBMETA into KDGKBMODE */
case KDSKBMETA:
return vt_do_kdskbmeta(console, arg);
case KDGKBMETA:
/* FIXME: should review whether this is worth locking */
return put_user(vt_do_kdgkbmeta(console), (int __user *)arg);
case KDGETKEYCODE:
case KDSETKEYCODE:
if(!capable(CAP_SYS_TTY_CONFIG))
perm = 0;
return vt_do_kbkeycode_ioctl(cmd, up, perm);
case KDGKBENT:
case KDSKBENT:
return vt_do_kdsk_ioctl(cmd, up, perm, console);
case KDGKBSENT:
case KDSKBSENT:
return vt_do_kdgkb_ioctl(cmd, up, perm);
/* Diacritical processing. Handled in keyboard.c as it has
to operate on the keyboard locks and structures */
case KDGKBDIACR:
case KDGKBDIACRUC:
case KDSKBDIACR:
case KDSKBDIACRUC:
return vt_do_diacrit(cmd, up, perm);
/* the ioctls below read/set the flags usually shown in the leds */
/* don't use them - they will go away without warning */
case KDGKBLED:
case KDSKBLED:
case KDGETLED:
case KDSETLED:
return vt_do_kdskled(console, cmd, arg, perm);
/*
* A process can indicate its willingness to accept signals
* generated by pressing an appropriate key combination.
* Thus, one can have a daemon that e.g. spawns a new console
* upon a keypress and then changes to it.
* See also the kbrequest field of inittab(5).
*/
case KDSIGACCEPT:
if (!perm || !capable(CAP_KILL))
return -EPERM;
if (!valid_signal(arg) || arg < 1 || arg == SIGKILL)
return -EINVAL;
spin_lock_irq(&vt_spawn_con.lock);
put_pid(vt_spawn_con.pid);
vt_spawn_con.pid = get_pid(task_pid(current));
vt_spawn_con.sig = arg;
spin_unlock_irq(&vt_spawn_con.lock);
break;
case KDFONTOP: {
struct console_font_op op;
if (copy_from_user(&op, up, sizeof(op)))
return -EFAULT;
if (!perm && op.op != KD_FONT_OP_GET)
return -EPERM;
ret = con_font_op(vc, &op);
if (ret)
return ret;
if (copy_to_user(up, &op, sizeof(op)))
return -EFAULT;
break;
}
default:
return -ENOIOCTLCMD;
}
return 0;
}
static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
bool perm, struct vc_data *vc)
{
struct unimapdesc tmp;
if (copy_from_user(&tmp, user_ud, sizeof tmp))
return -EFAULT;
switch (cmd) {
case PIO_UNIMAP:
if (!perm)
return -EPERM;
return con_set_unimap(vc, tmp.entry_ct, tmp.entries);
case GIO_UNIMAP:
if (!perm && fg_console != vc->vc_num)
return -EPERM;
return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct),
tmp.entries);
}
return 0;
}
static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
bool perm)
{
switch (cmd) {
case PIO_CMAP:
if (!perm)
return -EPERM;
return con_set_cmap(up);
case GIO_CMAP:
return con_get_cmap(up);
case PIO_SCRNMAP:
if (!perm)
return -EPERM;
return con_set_trans_old(up);
case GIO_SCRNMAP:
return con_get_trans_old(up);
case PIO_UNISCRNMAP:
if (!perm)
return -EPERM;
return con_set_trans_new(up);
case GIO_UNISCRNMAP:
return con_get_trans_new(up);
case PIO_UNIMAPCLR:
if (!perm)
return -EPERM;
con_clear_unimap(vc);
break;
case PIO_UNIMAP:
case GIO_UNIMAP:
return do_unimap_ioctl(cmd, up, perm, vc);
default:
return -ENOIOCTLCMD;
}
return 0;
}
static int vt_reldisp(struct vc_data *vc, unsigned int swtch)
{
int newvt, ret;
if (vc->vt_mode.mode != VT_PROCESS)
return -EINVAL;
/* Switched-to response */
if (vc->vt_newvt < 0) {
/* If it's just an ACK, ignore it */
return swtch == VT_ACKACQ ? 0 : -EINVAL;
}
/* Switching-from response */
if (swtch == 0) {
/* Switch disallowed, so forget we were trying to do it. */
vc->vt_newvt = -1;
return 0;
}
/* The current vt has been released, so complete the switch. */
newvt = vc->vt_newvt;
vc->vt_newvt = -1;
ret = vc_allocate(newvt);
if (ret)
return ret;
/*
* When we actually do the console switch, make sure we are atomic with
* respect to other console switches..
*/
complete_change_console(vc_cons[newvt].d);
return 0;
}
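/*
 * Hedged user-space sketch of the VT_PROCESS handshake (illustrative): a
 * controller that installed relsig/acqsig via VT_SETMODE typically calls
 *
 *	ioctl(fd, VT_RELDISP, 1);		on relsig: allow the switch away
 *	ioctl(fd, VT_RELDISP, VT_ACKACQ);	on acqsig: ack the switch to us
 */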
static int vt_setactivate(struct vt_setactivate __user *sa)
{
struct vt_setactivate vsa;
struct vc_data *nvc;
int ret;
if (copy_from_user(&vsa, sa, sizeof(vsa)))
return -EFAULT;
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
return -ENXIO;
vsa.console--;
vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES);
console_lock();
ret = vc_allocate(vsa.console);
if (ret) {
console_unlock();
return ret;
}
/*
* This is safe providing we don't drop the console sem between
* vc_allocate and finishing referencing nvc.
*/
nvc = vc_cons[vsa.console].d;
nvc->vt_mode = vsa.mode;
nvc->vt_mode.frsig = 0;
put_pid(nvc->vt_pid);
nvc->vt_pid = get_pid(task_pid(current));
console_unlock();
/* Commence switch and lock */
/* Review set_console locks */
set_console(vsa.console);
return 0;
}
/* deallocate a single console, if possible (leave 0) */
static int vt_disallocate(unsigned int vc_num)
{
struct vc_data *vc = NULL;
int ret = 0;
console_lock();
if (vt_busy(vc_num))
ret = -EBUSY;
else if (vc_num)
vc = vc_deallocate(vc_num);
console_unlock();
if (vc && vc_num >= MIN_NR_CONSOLES)
tty_port_put(&vc->port);
return ret;
}
/* deallocate all unused consoles, but leave 0 */
static void vt_disallocate_all(void)
{
struct vc_data *vc[MAX_NR_CONSOLES];
int i;
console_lock();
for (i = 1; i < MAX_NR_CONSOLES; i++)
if (!vt_busy(i))
vc[i] = vc_deallocate(i);
else
vc[i] = NULL;
console_unlock();
for (i = 1; i < MAX_NR_CONSOLES; i++) {
if (vc[i] && i >= MIN_NR_CONSOLES)
tty_port_put(&vc[i]->port);
}
}
static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
{
struct vt_consize v;
int i;
if (copy_from_user(&v, cs, sizeof(struct vt_consize)))
return -EFAULT;
/* FIXME: Should check the copies properly */
if (!v.v_vlin)
v.v_vlin = vc->vc_scan_lines;
if (v.v_clin) {
int rows = v.v_vlin / v.v_clin;
if (v.v_rows != rows) {
if (v.v_rows) /* Parameters don't add up */
return -EINVAL;
v.v_rows = rows;
}
}
if (v.v_vcol && v.v_ccol) {
int cols = v.v_vcol / v.v_ccol;
if (v.v_cols != cols) {
if (v.v_cols)
return -EINVAL;
v.v_cols = cols;
}
}
if (v.v_clin > 32)
return -EINVAL;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
struct vc_data *vcp;
if (!vc_cons[i].d)
continue;
console_lock();
vcp = vc_cons[i].d;
if (vcp) {
int ret;
int save_scan_lines = vcp->vc_scan_lines;
int save_cell_height = vcp->vc_cell_height;
if (v.v_vlin)
vcp->vc_scan_lines = v.v_vlin;
if (v.v_clin)
vcp->vc_cell_height = v.v_clin;
vcp->vc_resize_user = 1;
ret = vc_resize(vcp, v.v_cols, v.v_rows);
if (ret) {
vcp->vc_scan_lines = save_scan_lines;
vcp->vc_cell_height = save_cell_height;
console_unlock();
return ret;
}
}
console_unlock();
}
return 0;
}
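/*
 * Worked example (illustrative): v_vlin = 400 scan lines with v_clin = 16
 * lines per character cell implies v_rows = 25; v_vcol = 640 pixels with
 * v_ccol = 8 per glyph implies v_cols = 80. A non-zero v_rows or v_cols
 * that disagrees with this arithmetic fails the checks above with -EINVAL.
 */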
/*
* We handle the console-specific ioctl's here. We allow the
* capability to modify any console, not just the fg_console.
*/
int vt_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct vc_data *vc = tty->driver_data;
void __user *up = (void __user *)arg;
int i, perm;
int ret;
/*
* To have permissions to do most of the vt ioctls, we either have
* to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
*/
perm = 0;
if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG))
perm = 1;
ret = vt_k_ioctl(tty, cmd, arg, perm);
if (ret != -ENOIOCTLCMD)
return ret;
ret = vt_io_ioctl(vc, cmd, up, perm);
if (ret != -ENOIOCTLCMD)
return ret;
switch (cmd) {
case TIOCLINUX:
return tioclinux(tty, arg);
case VT_SETMODE:
{
struct vt_mode tmp;
if (!perm)
return -EPERM;
if (copy_from_user(&tmp, up, sizeof(struct vt_mode)))
return -EFAULT;
if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS)
return -EINVAL;
console_lock();
vc->vt_mode = tmp;
/* the frsig is ignored, so we set it to 0 */
vc->vt_mode.frsig = 0;
put_pid(vc->vt_pid);
vc->vt_pid = get_pid(task_pid(current));
/* no switch is required -- [email protected] */
vc->vt_newvt = -1;
console_unlock();
break;
}
case VT_GETMODE:
{
struct vt_mode tmp;
int rc;
console_lock();
memcpy(&tmp, &vc->vt_mode, sizeof(struct vt_mode));
console_unlock();
rc = copy_to_user(up, &tmp, sizeof(struct vt_mode));
if (rc)
return -EFAULT;
break;
}
/*
* Returns global vt state. Note that VT 0 is always open, since
* it's an alias for the current VT, and people can't use it here.
* We cannot return state for more than 16 VTs, since v_state is short.
*/
case VT_GETSTATE:
{
struct vt_stat __user *vtstat = up;
unsigned short state, mask;
if (put_user(fg_console + 1, &vtstat->v_active))
return -EFAULT;
state = 1; /* /dev/tty0 is always open */
console_lock(); /* required by vt_in_use() */
for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask;
++i, mask <<= 1)
if (vt_in_use(i))
state |= mask;
console_unlock();
return put_user(state, &vtstat->v_state);
}
/*
* Returns the first available (non-opened) console.
*/
case VT_OPENQRY:
console_lock(); /* required by vt_in_use() */
for (i = 0; i < MAX_NR_CONSOLES; ++i)
if (!vt_in_use(i))
break;
console_unlock();
i = i < MAX_NR_CONSOLES ? (i+1) : -1;
return put_user(i, (int __user *)arg);
/*
* ioctl(fd, VT_ACTIVATE, num) will cause us to switch to vt # num,
* with num >= 1 (switches to vt 0, our console, are not allowed, just
* to preserve sanity).
*/
case VT_ACTIVATE:
if (!perm)
return -EPERM;
if (arg == 0 || arg > MAX_NR_CONSOLES)
return -ENXIO;
arg--;
arg = array_index_nospec(arg, MAX_NR_CONSOLES);
console_lock();
ret = vc_allocate(arg);
console_unlock();
if (ret)
return ret;
set_console(arg);
break;
case VT_SETACTIVATE:
if (!perm)
return -EPERM;
return vt_setactivate(up);
/*
* wait until the specified VT has been activated
*/
case VT_WAITACTIVE:
if (!perm)
return -EPERM;
if (arg == 0 || arg > MAX_NR_CONSOLES)
return -ENXIO;
return vt_waitactive(arg);
/*
* If a vt is under process control, the kernel will not switch to it
* immediately, but postpone the operation until the process calls this
* ioctl, allowing the switch to complete.
*
* According to the X sources this is the behavior:
* 0: pending switch-from not OK
* 1: pending switch-from OK
* 2: completed switch-to OK
*/
case VT_RELDISP:
if (!perm)
return -EPERM;
console_lock();
ret = vt_reldisp(vc, arg);
console_unlock();
return ret;
/*
 * Disallocate memory associated with a VT (but leave VT1)
*/
case VT_DISALLOCATE:
if (arg > MAX_NR_CONSOLES)
return -ENXIO;
if (arg == 0) {
vt_disallocate_all();
break;
}
arg = array_index_nospec(arg - 1, MAX_NR_CONSOLES);
return vt_disallocate(arg);
case VT_RESIZE:
{
struct vt_sizes __user *vtsizes = up;
struct vc_data *vc;
		ushort ll, cc;
if (!perm)
return -EPERM;
if (get_user(ll, &vtsizes->v_rows) ||
get_user(cc, &vtsizes->v_cols))
return -EFAULT;
console_lock();
for (i = 0; i < MAX_NR_CONSOLES; i++) {
vc = vc_cons[i].d;
if (vc) {
vc->vc_resize_user = 1;
/* FIXME: review v tty lock */
vc_resize(vc_cons[i].d, cc, ll);
}
}
console_unlock();
break;
}
case VT_RESIZEX:
if (!perm)
return -EPERM;
return vt_resizex(vc, up);
case VT_LOCKSWITCH:
if (!capable(CAP_SYS_TTY_CONFIG))
return -EPERM;
vt_dont_switch = true;
break;
case VT_UNLOCKSWITCH:
if (!capable(CAP_SYS_TTY_CONFIG))
return -EPERM;
vt_dont_switch = false;
break;
case VT_GETHIFONTMASK:
return put_user(vc->vc_hi_font_mask,
(unsigned short __user *)arg);
case VT_WAITEVENT:
return vt_event_wait_ioctl((struct vt_event __user *)arg);
default:
return -ENOIOCTLCMD;
}
return 0;
}
void reset_vc(struct vc_data *vc)
{
vc->vc_mode = KD_TEXT;
vt_reset_unicode(vc->vc_num);
vc->vt_mode.mode = VT_AUTO;
vc->vt_mode.waitv = 0;
vc->vt_mode.relsig = 0;
vc->vt_mode.acqsig = 0;
vc->vt_mode.frsig = 0;
put_pid(vc->vt_pid);
vc->vt_pid = NULL;
vc->vt_newvt = -1;
reset_palette(vc);
}
void vc_SAK(struct work_struct *work)
{
struct vc *vc_con =
container_of(work, struct vc, SAK_work);
struct vc_data *vc;
struct tty_struct *tty;
console_lock();
vc = vc_con->d;
if (vc) {
/* FIXME: review tty ref counting */
tty = vc->port.tty;
/*
* SAK should also work in all raw modes and reset
* them properly.
*/
if (tty)
__do_SAK(tty);
reset_vc(vc);
}
console_unlock();
}
#ifdef CONFIG_COMPAT
struct compat_console_font_op {
compat_uint_t op; /* operation code KD_FONT_OP_* */
compat_uint_t flags; /* KD_FONT_FLAG_* */
compat_uint_t width, height; /* font size */
compat_uint_t charcount;
compat_caddr_t data; /* font data with height fixed to 32 */
};
static inline int
compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop,
int perm, struct console_font_op *op, struct vc_data *vc)
{
int i;
if (copy_from_user(op, fontop, sizeof(struct compat_console_font_op)))
return -EFAULT;
if (!perm && op->op != KD_FONT_OP_GET)
return -EPERM;
op->data = compat_ptr(((struct compat_console_font_op *)op)->data);
i = con_font_op(vc, op);
if (i)
return i;
((struct compat_console_font_op *)op)->data = (unsigned long)op->data;
if (copy_to_user(fontop, op, sizeof(struct compat_console_font_op)))
return -EFAULT;
return 0;
}
struct compat_unimapdesc {
unsigned short entry_ct;
compat_caddr_t entries;
};
static inline int
compat_unimap_ioctl(unsigned int cmd, struct compat_unimapdesc __user *user_ud,
int perm, struct vc_data *vc)
{
struct compat_unimapdesc tmp;
struct unipair __user *tmp_entries;
if (copy_from_user(&tmp, user_ud, sizeof tmp))
return -EFAULT;
tmp_entries = compat_ptr(tmp.entries);
switch (cmd) {
case PIO_UNIMAP:
if (!perm)
return -EPERM;
return con_set_unimap(vc, tmp.entry_ct, tmp_entries);
case GIO_UNIMAP:
if (!perm && fg_console != vc->vc_num)
return -EPERM;
return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct), tmp_entries);
}
return 0;
}
long vt_compat_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct vc_data *vc = tty->driver_data;
struct console_font_op op; /* used in multiple places here */
void __user *up = compat_ptr(arg);
int perm;
/*
* To have permissions to do most of the vt ioctls, we either have
* to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
*/
perm = 0;
if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG))
perm = 1;
switch (cmd) {
/*
* these need special handlers for incompatible data structures
*/
case KDFONTOP:
return compat_kdfontop_ioctl(up, perm, &op, vc);
case PIO_UNIMAP:
case GIO_UNIMAP:
return compat_unimap_ioctl(cmd, up, perm, vc);
/*
* all these treat 'arg' as an integer
*/
case KIOCSOUND:
case KDMKTONE:
#ifdef CONFIG_X86
case KDADDIO:
case KDDELIO:
#endif
case KDSETMODE:
case KDMAPDISP:
case KDUNMAPDISP:
case KDSKBMODE:
case KDSKBMETA:
case KDSKBLED:
case KDSETLED:
case KDSIGACCEPT:
case VT_ACTIVATE:
case VT_WAITACTIVE:
case VT_RELDISP:
case VT_DISALLOCATE:
case VT_RESIZE:
case VT_RESIZEX:
return vt_ioctl(tty, cmd, arg);
/*
* the rest has a compatible data structure behind arg,
* but we have to convert it to a proper 64 bit pointer.
*/
default:
return vt_ioctl(tty, cmd, (unsigned long)up);
}
}
#endif /* CONFIG_COMPAT */
/*
* Performs the back end of a vt switch. Called under the console
* semaphore.
*/
static void complete_change_console(struct vc_data *vc)
{
unsigned char old_vc_mode;
int old = fg_console;
last_console = fg_console;
/*
* If we're switching, we could be going from KD_GRAPHICS to
* KD_TEXT mode or vice versa, which means we need to blank or
* unblank the screen later.
*/
old_vc_mode = vc_cons[fg_console].d->vc_mode;
switch_screen(vc);
/*
* This can't appear below a successful kill_pid(). If it did,
* then the *blank_screen operation could occur while X, having
* received acqsig, is waking up on another processor. This
* condition can lead to overlapping accesses to the VGA range
* and the framebuffer (causing system lockups).
*
* To account for this we duplicate this code below only if the
* controlling process is gone and we've called reset_vc.
*/
if (old_vc_mode != vc->vc_mode) {
if (vc->vc_mode == KD_TEXT)
do_unblank_screen(1);
else
do_blank_screen(1);
}
/*
* If this new console is under process control, send it a signal
* telling it that it has acquired. Also check if it has died and
* clean up (similar to logic employed in change_console())
*/
if (vc->vt_mode.mode == VT_PROCESS) {
/*
* Send the signal as privileged - kill_pid() will
* tell us if the process has gone or something else
* is awry
*/
if (kill_pid(vc->vt_pid, vc->vt_mode.acqsig, 1) != 0) {
/*
* The controlling process has died, so we revert back to
* normal operation. In this case, we'll also change back
* to KD_TEXT mode. I'm not sure if this is strictly correct
* but it saves the agony when the X server dies and the screen
* remains blanked due to KD_GRAPHICS! It would be nice to do
* this outside of VT_PROCESS but there is no single process
* to account for and tracking tty count may be undesirable.
*/
reset_vc(vc);
if (old_vc_mode != vc->vc_mode) {
if (vc->vc_mode == KD_TEXT)
do_unblank_screen(1);
else
do_blank_screen(1);
}
}
}
/*
* Wake anyone waiting for their VT to activate
*/
vt_event_post(VT_EVENT_SWITCH, old, vc->vc_num);
return;
}
/*
* Performs the front-end of a vt switch
*/
void change_console(struct vc_data *new_vc)
{
struct vc_data *vc;
if (!new_vc || new_vc->vc_num == fg_console || vt_dont_switch)
return;
/*
* If this vt is in process mode, then we need to handshake with
* that process before switching. Essentially, we store where that
* vt wants to switch to and wait for it to tell us when it's done
* (via VT_RELDISP ioctl).
*
* We also check to see if the controlling process still exists.
* If it doesn't, we reset this vt to auto mode and continue.
* This is a cheap way to track process control. The worst thing
* that can happen is: we send a signal to a process, it dies, and
* the switch gets "lost" waiting for a response; hopefully, the
* user will try again, we'll detect the process is gone (unless
* the user waits just the right amount of time :-) and revert the
* vt to auto control.
*/
vc = vc_cons[fg_console].d;
if (vc->vt_mode.mode == VT_PROCESS) {
/*
* Send the signal as privileged - kill_pid() will
* tell us if the process has gone or something else
* is awry.
*
* We need to set vt_newvt *before* sending the signal or we
* have a race.
*/
vc->vt_newvt = new_vc->vc_num;
if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
/*
* It worked. Mark the vt to switch to and
* return. The process needs to send us a
* VT_RELDISP ioctl to complete the switch.
*/
return;
}
/*
* The controlling process has died, so we revert back to
* normal operation. In this case, we'll also change back
* to KD_TEXT mode. I'm not sure if this is strictly correct
* but it saves the agony when the X server dies and the screen
* remains blanked due to KD_GRAPHICS! It would be nice to do
* this outside of VT_PROCESS but there is no single process
* to account for and tracking tty count may be undesirable.
*/
reset_vc(vc);
/*
* Fall through to normal (VT_AUTO) handling of the switch...
*/
}
/*
* Ignore all switches in KD_GRAPHICS+VT_AUTO mode
*/
if (vc->vc_mode == KD_GRAPHICS)
return;
complete_change_console(new_vc);
}
/* Perform a kernel triggered VT switch for suspend/resume */
static int disable_vt_switch;
int vt_move_to_console(unsigned int vt, int alloc)
{
int prev;
console_lock();
/* Graphics mode - up to X */
if (disable_vt_switch) {
console_unlock();
return 0;
}
prev = fg_console;
if (alloc && vc_allocate(vt)) {
		/* we can't get a free VC right now. Too bad; we don't
		 * want to mess up the screen for now. */
console_unlock();
return -ENOSPC;
}
if (set_console(vt)) {
/*
* We're unable to switch to the SUSPEND_CONSOLE.
* Let the calling function know so it can decide
* what to do.
*/
console_unlock();
return -EIO;
}
console_unlock();
if (vt_waitactive(vt + 1)) {
pr_debug("Suspend: Can't switch VCs.");
return -EINTR;
}
return prev;
}
/*
* Normally during a suspend, we allocate a new console and switch to it.
* When we resume, we switch back to the original console. This switch
* can be slow, so on systems where the framebuffer can handle restoration
* of video registers anyways, there's little point in doing the console
* switch. This function allows you to disable it by passing it '0'.
*/
void pm_set_vt_switch(int do_switch)
{
console_lock();
disable_vt_switch = !do_switch;
console_unlock();
}
EXPORT_SYMBOL(pm_set_vt_switch);
| linux-master | drivers/tty/vt/vt_ioctl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* Hopefully this will be a rather complete VT102 implementation.
*
* Beeping thanks to John T Kohl.
*
* Virtual Consoles, Screen Blanking, Screen Dumping, Color, Graphics
* Chars, and VT100 enhancements by Peter MacDonald.
*
* Copy and paste function by Andrew Haylett,
* some enhancements by Alessandro Rubini.
*
* Code to check for different video-cards mostly by Galen Hunt,
* <[email protected]>
*
* Rudimentary ISO 10646/Unicode/UTF-8 character set support by
* Markus Kuhn, <[email protected]>.
*
* Dynamic allocation of consoles, [email protected], May 1994
* Resizing of consoles, aeb, 940926
*
* Code for xterm like mouse click reporting by Peter Orbaek 20-Jul-94
* <[email protected]>
*
* User-defined bell sound, new setterm control sequences and printk
* redirection by Martin Mares <[email protected]> 19-Nov-95
*
* APM screenblank bug fixed Takashi Manabe <[email protected]>
*
* Merge with the abstract console driver by Geert Uytterhoeven
* <[email protected]>, Jan 1997.
*
* Original m68k console driver modifications by
*
* - Arno Griffioen <[email protected]>
* - David Carter <[email protected]>
*
* The abstract console driver provides a generic interface for a text
* console. It supports VGA text mode, frame buffer based graphical consoles
* and special graphics processors that are only accessible through some
* registers (e.g. a TMS340x0 GSP).
*
* The interface to the hardware is specified using a special structure
* (struct consw) which contains function pointers to console operations
* (see <linux/console.h> for more information).
*
* Support for changeable cursor shape
* by Pavel Machek <[email protected]>, August 1997
*
* Ported to i386 and con_scrolldelta fixed
* by Emmanuel Marty <[email protected]>, April 1998
*
* Resurrected character buffers in videoram plus lots of other trickery
* by Martin Mares <[email protected]>, July 1998
*
* Removed old-style timers, introduced console_timer, made timer
* deletion SMP-safe. 17Jun00, Andrew Morton
*
* Removed console_lock, enabled interrupts across all console operations
* 13 March 2001, Andrew Morton
*
* Fixed UTF-8 mode so alternate charset modes always work according
* to control sequences interpreted in do_con_trol function
* preserving backward VT100 semigraphics compatibility,
* malformed UTF sequences represented as sequences of replacement glyphs,
* original codes or '?' as a last resort if replacement glyph is undefined
* by Adam Tla/lka <[email protected]>, Aug 2006
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kd.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/major.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/vt_kern.h>
#include <linux/selection.h>
#include <linux/tiocl.h>
#include <linux/kbd_kern.h>
#include <linux/consolemap.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm.h>
#include <linux/font.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/kdb.h>
#include <linux/ctype.h>
#include <linux/bsearch.h>
#include <linux/gcd.h>
#define MAX_NR_CON_DRIVER 16
#define CON_DRIVER_FLAG_MODULE 1
#define CON_DRIVER_FLAG_INIT 2
#define CON_DRIVER_FLAG_ATTR 4
#define CON_DRIVER_FLAG_ZOMBIE 8
struct con_driver {
const struct consw *con;
const char *desc;
struct device *dev;
int node;
int first;
int last;
int flag;
};
static struct con_driver registered_con_driver[MAX_NR_CON_DRIVER];
const struct consw *conswitchp;
/*
 * Here are the default bell parameters: 750 Hz, 1/8th of a second
*/
#define DEFAULT_BELL_PITCH 750
#define DEFAULT_BELL_DURATION (HZ/8)
#define DEFAULT_CURSOR_BLINK_MS 200
struct vc vc_cons [MAX_NR_CONSOLES];
EXPORT_SYMBOL(vc_cons);
static const struct consw *con_driver_map[MAX_NR_CONSOLES];
static int con_open(struct tty_struct *, struct file *);
static void vc_init(struct vc_data *vc, int do_clear);
static void gotoxy(struct vc_data *vc, int new_x, int new_y);
static void save_cur(struct vc_data *vc);
static void reset_terminal(struct vc_data *vc, int do_clear);
static void con_flush_chars(struct tty_struct *tty);
static int set_vesa_blanking(char __user *p);
static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
static void console_callback(struct work_struct *ignored);
static void con_driver_unregister_callback(struct work_struct *ignored);
static void blank_screen_t(struct timer_list *unused);
static void set_palette(struct vc_data *vc);
static void unblank_screen(void);
#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
int default_utf8 = true;
module_param(default_utf8, int, S_IRUGO | S_IWUSR);
int global_cursor_default = -1;
module_param(global_cursor_default, int, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL(global_cursor_default);
static int cur_default = CUR_UNDERLINE;
module_param(cur_default, int, S_IRUGO | S_IWUSR);
/*
* ignore_poke: don't unblank the screen when things are typed. This is
* mainly for the privacy of braille terminal users.
*/
static int ignore_poke;
int do_poke_blanked_console;
int console_blanked;
EXPORT_SYMBOL(console_blanked);
static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
static int vesa_off_interval;
static int blankinterval;
core_param(consoleblank, blankinterval, int, 0444);
static DECLARE_WORK(console_work, console_callback);
static DECLARE_WORK(con_driver_unregister_work, con_driver_unregister_callback);
/*
* fg_console is the current virtual console,
* last_console is the last used one,
* want_console is the console we want to switch to,
* saved_* variants are for save/restore around kernel debugger enter/leave
*/
int fg_console;
EXPORT_SYMBOL(fg_console);
int last_console;
int want_console = -1;
static int saved_fg_console;
static int saved_last_console;
static int saved_want_console;
static int saved_vc_mode;
static int saved_console_blanked;
/*
 * For each existing display, we have a pointer to the console currently visible
* on that display, allowing consoles other than fg_console to be refreshed
* appropriately. Unless the low-level driver supplies its own display_fg
* variable, we use this one for the "master display".
*/
static struct vc_data *master_display_fg;
/*
* Unfortunately, we need to delay tty echo when we're currently writing to the
 * console since the code is (and always was) not re-entrant, so we schedule
 * all flip requests to process context with schedule_work() and run them from
* console_callback().
*/
/*
* For the same reason, we defer scrollback to the console callback.
*/
static int scrollback_delta;
/*
* Hook so that the power management routines can (un)blank
* the console on our behalf.
*/
int (*console_blank_hook)(int);
EXPORT_SYMBOL(console_blank_hook);
static DEFINE_TIMER(console_timer, blank_screen_t);
static int blank_state;
static int blank_timer_expired;
enum {
blank_off = 0,
blank_normal_wait,
blank_vesa_wait,
};
/*
* /sys/class/tty/tty0/
*
* the attribute 'active' contains the name of the current vc
* console and it supports poll() to detect vc switches
*/
static struct device *tty0dev;
/*
* Notifier list for console events.
*/
static ATOMIC_NOTIFIER_HEAD(vt_notifier_list);
int register_vt_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&vt_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(register_vt_notifier);
int unregister_vt_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&vt_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vt_notifier);
static void notify_write(struct vc_data *vc, unsigned int unicode)
{
struct vt_notifier_param param = { .vc = vc, .c = unicode };
atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
}
static void notify_update(struct vc_data *vc)
{
struct vt_notifier_param param = { .vc = vc };
atomic_notifier_call_chain(&vt_notifier_list, VT_UPDATE, &param);
}
/*
* Low-Level Functions
*/
static inline bool con_is_fg(const struct vc_data *vc)
{
return vc->vc_num == fg_console;
}
static inline bool con_should_update(const struct vc_data *vc)
{
return con_is_visible(vc) && !console_blanked;
}
static inline unsigned short *screenpos(const struct vc_data *vc, int offset,
bool viewed)
{
unsigned short *p;
if (!viewed)
p = (unsigned short *)(vc->vc_origin + offset);
else if (!vc->vc_sw->con_screen_pos)
p = (unsigned short *)(vc->vc_visible_origin + offset);
else
p = vc->vc_sw->con_screen_pos(vc, offset);
return p;
}
/* Called from the keyboard irq path.. */
static inline void scrolldelta(int lines)
{
/* FIXME */
/* scrolldelta needs some kind of consistency lock, but the BKL was not
and still is not protecting against the scheduled back end */
scrollback_delta += lines;
schedule_console_callback();
}
void schedule_console_callback(void)
{
schedule_work(&console_work);
}
/*
* Code to manage unicode-based screen buffers
*/
/*
* Our screen buffer is preceded by an array of line pointers so that
* scrolling only implies some pointer shuffling.
*/
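/*
 * vc_uniscr_alloc() below does this as a single allocation: the rows
 * line pointers come first, followed by the rows * cols u32 character
 * cells, with each uni_lines[i] pointing at its row's cells. Scrolling
 * can then permute the pointer array while no cell data moves.
 */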
static u32 **vc_uniscr_alloc(unsigned int cols, unsigned int rows)
{
u32 **uni_lines;
void *p;
unsigned int memsize, i, col_size = cols * sizeof(**uni_lines);
/* allocate everything in one go */
memsize = col_size * rows;
memsize += rows * sizeof(*uni_lines);
uni_lines = vzalloc(memsize);
if (!uni_lines)
return NULL;
/* initial line pointers */
p = uni_lines + rows;
for (i = 0; i < rows; i++) {
uni_lines[i] = p;
p += col_size;
}
return uni_lines;
}
static void vc_uniscr_free(u32 **uni_lines)
{
vfree(uni_lines);
}
static void vc_uniscr_set(struct vc_data *vc, u32 **new_uni_lines)
{
vc_uniscr_free(vc->vc_uni_lines);
vc->vc_uni_lines = new_uni_lines;
}
static void vc_uniscr_putc(struct vc_data *vc, u32 uc)
{
if (vc->vc_uni_lines)
vc->vc_uni_lines[vc->state.y][vc->state.x] = uc;
}
static void vc_uniscr_insert(struct vc_data *vc, unsigned int nr)
{
if (vc->vc_uni_lines) {
u32 *ln = vc->vc_uni_lines[vc->state.y];
unsigned int x = vc->state.x, cols = vc->vc_cols;
memmove(&ln[x + nr], &ln[x], (cols - x - nr) * sizeof(*ln));
memset32(&ln[x], ' ', nr);
}
}
static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr)
{
if (vc->vc_uni_lines) {
u32 *ln = vc->vc_uni_lines[vc->state.y];
unsigned int x = vc->state.x, cols = vc->vc_cols;
memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
memset32(&ln[cols - nr], ' ', nr);
}
}
static void vc_uniscr_clear_line(struct vc_data *vc, unsigned int x,
unsigned int nr)
{
if (vc->vc_uni_lines)
memset32(&vc->vc_uni_lines[vc->state.y][x], ' ', nr);
}
static void vc_uniscr_clear_lines(struct vc_data *vc, unsigned int y,
unsigned int nr)
{
if (vc->vc_uni_lines)
while (nr--)
memset32(vc->vc_uni_lines[y++], ' ', vc->vc_cols);
}
/* juggling array rotation algorithm (complexity O(N), size complexity O(1)) */
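/*
 * Example: size = 6, nr = 2 splits into gcd(2, 6) = 2 cycles,
 * [0] <- [2] <- [4] <- old [0] and [1] <- [3] <- [5] <- old [1], so
 * {A,B,C,D,E,F} becomes {C,D,E,F,A,B}: a left rotation by nr lines
 * using only one saved element per cycle.
 */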
static void juggle_array(u32 **array, unsigned int size, unsigned int nr)
{
unsigned int gcd_idx;
for (gcd_idx = 0; gcd_idx < gcd(nr, size); gcd_idx++) {
u32 *gcd_idx_val = array[gcd_idx];
unsigned int dst_idx = gcd_idx;
while (1) {
unsigned int src_idx = (dst_idx + nr) % size;
if (src_idx == gcd_idx)
break;
array[dst_idx] = array[src_idx];
dst_idx = src_idx;
}
array[dst_idx] = gcd_idx_val;
}
}
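/*
 * Scrolling the unicode buffer is a pure permutation of the line
 * pointers: SM_UP rotates the region left by nr lines, while SM_DOWN is
 * expressed as a left rotation by size - nr, which is the same
 * permutation as a right rotation by nr. The freed lines are then
 * blanked by vc_uniscr_clear_lines().
 */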
static void vc_uniscr_scroll(struct vc_data *vc, unsigned int top,
unsigned int bottom, enum con_scroll dir,
unsigned int nr)
{
u32 **uni_lines = vc->vc_uni_lines;
unsigned int size = bottom - top;
if (!uni_lines)
return;
if (dir == SM_DOWN) {
juggle_array(&uni_lines[top], size, size - nr);
vc_uniscr_clear_lines(vc, top, nr);
} else {
juggle_array(&uni_lines[top], size, nr);
vc_uniscr_clear_lines(vc, bottom - nr, nr);
}
}
static void vc_uniscr_copy_area(u32 **dst_lines,
unsigned int dst_cols,
unsigned int dst_rows,
u32 **src_lines,
unsigned int src_cols,
unsigned int src_top_row,
unsigned int src_bot_row)
{
unsigned int dst_row = 0;
if (!dst_lines)
return;
while (src_top_row < src_bot_row) {
u32 *src_line = src_lines[src_top_row];
u32 *dst_line = dst_lines[dst_row];
memcpy(dst_line, src_line, src_cols * sizeof(*src_line));
if (dst_cols - src_cols)
memset32(dst_line + src_cols, ' ', dst_cols - src_cols);
src_top_row++;
dst_row++;
}
while (dst_row < dst_rows) {
u32 *dst_line = dst_lines[dst_row];
memset32(dst_line, ' ', dst_cols);
dst_row++;
}
}
/*
* Called from vcs_read() to make sure unicode screen retrieval is possible.
* This will initialize the unicode screen buffer if not already done.
* This returns 0 if OK, or a negative error code otherwise.
* In particular, -ENODATA is returned if the console is not in UTF-8 mode.
*/
int vc_uniscr_check(struct vc_data *vc)
{
u32 **uni_lines;
unsigned short *p;
int x, y, mask;
WARN_CONSOLE_UNLOCKED();
if (!vc->vc_utf)
return -ENODATA;
if (vc->vc_uni_lines)
return 0;
uni_lines = vc_uniscr_alloc(vc->vc_cols, vc->vc_rows);
if (!uni_lines)
return -ENOMEM;
/*
* Let's populate it initially with (imperfect) reverse translation.
* This is the next best thing we can do short of having it enabled
* from the start even when no users rely on this functionality. True
* unicode content will be available after a complete screen refresh.
*/
p = (unsigned short *)vc->vc_origin;
mask = vc->vc_hi_font_mask | 0xff;
for (y = 0; y < vc->vc_rows; y++) {
u32 *line = uni_lines[y];
for (x = 0; x < vc->vc_cols; x++) {
u16 glyph = scr_readw(p++) & mask;
line[x] = inverse_translate(vc, glyph, true);
}
}
vc->vc_uni_lines = uni_lines;
return 0;
}
/*
* Called from vcs_read() to get the unicode data from the screen.
* This must be preceded by a successful call to vc_uniscr_check() once
* the console lock has been taken.
*/
void vc_uniscr_copy_line(const struct vc_data *vc, void *dest, bool viewed,
unsigned int row, unsigned int col, unsigned int nr)
{
u32 **uni_lines = vc->vc_uni_lines;
int offset = row * vc->vc_size_row + col * 2;
unsigned long pos;
if (WARN_ON_ONCE(!uni_lines))
return;
pos = (unsigned long)screenpos(vc, offset, viewed);
if (pos >= vc->vc_origin && pos < vc->vc_scr_end) {
/*
* Desired position falls in the main screen buffer.
* However the actual row/col might be different if
* scrollback is active.
*/
row = (pos - vc->vc_origin) / vc->vc_size_row;
col = ((pos - vc->vc_origin) % vc->vc_size_row) / 2;
memcpy(dest, &uni_lines[row][col], nr * sizeof(u32));
} else {
/*
* Scrollback is active. For now let's simply backtranslate
* the screen glyphs until the unicode screen buffer does
* synchronize with console display drivers for a scrollback
* buffer of its own.
*/
u16 *p = (u16 *)pos;
int mask = vc->vc_hi_font_mask | 0xff;
u32 *uni_buf = dest;
while (nr--) {
u16 glyph = scr_readw(p++) & mask;
*uni_buf++ = inverse_translate(vc, glyph, true);
}
}
}
static void con_scroll(struct vc_data *vc, unsigned int top,
unsigned int bottom, enum con_scroll dir,
unsigned int nr)
{
unsigned int rows = bottom - top;
u16 *clear, *dst, *src;
if (top + nr >= bottom)
nr = rows - 1;
if (bottom > vc->vc_rows || top >= bottom || nr < 1)
return;
vc_uniscr_scroll(vc, top, bottom, dir, nr);
if (con_is_visible(vc) &&
vc->vc_sw->con_scroll(vc, top, bottom, dir, nr))
return;
src = clear = (u16 *)(vc->vc_origin + vc->vc_size_row * top);
dst = (u16 *)(vc->vc_origin + vc->vc_size_row * (top + nr));
if (dir == SM_UP) {
clear = src + (rows - nr) * vc->vc_cols;
swap(src, dst);
}
scr_memmovew(dst, src, (rows - nr) * vc->vc_size_row);
scr_memsetw(clear, vc->vc_video_erase_char, vc->vc_size_row * nr);
}
static void do_update_region(struct vc_data *vc, unsigned long start, int count)
{
unsigned int xx, yy, offset;
u16 *p;
p = (u16 *) start;
if (!vc->vc_sw->con_getxy) {
offset = (start - vc->vc_origin) / 2;
xx = offset % vc->vc_cols;
yy = offset / vc->vc_cols;
} else {
int nxx, nyy;
start = vc->vc_sw->con_getxy(vc, start, &nxx, &nyy);
xx = nxx; yy = nyy;
}
for (;;) {
u16 attrib = scr_readw(p) & 0xff00;
int startx = xx;
u16 *q = p;
while (xx < vc->vc_cols && count) {
if (attrib != (scr_readw(p) & 0xff00)) {
if (p > q)
vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
startx = xx;
q = p;
attrib = scr_readw(p) & 0xff00;
}
p++;
xx++;
count--;
}
if (p > q)
vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
if (!count)
break;
xx = 0;
yy++;
if (vc->vc_sw->con_getxy) {
p = (u16 *)start;
start = vc->vc_sw->con_getxy(vc, start, NULL, NULL);
}
}
}
void update_region(struct vc_data *vc, unsigned long start, int count)
{
WARN_CONSOLE_UNLOCKED();
if (con_should_update(vc)) {
hide_cursor(vc);
do_update_region(vc, start, count);
set_cursor(vc);
}
}
EXPORT_SYMBOL(update_region);
/* Structure of attributes is hardware-dependent */
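/*
 * In the common colour case the result is the classic VGA attribute
 * byte: foreground in the low nibble, background in the high one.
 * Example: 0x17 (white on blue) becomes 0x71 (blue on white) under
 * _reverse, with the blink/bright bits (mask 0x88) left in place.
 */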
static u8 build_attr(struct vc_data *vc, u8 _color,
enum vc_intensity _intensity, bool _blink, bool _underline,
bool _reverse, bool _italic)
{
if (vc->vc_sw->con_build_attr)
return vc->vc_sw->con_build_attr(vc, _color, _intensity,
_blink, _underline, _reverse, _italic);
/*
* ++roman: I completely changed the attribute format for monochrome
* mode (!can_do_color). The formerly used MDA (monochrome display
* adapter) format didn't allow the combination of certain effects.
* Now the attribute is just a bit vector:
* Bit 0..1: intensity (0..2)
* Bit 2 : underline
* Bit 3 : reverse
* Bit 7 : blink
*/
{
u8 a = _color;
if (!vc->vc_can_do_color)
return _intensity |
(_italic << 1) |
(_underline << 2) |
(_reverse << 3) |
(_blink << 7);
if (_italic)
a = (a & 0xF0) | vc->vc_itcolor;
else if (_underline)
a = (a & 0xf0) | vc->vc_ulcolor;
else if (_intensity == VCI_HALF_BRIGHT)
a = (a & 0xf0) | vc->vc_halfcolor;
if (_reverse)
a = (a & 0x88) | (((a >> 4) | (a << 4)) & 0x77);
if (_blink)
a ^= 0x80;
if (_intensity == VCI_BOLD)
a ^= 0x08;
if (vc->vc_hi_font_mask == 0x100)
a <<= 1;
return a;
}
}
static void update_attr(struct vc_data *vc)
{
vc->vc_attr = build_attr(vc, vc->state.color, vc->state.intensity,
vc->state.blink, vc->state.underline,
vc->state.reverse ^ vc->vc_decscnm, vc->state.italic);
vc->vc_video_erase_char = ' ' | (build_attr(vc, vc->state.color,
VCI_NORMAL, vc->state.blink, false,
vc->vc_decscnm, false) << 8);
}
/* Note: inverting the screen twice should revert to the original state */
void invert_screen(struct vc_data *vc, int offset, int count, bool viewed)
{
unsigned short *p;
WARN_CONSOLE_UNLOCKED();
count /= 2;
p = screenpos(vc, offset, viewed);
if (vc->vc_sw->con_invert_region) {
vc->vc_sw->con_invert_region(vc, p, count);
} else {
u16 *q = p;
int cnt = count;
u16 a;
if (!vc->vc_can_do_color) {
while (cnt--) {
a = scr_readw(q);
a ^= 0x0800;
scr_writew(a, q);
q++;
}
} else if (vc->vc_hi_font_mask == 0x100) {
while (cnt--) {
a = scr_readw(q);
a = (a & 0x11ff) |
((a & 0xe000) >> 4) |
((a & 0x0e00) << 4);
scr_writew(a, q);
q++;
}
} else {
while (cnt--) {
a = scr_readw(q);
a = (a & 0x88ff) |
((a & 0x7000) >> 4) |
((a & 0x0700) << 4);
scr_writew(a, q);
q++;
}
}
}
if (con_should_update(vc))
do_update_region(vc, (unsigned long) p, count);
notify_update(vc);
}
/* used by selection: complement pointer position */
void complement_pos(struct vc_data *vc, int offset)
{
static int old_offset = -1;
static unsigned short old;
static unsigned short oldx, oldy;
WARN_CONSOLE_UNLOCKED();
if (old_offset >= 0 && old_offset < vc->vc_screenbuf_size) {
scr_writew(old, screenpos(vc, old_offset, true));
if (con_should_update(vc))
vc->vc_sw->con_putc(vc, old, oldy, oldx);
notify_update(vc);
}
old_offset = offset;
if (offset >= 0 && offset < vc->vc_screenbuf_size) {
unsigned short new;
unsigned short *p;
p = screenpos(vc, offset, true);
old = scr_readw(p);
new = old ^ vc->vc_complement_mask;
scr_writew(new, p);
if (con_should_update(vc)) {
oldx = (offset >> 1) % vc->vc_cols;
oldy = (offset >> 1) / vc->vc_cols;
vc->vc_sw->con_putc(vc, new, oldy, oldx);
}
notify_update(vc);
}
}
static void insert_char(struct vc_data *vc, unsigned int nr)
{
unsigned short *p = (unsigned short *) vc->vc_pos;
vc_uniscr_insert(vc, nr);
scr_memmovew(p + nr, p, (vc->vc_cols - vc->state.x - nr) * 2);
scr_memsetw(p, vc->vc_video_erase_char, nr * 2);
vc->vc_need_wrap = 0;
if (con_should_update(vc))
do_update_region(vc, (unsigned long) p,
vc->vc_cols - vc->state.x);
}
static void delete_char(struct vc_data *vc, unsigned int nr)
{
unsigned short *p = (unsigned short *) vc->vc_pos;
vc_uniscr_delete(vc, nr);
scr_memmovew(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2);
scr_memsetw(p + vc->vc_cols - vc->state.x - nr, vc->vc_video_erase_char,
nr * 2);
vc->vc_need_wrap = 0;
if (con_should_update(vc))
do_update_region(vc, (unsigned long) p,
vc->vc_cols - vc->state.x);
}
static int softcursor_original = -1;
static void add_softcursor(struct vc_data *vc)
{
int i = scr_readw((u16 *) vc->vc_pos);
u32 type = vc->vc_cursor_type;
if (!(type & CUR_SW))
return;
if (softcursor_original != -1)
return;
softcursor_original = i;
i |= CUR_SET(type);
i ^= CUR_CHANGE(type);
if ((type & CUR_ALWAYS_BG) &&
(softcursor_original & CUR_BG) == (i & CUR_BG))
i ^= CUR_BG;
if ((type & CUR_INVERT_FG_BG) && (i & CUR_FG) == ((i & CUR_BG) >> 4))
i ^= CUR_FG;
scr_writew(i, (u16 *)vc->vc_pos);
if (con_should_update(vc))
vc->vc_sw->con_putc(vc, i, vc->state.y, vc->state.x);
}
static void hide_softcursor(struct vc_data *vc)
{
if (softcursor_original != -1) {
scr_writew(softcursor_original, (u16 *)vc->vc_pos);
if (con_should_update(vc))
vc->vc_sw->con_putc(vc, softcursor_original,
vc->state.y, vc->state.x);
softcursor_original = -1;
}
}
static void hide_cursor(struct vc_data *vc)
{
if (vc_is_sel(vc))
clear_selection();
vc->vc_sw->con_cursor(vc, CM_ERASE);
hide_softcursor(vc);
}
static void set_cursor(struct vc_data *vc)
{
if (!con_is_fg(vc) || console_blanked || vc->vc_mode == KD_GRAPHICS)
return;
if (vc->vc_deccm) {
if (vc_is_sel(vc))
clear_selection();
add_softcursor(vc);
if (CUR_SIZE(vc->vc_cursor_type) != CUR_NONE)
vc->vc_sw->con_cursor(vc, CM_DRAW);
} else
hide_cursor(vc);
}
static void set_origin(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
if (!con_is_visible(vc) ||
!vc->vc_sw->con_set_origin ||
!vc->vc_sw->con_set_origin(vc))
vc->vc_origin = (unsigned long)vc->vc_screenbuf;
vc->vc_visible_origin = vc->vc_origin;
vc->vc_scr_end = vc->vc_origin + vc->vc_screenbuf_size;
vc->vc_pos = vc->vc_origin + vc->vc_size_row * vc->state.y +
2 * vc->state.x;
}
static void save_screen(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
if (vc->vc_sw->con_save_screen)
vc->vc_sw->con_save_screen(vc);
}
static void flush_scrollback(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
set_origin(vc);
if (vc->vc_sw->con_flush_scrollback) {
vc->vc_sw->con_flush_scrollback(vc);
} else if (con_is_visible(vc)) {
/*
* When no con_flush_scrollback method is provided then the
* legacy way for flushing the scrollback buffer is to use
* a side effect of the con_switch method. We do it only on
* the foreground console as background consoles have no
* scrollback buffers in that case and we obviously don't
* want to switch to them.
*/
hide_cursor(vc);
vc->vc_sw->con_switch(vc);
set_cursor(vc);
}
}
/*
* Redrawing of screen
*/
void clear_buffer_attributes(struct vc_data *vc)
{
unsigned short *p = (unsigned short *)vc->vc_origin;
int count = vc->vc_screenbuf_size / 2;
int mask = vc->vc_hi_font_mask | 0xff;
for (; count > 0; count--, p++) {
scr_writew((scr_readw(p)&mask) | (vc->vc_video_erase_char & ~mask), p);
}
}
void redraw_screen(struct vc_data *vc, int is_switch)
{
int redraw = 0;
WARN_CONSOLE_UNLOCKED();
if (!vc) {
/* strange ... */
/* printk("redraw_screen: tty %d not allocated ??\n", new_console+1); */
return;
}
if (is_switch) {
struct vc_data *old_vc = vc_cons[fg_console].d;
if (old_vc == vc)
return;
if (!con_is_visible(vc))
redraw = 1;
*vc->vc_display_fg = vc;
fg_console = vc->vc_num;
hide_cursor(old_vc);
if (!con_is_visible(old_vc)) {
save_screen(old_vc);
set_origin(old_vc);
}
if (tty0dev)
sysfs_notify(&tty0dev->kobj, NULL, "active");
} else {
hide_cursor(vc);
redraw = 1;
}
if (redraw) {
int update;
int old_was_color = vc->vc_can_do_color;
set_origin(vc);
update = vc->vc_sw->con_switch(vc);
set_palette(vc);
/*
* If console changed from mono<->color, the best we can do
* is to clear the buffer attributes. As it currently stands,
* rebuilding new attributes from the old buffer is not doable
* without overly complex code.
*/
if (old_was_color != vc->vc_can_do_color) {
update_attr(vc);
clear_buffer_attributes(vc);
}
if (update && vc->vc_mode != KD_GRAPHICS)
do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2);
}
set_cursor(vc);
if (is_switch) {
vt_set_leds_compute_shiftstate();
notify_update(vc);
}
}
EXPORT_SYMBOL(redraw_screen);
/*
* Allocation, freeing and resizing of VTs.
*/
int vc_cons_allocated(unsigned int i)
{
return (i < MAX_NR_CONSOLES && vc_cons[i].d);
}
static void visual_init(struct vc_data *vc, int num, int init)
{
/* ++Geert: vc->vc_sw->con_init determines console size */
if (vc->vc_sw)
module_put(vc->vc_sw->owner);
vc->vc_sw = conswitchp;
if (con_driver_map[num])
vc->vc_sw = con_driver_map[num];
__module_get(vc->vc_sw->owner);
vc->vc_num = num;
vc->vc_display_fg = &master_display_fg;
if (vc->uni_pagedict_loc)
con_free_unimap(vc);
vc->uni_pagedict_loc = &vc->uni_pagedict;
vc->uni_pagedict = NULL;
vc->vc_hi_font_mask = 0;
vc->vc_complement_mask = 0;
vc->vc_can_do_color = 0;
vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
vc->vc_sw->con_init(vc, init);
if (!vc->vc_complement_mask)
vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
vc->vc_s_complement_mask = vc->vc_complement_mask;
vc->vc_size_row = vc->vc_cols << 1;
vc->vc_screenbuf_size = vc->vc_rows * vc->vc_size_row;
}
static void visual_deinit(struct vc_data *vc)
{
vc->vc_sw->con_deinit(vc);
module_put(vc->vc_sw->owner);
}
static void vc_port_destruct(struct tty_port *port)
{
struct vc_data *vc = container_of(port, struct vc_data, port);
kfree(vc);
}
static const struct tty_port_operations vc_port_ops = {
.destruct = vc_port_destruct,
};
/*
* Change # of rows and columns (0 means unchanged/the size of fg_console)
* [this is to be used together with some user program
* like resize that changes the hardware videomode]
*/
#define VC_MAXCOL (32767)
#define VC_MAXROW (32767)
int vc_allocate(unsigned int currcons) /* return 0 on success */
{
struct vt_notifier_param param;
struct vc_data *vc;
int err;
WARN_CONSOLE_UNLOCKED();
if (currcons >= MAX_NR_CONSOLES)
return -ENXIO;
if (vc_cons[currcons].d)
return 0;
/* due to the granularity of kmalloc, we waste some memory here */
/* the alloc is done in two steps, to optimize the common situation
of a 25x80 console (structsize=216, screenbuf_size=4000) */
/* although the numbers above are not valid since long ago, the
point is still up-to-date and the comment still has its value
even if only as a historical artifact. --mj, July 1998 */
param.vc = vc = kzalloc(sizeof(struct vc_data), GFP_KERNEL);
if (!vc)
return -ENOMEM;
vc_cons[currcons].d = vc;
tty_port_init(&vc->port);
vc->port.ops = &vc_port_ops;
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
visual_init(vc, currcons, 1);
if (!*vc->uni_pagedict_loc)
con_set_default_unimap(vc);
err = -EINVAL;
if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW ||
vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size)
goto err_free;
err = -ENOMEM;
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
if (!vc->vc_screenbuf)
goto err_free;
/* If no drivers have overridden us and the user didn't pass a
boot option, default to displaying the cursor */
if (global_cursor_default == -1)
global_cursor_default = 1;
vc_init(vc, 1);
vcs_make_sysfs(currcons);
atomic_notifier_call_chain(&vt_notifier_list, VT_ALLOCATE, &param);
return 0;
err_free:
visual_deinit(vc);
kfree(vc);
vc_cons[currcons].d = NULL;
return err;
}
static inline int resize_screen(struct vc_data *vc, int width, int height,
int user)
{
/* Resize the resolution of the display adapter */
int err = 0;
if (vc->vc_sw->con_resize)
err = vc->vc_sw->con_resize(vc, width, height, user);
return err;
}
/**
* vc_do_resize - resizing method for the tty
* @tty: tty being resized
* @vc: virtual console private data
* @cols: columns
* @lines: lines
*
* Resize a virtual console, clipping according to the actual constraints.
* If the caller passes a tty structure then update the termios winsize
* information and perform any necessary signal handling.
*
* Caller must hold the console semaphore. Takes the termios rwsem and
* ctrl.lock of the tty IFF a tty is passed.
*/
static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
unsigned int cols, unsigned int lines)
{
unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0;
unsigned long end;
unsigned int old_rows, old_row_size, first_copied_row;
unsigned int new_cols, new_rows, new_row_size, new_screen_size;
unsigned int user;
unsigned short *oldscreen, *newscreen;
u32 **new_uniscr = NULL;
WARN_CONSOLE_UNLOCKED();
if (!vc)
return -ENXIO;
user = vc->vc_resize_user;
vc->vc_resize_user = 0;
if (cols > VC_MAXCOL || lines > VC_MAXROW)
return -EINVAL;
new_cols = (cols ? cols : vc->vc_cols);
new_rows = (lines ? lines : vc->vc_rows);
new_row_size = new_cols << 1;
new_screen_size = new_row_size * new_rows;
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) {
/*
 * This function is called here to cover the case where
 * userspace calls FBIOPUT_VSCREENINFO twice, passing the same
 * fb_var_screeninfo with yres/xres not a multiple of
 * vc_font.height and yres_virtual/xres_virtual smaller than
 * vc_font.height and yres/xres.
 * On the second call, struct fb_var_screeninfo is not modified
 * by the underlying driver because of the if above, which makes
 * fbcon_display->vrows go negative and eventually leads to an
 * out-of-bounds access in the imageblit function.
 * To give the struct the correct values, and to avoid having to
 * deal with possible errors from the code below, we call
 * resize_screen() here as well.
 */
return resize_screen(vc, new_cols, new_rows, user);
}
if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size)
return -EINVAL;
newscreen = kzalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
if (vc->vc_uni_lines) {
new_uniscr = vc_uniscr_alloc(new_cols, new_rows);
if (!new_uniscr) {
kfree(newscreen);
return -ENOMEM;
}
}
if (vc_is_sel(vc))
clear_selection();
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;
err = resize_screen(vc, new_cols, new_rows, user);
if (err) {
kfree(newscreen);
vc_uniscr_free(new_uniscr);
return err;
}
vc->vc_rows = new_rows;
vc->vc_cols = new_cols;
vc->vc_size_row = new_row_size;
vc->vc_screenbuf_size = new_screen_size;
rlth = min(old_row_size, new_row_size);
rrem = new_row_size - rlth;
old_origin = vc->vc_origin;
new_origin = (long) newscreen;
new_scr_end = new_origin + new_screen_size;
if (vc->state.y > new_rows) {
if (old_rows - vc->state.y < new_rows) {
/*
* Cursor near the bottom, copy contents from the
* bottom of buffer
*/
first_copied_row = (old_rows - new_rows);
} else {
/*
* Cursor is in no man's land, copy 1/2 screenful
* from the top and bottom of cursor position
*/
first_copied_row = (vc->state.y - new_rows/2);
}
old_origin += first_copied_row * old_row_size;
} else
first_copied_row = 0;
end = old_origin + old_row_size * min(old_rows, new_rows);
vc_uniscr_copy_area(new_uniscr, new_cols, new_rows,
vc->vc_uni_lines, rlth/2, first_copied_row,
min(old_rows, new_rows));
vc_uniscr_set(vc, new_uniscr);
update_attr(vc);
while (old_origin < end) {
scr_memcpyw((unsigned short *) new_origin,
(unsigned short *) old_origin, rlth);
if (rrem)
scr_memsetw((void *)(new_origin + rlth),
vc->vc_video_erase_char, rrem);
old_origin += old_row_size;
new_origin += new_row_size;
}
if (new_scr_end > new_origin)
scr_memsetw((void *)new_origin, vc->vc_video_erase_char,
new_scr_end - new_origin);
oldscreen = vc->vc_screenbuf;
vc->vc_screenbuf = newscreen;
vc->vc_screenbuf_size = new_screen_size;
set_origin(vc);
kfree(oldscreen);
/* do part of a reset_terminal() */
vc->vc_top = 0;
vc->vc_bottom = vc->vc_rows;
gotoxy(vc, vc->state.x, vc->state.y);
save_cur(vc);
if (tty) {
/* Rewrite the requested winsize data with the actual
resulting sizes */
struct winsize ws;
memset(&ws, 0, sizeof(ws));
ws.ws_row = vc->vc_rows;
ws.ws_col = vc->vc_cols;
ws.ws_ypixel = vc->vc_scan_lines;
tty_do_resize(tty, &ws);
}
if (con_is_visible(vc))
update_screen(vc);
vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
notify_update(vc);
return err;
}
/**
* vc_resize - resize a VT
* @vc: virtual console
* @cols: columns
* @rows: rows
*
* Resize a virtual console as seen from the console end of things. We
* use the common vc_do_resize methods to update the structures. The
* caller must hold the console sem to protect console internals and
* vc->port.tty
*/
int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
{
return vc_do_resize(vc->port.tty, vc, cols, rows);
}
EXPORT_SYMBOL(vc_resize);
/**
* vt_resize - resize a VT
* @tty: tty to resize
* @ws: winsize attributes
*
* Resize a virtual terminal. This is called by the tty layer as we
* register our own handler for resizing. The shared helper
* vc_do_resize() does all the actual work.
*
* Takes the console sem and the called methods then take the tty
* termios_rwsem and the tty ctrl.lock in that order.
*/
static int vt_resize(struct tty_struct *tty, struct winsize *ws)
{
struct vc_data *vc = tty->driver_data;
int ret;
console_lock();
ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row);
console_unlock();
return ret;
}
struct vc_data *vc_deallocate(unsigned int currcons)
{
struct vc_data *vc = NULL;
WARN_CONSOLE_UNLOCKED();
if (vc_cons_allocated(currcons)) {
struct vt_notifier_param param;
param.vc = vc = vc_cons[currcons].d;
atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
vcs_remove_sysfs(currcons);
visual_deinit(vc);
con_free_unimap(vc);
put_pid(vc->vt_pid);
vc_uniscr_set(vc, NULL);
kfree(vc->vc_screenbuf);
vc_cons[currcons].d = NULL;
}
return vc;
}
/*
* VT102 emulator
*/
enum { EPecma = 0, EPdec, EPeq, EPgt, EPlt};
#define set_kbd(vc, x) vt_set_kbd_mode_bit((vc)->vc_num, (x))
#define clr_kbd(vc, x) vt_clr_kbd_mode_bit((vc)->vc_num, (x))
#define is_kbd(vc, x) vt_get_kbd_mode_bit((vc)->vc_num, (x))
#define decarm VC_REPEAT
#define decckm VC_CKMODE
#define kbdapplic VC_APPLIC
#define lnm VC_CRLF
const unsigned char color_table[] = { 0, 4, 2, 6, 1, 5, 3, 7,
8,12,10,14, 9,13,11,15 };
EXPORT_SYMBOL(color_table);
/* the default colour table, for VGA+ colour systems */
unsigned char default_red[] = {
0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa,
0x55, 0xff, 0x55, 0xff, 0x55, 0xff, 0x55, 0xff
};
module_param_array(default_red, byte, NULL, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL(default_red);
unsigned char default_grn[] = {
0x00, 0x00, 0xaa, 0x55, 0x00, 0x00, 0xaa, 0xaa,
0x55, 0x55, 0xff, 0xff, 0x55, 0x55, 0xff, 0xff
};
module_param_array(default_grn, byte, NULL, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL(default_grn);
unsigned char default_blu[] = {
0x00, 0x00, 0x00, 0x00, 0xaa, 0xaa, 0xaa, 0xaa,
0x55, 0x55, 0x55, 0x55, 0xff, 0xff, 0xff, 0xff
};
module_param_array(default_blu, byte, NULL, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL(default_blu);
/*
* gotoxy() must verify all boundaries, because the arguments
* might also be negative. If the given position is out of
* bounds, the cursor is placed at the nearest margin.
*/
static void gotoxy(struct vc_data *vc, int new_x, int new_y)
{
int min_y, max_y;
if (new_x < 0)
vc->state.x = 0;
else {
if (new_x >= vc->vc_cols)
vc->state.x = vc->vc_cols - 1;
else
vc->state.x = new_x;
}
if (vc->vc_decom) {
min_y = vc->vc_top;
max_y = vc->vc_bottom;
} else {
min_y = 0;
max_y = vc->vc_rows;
}
if (new_y < min_y)
vc->state.y = min_y;
else if (new_y >= max_y)
vc->state.y = max_y - 1;
else
vc->state.y = new_y;
vc->vc_pos = vc->vc_origin + vc->state.y * vc->vc_size_row +
(vc->state.x << 1);
vc->vc_need_wrap = 0;
}
/* for absolute user moves, when decom is set */
static void gotoxay(struct vc_data *vc, int new_x, int new_y)
{
gotoxy(vc, new_x, vc->vc_decom ? (vc->vc_top + new_y) : new_y);
}
void scrollback(struct vc_data *vc)
{
scrolldelta(-(vc->vc_rows / 2));
}
void scrollfront(struct vc_data *vc, int lines)
{
if (!lines)
lines = vc->vc_rows / 2;
scrolldelta(lines);
}
static void lf(struct vc_data *vc)
{
/* don't scroll if above bottom of scrolling region, or
* if below scrolling region
*/
if (vc->state.y + 1 == vc->vc_bottom)
con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_UP, 1);
else if (vc->state.y < vc->vc_rows - 1) {
vc->state.y++;
vc->vc_pos += vc->vc_size_row;
}
vc->vc_need_wrap = 0;
notify_write(vc, '\n');
}
static void ri(struct vc_data *vc)
{
/* don't scroll if below top of scrolling region, or
* if above scrolling region
*/
if (vc->state.y == vc->vc_top)
con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_DOWN, 1);
else if (vc->state.y > 0) {
vc->state.y--;
vc->vc_pos -= vc->vc_size_row;
}
vc->vc_need_wrap = 0;
}
static inline void cr(struct vc_data *vc)
{
vc->vc_pos -= vc->state.x << 1;
vc->vc_need_wrap = vc->state.x = 0;
notify_write(vc, '\r');
}
static inline void bs(struct vc_data *vc)
{
if (vc->state.x) {
vc->vc_pos -= 2;
vc->state.x--;
vc->vc_need_wrap = 0;
notify_write(vc, '\b');
}
}
static inline void del(struct vc_data *vc)
{
/* ignored */
}
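/*
 * csi_J() implements ECMA-48 ED (Erase in Display): ESC [ 0 J erases
 * from the cursor to the end of the display, ESC [ 1 J from the start
 * to the cursor, ESC [ 2 J the whole display, and ESC [ 3 J also
 * flushes the scrollback first.
 */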
static void csi_J(struct vc_data *vc, int vpar)
{
unsigned int count;
unsigned short *start;
switch (vpar) {
case 0: /* erase from cursor to end of display */
vc_uniscr_clear_line(vc, vc->state.x,
vc->vc_cols - vc->state.x);
vc_uniscr_clear_lines(vc, vc->state.y + 1,
vc->vc_rows - vc->state.y - 1);
count = (vc->vc_scr_end - vc->vc_pos) >> 1;
start = (unsigned short *)vc->vc_pos;
break;
case 1: /* erase from start to cursor */
vc_uniscr_clear_line(vc, 0, vc->state.x + 1);
vc_uniscr_clear_lines(vc, 0, vc->state.y);
count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
start = (unsigned short *)vc->vc_origin;
break;
case 3: /* include scrollback */
flush_scrollback(vc);
fallthrough;
case 2: /* erase whole display */
vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
count = vc->vc_cols * vc->vc_rows;
start = (unsigned short *)vc->vc_origin;
break;
default:
return;
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
if (con_should_update(vc))
do_update_region(vc, (unsigned long) start, count);
vc->vc_need_wrap = 0;
}
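/*
 * csi_K() is the EL (Erase in Line) counterpart: parameters 0, 1 and 2
 * erase from the cursor to the end of the line, from the start of the
 * line to the cursor, and the whole line respectively.
 */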
static void csi_K(struct vc_data *vc, int vpar)
{
unsigned int count;
unsigned short *start = (unsigned short *)vc->vc_pos;
int offset;
switch (vpar) {
case 0: /* erase from cursor to end of line */
offset = 0;
count = vc->vc_cols - vc->state.x;
break;
case 1: /* erase from start of line to cursor */
offset = -vc->state.x;
count = vc->state.x + 1;
break;
case 2: /* erase whole line */
offset = -vc->state.x;
count = vc->vc_cols;
break;
default:
return;
}
vc_uniscr_clear_line(vc, vc->state.x + offset, count);
scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
vc->vc_need_wrap = 0;
if (con_should_update(vc))
do_update_region(vc, (unsigned long)(start + offset), count);
}
/* erase the following vpar positions */
static void csi_X(struct vc_data *vc, unsigned int vpar)
{ /* not vt100? */
unsigned int count;
if (!vpar)
vpar++;
count = min(vpar, vc->vc_cols - vc->state.x);
vc_uniscr_clear_line(vc, vc->state.x, count);
scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count);
if (con_should_update(vc))
vc->vc_sw->con_clear(vc, vc->state.y, vc->state.x, 1, count);
vc->vc_need_wrap = 0;
}
static void default_attr(struct vc_data *vc)
{
vc->state.intensity = VCI_NORMAL;
vc->state.italic = false;
vc->state.underline = false;
vc->state.reverse = false;
vc->state.blink = false;
vc->state.color = vc->vc_def_color;
}
struct rgb { u8 r; u8 g; u8 b; };
static void rgb_from_256(int i, struct rgb *c)
{
if (i < 8) { /* Standard colours. */
c->r = i&1 ? 0xaa : 0x00;
c->g = i&2 ? 0xaa : 0x00;
c->b = i&4 ? 0xaa : 0x00;
} else if (i < 16) {
c->r = i&1 ? 0xff : 0x55;
c->g = i&2 ? 0xff : 0x55;
c->b = i&4 ? 0xff : 0x55;
} else if (i < 232) { /* 6x6x6 colour cube. */
c->r = (i - 16) / 36 * 85 / 2;
c->g = (i - 16) / 6 % 6 * 85 / 2;
c->b = (i - 16) % 6 * 85 / 2;
} else /* Grayscale ramp. */
c->r = c->g = c->b = i * 10 - 2312;
}
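/*
 * Examples: index 196 sits in the 6x6x6 cube at (5, 0, 0) and yields
 * (212, 0, 0); index 244 lies on the grayscale ramp and yields
 * (128, 128, 128). Cube coordinates 0..5 are scaled by 85/2, so the
 * brightest cube level is 212 rather than 255.
 */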
static void rgb_foreground(struct vc_data *vc, const struct rgb *c)
{
u8 hue = 0, max = max3(c->r, c->g, c->b);
if (c->r > max / 2)
hue |= 4;
if (c->g > max / 2)
hue |= 2;
if (c->b > max / 2)
hue |= 1;
if (hue == 7 && max <= 0x55) {
hue = 0;
vc->state.intensity = VCI_BOLD;
} else if (max > 0xaa)
vc->state.intensity = VCI_BOLD;
else
vc->state.intensity = VCI_NORMAL;
vc->state.color = (vc->state.color & 0xf0) | hue;
}
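/*
 * Example: (0xff, 0x55, 0x55) has max = 0xff, so only the red channel
 * exceeds max / 2; hue becomes 4 (red) and max > 0xaa selects VCI_BOLD,
 * i.e. bright red. The hue == 7 && max <= 0x55 special case renders
 * dark white as bold black (dark grey) instead.
 */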
static void rgb_background(struct vc_data *vc, const struct rgb *c)
{
/* For backgrounds, err on the dark side. */
vc->state.color = (vc->state.color & 0x0f)
| (c->r&0x80) >> 1 | (c->g&0x80) >> 2 | (c->b&0x80) >> 3;
}
/*
* ITU T.416 Higher colour modes. They break the usual properties of SGR codes
* and thus need to be detected and ignored by hand. That standard also
* wants : rather than ; as separators but sequences containing : are currently
* completely ignored by the parser.
*
* Subcommands 3 (CMY) and 4 (CMYK) are so insane there's no point in
* supporting them.
*/
static int vc_t416_color(struct vc_data *vc, int i,
void(*set_color)(struct vc_data *vc, const struct rgb *c))
{
struct rgb c;
i++;
if (i > vc->vc_npar)
return i;
if (vc->vc_par[i] == 5 && i + 1 <= vc->vc_npar) {
/* 256 colours */
i++;
rgb_from_256(vc->vc_par[i], &c);
} else if (vc->vc_par[i] == 2 && i + 3 <= vc->vc_npar) {
/* 24 bit */
c.r = vc->vc_par[i + 1];
c.g = vc->vc_par[i + 2];
c.b = vc->vc_par[i + 3];
i += 3;
} else
return i;
set_color(vc, &c);
return i;
}
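/*
 * Example parameter lists as seen by csi_m(): "38;5;196" selects
 * foreground colour 196 from the 256-colour palette, "48;2;0;0;128"
 * selects a 24-bit navy background; both are then squeezed into the
 * 16-colour attribute via rgb_foreground()/rgb_background().
 */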
/* console_lock is held */
static void csi_m(struct vc_data *vc)
{
int i;
for (i = 0; i <= vc->vc_npar; i++)
switch (vc->vc_par[i]) {
case 0: /* all attributes off */
default_attr(vc);
break;
case 1:
vc->state.intensity = VCI_BOLD;
break;
case 2:
vc->state.intensity = VCI_HALF_BRIGHT;
break;
case 3:
vc->state.italic = true;
break;
case 21:
/*
* No console drivers support double underline, so
* convert it to a single underline.
*/
case 4:
vc->state.underline = true;
break;
case 5:
vc->state.blink = true;
break;
case 7:
vc->state.reverse = true;
break;
case 10: /* ANSI X3.64-1979 (SCO-ish?)
* Select primary font, don't display control chars if
* defined, don't set bit 8 on output.
*/
vc->vc_translate = set_translate(vc->state.Gx_charset[vc->state.charset], vc);
vc->vc_disp_ctrl = 0;
vc->vc_toggle_meta = 0;
break;
case 11: /* ANSI X3.64-1979 (SCO-ish?)
* Select first alternate font, lets chars < 32 be
* displayed as ROM chars.
*/
vc->vc_translate = set_translate(IBMPC_MAP, vc);
vc->vc_disp_ctrl = 1;
vc->vc_toggle_meta = 0;
break;
case 12: /* ANSI X3.64-1979 (SCO-ish?)
* Select second alternate font, toggle high bit
* before displaying as ROM char.
*/
vc->vc_translate = set_translate(IBMPC_MAP, vc);
vc->vc_disp_ctrl = 1;
vc->vc_toggle_meta = 1;
break;
case 22:
vc->state.intensity = VCI_NORMAL;
break;
case 23:
vc->state.italic = false;
break;
case 24:
vc->state.underline = false;
break;
case 25:
vc->state.blink = false;
break;
case 27:
vc->state.reverse = false;
break;
case 38:
i = vc_t416_color(vc, i, rgb_foreground);
break;
case 48:
i = vc_t416_color(vc, i, rgb_background);
break;
case 39:
vc->state.color = (vc->vc_def_color & 0x0f) |
(vc->state.color & 0xf0);
break;
case 49:
vc->state.color = (vc->vc_def_color & 0xf0) |
(vc->state.color & 0x0f);
break;
default:
if (vc->vc_par[i] >= 90 && vc->vc_par[i] <= 107) {
if (vc->vc_par[i] < 100)
vc->state.intensity = VCI_BOLD;
vc->vc_par[i] -= 60;
}
if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
vc->state.color = color_table[vc->vc_par[i] - 30]
| (vc->state.color & 0xf0);
else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
vc->state.color = (color_table[vc->vc_par[i] - 40] << 4)
| (vc->state.color & 0x0f);
break;
}
update_attr(vc);
}
static void respond_string(const char *p, size_t len, struct tty_port *port)
{
tty_insert_flip_string(port, p, len);
tty_flip_buffer_push(port);
}
static void cursor_report(struct vc_data *vc, struct tty_struct *tty)
{
char buf[40];
int len;
len = sprintf(buf, "\033[%d;%dR", vc->state.y +
(vc->vc_decom ? vc->vc_top + 1 : 1),
vc->state.x + 1);
respond_string(buf, len, tty->port);
}
static inline void status_report(struct tty_struct *tty)
{
static const char terminal_ok[] = "\033[0n";
respond_string(terminal_ok, strlen(terminal_ok), tty->port);
}
static inline void respond_ID(struct tty_struct *tty)
{
/* terminal answer to an ESC-Z or csi0c query. */
static const char vt102_id[] = "\033[?6c";
respond_string(vt102_id, strlen(vt102_id), tty->port);
}
void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry)
{
char buf[8];
int len;
len = sprintf(buf, "\033[M%c%c%c", (char)(' ' + butt),
(char)('!' + mrx), (char)('!' + mry));
respond_string(buf, len, tty->port);
}
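/*
 * The xterm X10 encoding sent above is "ESC [ M b x y" with each value
 * offset into the printable range: e.g. a press of button 0 at mrx = 4,
 * mry = 2 (as passed to this function) goes out as ESC [ M SP % #.
 */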
/* invoked via ioctl(TIOCLINUX) and through set_selection_user */
int mouse_reporting(void)
{
return vc_cons[fg_console].d->vc_report_mouse;
}
/* console_lock is held */
static void set_mode(struct vc_data *vc, int on_off)
{
int i;
for (i = 0; i <= vc->vc_npar; i++)
if (vc->vc_priv == EPdec) {
switch (vc->vc_par[i]) { /* DEC private modes set/reset */
case 1: /* Cursor keys send ^[Ox/^[[x */
if (on_off)
set_kbd(vc, decckm);
else
clr_kbd(vc, decckm);
break;
case 3: /* 80/132 mode switch unimplemented */
#if 0
vc_resize(deccolm ? 132 : 80, vc->vc_rows);
/* this alone does not suffice; some user mode
utility has to change the hardware regs */
#endif
break;
case 5: /* Inverted screen on/off */
if (vc->vc_decscnm != on_off) {
vc->vc_decscnm = on_off;
invert_screen(vc, 0,
vc->vc_screenbuf_size,
false);
update_attr(vc);
}
break;
case 6: /* Origin relative/absolute */
vc->vc_decom = on_off;
gotoxay(vc, 0, 0);
break;
case 7: /* Autowrap on/off */
vc->vc_decawm = on_off;
break;
case 8: /* Autorepeat on/off */
if (on_off)
set_kbd(vc, decarm);
else
clr_kbd(vc, decarm);
break;
case 9:
vc->vc_report_mouse = on_off ? 1 : 0;
break;
case 25: /* Cursor on/off */
vc->vc_deccm = on_off;
break;
case 1000:
vc->vc_report_mouse = on_off ? 2 : 0;
break;
}
} else {
switch (vc->vc_par[i]) { /* ANSI modes set/reset */
case 3: /* Monitor (display ctrls) */
vc->vc_disp_ctrl = on_off;
break;
case 4: /* Insert Mode on/off */
vc->vc_decim = on_off;
break;
case 20: /* Lf, Enter == CrLf/Lf */
if (on_off)
set_kbd(vc, lnm);
else
clr_kbd(vc, lnm);
break;
}
}
}
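/*
 * Typical sequences decoded here: ESC [ ? 25 l and ESC [ ? 25 h hide
 * and show the cursor (DECTCEM), ESC [ ? 7 l disables autowrap (DECAWM)
 * and ESC [ ? 1000 h enables X11-style mouse reporting.
 */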
/* console_lock is held */
static void setterm_command(struct vc_data *vc)
{
switch (vc->vc_par[0]) {
case 1: /* set color for underline mode */
if (vc->vc_can_do_color && vc->vc_par[1] < 16) {
vc->vc_ulcolor = color_table[vc->vc_par[1]];
if (vc->state.underline)
update_attr(vc);
}
break;
case 2: /* set color for half intensity mode */
if (vc->vc_can_do_color && vc->vc_par[1] < 16) {
vc->vc_halfcolor = color_table[vc->vc_par[1]];
if (vc->state.intensity == VCI_HALF_BRIGHT)
update_attr(vc);
}
break;
case 8: /* store colors as defaults */
vc->vc_def_color = vc->vc_attr;
if (vc->vc_hi_font_mask == 0x100)
vc->vc_def_color >>= 1;
default_attr(vc);
update_attr(vc);
break;
case 9: /* set blanking interval */
blankinterval = min(vc->vc_par[1], 60U) * 60;
poke_blanked_console();
break;
case 10: /* set bell frequency in Hz */
if (vc->vc_npar >= 1)
vc->vc_bell_pitch = vc->vc_par[1];
else
vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
break;
case 11: /* set bell duration in msec */
if (vc->vc_npar >= 1)
vc->vc_bell_duration = (vc->vc_par[1] < 2000) ?
msecs_to_jiffies(vc->vc_par[1]) : 0;
else
vc->vc_bell_duration = DEFAULT_BELL_DURATION;
break;
case 12: /* bring specified console to the front */
if (vc->vc_par[1] >= 1 && vc_cons_allocated(vc->vc_par[1] - 1))
set_console(vc->vc_par[1] - 1);
break;
case 13: /* unblank the screen */
poke_blanked_console();
break;
case 14: /* set vesa powerdown interval */
vesa_off_interval = min(vc->vc_par[1], 60U) * 60 * HZ;
break;
case 15: /* activate the previous console */
set_console(last_console);
break;
case 16: /* set cursor blink duration in msec */
if (vc->vc_npar >= 1 && vc->vc_par[1] >= 50 &&
vc->vc_par[1] <= USHRT_MAX)
vc->vc_cur_blink_ms = vc->vc_par[1];
else
vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
break;
}
}
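/*
 * Example: ESC [ 9 ; 10 ] sets the blanking interval to 10 minutes and
 * ESC [ 12 ; 2 ] brings console 2 to the foreground; these private
 * setterm sequences arrive via the CSI ']' branch of do_con_trol().
 */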
/* console_lock is held */
static void csi_at(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_cols - vc->state.x)
nr = vc->vc_cols - vc->state.x;
else if (!nr)
nr = 1;
insert_char(vc, nr);
}
/* console_lock is held */
static void csi_L(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_rows - vc->state.y)
nr = vc->vc_rows - vc->state.y;
else if (!nr)
nr = 1;
con_scroll(vc, vc->state.y, vc->vc_bottom, SM_DOWN, nr);
vc->vc_need_wrap = 0;
}
/* console_lock is held */
static void csi_P(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_cols - vc->state.x)
nr = vc->vc_cols - vc->state.x;
else if (!nr)
nr = 1;
delete_char(vc, nr);
}
/* console_lock is held */
static void csi_M(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_rows - vc->state.y)
nr = vc->vc_rows - vc->state.y;
else if (!nr)
nr = 1;
con_scroll(vc, vc->state.y, vc->vc_bottom, SM_UP, nr);
vc->vc_need_wrap = 0;
}
/* console_lock is held (except via vc_init->reset_terminal */
static void save_cur(struct vc_data *vc)
{
memcpy(&vc->saved_state, &vc->state, sizeof(vc->state));
}
/* console_lock is held */
static void restore_cur(struct vc_data *vc)
{
memcpy(&vc->state, &vc->saved_state, sizeof(vc->state));
gotoxy(vc, vc->state.x, vc->state.y);
vc->vc_translate = set_translate(vc->state.Gx_charset[vc->state.charset],
vc);
update_attr(vc);
vc->vc_need_wrap = 0;
}
enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey,
EShash, ESsetG0, ESsetG1, ESpercent, EScsiignore, ESnonstd,
ESpalette, ESosc, ESapc, ESpm, ESdcs };
/* console_lock is held (except via vc_init()) */
static void reset_terminal(struct vc_data *vc, int do_clear)
{
unsigned int i;
vc->vc_top = 0;
vc->vc_bottom = vc->vc_rows;
vc->vc_state = ESnormal;
vc->vc_priv = EPecma;
vc->vc_translate = set_translate(LAT1_MAP, vc);
vc->state.Gx_charset[0] = LAT1_MAP;
vc->state.Gx_charset[1] = GRAF_MAP;
vc->state.charset = 0;
vc->vc_need_wrap = 0;
vc->vc_report_mouse = 0;
vc->vc_utf = default_utf8;
vc->vc_utf_count = 0;
vc->vc_disp_ctrl = 0;
vc->vc_toggle_meta = 0;
vc->vc_decscnm = 0;
vc->vc_decom = 0;
vc->vc_decawm = 1;
vc->vc_deccm = global_cursor_default;
vc->vc_decim = 0;
vt_reset_keyboard(vc->vc_num);
vc->vc_cursor_type = cur_default;
vc->vc_complement_mask = vc->vc_s_complement_mask;
default_attr(vc);
update_attr(vc);
bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT);
for (i = 0; i < VC_TABSTOPS_COUNT; i += 8)
set_bit(i, vc->vc_tab_stop);
vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
vc->vc_bell_duration = DEFAULT_BELL_DURATION;
vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
gotoxy(vc, 0, 0);
save_cur(vc);
if (do_clear)
csi_J(vc, 2);
}
static void vc_setGx(struct vc_data *vc, unsigned int which, int c)
{
unsigned char *charset = &vc->state.Gx_charset[which];
switch (c) {
case '0':
*charset = GRAF_MAP;
break;
case 'B':
*charset = LAT1_MAP;
break;
case 'U':
*charset = IBMPC_MAP;
break;
case 'K':
*charset = USER_MAP;
break;
}
if (vc->state.charset == which)
vc->vc_translate = set_translate(*charset, vc);
}
/* is this state an ANSI control string? */
static bool ansi_control_string(unsigned int state)
{
if (state == ESosc || state == ESapc || state == ESpm || state == ESdcs)
return true;
return false;
}
/* console_lock is held */
static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
{
/*
* Control characters can be used in the _middle_
* of an escape sequence, aside from ANSI control strings.
*/
if (ansi_control_string(vc->vc_state) && c >= 8 && c <= 13)
return;
switch (c) {
case 0:
return;
case 7:
if (ansi_control_string(vc->vc_state))
vc->vc_state = ESnormal;
else if (vc->vc_bell_duration)
kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
return;
case 8:
bs(vc);
return;
case 9:
vc->vc_pos -= (vc->state.x << 1);
vc->state.x = find_next_bit(vc->vc_tab_stop,
min(vc->vc_cols - 1, VC_TABSTOPS_COUNT),
vc->state.x + 1);
if (vc->state.x >= VC_TABSTOPS_COUNT)
vc->state.x = vc->vc_cols - 1;
vc->vc_pos += (vc->state.x << 1);
notify_write(vc, '\t');
return;
case 10: case 11: case 12:
lf(vc);
if (!is_kbd(vc, lnm))
return;
fallthrough;
case 13:
cr(vc);
return;
case 14:
vc->state.charset = 1;
vc->vc_translate = set_translate(vc->state.Gx_charset[1], vc);
vc->vc_disp_ctrl = 1;
return;
case 15:
vc->state.charset = 0;
vc->vc_translate = set_translate(vc->state.Gx_charset[0], vc);
vc->vc_disp_ctrl = 0;
return;
case 24: case 26:
vc->vc_state = ESnormal;
return;
case 27:
vc->vc_state = ESesc;
return;
case 127:
del(vc);
return;
case 128+27:
vc->vc_state = ESsquare;
return;
}
switch (vc->vc_state) {
case ESesc:
vc->vc_state = ESnormal;
switch (c) {
case '[':
vc->vc_state = ESsquare;
return;
case ']':
vc->vc_state = ESnonstd;
return;
case '_':
vc->vc_state = ESapc;
return;
case '^':
vc->vc_state = ESpm;
return;
case '%':
vc->vc_state = ESpercent;
return;
case 'E':
cr(vc);
lf(vc);
return;
case 'M':
ri(vc);
return;
case 'D':
lf(vc);
return;
case 'H':
if (vc->state.x < VC_TABSTOPS_COUNT)
set_bit(vc->state.x, vc->vc_tab_stop);
return;
case 'P':
vc->vc_state = ESdcs;
return;
case 'Z':
respond_ID(tty);
return;
case '7':
save_cur(vc);
return;
case '8':
restore_cur(vc);
return;
case '(':
vc->vc_state = ESsetG0;
return;
case ')':
vc->vc_state = ESsetG1;
return;
case '#':
vc->vc_state = EShash;
return;
case 'c':
reset_terminal(vc, 1);
return;
case '>': /* Numeric keypad */
clr_kbd(vc, kbdapplic);
return;
case '=': /* Appl. keypad */
set_kbd(vc, kbdapplic);
return;
}
return;
case ESnonstd:
if (c == 'P') { /* palette escape sequence */
for (vc->vc_npar = 0; vc->vc_npar < NPAR; vc->vc_npar++)
vc->vc_par[vc->vc_npar] = 0;
vc->vc_npar = 0;
vc->vc_state = ESpalette;
return;
} else if (c == 'R') { /* reset palette */
reset_palette(vc);
vc->vc_state = ESnormal;
} else if (c >= '0' && c <= '9')
vc->vc_state = ESosc;
else
vc->vc_state = ESnormal;
return;
case ESpalette:
if (isxdigit(c)) {
vc->vc_par[vc->vc_npar++] = hex_to_bin(c);
if (vc->vc_npar == 7) {
int i = vc->vc_par[0] * 3, j = 1;
vc->vc_palette[i] = 16 * vc->vc_par[j++];
vc->vc_palette[i++] += vc->vc_par[j++];
vc->vc_palette[i] = 16 * vc->vc_par[j++];
vc->vc_palette[i++] += vc->vc_par[j++];
vc->vc_palette[i] = 16 * vc->vc_par[j++];
vc->vc_palette[i] += vc->vc_par[j];
set_palette(vc);
vc->vc_state = ESnormal;
}
} else
vc->vc_state = ESnormal;
return;
case ESsquare:
for (vc->vc_npar = 0; vc->vc_npar < NPAR; vc->vc_npar++)
vc->vc_par[vc->vc_npar] = 0;
vc->vc_npar = 0;
vc->vc_state = ESgetpars;
if (c == '[') { /* Function key */
vc->vc_state = ESfunckey;
return;
}
switch (c) {
case '?':
vc->vc_priv = EPdec;
return;
case '>':
vc->vc_priv = EPgt;
return;
case '=':
vc->vc_priv = EPeq;
return;
case '<':
vc->vc_priv = EPlt;
return;
}
vc->vc_priv = EPecma;
fallthrough;
case ESgetpars:
if (c == ';' && vc->vc_npar < NPAR - 1) {
vc->vc_npar++;
return;
} else if (c >= '0' && c <= '9') {
vc->vc_par[vc->vc_npar] *= 10;
vc->vc_par[vc->vc_npar] += c - '0';
return;
}
if (c >= 0x20 && c <= 0x3f) { /* 0x2x, 0x3a and 0x3c - 0x3f */
vc->vc_state = EScsiignore;
return;
}
vc->vc_state = ESnormal;
switch (c) {
case 'h':
if (vc->vc_priv <= EPdec)
set_mode(vc, 1);
return;
case 'l':
if (vc->vc_priv <= EPdec)
set_mode(vc, 0);
return;
case 'c':
if (vc->vc_priv == EPdec) {
if (vc->vc_par[0])
vc->vc_cursor_type =
CUR_MAKE(vc->vc_par[0],
vc->vc_par[1],
vc->vc_par[2]);
else
vc->vc_cursor_type = cur_default;
return;
}
break;
case 'm':
if (vc->vc_priv == EPdec) {
clear_selection();
if (vc->vc_par[0])
vc->vc_complement_mask = vc->vc_par[0] << 8 | vc->vc_par[1];
else
vc->vc_complement_mask = vc->vc_s_complement_mask;
return;
}
break;
case 'n':
if (vc->vc_priv == EPecma) {
if (vc->vc_par[0] == 5)
status_report(tty);
else if (vc->vc_par[0] == 6)
cursor_report(vc, tty);
}
return;
}
if (vc->vc_priv != EPecma) {
vc->vc_priv = EPecma;
return;
}
switch (c) {
case 'G': case '`':
if (vc->vc_par[0])
vc->vc_par[0]--;
gotoxy(vc, vc->vc_par[0], vc->state.y);
return;
case 'A':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, vc->state.x, vc->state.y - vc->vc_par[0]);
return;
case 'B': case 'e':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, vc->state.x, vc->state.y + vc->vc_par[0]);
return;
case 'C': case 'a':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, vc->state.x + vc->vc_par[0], vc->state.y);
return;
case 'D':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, vc->state.x - vc->vc_par[0], vc->state.y);
return;
case 'E':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, 0, vc->state.y + vc->vc_par[0]);
return;
case 'F':
if (!vc->vc_par[0])
vc->vc_par[0]++;
gotoxy(vc, 0, vc->state.y - vc->vc_par[0]);
return;
case 'd':
if (vc->vc_par[0])
vc->vc_par[0]--;
gotoxay(vc, vc->state.x, vc->vc_par[0]);
return;
case 'H': case 'f':
if (vc->vc_par[0])
vc->vc_par[0]--;
if (vc->vc_par[1])
vc->vc_par[1]--;
gotoxay(vc, vc->vc_par[1], vc->vc_par[0]);
return;
case 'J':
csi_J(vc, vc->vc_par[0]);
return;
case 'K':
csi_K(vc, vc->vc_par[0]);
return;
case 'L':
csi_L(vc, vc->vc_par[0]);
return;
case 'M':
csi_M(vc, vc->vc_par[0]);
return;
case 'P':
csi_P(vc, vc->vc_par[0]);
return;
case 'c':
if (!vc->vc_par[0])
respond_ID(tty);
return;
case 'g':
if (!vc->vc_par[0] && vc->state.x < VC_TABSTOPS_COUNT)
set_bit(vc->state.x, vc->vc_tab_stop);
else if (vc->vc_par[0] == 3)
bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT);
return;
case 'm':
csi_m(vc);
return;
case 'q': /* DECLL - but only 3 leds */
/* map 0,1,2,3 to 0,1,2,4 */
if (vc->vc_par[0] < 4)
vt_set_led_state(vc->vc_num,
(vc->vc_par[0] < 3) ? vc->vc_par[0] : 4);
return;
case 'r':
if (!vc->vc_par[0])
vc->vc_par[0]++;
if (!vc->vc_par[1])
vc->vc_par[1] = vc->vc_rows;
/* Minimum allowed region is 2 lines */
if (vc->vc_par[0] < vc->vc_par[1] &&
vc->vc_par[1] <= vc->vc_rows) {
vc->vc_top = vc->vc_par[0] - 1;
vc->vc_bottom = vc->vc_par[1];
gotoxay(vc, 0, 0);
}
return;
case 's':
save_cur(vc);
return;
case 'u':
restore_cur(vc);
return;
case 'X':
csi_X(vc, vc->vc_par[0]);
return;
case '@':
csi_at(vc, vc->vc_par[0]);
return;
case ']': /* setterm functions */
setterm_command(vc);
return;
}
return;
case EScsiignore:
if (c >= 0x20 && c <= 0x3f)
return;
vc->vc_state = ESnormal;
return;
case ESpercent:
vc->vc_state = ESnormal;
switch (c) {
case '@': /* defined in ISO 2022 */
vc->vc_utf = 0;
return;
case 'G': /* prelim official escape code */
case '8': /* retained for compatibility */
vc->vc_utf = 1;
return;
}
return;
case ESfunckey:
vc->vc_state = ESnormal;
return;
case EShash:
vc->vc_state = ESnormal;
if (c == '8') {
/* DEC screen alignment test. kludge :-) */
vc->vc_video_erase_char =
(vc->vc_video_erase_char & 0xff00) | 'E';
csi_J(vc, 2);
vc->vc_video_erase_char =
(vc->vc_video_erase_char & 0xff00) | ' ';
do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2);
}
return;
case ESsetG0:
vc_setGx(vc, 0, c);
vc->vc_state = ESnormal;
return;
case ESsetG1:
vc_setGx(vc, 1, c);
vc->vc_state = ESnormal;
return;
case ESapc:
return;
case ESosc:
return;
case ESpm:
return;
case ESdcs:
return;
default:
vc->vc_state = ESnormal;
}
}
/* is_double_width() is based on the wcwidth() implementation by
* Markus Kuhn -- 2007-05-26 (Unicode 5.0)
* Latest version: https://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
*/
struct interval {
uint32_t first;
uint32_t last;
};
static int ucs_cmp(const void *key, const void *elt)
{
uint32_t ucs = *(uint32_t *)key;
struct interval e = *(struct interval *) elt;
if (ucs > e.last)
return 1;
else if (ucs < e.first)
return -1;
return 0;
}
static int is_double_width(uint32_t ucs)
{
static const struct interval double_width[] = {
{ 0x1100, 0x115F }, { 0x2329, 0x232A }, { 0x2E80, 0x303E },
{ 0x3040, 0xA4CF }, { 0xAC00, 0xD7A3 }, { 0xF900, 0xFAFF },
{ 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 },
{ 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD }
};
if (ucs < double_width[0].first ||
ucs > double_width[ARRAY_SIZE(double_width) - 1].last)
return 0;
return bsearch(&ucs, double_width, ARRAY_SIZE(double_width),
sizeof(struct interval), ucs_cmp) != NULL;
}
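/*
 * Example: U+4E2D (a CJK ideograph) falls in the 0x3040..0xA4CF range
 * and is double width, so vc_con_write_normal() emits it followed by a
 * padding space; U+00E9 lies below the first interval and stays single
 * width.
 */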
struct vc_draw_region {
unsigned long from, to;
int x;
};
static void con_flush(struct vc_data *vc, struct vc_draw_region *draw)
{
if (draw->x < 0)
return;
vc->vc_sw->con_putcs(vc, (u16 *)draw->from,
(u16 *)draw->to - (u16 *)draw->from, vc->state.y,
draw->x);
draw->x = -1;
}
static inline int vc_translate_ascii(const struct vc_data *vc, int c)
{
if (IS_ENABLED(CONFIG_CONSOLE_TRANSLATIONS)) {
if (vc->vc_toggle_meta)
c |= 0x80;
return vc->vc_translate[c];
}
return c;
}
/**
* vc_sanitize_unicode -- Replace invalid Unicode code points with U+FFFD
* @c: the received code point
*
* Returns @c unchanged, or U+FFFD when @c is a UTF-16 surrogate or one
* of the noncharacters U+FFFE/U+FFFF.
*/
static inline int vc_sanitize_unicode(const int c)
{
if ((c >= 0xd800 && c <= 0xdfff) || c == 0xfffe || c == 0xffff)
return 0xfffd;
return c;
}
/**
* vc_translate_unicode -- Combine UTF-8 into Unicode in @vc_utf_char
* @vc: virtual console
* @c: character to translate
* @rescan: set to true when @c starts a new sequence while a previous
* multibyte sequence is still incomplete, so the caller must process @c again
*
* @vc_utf_char is the Unicode character being assembled so far.
* @vc_utf_count is the number of continuation bytes still expected to arrive.
* @vc_npar is the number of continuation bytes arrived so far.
*/
static int vc_translate_unicode(struct vc_data *vc, int c, bool *rescan)
{
static const u32 utf8_length_changes[] = {
0x0000007f, 0x000007ff, 0x0000ffff,
0x001fffff, 0x03ffffff, 0x7fffffff
};
/* Continuation byte received */
if ((c & 0xc0) == 0x80) {
/* Unexpected continuation byte? */
if (!vc->vc_utf_count)
return 0xfffd;
vc->vc_utf_char = (vc->vc_utf_char << 6) | (c & 0x3f);
vc->vc_npar++;
if (--vc->vc_utf_count)
goto need_more_bytes;
/* Got a whole character */
c = vc->vc_utf_char;
/* Reject overlong sequences */
if (c <= utf8_length_changes[vc->vc_npar - 1] ||
c > utf8_length_changes[vc->vc_npar])
return 0xfffd;
return vc_sanitize_unicode(c);
}
/* Single ASCII byte or first byte of a sequence received */
if (vc->vc_utf_count) {
/* Continuation byte expected */
*rescan = true;
vc->vc_utf_count = 0;
return 0xfffd;
}
/* Nothing to do if an ASCII byte was received */
if (c <= 0x7f)
return c;
/* First byte of a multibyte sequence received */
vc->vc_npar = 0;
if ((c & 0xe0) == 0xc0) {
vc->vc_utf_count = 1;
vc->vc_utf_char = (c & 0x1f);
} else if ((c & 0xf0) == 0xe0) {
vc->vc_utf_count = 2;
vc->vc_utf_char = (c & 0x0f);
} else if ((c & 0xf8) == 0xf0) {
vc->vc_utf_count = 3;
vc->vc_utf_char = (c & 0x07);
} else if ((c & 0xfc) == 0xf8) {
vc->vc_utf_count = 4;
vc->vc_utf_char = (c & 0x03);
} else if ((c & 0xfe) == 0xfc) {
vc->vc_utf_count = 5;
vc->vc_utf_char = (c & 0x01);
} else {
/* 254 and 255 are invalid */
return 0xfffd;
}
need_more_bytes:
return -1;
}
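/*
 * Example: the euro sign U+20AC arrives as 0xe2 0x82 0xac. The first
 * byte sets vc_utf_count = 2 and vc_utf_char = 0x02 and yields -1 (need
 * more); 0x82 shifts in six bits giving 0x082; 0xac completes 0x20ac,
 * which passes the overlong check because it exceeds
 * utf8_length_changes[1] = 0x7ff.
 */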
static int vc_translate(struct vc_data *vc, int *c, bool *rescan)
{
/* Do no translation at all in control states */
if (vc->vc_state != ESnormal)
return *c;
if (vc->vc_utf && !vc->vc_disp_ctrl)
return *c = vc_translate_unicode(vc, *c, rescan);
/* no utf or alternate charset mode */
return vc_translate_ascii(vc, *c);
}
static inline unsigned char vc_invert_attr(const struct vc_data *vc)
{
if (!vc->vc_can_do_color)
return vc->vc_attr ^ 0x08;
if (vc->vc_hi_font_mask == 0x100)
return (vc->vc_attr & 0x11) |
((vc->vc_attr & 0xe0) >> 4) |
((vc->vc_attr & 0x0e) << 4);
return (vc->vc_attr & 0x88) |
((vc->vc_attr & 0x70) >> 4) |
((vc->vc_attr & 0x07) << 4);
}
static bool vc_is_control(struct vc_data *vc, int tc, int c)
{
/*
* A bitmap for codes <32. A bit of 1 indicates that the code
* corresponding to that bit number invokes some special action (such
* as cursor movement) and should not be displayed as a glyph unless
* the disp_ctrl mode is explicitly enabled.
*/
static const u32 CTRL_ACTION = 0x0d00ff81;
/* Cannot be overridden by disp_ctrl */
static const u32 CTRL_ALWAYS = 0x0800f501;
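/*
 * Decoded, CTRL_ACTION covers NUL, BEL, BS, HT, LF, VT, FF, CR, SO, SI,
 * CAN, SUB and ESC (exactly the codes handled by do_con_trol()), while
 * CTRL_ALWAYS keeps NUL, BS, LF, FF, CR, SO, SI and ESC reserved even in
 * disp_ctrl mode.
 */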
if (vc->vc_state != ESnormal)
return true;
if (!tc)
return true;
/*
* If the original code was a control character we only allow a glyph
* to be displayed if the code is not normally used (such as for cursor
* movement) or if the disp_ctrl mode has been explicitly enabled.
* Certain characters (as given by the CTRL_ALWAYS bitmap) are always
* displayed as control characters, as the console would be pretty
* useless without them; to display an arbitrary font position use the
* direct-to-font zone in UTF-8 mode.
*/
if (c < 32) {
if (vc->vc_disp_ctrl)
return CTRL_ALWAYS & BIT(c);
else
return vc->vc_utf || (CTRL_ACTION & BIT(c));
}
if (c == 127 && !vc->vc_disp_ctrl)
return true;
if (c == 128 + 27)
return true;
return false;
}
static int vc_con_write_normal(struct vc_data *vc, int tc, int c,
struct vc_draw_region *draw)
{
int next_c;
unsigned char vc_attr = vc->vc_attr;
u16 himask = vc->vc_hi_font_mask, charmask = himask ? 0x1ff : 0xff;
u8 width = 1;
bool inverse = false;
if (vc->vc_utf && !vc->vc_disp_ctrl) {
if (is_double_width(c))
width = 2;
}
/* Now try to find out how to display it */
tc = conv_uni_to_pc(vc, tc);
if (tc & ~charmask) {
if (tc == -1 || tc == -2)
return -1; /* nothing to display */
/* Glyph not found */
if ((!vc->vc_utf || vc->vc_disp_ctrl || c < 128) &&
!(c & ~charmask)) {
/*
* In legacy mode use the glyph we get by a 1:1
* mapping.
* This would make absolutely no sense with Unicode in
* mind, but do this for ASCII characters since a font
* may lack Unicode mapping info and we don't want to
* end up with having question marks only.
*/
tc = c;
} else {
/*
* Display U+FFFD. If it's not found, display an inverse
* question mark.
*/
tc = conv_uni_to_pc(vc, 0xfffd);
if (tc < 0) {
inverse = true;
tc = conv_uni_to_pc(vc, '?');
if (tc < 0)
tc = '?';
vc_attr = vc_invert_attr(vc);
con_flush(vc, draw);
}
}
}
next_c = c;
while (1) {
if (vc->vc_need_wrap || vc->vc_decim)
con_flush(vc, draw);
if (vc->vc_need_wrap) {
cr(vc);
lf(vc);
}
if (vc->vc_decim)
insert_char(vc, 1);
vc_uniscr_putc(vc, next_c);
if (himask)
tc = ((tc & 0x100) ? himask : 0) |
(tc & 0xff);
tc |= (vc_attr << 8) & ~himask;
scr_writew(tc, (u16 *)vc->vc_pos);
if (con_should_update(vc) && draw->x < 0) {
draw->x = vc->state.x;
draw->from = vc->vc_pos;
}
if (vc->state.x == vc->vc_cols - 1) {
vc->vc_need_wrap = vc->vc_decawm;
draw->to = vc->vc_pos + 2;
} else {
vc->state.x++;
draw->to = (vc->vc_pos += 2);
}
if (!--width)
break;
/* A space is printed in the second column */
tc = conv_uni_to_pc(vc, ' ');
if (tc < 0)
tc = ' ';
next_c = ' ';
}
notify_write(vc, c);
if (inverse)
con_flush(vc, draw);
return 0;
}
/* acquires console_lock */
static int do_con_write(struct tty_struct *tty, const u8 *buf, int count)
{
struct vc_draw_region draw = {
.x = -1,
};
int c, tc, n = 0;
unsigned int currcons;
struct vc_data *vc;
struct vt_notifier_param param;
bool rescan;
if (in_interrupt())
return count;
console_lock();
vc = tty->driver_data;
if (vc == NULL) {
		pr_err("vt: argh, driver_data is NULL!\n");
console_unlock();
return 0;
}
currcons = vc->vc_num;
if (!vc_cons_allocated(currcons)) {
/* could this happen? */
pr_warn_once("con_write: tty %d not allocated\n", currcons+1);
console_unlock();
return 0;
}
/* undraw cursor first */
if (con_is_fg(vc))
hide_cursor(vc);
param.vc = vc;
while (!tty->flow.stopped && count) {
int orig = *buf;
buf++;
n++;
count--;
rescan_last_byte:
c = orig;
rescan = false;
tc = vc_translate(vc, &c, &rescan);
if (tc == -1)
continue;
param.c = tc;
		if (atomic_notifier_call_chain(&vt_notifier_list, VT_PREWRITE,
					&param) == NOTIFY_STOP)
continue;
if (vc_is_control(vc, tc, c)) {
con_flush(vc, &draw);
do_con_trol(tty, vc, orig);
continue;
}
if (vc_con_write_normal(vc, tc, c, &draw) < 0)
continue;
if (rescan)
goto rescan_last_byte;
}
con_flush(vc, &draw);
console_conditional_schedule();
notify_update(vc);
console_unlock();
return n;
}
/*
* This is the console switching callback.
*
* Doing console switching in a process context allows
* us to do the switches asynchronously (needed when we want
* to switch due to a keyboard interrupt). Synchronization
* with other console code and prevention of re-entrancy is
* ensured with console_lock.
*/
static void console_callback(struct work_struct *ignored)
{
console_lock();
if (want_console >= 0) {
if (want_console != fg_console &&
vc_cons_allocated(want_console)) {
hide_cursor(vc_cons[fg_console].d);
change_console(vc_cons[want_console].d);
			/* we only switch when the console has already
			   been allocated - a new console is not created
			   from an interrupt routine */
}
want_console = -1;
}
if (do_poke_blanked_console) { /* do not unblank for a LED change */
do_poke_blanked_console = 0;
poke_blanked_console();
}
if (scrollback_delta) {
struct vc_data *vc = vc_cons[fg_console].d;
clear_selection();
if (vc->vc_mode == KD_TEXT && vc->vc_sw->con_scrolldelta)
vc->vc_sw->con_scrolldelta(vc, scrollback_delta);
scrollback_delta = 0;
}
if (blank_timer_expired) {
do_blank_screen(0);
blank_timer_expired = 0;
}
notify_update(vc_cons[fg_console].d);
console_unlock();
}
int set_console(int nr)
{
struct vc_data *vc = vc_cons[fg_console].d;
if (!vc_cons_allocated(nr) || vt_dont_switch ||
(vc->vt_mode.mode == VT_AUTO && vc->vc_mode == KD_GRAPHICS)) {
/*
* Console switch will fail in console_callback() or
* change_console() so there is no point scheduling
* the callback
*
* Existing set_console() users don't check the return
* value so this shouldn't break anything
*/
return -EINVAL;
}
want_console = nr;
schedule_console_callback();
return 0;
}
struct tty_driver *console_driver;
#ifdef CONFIG_VT_CONSOLE
/**
* vt_kmsg_redirect() - Sets/gets the kernel message console
* @new: The new virtual terminal number or -1 if the console should stay
* unchanged
*
* By default, the kernel messages are always printed on the current virtual
* console. However, the user may modify that default with the
* TIOCL_SETKMSGREDIRECT ioctl call.
*
* This function sets the kernel message console to be @new. It returns the old
* virtual console number. The virtual terminal number 0 (both as parameter and
* return value) means no redirection (i.e. always printed on the currently
* active console).
*
* The parameter -1 means that only the current console is returned, but the
* value is not modified. You may use the macro vt_get_kmsg_redirect() in that
* case to make the code more understandable.
*
* When the kernel is compiled without CONFIG_VT_CONSOLE, this function ignores
* the parameter and always returns 0.
*/
int vt_kmsg_redirect(int new)
{
static int kmsg_con;
if (new != -1)
return xchg(&kmsg_con, new);
else
return kmsg_con;
}
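/*
 * Editor's note: an illustrative userspace sketch (not part of the driver)
 * of the TIOCL_SETKMSGREDIRECT ioctl described above. Byte 0 of the buffer
 * carries the TIOCLINUX subcode and byte 1 the target console, matching the
 * get_user(data, p+1) in tioclinux(); CAP_SYS_ADMIN is required.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/tiocl.h>

int main(void)
{
	char request[2] = { TIOCL_SETKMSGREDIRECT, 3 }; /* redirect to tty3 */
	int fd = open("/dev/tty0", O_RDWR);

	if (fd < 0 || ioctl(fd, TIOCLINUX, request) < 0)
		perror("TIOCL_SETKMSGREDIRECT");
	return 0;
}
#endif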
/*
* Console on virtual terminal
*
* The console must be locked when we get here.
*/
static void vt_console_print(struct console *co, const char *b, unsigned count)
{
struct vc_data *vc = vc_cons[fg_console].d;
unsigned char c;
static DEFINE_SPINLOCK(printing_lock);
const ushort *start;
ushort start_x, cnt;
int kmsg_console;
WARN_CONSOLE_UNLOCKED();
/* this protects against concurrent oops only */
if (!spin_trylock(&printing_lock))
return;
kmsg_console = vt_get_kmsg_redirect();
if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
vc = vc_cons[kmsg_console - 1].d;
if (!vc_cons_allocated(fg_console)) {
/* impossible */
/* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
goto quit;
}
if (vc->vc_mode != KD_TEXT)
goto quit;
/* undraw cursor first */
if (con_is_fg(vc))
hide_cursor(vc);
start = (ushort *)vc->vc_pos;
start_x = vc->state.x;
cnt = 0;
while (count--) {
c = *b++;
if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
if (cnt && con_is_visible(vc))
vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x);
cnt = 0;
if (c == 8) { /* backspace */
bs(vc);
start = (ushort *)vc->vc_pos;
start_x = vc->state.x;
continue;
}
if (c != 13)
lf(vc);
cr(vc);
start = (ushort *)vc->vc_pos;
start_x = vc->state.x;
if (c == 10 || c == 13)
continue;
}
vc_uniscr_putc(vc, c);
scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
notify_write(vc, c);
cnt++;
if (vc->state.x == vc->vc_cols - 1) {
vc->vc_need_wrap = 1;
} else {
vc->vc_pos += 2;
vc->state.x++;
}
}
if (cnt && con_is_visible(vc))
vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x);
set_cursor(vc);
notify_update(vc);
quit:
spin_unlock(&printing_lock);
}
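/*
 * Editor's note: with console=ttyN on the kernel command line, co->index is
 * N; index 0 (or a bare "console=tty") tracks whatever virtual console is
 * currently in the foreground, hence the fallback to fg_console below.
 */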
static struct tty_driver *vt_console_device(struct console *c, int *index)
{
	*index = c->index ? c->index - 1 : fg_console;
return console_driver;
}
static int vt_console_setup(struct console *co, char *options)
{
return co->index >= MAX_NR_CONSOLES ? -EINVAL : 0;
}
static struct console vt_console_driver = {
.name = "tty",
.setup = vt_console_setup,
.write = vt_console_print,
.device = vt_console_device,
.unblank = unblank_screen,
.flags = CON_PRINTBUFFER,
.index = -1,
};
#endif
/*
* Handling of Linux-specific VC ioctls
*/
/*
 * Generally a bit racy with respect to console_lock().
*
* There are some functions which don't need it.
*
* There are some functions which can sleep for arbitrary periods
* (paste_selection) but we don't need the lock there anyway.
*
* set_selection_user has locking, and definitely needs it
*/
int tioclinux(struct tty_struct *tty, unsigned long arg)
{
char type, data;
char __user *p = (char __user *)arg;
int lines;
int ret;
if (current->signal->tty != tty && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(type, p))
return -EFAULT;
ret = 0;
switch (type) {
case TIOCL_SETSEL:
return set_selection_user((struct tiocl_selection
__user *)(p+1), tty);
case TIOCL_PASTESEL:
return paste_selection(tty);
case TIOCL_UNBLANKSCREEN:
console_lock();
unblank_screen();
console_unlock();
break;
case TIOCL_SELLOADLUT:
console_lock();
ret = sel_loadlut(p);
console_unlock();
break;
case TIOCL_GETSHIFTSTATE:
/*
* Make it possible to react to Shift+Mousebutton. Note that
* 'shift_state' is an undocumented kernel-internal variable;
* programs not closely related to the kernel should not use
* this.
*/
data = vt_get_shift_state();
return put_user(data, p);
case TIOCL_GETMOUSEREPORTING:
console_lock(); /* May be overkill */
data = mouse_reporting();
console_unlock();
return put_user(data, p);
case TIOCL_SETVESABLANK:
console_lock();
ret = set_vesa_blanking(p);
console_unlock();
break;
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
return put_user(data, p);
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(data, p+1))
return -EFAULT;
vt_kmsg_redirect(data);
break;
case TIOCL_GETFGCONSOLE:
/*
* No locking needed as this is a transiently correct return
* anyway if the caller hasn't disabled switching.
*/
return fg_console;
case TIOCL_SCROLLCONSOLE:
if (get_user(lines, (s32 __user *)(p+4)))
return -EFAULT;
/*
* Needs the console lock here. Note that lots of other calls
* need fixing before the lock is actually useful!
*/
console_lock();
scrollfront(vc_cons[fg_console].d, lines);
console_unlock();
break;
case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */
console_lock();
ignore_poke = 1;
do_blank_screen(0);
console_unlock();
break;
case TIOCL_BLANKEDSCREEN:
return console_blanked;
default:
return -EINVAL;
}
return ret;
}
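/*
 * Editor's note: an illustrative userspace sketch (not part of the driver)
 * of the TIOCL_GETFGCONSOLE subcode handled above; the ioctl return value
 * is the zero-based foreground console number.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/tiocl.h>

int main(void)
{
	char subcode = TIOCL_GETFGCONSOLE;
	int fd = open("/dev/tty0", O_RDWR);
	int fg;

	if (fd < 0)
		return 1;
	fg = ioctl(fd, TIOCLINUX, &subcode);
	if (fg >= 0)
		printf("foreground console: tty%d\n", fg + 1);
	return 0;
}
#endif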
/*
* /dev/ttyN handling
*/
static ssize_t con_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
int retval;
retval = do_con_write(tty, buf, count);
con_flush_chars(tty);
return retval;
}
static int con_put_char(struct tty_struct *tty, u8 ch)
{
return do_con_write(tty, &ch, 1);
}
static unsigned int con_write_room(struct tty_struct *tty)
{
if (tty->flow.stopped)
return 0;
return 32768; /* No limit, really; we're not buffering */
}
/*
* con_throttle and con_unthrottle are only used for
* paste_selection(), which has to stuff in a large number of
* characters...
*/
static void con_throttle(struct tty_struct *tty)
{
}
static void con_unthrottle(struct tty_struct *tty)
{
struct vc_data *vc = tty->driver_data;
wake_up_interruptible(&vc->paste_wait);
}
/*
* Turn the Scroll-Lock LED on when the tty is stopped
*/
static void con_stop(struct tty_struct *tty)
{
int console_num;
if (!tty)
return;
console_num = tty->index;
if (!vc_cons_allocated(console_num))
return;
vt_kbd_con_stop(console_num);
}
/*
* Turn the Scroll-Lock LED off when the console is started
*/
static void con_start(struct tty_struct *tty)
{
int console_num;
if (!tty)
return;
console_num = tty->index;
if (!vc_cons_allocated(console_num))
return;
vt_kbd_con_start(console_num);
}
static void con_flush_chars(struct tty_struct *tty)
{
struct vc_data *vc;
if (in_interrupt()) /* from flush_to_ldisc */
return;
/* if we race with con_close(), vt may be null */
console_lock();
vc = tty->driver_data;
if (vc)
set_cursor(vc);
console_unlock();
}
/*
* Allocate the console screen memory.
*/
static int con_install(struct tty_driver *driver, struct tty_struct *tty)
{
unsigned int currcons = tty->index;
struct vc_data *vc;
int ret;
console_lock();
ret = vc_allocate(currcons);
if (ret)
goto unlock;
vc = vc_cons[currcons].d;
/* Still being freed */
if (vc->port.tty) {
ret = -ERESTARTSYS;
goto unlock;
}
ret = tty_port_install(&vc->port, driver, tty);
if (ret)
goto unlock;
tty->driver_data = vc;
vc->port.tty = tty;
tty_port_get(&vc->port);
if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
tty->winsize.ws_row = vc_cons[currcons].d->vc_rows;
tty->winsize.ws_col = vc_cons[currcons].d->vc_cols;
}
if (vc->vc_utf)
tty->termios.c_iflag |= IUTF8;
else
tty->termios.c_iflag &= ~IUTF8;
unlock:
console_unlock();
return ret;
}
static int con_open(struct tty_struct *tty, struct file *filp)
{
/* everything done in install */
return 0;
}
static void con_close(struct tty_struct *tty, struct file *filp)
{
/* Nothing to do - we defer to shutdown */
}
static void con_shutdown(struct tty_struct *tty)
{
struct vc_data *vc = tty->driver_data;
BUG_ON(vc == NULL);
console_lock();
vc->port.tty = NULL;
console_unlock();
}
static void con_cleanup(struct tty_struct *tty)
{
struct vc_data *vc = tty->driver_data;
tty_port_put(&vc->port);
}
static int default_color = 7; /* white */
static int default_italic_color = 2; /* green (ASCII) */
static int default_underline_color = 3; /* cyan (ASCII) */
module_param_named(color, default_color, int, S_IRUGO | S_IWUSR);
module_param_named(italic, default_italic_color, int, S_IRUGO | S_IWUSR);
module_param_named(underline, default_underline_color, int, S_IRUGO | S_IWUSR);
static void vc_init(struct vc_data *vc, int do_clear)
{
	int j, k;
set_origin(vc);
vc->vc_pos = vc->vc_origin;
reset_vc(vc);
	for (j = k = 0; j < 16; j++) {
		vc->vc_palette[k++] = default_red[j];
		vc->vc_palette[k++] = default_grn[j];
		vc->vc_palette[k++] = default_blu[j];
}
vc->vc_def_color = default_color;
vc->vc_ulcolor = default_underline_color;
vc->vc_itcolor = default_italic_color;
vc->vc_halfcolor = 0x08; /* grey */
init_waitqueue_head(&vc->paste_wait);
reset_terminal(vc, do_clear);
}
/*
* This routine initializes console interrupts, and does nothing
* else. If you want the screen to clear, call tty_write with
* the appropriate escape-sequence.
*/
static int __init con_init(void)
{
const char *display_desc = NULL;
struct vc_data *vc;
unsigned int currcons = 0, i;
console_lock();
if (!conswitchp)
conswitchp = &dummy_con;
display_desc = conswitchp->con_startup();
if (!display_desc) {
fg_console = 0;
console_unlock();
return 0;
}
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
		struct con_driver *con_driver = &registered_con_driver[i];
if (con_driver->con == NULL) {
con_driver->con = conswitchp;
con_driver->desc = display_desc;
con_driver->flag = CON_DRIVER_FLAG_INIT;
con_driver->first = 0;
con_driver->last = MAX_NR_CONSOLES - 1;
break;
}
}
for (i = 0; i < MAX_NR_CONSOLES; i++)
con_driver_map[i] = conswitchp;
if (blankinterval) {
blank_state = blank_normal_wait;
mod_timer(&console_timer, jiffies + (blankinterval * HZ));
}
for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT);
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
tty_port_init(&vc->port);
visual_init(vc, currcons, 1);
/* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
vc_init(vc, currcons || !vc->vc_sw->con_save_screen);
}
currcons = fg_console = 0;
master_display_fg = vc = vc_cons[currcons].d;
set_origin(vc);
save_screen(vc);
gotoxy(vc, vc->state.x, vc->state.y);
csi_J(vc, 0);
update_screen(vc);
pr_info("Console: %s %s %dx%d\n",
vc->vc_can_do_color ? "colour" : "mono",
display_desc, vc->vc_cols, vc->vc_rows);
console_unlock();
#ifdef CONFIG_VT_CONSOLE
register_console(&vt_console_driver);
#endif
return 0;
}
console_initcall(con_init);
static const struct tty_operations con_ops = {
.install = con_install,
.open = con_open,
.close = con_close,
.write = con_write,
.write_room = con_write_room,
.put_char = con_put_char,
.flush_chars = con_flush_chars,
.ioctl = vt_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vt_compat_ioctl,
#endif
.stop = con_stop,
.start = con_start,
.throttle = con_throttle,
.unthrottle = con_unthrottle,
.resize = vt_resize,
.shutdown = con_shutdown,
.cleanup = con_cleanup,
};
static struct cdev vc0_cdev;
static ssize_t show_tty_active(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "tty%d\n", fg_console + 1);
}
static DEVICE_ATTR(active, S_IRUGO, show_tty_active, NULL);
static struct attribute *vt_dev_attrs[] = {
&dev_attr_active.attr,
NULL
};
ATTRIBUTE_GROUPS(vt_dev);
int __init vty_init(const struct file_operations *console_fops)
{
cdev_init(&vc0_cdev, console_fops);
if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
panic("Couldn't register /dev/tty0 driver\n");
tty0dev = device_create_with_groups(&tty_class, NULL,
MKDEV(TTY_MAJOR, 0), NULL,
vt_dev_groups, "tty0");
if (IS_ERR(tty0dev))
tty0dev = NULL;
vcs_init();
console_driver = tty_alloc_driver(MAX_NR_CONSOLES, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_RESET_TERMIOS);
if (IS_ERR(console_driver))
panic("Couldn't allocate console driver\n");
console_driver->name = "tty";
console_driver->name_base = 1;
console_driver->major = TTY_MAJOR;
console_driver->minor_start = 1;
console_driver->type = TTY_DRIVER_TYPE_CONSOLE;
console_driver->init_termios = tty_std_termios;
if (default_utf8)
console_driver->init_termios.c_iflag |= IUTF8;
tty_set_operations(console_driver, &con_ops);
if (tty_register_driver(console_driver))
panic("Couldn't register console driver\n");
kbd_init();
console_map_init();
#ifdef CONFIG_MDA_CONSOLE
mda_console_init();
#endif
return 0;
}
static struct class *vtconsole_class;
static int do_bind_con_driver(const struct consw *csw, int first, int last,
int deflt)
{
struct module *owner = csw->owner;
const char *desc = NULL;
struct con_driver *con_driver;
int i, j = -1, k = -1, retval = -ENODEV;
if (!try_module_get(owner))
return -ENODEV;
WARN_CONSOLE_UNLOCKED();
/* check if driver is registered */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
		con_driver = &registered_con_driver[i];
if (con_driver->con == csw) {
desc = con_driver->desc;
retval = 0;
break;
}
}
if (retval)
goto err;
if (!(con_driver->flag & CON_DRIVER_FLAG_INIT)) {
csw->con_startup();
con_driver->flag |= CON_DRIVER_FLAG_INIT;
}
if (deflt) {
if (conswitchp)
module_put(conswitchp->owner);
__module_get(owner);
conswitchp = csw;
}
first = max(first, con_driver->first);
last = min(last, con_driver->last);
for (i = first; i <= last; i++) {
int old_was_color;
struct vc_data *vc = vc_cons[i].d;
if (con_driver_map[i])
module_put(con_driver_map[i]->owner);
__module_get(owner);
con_driver_map[i] = csw;
if (!vc || !vc->vc_sw)
continue;
j = i;
if (con_is_visible(vc)) {
k = i;
save_screen(vc);
}
old_was_color = vc->vc_can_do_color;
vc->vc_sw->con_deinit(vc);
vc->vc_origin = (unsigned long)vc->vc_screenbuf;
visual_init(vc, i, 0);
set_origin(vc);
update_attr(vc);
/* If the console changed between mono <-> color, then
* the attributes in the screenbuf will be wrong. The
* following resets all attributes to something sane.
*/
if (old_was_color != vc->vc_can_do_color)
clear_buffer_attributes(vc);
}
pr_info("Console: switching ");
if (!deflt)
pr_cont("consoles %d-%d ", first + 1, last + 1);
if (j >= 0) {
struct vc_data *vc = vc_cons[j].d;
pr_cont("to %s %s %dx%d\n",
vc->vc_can_do_color ? "colour" : "mono",
desc, vc->vc_cols, vc->vc_rows);
if (k >= 0) {
vc = vc_cons[k].d;
update_screen(vc);
}
} else {
pr_cont("to %s\n", desc);
}
retval = 0;
err:
module_put(owner);
return retval;
}
#ifdef CONFIG_VT_HW_CONSOLE_BINDING
int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
{
struct module *owner = csw->owner;
const struct consw *defcsw = NULL;
struct con_driver *con_driver = NULL, *con_back = NULL;
int i, retval = -ENODEV;
if (!try_module_get(owner))
return -ENODEV;
WARN_CONSOLE_UNLOCKED();
/* check if driver is registered and if it is unbindable */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
		con_driver = &registered_con_driver[i];
if (con_driver->con == csw &&
con_driver->flag & CON_DRIVER_FLAG_MODULE) {
retval = 0;
break;
}
}
if (retval)
goto err;
retval = -ENODEV;
/* check if backup driver exists */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
		con_back = &registered_con_driver[i];
if (con_back->con && con_back->con != csw) {
defcsw = con_back->con;
retval = 0;
break;
}
}
if (retval)
goto err;
if (!con_is_bound(csw))
goto err;
first = max(first, con_driver->first);
last = min(last, con_driver->last);
for (i = first; i <= last; i++) {
if (con_driver_map[i] == csw) {
module_put(csw->owner);
con_driver_map[i] = NULL;
}
}
if (!con_is_bound(defcsw)) {
const struct consw *defconsw = conswitchp;
defcsw->con_startup();
con_back->flag |= CON_DRIVER_FLAG_INIT;
/*
* vgacon may change the default driver to point
* to dummycon, we restore it here...
*/
conswitchp = defconsw;
}
if (!con_is_bound(csw))
con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
/* ignore return value, binding should not fail */
do_bind_con_driver(defcsw, first, last, deflt);
err:
module_put(owner);
return retval;
}
EXPORT_SYMBOL_GPL(do_unbind_con_driver);
static int vt_bind(struct con_driver *con)
{
const struct consw *defcsw = NULL, *csw = NULL;
int i, more = 1, first = -1, last = -1, deflt = 0;
if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE))
goto err;
csw = con->con;
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
		struct con_driver *con = &registered_con_driver[i];
if (con->con && !(con->flag & CON_DRIVER_FLAG_MODULE)) {
defcsw = con->con;
break;
}
}
if (!defcsw)
goto err;
while (more) {
more = 0;
for (i = con->first; i <= con->last; i++) {
if (con_driver_map[i] == defcsw) {
if (first == -1)
first = i;
last = i;
more = 1;
} else if (first != -1)
break;
}
		if (first == 0 && last == MAX_NR_CONSOLES - 1)
deflt = 1;
if (first != -1)
do_bind_con_driver(csw, first, last, deflt);
first = -1;
last = -1;
deflt = 0;
}
err:
return 0;
}
static int vt_unbind(struct con_driver *con)
{
const struct consw *csw = NULL;
int i, more = 1, first = -1, last = -1, deflt = 0;
int ret;
if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE))
goto err;
csw = con->con;
while (more) {
more = 0;
for (i = con->first; i <= con->last; i++) {
if (con_driver_map[i] == csw) {
if (first == -1)
first = i;
last = i;
more = 1;
} else if (first != -1)
break;
}
		if (first == 0 && last == MAX_NR_CONSOLES - 1)
deflt = 1;
if (first != -1) {
ret = do_unbind_con_driver(csw, first, last, deflt);
if (ret != 0)
return ret;
}
first = -1;
last = -1;
deflt = 0;
}
err:
return 0;
}
#else
static inline int vt_bind(struct con_driver *con)
{
return 0;
}
static inline int vt_unbind(struct con_driver *con)
{
return 0;
}
#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
static ssize_t store_bind(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct con_driver *con = dev_get_drvdata(dev);
int bind = simple_strtoul(buf, NULL, 0);
console_lock();
if (bind)
vt_bind(con);
else
vt_unbind(con);
console_unlock();
return count;
}
static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct con_driver *con = dev_get_drvdata(dev);
int bind;
console_lock();
bind = con_is_bound(con->con);
console_unlock();
return sysfs_emit(buf, "%i\n", bind);
}
static ssize_t show_name(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct con_driver *con = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s %s\n",
(con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
con->desc);
}
static DEVICE_ATTR(bind, S_IRUGO|S_IWUSR, show_bind, store_bind);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static struct attribute *con_dev_attrs[] = {
&dev_attr_bind.attr,
&dev_attr_name.attr,
NULL
};
ATTRIBUTE_GROUPS(con_dev);
static int vtconsole_init_device(struct con_driver *con)
{
con->flag |= CON_DRIVER_FLAG_ATTR;
return 0;
}
static void vtconsole_deinit_device(struct con_driver *con)
{
con->flag &= ~CON_DRIVER_FLAG_ATTR;
}
/**
* con_is_bound - checks if driver is bound to the console
* @csw: console driver
*
* RETURNS: zero if unbound, nonzero if bound
*
 * Drivers can call this; if it returns zero, they should release
 * all resources allocated in con_startup()
*/
int con_is_bound(const struct consw *csw)
{
int i, bound = 0;
WARN_CONSOLE_UNLOCKED();
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (con_driver_map[i] == csw) {
bound = 1;
break;
}
}
return bound;
}
EXPORT_SYMBOL(con_is_bound);
/**
* con_is_visible - checks whether the current console is visible
* @vc: virtual console
*
* RETURNS: zero if not visible, nonzero if visible
*/
bool con_is_visible(const struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
return *vc->vc_display_fg == vc;
}
EXPORT_SYMBOL(con_is_visible);
/**
* con_debug_enter - prepare the console for the kernel debugger
* @vc: virtual console
*
* Called when the console is taken over by the kernel debugger, this
* function needs to save the current console state, then put the console
* into a state suitable for the kernel debugger.
*
* RETURNS:
* Zero on success, nonzero if a failure occurred when trying to prepare
* the console for the debugger.
*/
int con_debug_enter(struct vc_data *vc)
{
int ret = 0;
saved_fg_console = fg_console;
saved_last_console = last_console;
saved_want_console = want_console;
saved_vc_mode = vc->vc_mode;
saved_console_blanked = console_blanked;
vc->vc_mode = KD_TEXT;
console_blanked = 0;
if (vc->vc_sw->con_debug_enter)
ret = vc->vc_sw->con_debug_enter(vc);
#ifdef CONFIG_KGDB_KDB
/* Set the initial LINES variable if it is not already set */
if (vc->vc_rows < 999) {
int linecount;
char lns[4];
const char *setargs[3] = {
"set",
"LINES",
lns,
};
if (kdbgetintenv(setargs[0], &linecount)) {
snprintf(lns, 4, "%i", vc->vc_rows);
kdb_set(2, setargs);
}
}
if (vc->vc_cols < 999) {
int colcount;
char cols[4];
const char *setargs[3] = {
"set",
"COLUMNS",
cols,
};
if (kdbgetintenv(setargs[0], &colcount)) {
snprintf(cols, 4, "%i", vc->vc_cols);
kdb_set(2, setargs);
}
}
#endif /* CONFIG_KGDB_KDB */
return ret;
}
EXPORT_SYMBOL_GPL(con_debug_enter);
/**
* con_debug_leave - restore console state
*
* Restore the console state to what it was before the kernel debugger
* was invoked.
*
* RETURNS:
* Zero on success, nonzero if a failure occurred when trying to restore
* the console.
*/
int con_debug_leave(void)
{
struct vc_data *vc;
int ret = 0;
fg_console = saved_fg_console;
last_console = saved_last_console;
want_console = saved_want_console;
console_blanked = saved_console_blanked;
vc_cons[fg_console].d->vc_mode = saved_vc_mode;
vc = vc_cons[fg_console].d;
if (vc->vc_sw->con_debug_leave)
ret = vc->vc_sw->con_debug_leave(vc);
return ret;
}
EXPORT_SYMBOL_GPL(con_debug_leave);
static int do_register_con_driver(const struct consw *csw, int first, int last)
{
struct module *owner = csw->owner;
struct con_driver *con_driver;
const char *desc;
int i, retval;
WARN_CONSOLE_UNLOCKED();
if (!try_module_get(owner))
return -ENODEV;
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
		con_driver = &registered_con_driver[i];
/* already registered */
if (con_driver->con == csw) {
retval = -EBUSY;
goto err;
}
}
desc = csw->con_startup();
if (!desc) {
retval = -ENODEV;
goto err;
}
retval = -EINVAL;
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
		con_driver = &registered_con_driver[i];
if (con_driver->con == NULL &&
!(con_driver->flag & CON_DRIVER_FLAG_ZOMBIE)) {
con_driver->con = csw;
con_driver->desc = desc;
con_driver->node = i;
con_driver->flag = CON_DRIVER_FLAG_MODULE |
CON_DRIVER_FLAG_INIT;
con_driver->first = first;
con_driver->last = last;
retval = 0;
break;
}
}
if (retval)
goto err;
con_driver->dev =
device_create_with_groups(vtconsole_class, NULL,
MKDEV(0, con_driver->node),
con_driver, con_dev_groups,
"vtcon%i", con_driver->node);
if (IS_ERR(con_driver->dev)) {
pr_warn("Unable to create device for %s; errno = %ld\n",
con_driver->desc, PTR_ERR(con_driver->dev));
con_driver->dev = NULL;
} else {
vtconsole_init_device(con_driver);
}
err:
module_put(owner);
return retval;
}
/**
* do_unregister_con_driver - unregister console driver from console layer
* @csw: console driver
*
 * DESCRIPTION: All drivers that register with the console layer must
 * call this function on exit, or whenever the console driver enters a
 * state where it can no longer handle console services, such as the
 * framebuffer console when no framebuffer driver is loaded.
*
* The driver must unbind first prior to unregistration.
*/
int do_unregister_con_driver(const struct consw *csw)
{
int i;
/* cannot unregister a bound driver */
if (con_is_bound(csw))
return -EBUSY;
if (csw == conswitchp)
return -EINVAL;
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
		struct con_driver *con_driver = &registered_con_driver[i];
if (con_driver->con == csw) {
/*
* Defer the removal of the sysfs entries since that
* will acquire the kernfs s_active lock and we can't
* acquire this lock while holding the console lock:
* the unbind sysfs entry imposes already the opposite
* order. Reset con already here to prevent any later
* lookup to succeed and mark this slot as zombie, so
* it won't get reused until we complete the removal
* in the deferred work.
*/
con_driver->con = NULL;
con_driver->flag = CON_DRIVER_FLAG_ZOMBIE;
schedule_work(&con_driver_unregister_work);
return 0;
}
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(do_unregister_con_driver);
static void con_driver_unregister_callback(struct work_struct *ignored)
{
int i;
console_lock();
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
		struct con_driver *con_driver = &registered_con_driver[i];
if (!(con_driver->flag & CON_DRIVER_FLAG_ZOMBIE))
continue;
console_unlock();
vtconsole_deinit_device(con_driver);
device_destroy(vtconsole_class, MKDEV(0, con_driver->node));
console_lock();
if (WARN_ON_ONCE(con_driver->con))
con_driver->con = NULL;
con_driver->desc = NULL;
con_driver->dev = NULL;
con_driver->node = 0;
WARN_ON_ONCE(con_driver->flag != CON_DRIVER_FLAG_ZOMBIE);
con_driver->flag = 0;
con_driver->first = 0;
con_driver->last = 0;
}
console_unlock();
}
/*
* If we support more console drivers, this function is used
* when a driver wants to take over some existing consoles
* and become default driver for newly opened ones.
*
* do_take_over_console is basically a register followed by bind
*/
int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
{
int err;
err = do_register_con_driver(csw, first, last);
/*
	 * If we get a busy error we still want to bind the console driver
* and return success, as we may have unbound the console driver
* but not unregistered it.
*/
if (err == -EBUSY)
err = 0;
if (!err)
do_bind_con_driver(csw, first, last, deflt);
return err;
}
EXPORT_SYMBOL_GPL(do_take_over_console);
/*
* give_up_console is a wrapper to unregister_con_driver. It will only
* work if driver is fully unbound.
*/
void give_up_console(const struct consw *csw)
{
console_lock();
do_unregister_con_driver(csw);
console_unlock();
}
EXPORT_SYMBOL(give_up_console);
static int __init vtconsole_class_init(void)
{
int i;
vtconsole_class = class_create("vtconsole");
if (IS_ERR(vtconsole_class)) {
pr_warn("Unable to create vt console class; errno = %ld\n",
PTR_ERR(vtconsole_class));
vtconsole_class = NULL;
}
/* Add system drivers to sysfs */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
		struct con_driver *con = &registered_con_driver[i];
if (con->con && !con->dev) {
con->dev =
device_create_with_groups(vtconsole_class, NULL,
MKDEV(0, con->node),
con, con_dev_groups,
"vtcon%i", con->node);
if (IS_ERR(con->dev)) {
pr_warn("Unable to create device for %s; errno = %ld\n",
con->desc, PTR_ERR(con->dev));
con->dev = NULL;
} else {
vtconsole_init_device(con);
}
}
}
return 0;
}
postcore_initcall(vtconsole_class_init);
/*
* Screen blanking
*/
static int set_vesa_blanking(char __user *p)
{
unsigned int mode;
if (get_user(mode, p + 1))
return -EFAULT;
vesa_blank_mode = (mode < 4) ? mode : 0;
return 0;
}
void do_blank_screen(int entering_gfx)
{
struct vc_data *vc = vc_cons[fg_console].d;
int i;
might_sleep();
WARN_CONSOLE_UNLOCKED();
if (console_blanked) {
if (blank_state == blank_vesa_wait) {
blank_state = blank_off;
vc->vc_sw->con_blank(vc, vesa_blank_mode + 1, 0);
}
return;
}
/* entering graphics mode? */
if (entering_gfx) {
hide_cursor(vc);
save_screen(vc);
vc->vc_sw->con_blank(vc, -1, 1);
console_blanked = fg_console + 1;
blank_state = blank_off;
set_origin(vc);
return;
}
blank_state = blank_off;
/* don't blank graphics */
if (vc->vc_mode != KD_TEXT) {
console_blanked = fg_console + 1;
return;
}
hide_cursor(vc);
del_timer_sync(&console_timer);
blank_timer_expired = 0;
save_screen(vc);
/* In case we need to reset origin, blanking hook returns 1 */
i = vc->vc_sw->con_blank(vc, vesa_off_interval ? 1 : (vesa_blank_mode + 1), 0);
console_blanked = fg_console + 1;
if (i)
set_origin(vc);
if (console_blank_hook && console_blank_hook(1))
return;
if (vesa_off_interval && vesa_blank_mode) {
blank_state = blank_vesa_wait;
mod_timer(&console_timer, jiffies + vesa_off_interval);
}
vt_event_post(VT_EVENT_BLANK, vc->vc_num, vc->vc_num);
}
EXPORT_SYMBOL(do_blank_screen);
/*
* Called by timer as well as from vt_console_driver
*/
void do_unblank_screen(int leaving_gfx)
{
struct vc_data *vc;
/* This should now always be called from a "sane" (read: can schedule)
* context for the sake of the low level drivers, except in the special
* case of oops_in_progress
*/
if (!oops_in_progress)
might_sleep();
WARN_CONSOLE_UNLOCKED();
ignore_poke = 0;
if (!console_blanked)
return;
if (!vc_cons_allocated(fg_console)) {
/* impossible */
pr_warn("unblank_screen: tty %d not allocated ??\n",
fg_console + 1);
return;
}
vc = vc_cons[fg_console].d;
if (vc->vc_mode != KD_TEXT)
return; /* but leave console_blanked != 0 */
if (blankinterval) {
mod_timer(&console_timer, jiffies + (blankinterval * HZ));
blank_state = blank_normal_wait;
}
console_blanked = 0;
if (vc->vc_sw->con_blank(vc, 0, leaving_gfx))
/* Low-level driver cannot restore -> do it ourselves */
update_screen(vc);
if (console_blank_hook)
console_blank_hook(0);
set_palette(vc);
set_cursor(vc);
vt_event_post(VT_EVENT_UNBLANK, vc->vc_num, vc->vc_num);
}
EXPORT_SYMBOL(do_unblank_screen);
/*
* This is called by the outside world to cause a forced unblank, mostly for
* oopses. Currently, I just call do_unblank_screen(0), but we could eventually
* call it with 1 as an argument and so force a mode restore... that may kill
* X or at least garbage the screen but would also make the Oops visible...
*/
static void unblank_screen(void)
{
do_unblank_screen(0);
}
/*
 * We defer the timer blanking to the work queue so it can take the console
 * mutex (console operations can still happen at irq time, but only from
 * printk, which holds the console mutex). Not perfect yet, but better than
 * no locking.
 */
static void blank_screen_t(struct timer_list *unused)
{
blank_timer_expired = 1;
schedule_work(&console_work);
}
void poke_blanked_console(void)
{
WARN_CONSOLE_UNLOCKED();
	/* Add this so we quickly catch whoever might call us in a non-safe
	 * context. Nowadays, unblank_screen() isn't to be called in
	 * atomic contexts and is allowed to schedule (with the special case
	 * of oops_in_progress, but that isn't of any concern for this
	 * function). --BenH.
	 */
	might_sleep();
	/* This isn't perfectly race free, but a race here would be mostly
	 * harmless: at worst we'll do a spurious blank, and even that is
	 * unlikely.
	 */
del_timer(&console_timer);
blank_timer_expired = 0;
if (ignore_poke || !vc_cons[fg_console].d || vc_cons[fg_console].d->vc_mode == KD_GRAPHICS)
return;
if (console_blanked)
unblank_screen();
else if (blankinterval) {
mod_timer(&console_timer, jiffies + (blankinterval * HZ));
blank_state = blank_normal_wait;
}
}
/*
* Palettes
*/
static void set_palette(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_set_palette)
vc->vc_sw->con_set_palette(vc, color_table);
}
/*
* Load palette into the DAC registers. arg points to a colour
* map, 3 bytes per colour, 16 colours, range from 0 to 255.
*/
int con_set_cmap(unsigned char __user *arg)
{
int i, j, k;
unsigned char colormap[3*16];
if (copy_from_user(colormap, arg, sizeof(colormap)))
return -EFAULT;
console_lock();
for (i = k = 0; i < 16; i++) {
default_red[i] = colormap[k++];
default_grn[i] = colormap[k++];
default_blu[i] = colormap[k++];
}
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (!vc_cons_allocated(i))
continue;
for (j = k = 0; j < 16; j++) {
vc_cons[i].d->vc_palette[k++] = default_red[j];
vc_cons[i].d->vc_palette[k++] = default_grn[j];
vc_cons[i].d->vc_palette[k++] = default_blu[j];
}
set_palette(vc_cons[i].d);
}
console_unlock();
return 0;
}
int con_get_cmap(unsigned char __user *arg)
{
int i, k;
unsigned char colormap[3*16];
console_lock();
for (i = k = 0; i < 16; i++) {
colormap[k++] = default_red[i];
colormap[k++] = default_grn[i];
colormap[k++] = default_blu[i];
}
console_unlock();
if (copy_to_user(arg, colormap, sizeof(colormap)))
return -EFAULT;
return 0;
}
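/*
 * Editor's note: con_set_cmap()/con_get_cmap() above back the PIO_CMAP and
 * GIO_CMAP ioctls. An illustrative userspace sketch (not part of the
 * driver) dumping the 16-colour palette:
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kd.h>

int main(void)
{
	unsigned char cmap[3 * 16];	/* 16 colours, one R/G/B triple each */
	int i, fd = open("/dev/tty0", O_RDWR);

	if (fd < 0 || ioctl(fd, GIO_CMAP, cmap) < 0)
		return 1;
	for (i = 0; i < 16; i++)
		printf("colour %2d: #%02x%02x%02x\n", i,
		       cmap[3 * i], cmap[3 * i + 1], cmap[3 * i + 2]);
	return 0;
}
#endif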
void reset_palette(struct vc_data *vc)
{
int j, k;
	for (j = k = 0; j < 16; j++) {
vc->vc_palette[k++] = default_red[j];
vc->vc_palette[k++] = default_grn[j];
vc->vc_palette[k++] = default_blu[j];
}
set_palette(vc);
}
/*
* Font switching
*
 * Currently we only support fonts up to 64 pixels wide, at a maximum height
 * of 128 pixels. Userspace fontdata may have to be stored with 32 bytes
 * (shorts/ints, depending on width) reserved for each character, which is
 * rather wasteful, but this is done in order to maintain compatibility with
 * the EGA/VGA fonts. It is up to the actual low-level console driver to
 * convert the data into its favorite format (maybe we should add a
 * `fontoffset' field to the `display' structure so we won't have to convert
 * the fontdata all the time).
 * /Jes
*/
#define max_font_width 64
#define max_font_height 128
#define max_font_glyphs 512
#define max_font_size (max_font_glyphs*max_font_width*max_font_height)
static int con_font_get(struct vc_data *vc, struct console_font_op *op)
{
struct console_font font;
int rc = -EINVAL;
int c;
unsigned int vpitch = op->op == KD_FONT_OP_GET_TALL ? op->height : 32;
if (vpitch > max_font_height)
return -EINVAL;
if (op->data) {
font.data = kvmalloc(max_font_size, GFP_KERNEL);
if (!font.data)
return -ENOMEM;
} else
font.data = NULL;
console_lock();
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
else if (vc->vc_sw->con_font_get)
rc = vc->vc_sw->con_font_get(vc, &font, vpitch);
else
rc = -ENOSYS;
console_unlock();
if (rc)
goto out;
c = (font.width+7)/8 * vpitch * font.charcount;
if (op->data && font.charcount > op->charcount)
rc = -ENOSPC;
if (font.width > op->width || font.height > op->height)
rc = -ENOSPC;
if (rc)
goto out;
op->height = font.height;
op->width = font.width;
op->charcount = font.charcount;
if (op->data && copy_to_user(op->data, font.data, c))
rc = -EFAULT;
out:
kvfree(font.data);
return rc;
}
static int con_font_set(struct vc_data *vc, struct console_font_op *op)
{
struct console_font font;
int rc = -EINVAL;
int size;
unsigned int vpitch = op->op == KD_FONT_OP_SET_TALL ? op->height : 32;
if (vc->vc_mode != KD_TEXT)
return -EINVAL;
if (!op->data)
return -EINVAL;
if (op->charcount > max_font_glyphs)
return -EINVAL;
if (op->width <= 0 || op->width > max_font_width || !op->height ||
op->height > max_font_height)
return -EINVAL;
if (vpitch < op->height)
return -EINVAL;
size = (op->width+7)/8 * vpitch * op->charcount;
if (size > max_font_size)
return -ENOSPC;
font.data = memdup_user(op->data, size);
if (IS_ERR(font.data))
return PTR_ERR(font.data);
font.charcount = op->charcount;
font.width = op->width;
font.height = op->height;
console_lock();
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
else if (vc->vc_sw->con_font_set) {
if (vc_is_sel(vc))
clear_selection();
rc = vc->vc_sw->con_font_set(vc, &font, vpitch, op->flags);
} else
rc = -ENOSYS;
console_unlock();
kfree(font.data);
return rc;
}
static int con_font_default(struct vc_data *vc, struct console_font_op *op)
{
struct console_font font = {.width = op->width, .height = op->height};
char name[MAX_FONT_NAME];
char *s = name;
int rc;
if (!op->data)
s = NULL;
else if (strncpy_from_user(name, op->data, MAX_FONT_NAME - 1) < 0)
return -EFAULT;
else
name[MAX_FONT_NAME - 1] = 0;
console_lock();
if (vc->vc_mode != KD_TEXT) {
console_unlock();
return -EINVAL;
}
if (vc->vc_sw->con_font_default) {
if (vc_is_sel(vc))
clear_selection();
rc = vc->vc_sw->con_font_default(vc, &font, s);
} else
rc = -ENOSYS;
console_unlock();
if (!rc) {
op->width = font.width;
op->height = font.height;
}
return rc;
}
int con_font_op(struct vc_data *vc, struct console_font_op *op)
{
switch (op->op) {
case KD_FONT_OP_SET:
case KD_FONT_OP_SET_TALL:
return con_font_set(vc, op);
case KD_FONT_OP_GET:
case KD_FONT_OP_GET_TALL:
return con_font_get(vc, op);
case KD_FONT_OP_SET_DEFAULT:
return con_font_default(vc, op);
case KD_FONT_OP_COPY:
/* was buggy and never really used */
return -EINVAL;
}
return -ENOSYS;
}
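/*
 * Editor's note: an illustrative userspace sketch (not part of the driver)
 * of KD_FONT_OP_GET via the KDFONTOP ioctl, sized for the worst case the
 * checks above will let through (512 glyphs, a vpitch of 32 rows, up to
 * (64+7)/8 = 8 bytes per row).
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kd.h>

int main(void)
{
	unsigned char *glyphs = malloc(512 * 32 * 8);
	struct console_font_op op = {
		.op = KD_FONT_OP_GET,
		.width = 64,		/* maxima we are prepared to accept */
		.height = 32,
		.charcount = 512,
		.data = glyphs,
	};
	int fd = open("/dev/tty0", O_RDWR);

	if (!glyphs || fd < 0)
		return 1;
	if (ioctl(fd, KDFONTOP, &op) == 0)
		printf("font: %ux%u, %u glyphs\n", op.width, op.height,
		       op.charcount);
	else
		perror("KDFONTOP");
	free(glyphs);
	return 0;
}
#endif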
/*
* Interface exported to selection and vcs.
*/
/* used by selection */
u16 screen_glyph(const struct vc_data *vc, int offset)
{
u16 w = scr_readw(screenpos(vc, offset, true));
u16 c = w & 0xff;
if (w & vc->vc_hi_font_mask)
c |= 0x100;
return c;
}
EXPORT_SYMBOL_GPL(screen_glyph);
u32 screen_glyph_unicode(const struct vc_data *vc, int n)
{
u32 **uni_lines = vc->vc_uni_lines;
if (uni_lines)
return uni_lines[n / vc->vc_cols][n % vc->vc_cols];
return inverse_translate(vc, screen_glyph(vc, n * 2), true);
}
EXPORT_SYMBOL_GPL(screen_glyph_unicode);
/* used by vcs - note the word offset */
unsigned short *screen_pos(const struct vc_data *vc, int w_offset, bool viewed)
{
return screenpos(vc, 2 * w_offset, viewed);
}
EXPORT_SYMBOL_GPL(screen_pos);
void getconsxy(const struct vc_data *vc, unsigned char xy[static 2])
{
/* clamp values if they don't fit */
xy[0] = min(vc->state.x, 0xFFu);
xy[1] = min(vc->state.y, 0xFFu);
}
void putconsxy(struct vc_data *vc, unsigned char xy[static const 2])
{
hide_cursor(vc);
gotoxy(vc, xy[0], xy[1]);
set_cursor(vc);
}
u16 vcs_scr_readw(const struct vc_data *vc, const u16 *org)
{
if ((unsigned long)org == vc->vc_pos && softcursor_original != -1)
return softcursor_original;
return scr_readw(org);
}
void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org)
{
scr_writew(val, org);
if ((unsigned long)org == vc->vc_pos) {
softcursor_original = -1;
add_softcursor(vc);
}
}
void vcs_scr_updated(struct vc_data *vc)
{
notify_update(vc);
}
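/*
 * Editor's note, a worked example of the scrollback arithmetic in
 * vc_scrolldelta_helper() below: on an 80-column console (vc_size_row ==
 * 160) that has not wrapped around yet (from = 0, wrap = size), with
 * vorigin == origin at byte offset 8000, a call with lines = -5 gives
 * from_off = 8000 - 5 * 160 = 7200, so the visible origin moves five rows
 * back into the scrollback.
 */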
void vc_scrolldelta_helper(struct vc_data *c, int lines,
unsigned int rolled_over, void *base, unsigned int size)
{
unsigned long ubase = (unsigned long)base;
ptrdiff_t scr_end = (void *)c->vc_scr_end - base;
ptrdiff_t vorigin = (void *)c->vc_visible_origin - base;
ptrdiff_t origin = (void *)c->vc_origin - base;
int margin = c->vc_size_row * 4;
int from, wrap, from_off, avail;
/* Turn scrollback off */
if (!lines) {
c->vc_visible_origin = c->vc_origin;
return;
}
	/* Do we already have enough to allow jumping from 0 to the end? */
if (rolled_over > scr_end + margin) {
from = scr_end;
wrap = rolled_over + c->vc_size_row;
} else {
from = 0;
wrap = size;
}
from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row;
avail = (origin - from + wrap) % wrap;
/* Only a little piece would be left? Show all incl. the piece! */
if (avail < 2 * margin)
margin = 0;
if (from_off < margin)
from_off = 0;
if (from_off > avail - margin)
from_off = avail;
c->vc_visible_origin = ubase + (from + from_off) % wrap;
}
EXPORT_SYMBOL_GPL(vc_scrolldelta_helper);
| linux-master | drivers/tty/vt/vt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IPWireless 3G PCMCIA Network Driver
*
* Original code
* by Stephen Blackheath <[email protected]>,
* Ben Martel <[email protected]>
*
* Copyrighted as follows:
* Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
*
* Various driver changes and rewrites, port to new kernels
* Copyright (C) 2006-2007 Jiri Kosina
*
* Misc code cleanups and updates
* Copyright (C) 2007 David Sterba
*/
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
#include <linux/slab.h>
#include <linux/ppp-ioctl.h>
#include <linux/skbuff.h>
#include "network.h"
#include "hardware.h"
#include "main.h"
#include "tty.h"
#define MAX_ASSOCIATED_TTYS 2
#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
struct ipw_network {
/* Hardware context, used for calls to hardware layer. */
struct ipw_hardware *hardware;
/* Context for kernel 'generic_ppp' functionality */
struct ppp_channel *ppp_channel;
/* tty context connected with IPW console */
struct ipw_tty *associated_ttys[NO_OF_IPW_CHANNELS][MAX_ASSOCIATED_TTYS];
/* True if ppp needs waking up once we're ready to xmit */
int ppp_blocked;
/* Number of packets queued up in hardware module. */
int outgoing_packets_queued;
/* Spinlock to avoid interrupts during shutdown */
spinlock_t lock;
struct mutex close_lock;
	/* PPP ioctl data, not actually used anywhere */
unsigned int flags;
unsigned int rbits;
u32 xaccm[8];
u32 raccm;
int mru;
int shutting_down;
unsigned int ras_control_lines;
struct work_struct work_go_online;
struct work_struct work_go_offline;
};
static void notify_packet_sent(void *callback_data, unsigned int packet_length)
{
struct ipw_network *network = callback_data;
unsigned long flags;
spin_lock_irqsave(&network->lock, flags);
network->outgoing_packets_queued--;
if (network->ppp_channel != NULL) {
if (network->ppp_blocked) {
network->ppp_blocked = 0;
spin_unlock_irqrestore(&network->lock, flags);
ppp_output_wakeup(network->ppp_channel);
if (ipwireless_debug)
printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
": ppp unblocked\n");
} else
spin_unlock_irqrestore(&network->lock, flags);
} else
spin_unlock_irqrestore(&network->lock, flags);
}
/*
* Called by the ppp system when it has a packet to send to the hardware.
*/
static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
struct sk_buff *skb)
{
struct ipw_network *network = ppp_channel->private;
unsigned long flags;
spin_lock_irqsave(&network->lock, flags);
if (network->outgoing_packets_queued < ipwireless_out_queue) {
unsigned char *buf;
static unsigned char header[] = {
PPP_ALLSTATIONS, /* 0xff */
PPP_UI, /* 0x03 */
};
int ret;
network->outgoing_packets_queued++;
spin_unlock_irqrestore(&network->lock, flags);
/*
* If we have the requested amount of headroom in the skb we
* were handed, then we can add the header efficiently.
*/
if (skb_headroom(skb) >= 2) {
memcpy(skb_push(skb, 2), header, 2);
ret = ipwireless_send_packet(network->hardware,
IPW_CHANNEL_RAS, skb->data,
skb->len,
notify_packet_sent,
network);
if (ret < 0) {
skb_pull(skb, 2);
return 0;
}
} else {
/* Otherwise (rarely) we do it inefficiently. */
buf = kmalloc(skb->len + 2, GFP_ATOMIC);
if (!buf)
return 0;
memcpy(buf + 2, skb->data, skb->len);
memcpy(buf, header, 2);
ret = ipwireless_send_packet(network->hardware,
IPW_CHANNEL_RAS, buf,
skb->len + 2,
notify_packet_sent,
network);
kfree(buf);
if (ret < 0)
return 0;
}
kfree_skb(skb);
return 1;
} else {
/*
* Otherwise reject the packet, and flag that the ppp system
* needs to be unblocked once we are ready to send.
*/
network->ppp_blocked = 1;
spin_unlock_irqrestore(&network->lock, flags);
if (ipwireless_debug)
printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": ppp blocked\n");
return 0;
}
}
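/*
 * Editor's note: the return values above follow the ppp_channel contract --
 * 1 means the skb was consumed (and freed), 0 means "try again later". In
 * the blocked case the ppp core keeps the skb and waits for
 * ppp_output_wakeup(), which notify_packet_sent() issues once the hardware
 * queue drains.
 */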
/* Handle an ioctl call that has come in via ppp (a copy of ppp_async_ioctl()). */
static int ipwireless_ppp_ioctl(struct ppp_channel *ppp_channel,
unsigned int cmd, unsigned long arg)
{
struct ipw_network *network = ppp_channel->private;
int err, val;
u32 accm[8];
int __user *user_arg = (int __user *) arg;
err = -EFAULT;
switch (cmd) {
case PPPIOCGFLAGS:
val = network->flags | network->rbits;
if (put_user(val, user_arg))
break;
err = 0;
break;
case PPPIOCSFLAGS:
if (get_user(val, user_arg))
break;
network->flags = val & ~SC_RCV_BITS;
network->rbits = val & SC_RCV_BITS;
err = 0;
break;
case PPPIOCGASYNCMAP:
if (put_user(network->xaccm[0], user_arg))
break;
err = 0;
break;
case PPPIOCSASYNCMAP:
if (get_user(network->xaccm[0], user_arg))
break;
err = 0;
break;
case PPPIOCGRASYNCMAP:
if (put_user(network->raccm, user_arg))
break;
err = 0;
break;
case PPPIOCSRASYNCMAP:
if (get_user(network->raccm, user_arg))
break;
err = 0;
break;
case PPPIOCGXASYNCMAP:
if (copy_to_user((void __user *) arg, network->xaccm,
sizeof(network->xaccm)))
break;
err = 0;
break;
case PPPIOCSXASYNCMAP:
if (copy_from_user(accm, (void __user *) arg, sizeof(accm)))
break;
accm[2] &= ~0x40000000U; /* can't escape 0x5e */
accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
memcpy(network->xaccm, accm, sizeof(network->xaccm));
err = 0;
break;
case PPPIOCGMRU:
if (put_user(network->mru, user_arg))
break;
err = 0;
break;
case PPPIOCSMRU:
if (get_user(val, user_arg))
break;
if (val < PPP_MRU)
val = PPP_MRU;
network->mru = val;
err = 0;
break;
default:
err = -ENOTTY;
}
return err;
}
static const struct ppp_channel_ops ipwireless_ppp_channel_ops = {
.start_xmit = ipwireless_ppp_start_xmit,
.ioctl = ipwireless_ppp_ioctl
};
static void do_go_online(struct work_struct *work_go_online)
{
struct ipw_network *network =
container_of(work_go_online, struct ipw_network,
work_go_online);
unsigned long flags;
spin_lock_irqsave(&network->lock, flags);
if (!network->ppp_channel) {
struct ppp_channel *channel;
spin_unlock_irqrestore(&network->lock, flags);
channel = kzalloc(sizeof(struct ppp_channel), GFP_KERNEL);
if (!channel) {
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
": unable to allocate PPP channel\n");
return;
}
channel->private = network;
channel->mtu = 16384; /* Wild guess */
channel->hdrlen = 2;
channel->ops = &ipwireless_ppp_channel_ops;
network->flags = 0;
network->rbits = 0;
network->mru = PPP_MRU;
memset(network->xaccm, 0, sizeof(network->xaccm));
network->xaccm[0] = ~0U;
network->xaccm[3] = 0x60000000U;
network->raccm = ~0U;
if (ppp_register_channel(channel) < 0) {
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
": unable to register PPP channel\n");
kfree(channel);
return;
}
spin_lock_irqsave(&network->lock, flags);
network->ppp_channel = channel;
}
spin_unlock_irqrestore(&network->lock, flags);
}
static void do_go_offline(struct work_struct *work_go_offline)
{
struct ipw_network *network =
container_of(work_go_offline, struct ipw_network,
work_go_offline);
unsigned long flags;
mutex_lock(&network->close_lock);
spin_lock_irqsave(&network->lock, flags);
if (network->ppp_channel != NULL) {
struct ppp_channel *channel = network->ppp_channel;
network->ppp_channel = NULL;
spin_unlock_irqrestore(&network->lock, flags);
mutex_unlock(&network->close_lock);
ppp_unregister_channel(channel);
} else {
spin_unlock_irqrestore(&network->lock, flags);
mutex_unlock(&network->close_lock);
}
}
void ipwireless_network_notify_control_line_change(struct ipw_network *network,
unsigned int channel_idx,
unsigned int control_lines,
unsigned int changed_mask)
{
int i;
if (channel_idx == IPW_CHANNEL_RAS)
network->ras_control_lines = control_lines;
for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) {
struct ipw_tty *tty =
network->associated_ttys[channel_idx][i];
		/*
		 * Pass the control-line change on to every tty associated
		 * with this channel. (The RAS channel's lines were also
		 * recorded above so the PPP path can consult them.)
		 */
if (tty)
ipwireless_tty_notify_control_line_change(tty,
channel_idx,
control_lines,
changed_mask);
}
}
/*
* Some versions of firmware stuff packets with 0xff 0x03 (PPP: ALLSTATIONS, UI)
 * bytes, which are required on sent packets, but not always present on
 * received ones.
*/
static struct sk_buff *ipw_packet_received_skb(unsigned char *data,
unsigned int length)
{
struct sk_buff *skb;
if (length > 2 && data[0] == PPP_ALLSTATIONS && data[1] == PPP_UI) {
length -= 2;
data += 2;
}
skb = dev_alloc_skb(length + 4);
if (skb == NULL)
return NULL;
skb_reserve(skb, 2);
skb_put_data(skb, data, length);
return skb;
}
void ipwireless_network_packet_received(struct ipw_network *network,
unsigned int channel_idx,
unsigned char *data,
unsigned int length)
{
int i;
unsigned long flags;
for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) {
struct ipw_tty *tty = network->associated_ttys[channel_idx][i];
if (!tty)
continue;
		/*
		 * Data arriving on the RAS channel while we are online goes
		 * to the PPP layer (handled just below); anything else goes
		 * out to the associated tty.
		 */
if (channel_idx == IPW_CHANNEL_RAS
&& (network->ras_control_lines &
IPW_CONTROL_LINE_DCD) != 0
&& ipwireless_tty_is_modem(tty)) {
/*
* If data came in on the RAS channel and this tty is
* the modem tty, and we are online, then we send it to
* the PPP layer.
*/
mutex_lock(&network->close_lock);
spin_lock_irqsave(&network->lock, flags);
if (network->ppp_channel != NULL) {
struct sk_buff *skb;
spin_unlock_irqrestore(&network->lock,
flags);
/* Send the data to the ppp_generic module. */
skb = ipw_packet_received_skb(data, length);
if (skb)
ppp_input(network->ppp_channel, skb);
} else
spin_unlock_irqrestore(&network->lock,
flags);
mutex_unlock(&network->close_lock);
}
/* Otherwise we send it out the tty. */
else
ipwireless_tty_received(tty, data, length);
}
}
struct ipw_network *ipwireless_network_create(struct ipw_hardware *hw)
{
struct ipw_network *network =
kzalloc(sizeof(struct ipw_network), GFP_KERNEL);
if (!network)
return NULL;
spin_lock_init(&network->lock);
mutex_init(&network->close_lock);
network->hardware = hw;
INIT_WORK(&network->work_go_online, do_go_online);
INIT_WORK(&network->work_go_offline, do_go_offline);
ipwireless_associate_network(hw, network);
return network;
}
void ipwireless_network_free(struct ipw_network *network)
{
network->shutting_down = 1;
ipwireless_ppp_close(network);
flush_work(&network->work_go_online);
flush_work(&network->work_go_offline);
ipwireless_stop_interrupts(network->hardware);
ipwireless_associate_network(network->hardware, NULL);
kfree(network);
}
void ipwireless_associate_network_tty(struct ipw_network *network,
unsigned int channel_idx,
struct ipw_tty *tty)
{
int i;
for (i = 0; i < MAX_ASSOCIATED_TTYS; i++)
if (network->associated_ttys[channel_idx][i] == NULL) {
network->associated_ttys[channel_idx][i] = tty;
break;
}
}
void ipwireless_disassociate_network_ttys(struct ipw_network *network,
unsigned int channel_idx)
{
int i;
for (i = 0; i < MAX_ASSOCIATED_TTYS; i++)
network->associated_ttys[channel_idx][i] = NULL;
}
void ipwireless_ppp_open(struct ipw_network *network)
{
if (ipwireless_debug)
printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": online\n");
schedule_work(&network->work_go_online);
}
void ipwireless_ppp_close(struct ipw_network *network)
{
/* Disconnect from the wireless network. */
if (ipwireless_debug)
printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": offline\n");
schedule_work(&network->work_go_offline);
}
int ipwireless_ppp_channel_index(struct ipw_network *network)
{
int ret = -1;
unsigned long flags;
spin_lock_irqsave(&network->lock, flags);
if (network->ppp_channel != NULL)
ret = ppp_channel_index(network->ppp_channel);
spin_unlock_irqrestore(&network->lock, flags);
return ret;
}
int ipwireless_ppp_unit_number(struct ipw_network *network)
{
int ret = -1;
unsigned long flags;
spin_lock_irqsave(&network->lock, flags);
if (network->ppp_channel != NULL)
ret = ppp_unit_number(network->ppp_channel);
spin_unlock_irqrestore(&network->lock, flags);
return ret;
}
int ipwireless_ppp_mru(const struct ipw_network *network)
{
return network->mru;
}
| linux-master | drivers/tty/ipwireless/network.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IPWireless 3G PCMCIA Network Driver
*
* Original code
* by Stephen Blackheath <[email protected]>,
* Ben Martel <[email protected]>
*
* Copyrighted as follows:
* Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
*
* Various driver changes and rewrites, port to new kernels
* Copyright (C) 2006-2007 Jiri Kosina
*
* Misc code cleanups and updates
* Copyright (C) 2007 David Sterba
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "hardware.h"
#include "setup_protocol.h"
#include "network.h"
#include "main.h"
static void ipw_send_setup_packet(struct ipw_hardware *hw);
static void handle_received_SETUP_packet(struct ipw_hardware *ipw,
unsigned int address,
const unsigned char *data, int len,
int is_last);
static void ipwireless_setup_timer(struct timer_list *t);
static void handle_received_CTRL_packet(struct ipw_hardware *hw,
unsigned int channel_idx, const unsigned char *data, int len);
/*#define TIMING_DIAGNOSTICS*/
#ifdef TIMING_DIAGNOSTICS
static struct timing_stats {
	unsigned long last_report_time;
	unsigned long read_time;
	unsigned long write_time;
	unsigned long read_bytes;
	unsigned long write_bytes;
	unsigned long start_time;
} timing_stats;
static void report_timing(void);
static void start_timing(void)
{
timing_stats.start_time = jiffies;
}
static void end_read_timing(unsigned length)
{
	timing_stats.read_time += (jiffies - timing_stats.start_time);
timing_stats.read_bytes += length + 2;
report_timing();
}
static void end_write_timing(unsigned length)
{
	timing_stats.write_time += (jiffies - timing_stats.start_time);
timing_stats.write_bytes += length + 2;
report_timing();
}
static void report_timing(void)
{
unsigned long since = jiffies - timing_stats.last_report_time;
/* If it's been more than one second... */
if (since >= HZ) {
int first = (timing_stats.last_report_time == 0);
timing_stats.last_report_time = jiffies;
if (!first)
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": %u us elapsed - read %lu bytes in %u us, wrote %lu bytes in %u us\n",
jiffies_to_usecs(since),
timing_stats.read_bytes,
jiffies_to_usecs(timing_stats.read_time),
timing_stats.write_bytes,
jiffies_to_usecs(timing_stats.write_time));
timing_stats.read_time = 0;
timing_stats.write_time = 0;
timing_stats.read_bytes = 0;
timing_stats.write_bytes = 0;
}
}
#else
static void start_timing(void) { }
static void end_read_timing(unsigned length) { }
static void end_write_timing(unsigned length) { }
#endif
/* Imported IPW definitions */
#define LL_MTU_V1 318
#define LL_MTU_V2 250
#define LL_MTU_MAX (LL_MTU_V1 > LL_MTU_V2 ? LL_MTU_V1 : LL_MTU_V2)
#define PRIO_DATA 2
#define PRIO_CTRL 1
#define PRIO_SETUP 0
/* Addresses */
#define ADDR_SETUP_PROT 0
/* Protocol ids */
enum {
/* Identifier for the Com Data protocol */
TL_PROTOCOLID_COM_DATA = 0,
/* Identifier for the Com Control protocol */
TL_PROTOCOLID_COM_CTRL = 1,
/* Identifier for the Setup protocol */
TL_PROTOCOLID_SETUP = 2
};
/* Number of bytes in NL packet header (cannot do
* sizeof(nl_packet_header) since it's a bitfield) */
#define NL_FIRST_PACKET_HEADER_SIZE 3
/* Number of bytes in the NL packet header of all packets after the
 * first (cannot do sizeof(nl_packet_header) since it's a bitfield) */
#define NL_FOLLOWING_PACKET_HEADER_SIZE 1
struct nl_first_packet_header {
unsigned char protocol:3;
unsigned char address:3;
unsigned char packet_rank:2;
unsigned char length_lsb;
unsigned char length_msb;
};
struct nl_packet_header {
unsigned char protocol:3;
unsigned char address:3;
unsigned char packet_rank:2;
};
/* Value of 'packet_rank' above */
#define NL_INTERMEDIATE_PACKET 0x0
#define NL_LAST_PACKET 0x1
#define NL_FIRST_PACKET 0x2
union nl_packet {
/* Network packet header of the first packet (a special case) */
struct nl_first_packet_header hdr_first;
/* Network packet header of the following packets (if any) */
struct nl_packet_header hdr;
/* Complete network packet (header + data) */
unsigned char rawpkt[LL_MTU_MAX];
} __attribute__ ((__packed__));
#define HW_VERSION_UNKNOWN -1
#define HW_VERSION_1 1
#define HW_VERSION_2 2
/* IPW I/O ports */
#define IOIER 0x00 /* Interrupt Enable Register */
#define IOIR 0x02 /* Interrupt Source/ACK register */
#define IODCR 0x04 /* Data Control Register */
#define IODRR 0x06 /* Data Read Register */
#define IODWR 0x08 /* Data Write Register */
#define IOESR 0x0A /* Embedded Driver Status Register */
#define IORXR 0x0C /* Rx Fifo Register (Host to Embedded) */
#define IOTXR 0x0E /* Tx Fifo Register (Embedded to Host) */
/* I/O ports and bit definitions for version 1 of the hardware */
/* IER bits*/
#define IER_RXENABLED 0x1
#define IER_TXENABLED 0x2
/* ISR bits */
#define IR_RXINTR 0x1
#define IR_TXINTR 0x2
/* DCR bits */
#define DCR_RXDONE 0x1
#define DCR_TXDONE 0x2
#define DCR_RXRESET 0x4
#define DCR_TXRESET 0x8
/* I/O ports and bit definitions for version 2 of the hardware */
struct MEMCCR {
unsigned short reg_config_option; /* PCCOR: Configuration Option Register */
unsigned short reg_config_and_status; /* PCCSR: Configuration and Status Register */
unsigned short reg_pin_replacement; /* PCPRR: Pin Replacement Register */
unsigned short reg_socket_and_copy; /* PCSCR: Socket and Copy Register */
unsigned short reg_ext_status; /* PCESR: Extended Status Register */
unsigned short reg_io_base; /* PCIOB: I/O Base Register */
};
struct MEMINFREG {
unsigned short memreg_tx_old; /* TX Register (R/W) */
unsigned short pad1;
unsigned short memreg_rx_done; /* RXDone Register (R/W) */
unsigned short pad2;
unsigned short memreg_rx; /* RX Register (R/W) */
unsigned short pad3;
unsigned short memreg_pc_interrupt_ack; /* PC intr Ack Register (W) */
unsigned short pad4;
unsigned long memreg_card_present;/* Mask for Host to check (R) for
* CARD_PRESENT_VALUE */
unsigned short memreg_tx_new; /* TX2 (new) Register (R/W) */
};
#define CARD_PRESENT_VALUE (0xBEEFCAFEUL)
#define MEMTX_TX 0x0001
#define MEMRX_RX 0x0001
#define MEMRX_RX_DONE 0x0001
#define MEMRX_PCINTACKK 0x0001
#define NL_NUM_OF_PRIORITIES 3
#define NL_NUM_OF_PROTOCOLS 3
#define NL_NUM_OF_ADDRESSES NO_OF_IPW_CHANNELS
struct ipw_hardware {
unsigned int base_port;
short hw_version;
unsigned short ll_mtu;
spinlock_t lock;
int initializing;
int init_loops;
struct timer_list setup_timer;
/* Flag if hw is ready to send next packet */
int tx_ready;
/* Count of pending packets to be sent */
int tx_queued;
struct list_head tx_queue[NL_NUM_OF_PRIORITIES];
int rx_bytes_queued;
struct list_head rx_queue;
/* Pool of rx_packet structures that are not currently used. */
struct list_head rx_pool;
int rx_pool_size;
/* True if reception of data is blocked while userspace processes it. */
int blocking_rx;
/* Count of RX transfers pending on the hardware. */
int rx_ready;
unsigned short last_memtx_serial;
/*
* Newer versions of the V2 card firmware send serial numbers in the
* MemTX register. 'serial_number_detected' is set true when we detect
* a non-zero serial number (indicating the new firmware). Thereafter,
* the driver can safely ignore the Timer Recovery re-sends to avoid
* out-of-sync problems.
*/
int serial_number_detected;
struct work_struct work_rx;
/* True if we are to send the set-up data to the hardware. */
int to_setup;
/* Card has been removed */
int removed;
/* Saved irq value when we disable the interrupt. */
int irq;
/* True if this driver is shutting down. */
int shutting_down;
/* Modem control lines */
unsigned int control_lines[NL_NUM_OF_ADDRESSES];
struct ipw_rx_packet *packet_assembler[NL_NUM_OF_ADDRESSES];
struct tasklet_struct tasklet;
/* The handle for the network layer, for the sending of events to it. */
struct ipw_network *network;
struct MEMINFREG __iomem *memory_info_regs;
struct MEMCCR __iomem *memregs_CCR;
void (*reboot_callback) (void *data);
void *reboot_callback_data;
unsigned short __iomem *memreg_tx;
};
/*
* Packet info structure for tx packets.
* Note: not all the fields defined here are required for all protocols
*/
struct ipw_tx_packet {
struct list_head queue;
/* channel idx + 1 */
unsigned char dest_addr;
/* SETUP, CTRL or DATA */
unsigned char protocol;
/* Length of data block, which starts at the end of this structure */
unsigned short length;
/* Sending state */
/* Offset of where we've sent up to so far */
unsigned long offset;
/* Count of packet fragments, starting at 0 */
int fragment_count;
/* Called after the packet is sent and before it is freed */
void (*packet_callback) (void *cb_data, unsigned int packet_length);
void *callback_data;
};
/* Signals from DTE */
#define COMCTRL_RTS 0
#define COMCTRL_DTR 1
/* Signals from DCE */
#define COMCTRL_CTS 2
#define COMCTRL_DCD 3
#define COMCTRL_DSR 4
#define COMCTRL_RI 5
struct ipw_control_packet_body {
/* DTE signal or DCE signal */
unsigned char sig_no;
/* 0: clear signal, 1: set signal */
unsigned char value;
} __attribute__ ((__packed__));
struct ipw_control_packet {
struct ipw_tx_packet header;
struct ipw_control_packet_body body;
};
struct ipw_rx_packet {
struct list_head queue;
unsigned int capacity;
unsigned int length;
unsigned int protocol;
unsigned int channel_idx;
};
static char *data_type(const unsigned char *buf, unsigned length)
{
struct nl_packet_header *hdr = (struct nl_packet_header *) buf;
if (length == 0)
return " ";
if (hdr->packet_rank & NL_FIRST_PACKET) {
switch (hdr->protocol) {
case TL_PROTOCOLID_COM_DATA: return "DATA ";
case TL_PROTOCOLID_COM_CTRL: return "CTRL ";
case TL_PROTOCOLID_SETUP: return "SETUP";
default: return "???? ";
}
} else
return " ";
}
#define DUMP_MAX_BYTES 64
static void dump_data_bytes(const char *type, const unsigned char *data,
unsigned length)
{
char prefix[56];
sprintf(prefix, IPWIRELESS_PCCARD_NAME ": %s %s ",
type, data_type(data, length));
print_hex_dump_bytes(prefix, 0, (void *)data,
length < DUMP_MAX_BYTES ? length : DUMP_MAX_BYTES);
}
static void swap_packet_bitfield_to_le(unsigned char *data)
{
#ifdef __BIG_ENDIAN_BITFIELD
unsigned char tmp = *data, ret = 0;
/*
* transform bits from aa.bbb.ccc to ccc.bbb.aa
*/
ret |= (tmp & 0xc0) >> 6;
ret |= (tmp & 0x38) >> 1;
ret |= (tmp & 0x07) << 5;
*data = ret & 0xff;
#endif
}
static void swap_packet_bitfield_from_le(unsigned char *data)
{
#ifdef __BIG_ENDIAN_BITFIELD
unsigned char tmp = *data, ret = 0;
/*
* transform bits from ccc.bbb.aa to aa.bbb.ccc
*/
ret |= (tmp & 0xe0) >> 5;
ret |= (tmp & 0x1c) << 1;
ret |= (tmp & 0x03) << 6;
*data = ret & 0xff;
#endif
}
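/*
 * Write one link-layer fragment to the hardware: a 16-bit length word
 * followed by the payload as little-endian 16-bit words. On V2 cards
 * the transfer is padded with filler words and the embedded side is
 * then signalled through memreg_rx.
 */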
static void do_send_fragment(struct ipw_hardware *hw, unsigned char *data,
unsigned length)
{
unsigned i;
unsigned long flags;
start_timing();
BUG_ON(length > hw->ll_mtu);
if (ipwireless_debug)
dump_data_bytes("send", data, length);
spin_lock_irqsave(&hw->lock, flags);
hw->tx_ready = 0;
swap_packet_bitfield_to_le(data);
if (hw->hw_version == HW_VERSION_1) {
outw((unsigned short) length, hw->base_port + IODWR);
for (i = 0; i < length; i += 2) {
unsigned short d = data[i];
__le16 raw_data;
if (i + 1 < length)
d |= data[i + 1] << 8;
raw_data = cpu_to_le16(d);
outw(raw_data, hw->base_port + IODWR);
}
outw(DCR_TXDONE, hw->base_port + IODCR);
} else if (hw->hw_version == HW_VERSION_2) {
outw((unsigned short) length, hw->base_port);
for (i = 0; i < length; i += 2) {
unsigned short d = data[i];
__le16 raw_data;
if (i + 1 < length)
d |= data[i + 1] << 8;
raw_data = cpu_to_le16(d);
outw(raw_data, hw->base_port);
}
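/*
 * Pad the transfer (length word included) out to a 32-bit boundary
 * with 0xDEAD filler words.
 */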
while ((i & 3) != 2) {
outw((unsigned short) 0xDEAD, hw->base_port);
i += 2;
}
writew(MEMRX_RX, &hw->memory_info_regs->memreg_rx);
}
spin_unlock_irqrestore(&hw->lock, flags);
end_write_timing(length);
}
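/*
 * Send the next fragment of 'packet'. The first fragment carries a
 * 3-byte header including the total length; follow-on fragments carry
 * a 1-byte header. If unsent data remains, the packet is re-queued at
 * the head of the highest priority queue; otherwise its completion
 * callback (if any) is run and the packet is freed.
 */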
static void do_send_packet(struct ipw_hardware *hw, struct ipw_tx_packet *packet)
{
unsigned short fragment_data_len;
unsigned short data_left = packet->length - packet->offset;
unsigned short header_size;
union nl_packet pkt;
header_size =
(packet->fragment_count == 0)
? NL_FIRST_PACKET_HEADER_SIZE
: NL_FOLLOWING_PACKET_HEADER_SIZE;
fragment_data_len = hw->ll_mtu - header_size;
if (data_left < fragment_data_len)
fragment_data_len = data_left;
/*
* hdr_first is now in machine bitfield order, which will be swapped
* to le just before it goes to hw
*/
pkt.hdr_first.protocol = packet->protocol;
pkt.hdr_first.address = packet->dest_addr;
pkt.hdr_first.packet_rank = 0;
/* First packet? */
if (packet->fragment_count == 0) {
pkt.hdr_first.packet_rank |= NL_FIRST_PACKET;
pkt.hdr_first.length_lsb = (unsigned char) packet->length;
pkt.hdr_first.length_msb =
(unsigned char) (packet->length >> 8);
}
memcpy(pkt.rawpkt + header_size,
((unsigned char *) packet) + sizeof(struct ipw_tx_packet) +
packet->offset, fragment_data_len);
packet->offset += fragment_data_len;
packet->fragment_count++;
/* Last packet? (May also be first packet.) */
if (packet->offset == packet->length)
pkt.hdr_first.packet_rank |= NL_LAST_PACKET;
do_send_fragment(hw, pkt.rawpkt, header_size + fragment_data_len);
/* If this packet has unsent data, then re-queue it. */
if (packet->offset < packet->length) {
/*
* Re-queue it at the head of the highest priority queue so
* it goes before all other packets
*/
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
list_add(&packet->queue, &hw->tx_queue[0]);
hw->tx_queued++;
spin_unlock_irqrestore(&hw->lock, flags);
} else {
if (packet->packet_callback)
packet->packet_callback(packet->callback_data,
packet->length);
kfree(packet);
}
}
static void ipw_setup_hardware(struct ipw_hardware *hw)
{
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
if (hw->hw_version == HW_VERSION_1) {
/* Reset RX FIFO */
outw(DCR_RXRESET, hw->base_port + IODCR);
/* SB: Reset TX FIFO */
outw(DCR_TXRESET, hw->base_port + IODCR);
/* Enable TX and RX interrupts. */
outw(IER_TXENABLED | IER_RXENABLED, hw->base_port + IOIER);
} else {
/*
* Set INTRACK bit (bit 0), which means we must explicitly
* acknowledge interrupts by clearing bit 2 of reg_config_and_status.
*/
unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status);
csr |= 1;
writew(csr, &hw->memregs_CCR->reg_config_and_status);
}
spin_unlock_irqrestore(&hw->lock, flags);
}
/*
* If 'packet' is NULL, then this function allocates a new packet, setting its
* length to 0 and ensuring it has the specified minimum amount of free space.
*
* If 'packet' is not NULL, then this function enlarges it if it doesn't
* have the specified minimum amount of free space.
*/
static struct ipw_rx_packet *pool_allocate(struct ipw_hardware *hw,
struct ipw_rx_packet *packet,
int minimum_free_space)
{
if (!packet) {
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
if (!list_empty(&hw->rx_pool)) {
packet = list_first_entry(&hw->rx_pool,
struct ipw_rx_packet, queue);
hw->rx_pool_size--;
spin_unlock_irqrestore(&hw->lock, flags);
list_del(&packet->queue);
} else {
const int min_capacity =
ipwireless_ppp_mru(hw->network) + 2;
int new_capacity;
spin_unlock_irqrestore(&hw->lock, flags);
new_capacity =
(minimum_free_space > min_capacity
? minimum_free_space
: min_capacity);
packet = kmalloc(sizeof(struct ipw_rx_packet)
+ new_capacity, GFP_ATOMIC);
if (!packet)
return NULL;
packet->capacity = new_capacity;
}
packet->length = 0;
}
if (packet->length + minimum_free_space > packet->capacity) {
struct ipw_rx_packet *old_packet = packet;
packet = kmalloc(sizeof(struct ipw_rx_packet) +
old_packet->length + minimum_free_space,
GFP_ATOMIC);
if (!packet) {
kfree(old_packet);
return NULL;
}
memcpy(packet, old_packet,
sizeof(struct ipw_rx_packet)
+ old_packet->length);
packet->capacity = old_packet->length + minimum_free_space;
kfree(old_packet);
}
return packet;
}
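/* Return a packet to the rx pool, or free it if the pool is full. */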
static void pool_free(struct ipw_hardware *hw, struct ipw_rx_packet *packet)
{
if (hw->rx_pool_size > 6)
kfree(packet);
else {
hw->rx_pool_size++;
list_add(&packet->queue, &hw->rx_pool);
}
}
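/*
 * Queue one received fragment. DATA fragments are accumulated in the
 * per-channel packet assembler until the last fragment arrives; CTRL
 * packets are queued directly. Completed packets are handed to the rx
 * work item, and reception is throttled once IPWIRELESS_RX_QUEUE_SIZE
 * bytes are queued.
 */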
static void queue_received_packet(struct ipw_hardware *hw,
unsigned int protocol,
unsigned int address,
const unsigned char *data, int length,
int is_last)
{
unsigned int channel_idx = address - 1;
struct ipw_rx_packet *packet = NULL;
unsigned long flags;
/* Discard packet if channel index is out of range. */
if (channel_idx >= NL_NUM_OF_ADDRESSES) {
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": data packet has bad address %u\n", address);
return;
}
/*
 * ->packet_assembler is safe to touch unlocked; this is the only
 * place it is accessed.
 */
if (protocol == TL_PROTOCOLID_COM_DATA) {
struct ipw_rx_packet **assem =
&hw->packet_assembler[channel_idx];
/*
 * Create a new packet, or if the assembler already contains one,
 * enlarge it by 'length' bytes.
 */
(*assem) = pool_allocate(hw, *assem, length);
if (!(*assem)) {
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
": no memory for incoming data packet, dropped!\n");
return;
}
(*assem)->protocol = protocol;
(*assem)->channel_idx = channel_idx;
/* Append this packet data onto existing data. */
memcpy((unsigned char *)(*assem) +
sizeof(struct ipw_rx_packet)
+ (*assem)->length, data, length);
(*assem)->length += length;
if (is_last) {
packet = *assem;
*assem = NULL;
/* Count queued DATA bytes only */
spin_lock_irqsave(&hw->lock, flags);
hw->rx_bytes_queued += packet->length;
spin_unlock_irqrestore(&hw->lock, flags);
}
} else {
/* If it's a CTRL packet, don't assemble, just queue it. */
packet = pool_allocate(hw, NULL, length);
if (!packet) {
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
": no memory for incoming ctrl packet, dropped!\n");
return;
}
packet->protocol = protocol;
packet->channel_idx = channel_idx;
memcpy((unsigned char *)packet + sizeof(struct ipw_rx_packet),
data, length);
packet->length = length;
}
/*
* If this is the last packet, then send the assembled packet on to the
* network layer.
*/
if (packet) {
spin_lock_irqsave(&hw->lock, flags);
list_add_tail(&packet->queue, &hw->rx_queue);
/* Block reception of incoming packets if queue is full. */
hw->blocking_rx =
(hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE);
spin_unlock_irqrestore(&hw->lock, flags);
schedule_work(&hw->work_rx);
}
}
/*
* Workqueue callback
*/
static void ipw_receive_data_work(struct work_struct *work_rx)
{
struct ipw_hardware *hw =
container_of(work_rx, struct ipw_hardware, work_rx);
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
while (!list_empty(&hw->rx_queue)) {
struct ipw_rx_packet *packet =
list_first_entry(&hw->rx_queue,
struct ipw_rx_packet, queue);
if (hw->shutting_down)
break;
list_del(&packet->queue);
/*
* Note: ipwireless_network_packet_received must be called in a
* process context (i.e. via schedule_work) because the tty
* output code can sleep in the tty_flip_buffer_push call.
*/
if (packet->protocol == TL_PROTOCOLID_COM_DATA) {
if (hw->network != NULL) {
/* If the network hasn't been disconnected. */
spin_unlock_irqrestore(&hw->lock, flags);
/*
* This must run unlocked due to tty processing
* and mutex locking
*/
ipwireless_network_packet_received(
hw->network,
packet->channel_idx,
(unsigned char *)packet
+ sizeof(struct ipw_rx_packet),
packet->length);
spin_lock_irqsave(&hw->lock, flags);
}
/* Count queued DATA bytes only */
hw->rx_bytes_queued -= packet->length;
} else {
/*
* This is safe to be called locked, callchain does
* not block
*/
handle_received_CTRL_packet(hw, packet->channel_idx,
(unsigned char *)packet
+ sizeof(struct ipw_rx_packet),
packet->length);
}
pool_free(hw, packet);
/*
* Unblock reception of incoming packets if queue is no longer
* full.
*/
hw->blocking_rx =
hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE;
if (hw->shutting_down)
break;
}
spin_unlock_irqrestore(&hw->lock, flags);
}
static void handle_received_CTRL_packet(struct ipw_hardware *hw,
unsigned int channel_idx,
const unsigned char *data, int len)
{
const struct ipw_control_packet_body *body =
(const struct ipw_control_packet_body *) data;
unsigned int changed_mask;
if (len != sizeof(struct ipw_control_packet_body)) {
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": control packet was %d bytes - wrong size!\n",
len);
return;
}
switch (body->sig_no) {
case COMCTRL_CTS:
changed_mask = IPW_CONTROL_LINE_CTS;
break;
case COMCTRL_DCD:
changed_mask = IPW_CONTROL_LINE_DCD;
break;
case COMCTRL_DSR:
changed_mask = IPW_CONTROL_LINE_DSR;
break;
case COMCTRL_RI:
changed_mask = IPW_CONTROL_LINE_RI;
break;
default:
changed_mask = 0;
}
if (changed_mask != 0) {
if (body->value)
hw->control_lines[channel_idx] |= changed_mask;
else
hw->control_lines[channel_idx] &= ~changed_mask;
if (hw->network)
ipwireless_network_notify_control_line_change(
hw->network,
channel_idx,
hw->control_lines[channel_idx],
changed_mask);
}
}
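/*
 * Parse the NL header of a raw packet read from the hardware and
 * dispatch it according to its protocol id.
 */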
static void handle_received_packet(struct ipw_hardware *hw,
const union nl_packet *packet,
unsigned short len)
{
unsigned int protocol = packet->hdr.protocol;
unsigned int address = packet->hdr.address;
unsigned int header_length;
const unsigned char *data;
unsigned int data_len;
int is_last = packet->hdr.packet_rank & NL_LAST_PACKET;
if (packet->hdr.packet_rank & NL_FIRST_PACKET)
header_length = NL_FIRST_PACKET_HEADER_SIZE;
else
header_length = NL_FOLLOWING_PACKET_HEADER_SIZE;
data = packet->rawpkt + header_length;
data_len = len - header_length;
switch (protocol) {
case TL_PROTOCOLID_COM_DATA:
case TL_PROTOCOLID_COM_CTRL:
queue_received_packet(hw, protocol, address, data, data_len,
is_last);
break;
case TL_PROTOCOLID_SETUP:
handle_received_SETUP_packet(hw, address, data, data_len,
is_last);
break;
}
}
static void acknowledge_data_read(struct ipw_hardware *hw)
{
if (hw->hw_version == HW_VERSION_1)
outw(DCR_RXDONE, hw->base_port + IODCR);
else
writew(MEMRX_PCINTACKK,
&hw->memory_info_regs->memreg_pc_interrupt_ack);
}
/*
* Retrieve a packet from the IPW hardware.
*/
static void do_receive_packet(struct ipw_hardware *hw)
{
unsigned len;
unsigned i;
unsigned char pkt[LL_MTU_MAX];
start_timing();
if (hw->hw_version == HW_VERSION_1) {
len = inw(hw->base_port + IODRR);
if (len > hw->ll_mtu) {
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": received a packet of %u bytes - longer than the MTU!\n", len);
outw(DCR_RXDONE | DCR_RXRESET, hw->base_port + IODCR);
return;
}
for (i = 0; i < len; i += 2) {
__le16 raw_data = inw(hw->base_port + IODRR);
unsigned short data = le16_to_cpu(raw_data);
pkt[i] = (unsigned char) data;
pkt[i + 1] = (unsigned char) (data >> 8);
}
} else {
len = inw(hw->base_port);
if (len > hw->ll_mtu) {
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": received a packet of %u bytes - longer than the MTU!\n", len);
writew(MEMRX_PCINTACKK,
&hw->memory_info_regs->memreg_pc_interrupt_ack);
return;
}
for (i = 0; i < len; i += 2) {
__le16 raw_data = inw(hw->base_port);
unsigned short data = le16_to_cpu(raw_data);
pkt[i] = (unsigned char) data;
pkt[i + 1] = (unsigned char) (data >> 8);
}
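/* Discard the filler words that pad out the transfer. */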
while ((i & 3) != 2) {
inw(hw->base_port);
i += 2;
}
}
acknowledge_data_read(hw);
swap_packet_bitfield_from_le(pkt);
if (ipwireless_debug)
dump_data_bytes("recv", pkt, len);
handle_received_packet(hw, (union nl_packet *) pkt, len);
end_read_timing(len);
}
static int get_current_packet_priority(struct ipw_hardware *hw)
{
/*
* If we're initializing, don't send anything of higher priority than
* PRIO_SETUP. The network layer therefore need not care about
* hardware initialization - any of its stuff will simply be queued
* until setup is complete.
*/
return (hw->to_setup || hw->initializing
? PRIO_SETUP + 1 : NL_NUM_OF_PRIORITIES);
}
/*
* return 1 if something has been received from hw
*/
static int get_packets_from_hw(struct ipw_hardware *hw)
{
int received = 0;
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
while (hw->rx_ready && !hw->blocking_rx) {
received = 1;
hw->rx_ready--;
spin_unlock_irqrestore(&hw->lock, flags);
do_receive_packet(hw);
spin_lock_irqsave(&hw->lock, flags);
}
spin_unlock_irqrestore(&hw->lock, flags);
return received;
}
/*
 * Send one pending packet whose priority is below the given limit;
 * SETUP data is prioritized until the hardware is fully set up.
 *
 * return 1 if more packets can be sent
 */
static int send_pending_packet(struct ipw_hardware *hw, int priority_limit)
{
int more_to_send = 0;
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
if (hw->tx_queued && hw->tx_ready) {
int priority;
struct ipw_tx_packet *packet = NULL;
/* Pick a packet */
for (priority = 0; priority < priority_limit; priority++) {
if (!list_empty(&hw->tx_queue[priority])) {
packet = list_first_entry(
&hw->tx_queue[priority],
struct ipw_tx_packet,
queue);
hw->tx_queued--;
list_del(&packet->queue);
break;
}
}
if (!packet) {
hw->tx_queued = 0;
spin_unlock_irqrestore(&hw->lock, flags);
return 0;
}
spin_unlock_irqrestore(&hw->lock, flags);
/* Send */
do_send_packet(hw, packet);
/* Check if more to send */
spin_lock_irqsave(&hw->lock, flags);
for (priority = 0; priority < priority_limit; priority++)
if (!list_empty(&hw->tx_queue[priority])) {
more_to_send = 1;
break;
}
if (!more_to_send)
hw->tx_queued = 0;
}
spin_unlock_irqrestore(&hw->lock, flags);
return more_to_send;
}
/*
* Send and receive all queued packets.
*/
static void ipwireless_do_tasklet(struct tasklet_struct *t)
{
struct ipw_hardware *hw = from_tasklet(hw, t, tasklet);
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
if (hw->shutting_down) {
spin_unlock_irqrestore(&hw->lock, flags);
return;
}
if (hw->to_setup == 1) {
/*
* Initial setup data sent to hardware
*/
hw->to_setup = 2;
spin_unlock_irqrestore(&hw->lock, flags);
ipw_setup_hardware(hw);
ipw_send_setup_packet(hw);
send_pending_packet(hw, PRIO_SETUP + 1);
get_packets_from_hw(hw);
} else {
int priority_limit = get_current_packet_priority(hw);
int again;
spin_unlock_irqrestore(&hw->lock, flags);
do {
again = send_pending_packet(hw, priority_limit);
again |= get_packets_from_hw(hw);
} while (again);
}
}
/*
* return true if the card is physically present.
*/
static int is_card_present(struct ipw_hardware *hw)
{
if (hw->hw_version == HW_VERSION_1)
return inw(hw->base_port + IOIR) != 0xFFFF;
else
return readl(&hw->memory_info_regs->memreg_card_present) ==
CARD_PRESENT_VALUE;
}
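/*
 * V1 interrupt handler: read the interrupt source register, mark tx/rx
 * readiness, acknowledge the handled bits and kick the tasklet. A read
 * of 0xFFFF means the card is no longer present.
 */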
static irqreturn_t ipwireless_handle_v1_interrupt(int irq,
struct ipw_hardware *hw)
{
unsigned short irqn;
irqn = inw(hw->base_port + IOIR);
/* Check if card is present */
if (irqn == 0xFFFF)
return IRQ_NONE;
else if (irqn != 0) {
unsigned short ack = 0;
unsigned long flags;
/* Transmit complete. */
if (irqn & IR_TXINTR) {
ack |= IR_TXINTR;
spin_lock_irqsave(&hw->lock, flags);
hw->tx_ready = 1;
spin_unlock_irqrestore(&hw->lock, flags);
}
/* Received data */
if (irqn & IR_RXINTR) {
ack |= IR_RXINTR;
spin_lock_irqsave(&hw->lock, flags);
hw->rx_ready++;
spin_unlock_irqrestore(&hw->lock, flags);
}
if (ack != 0) {
outw(ack, hw->base_port + IOIR);
tasklet_schedule(&hw->tasklet);
}
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static void acknowledge_pcmcia_interrupt(struct ipw_hardware *hw)
{
unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status);
csr &= 0xfffd;
writew(csr, &hw->memregs_CCR->reg_config_and_status);
}
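/*
 * V2/V3 interrupt handler. Interrupt state is kept in memory-mapped
 * registers. Newer firmware tags memreg_tx with a serial number, which
 * lets 'Timer Recovery' re-sends be detected and ignored; if no valid
 * activity is seen through memreg_tx_new, the handler falls back to
 * the older memreg_tx_old register.
 */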
static irqreturn_t ipwireless_handle_v2_v3_interrupt(int irq,
struct ipw_hardware *hw)
{
int tx = 0;
int rx = 0;
int rx_repeat = 0;
int try_mem_tx_old;
unsigned long flags;
do {
unsigned short memtx = readw(hw->memreg_tx);
unsigned short memtx_serial;
unsigned short memrxdone =
readw(&hw->memory_info_regs->memreg_rx_done);
try_mem_tx_old = 0;
/* Check whether the interrupt was generated by the ipwireless card. */
if (!(memtx & MEMTX_TX) && !(memrxdone & MEMRX_RX_DONE)) {
/* check if the card uses memreg_tx_old register */
if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
memtx = readw(&hw->memory_info_regs->memreg_tx_old);
if (memtx & MEMTX_TX) {
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": Using memreg_tx_old\n");
hw->memreg_tx =
&hw->memory_info_regs->memreg_tx_old;
} else {
return IRQ_NONE;
}
} else
return IRQ_NONE;
}
/*
* See if the card is physically present. Note that while it is
* powering up, it appears not to be present.
*/
if (!is_card_present(hw)) {
acknowledge_pcmcia_interrupt(hw);
return IRQ_HANDLED;
}
memtx_serial = memtx & (unsigned short) 0xff00;
if (memtx & MEMTX_TX) {
writew(memtx_serial, hw->memreg_tx);
if (hw->serial_number_detected) {
if (memtx_serial != hw->last_memtx_serial) {
hw->last_memtx_serial = memtx_serial;
spin_lock_irqsave(&hw->lock, flags);
hw->rx_ready++;
spin_unlock_irqrestore(&hw->lock, flags);
rx = 1;
} else
/* Ignore 'Timer Recovery' duplicates. */
rx_repeat = 1;
} else {
/*
* If a non-zero serial number is seen, then enable
* serial number checking.
*/
if (memtx_serial != 0) {
hw->serial_number_detected = 1;
printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
": memreg_tx serial num detected\n");
spin_lock_irqsave(&hw->lock, flags);
hw->rx_ready++;
spin_unlock_irqrestore(&hw->lock, flags);
}
rx = 1;
}
}
if (memrxdone & MEMRX_RX_DONE) {
writew(0, &hw->memory_info_regs->memreg_rx_done);
spin_lock_irqsave(&hw->lock, flags);
hw->tx_ready = 1;
spin_unlock_irqrestore(&hw->lock, flags);
tx = 1;
}
if (tx)
writew(MEMRX_PCINTACKK,
&hw->memory_info_regs->memreg_pc_interrupt_ack);
acknowledge_pcmcia_interrupt(hw);
if (tx || rx)
tasklet_schedule(&hw->tasklet);
else if (!rx_repeat) {
if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
if (hw->serial_number_detected)
printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
": spurious interrupt - new_tx mode\n");
else {
printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
": no valid memreg_tx value - switching to the old memreg_tx\n");
hw->memreg_tx =
&hw->memory_info_regs->memreg_tx_old;
try_mem_tx_old = 1;
}
} else
printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
": spurious interrupt - old_tx mode\n");
}
} while (try_mem_tx_old == 1);
return IRQ_HANDLED;
}
irqreturn_t ipwireless_interrupt(int irq, void *dev_id)
{
struct ipw_dev *ipw = dev_id;
if (ipw->hardware->hw_version == HW_VERSION_1)
return ipwireless_handle_v1_interrupt(irq, ipw->hardware);
else
return ipwireless_handle_v2_v3_interrupt(irq, ipw->hardware);
}
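/* Drain the tx queues as far as the current priority limit allows. */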
static void flush_packets_to_hw(struct ipw_hardware *hw)
{
int priority_limit;
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
priority_limit = get_current_packet_priority(hw);
spin_unlock_irqrestore(&hw->lock, flags);
while (send_pending_packet(hw, priority_limit));
}
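/* Queue a packet at the given priority and try to push it to the hardware. */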
static void send_packet(struct ipw_hardware *hw, int priority,
struct ipw_tx_packet *packet)
{
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
list_add_tail(&packet->queue, &hw->tx_queue[priority]);
hw->tx_queued++;
spin_unlock_irqrestore(&hw->lock, flags);
flush_packets_to_hw(hw);
}
/* Create data packet, non-atomic allocation */
static void *alloc_data_packet(int data_size,
unsigned char dest_addr,
unsigned char protocol)
{
struct ipw_tx_packet *packet = kzalloc(
sizeof(struct ipw_tx_packet) + data_size,
GFP_ATOMIC);
if (!packet)
return NULL;
INIT_LIST_HEAD(&packet->queue);
packet->dest_addr = dest_addr;
packet->protocol = protocol;
packet->length = data_size;
return packet;
}
static void *alloc_ctrl_packet(int header_size,
unsigned char dest_addr,
unsigned char protocol,
unsigned char sig_no)
{
/*
* sig_no is located right after ipw_tx_packet struct in every
* CTRL or SETUP packets, we can use ipw_control_packet as a
* common struct
*/
struct ipw_control_packet *packet = kzalloc(header_size, GFP_ATOMIC);
if (!packet)
return NULL;
INIT_LIST_HEAD(&packet->header.queue);
packet->header.dest_addr = dest_addr;
packet->header.protocol = protocol;
packet->header.length = header_size - sizeof(struct ipw_tx_packet);
packet->body.sig_no = sig_no;
return packet;
}
int ipwireless_send_packet(struct ipw_hardware *hw, unsigned int channel_idx,
const u8 *data, unsigned int length,
void (*callback) (void *cb, unsigned int length),
void *callback_data)
{
struct ipw_tx_packet *packet;
packet = alloc_data_packet(length, (channel_idx + 1),
TL_PROTOCOLID_COM_DATA);
if (!packet)
return -ENOMEM;
packet->packet_callback = callback;
packet->callback_data = callback_data;
memcpy((unsigned char *) packet + sizeof(struct ipw_tx_packet), data,
length);
send_packet(hw, PRIO_DATA, packet);
return 0;
}
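/*
 * Send a modem control line change to the card. During setup these are
 * sent over the SETUP protocol, afterwards over the CTRL protocol.
 */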
static int set_control_line(struct ipw_hardware *hw, int prio,
unsigned int channel_idx, int line, int state)
{
struct ipw_control_packet *packet;
int protocolid = TL_PROTOCOLID_COM_CTRL;
if (prio == PRIO_SETUP)
protocolid = TL_PROTOCOLID_SETUP;
packet = alloc_ctrl_packet(sizeof(struct ipw_control_packet),
(channel_idx + 1), protocolid, line);
if (!packet)
return -ENOMEM;
packet->header.length = sizeof(struct ipw_control_packet_body);
packet->body.value = (state == 0 ? 0 : 1);
send_packet(hw, prio, &packet->header);
return 0;
}
static int set_DTR(struct ipw_hardware *hw, int priority,
unsigned int channel_idx, int state)
{
if (state != 0)
hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_DTR;
else
hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_DTR;
return set_control_line(hw, priority, channel_idx, COMCTRL_DTR, state);
}
static int set_RTS(struct ipw_hardware *hw, int priority,
unsigned int channel_idx, int state)
{
if (state != 0)
hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_RTS;
else
hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_RTS;
return set_control_line(hw, priority, channel_idx, COMCTRL_RTS, state);
}
int ipwireless_set_DTR(struct ipw_hardware *hw, unsigned int channel_idx,
int state)
{
return set_DTR(hw, PRIO_CTRL, channel_idx, state);
}
int ipwireless_set_RTS(struct ipw_hardware *hw, unsigned int channel_idx,
int state)
{
return set_RTS(hw, PRIO_CTRL, channel_idx, state);
}
struct ipw_setup_get_version_query_packet {
struct ipw_tx_packet header;
struct tl_setup_get_version_qry body;
};
struct ipw_setup_config_packet {
struct ipw_tx_packet header;
struct tl_setup_config_msg body;
};
struct ipw_setup_config_done_packet {
struct ipw_tx_packet header;
struct tl_setup_config_done_msg body;
};
struct ipw_setup_open_packet {
struct ipw_tx_packet header;
struct tl_setup_open_msg body;
};
struct ipw_setup_info_packet {
struct ipw_tx_packet header;
struct tl_setup_info_msg body;
};
struct ipw_setup_reboot_msg_ack {
struct ipw_tx_packet header;
struct TlSetupRebootMsgAck body;
};
/* This handles the actual initialization of the card */
static void __handle_setup_get_version_rsp(struct ipw_hardware *hw)
{
struct ipw_setup_config_packet *config_packet;
struct ipw_setup_config_done_packet *config_done_packet;
struct ipw_setup_open_packet *open_packet;
struct ipw_setup_info_packet *info_packet;
int port;
unsigned int channel_idx;
/* generate config packet */
for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) {
config_packet = alloc_ctrl_packet(
sizeof(struct ipw_setup_config_packet),
ADDR_SETUP_PROT,
TL_PROTOCOLID_SETUP,
TL_SETUP_SIGNO_CONFIG_MSG);
if (!config_packet)
goto exit_nomem;
config_packet->header.length = sizeof(struct tl_setup_config_msg);
config_packet->body.port_no = port;
config_packet->body.prio_data = PRIO_DATA;
config_packet->body.prio_ctrl = PRIO_CTRL;
send_packet(hw, PRIO_SETUP, &config_packet->header);
}
config_done_packet = alloc_ctrl_packet(
sizeof(struct ipw_setup_config_done_packet),
ADDR_SETUP_PROT,
TL_PROTOCOLID_SETUP,
TL_SETUP_SIGNO_CONFIG_DONE_MSG);
if (!config_done_packet)
goto exit_nomem;
config_done_packet->header.length = sizeof(struct tl_setup_config_done_msg);
send_packet(hw, PRIO_SETUP, &config_done_packet->header);
/* generate open packet */
for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) {
open_packet = alloc_ctrl_packet(
sizeof(struct ipw_setup_open_packet),
ADDR_SETUP_PROT,
TL_PROTOCOLID_SETUP,
TL_SETUP_SIGNO_OPEN_MSG);
if (!open_packet)
goto exit_nomem;
open_packet->header.length = sizeof(struct tl_setup_open_msg);
open_packet->body.port_no = port;
send_packet(hw, PRIO_SETUP, &open_packet->header);
}
for (channel_idx = 0;
channel_idx < NL_NUM_OF_ADDRESSES; channel_idx++) {
int ret;
ret = set_DTR(hw, PRIO_SETUP, channel_idx,
(hw->control_lines[channel_idx] &
IPW_CONTROL_LINE_DTR) != 0);
if (ret) {
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
": error setting DTR (%d)\n", ret);
return;
}
ret = set_RTS(hw, PRIO_SETUP, channel_idx,
(hw->control_lines[channel_idx] &
IPW_CONTROL_LINE_RTS) != 0);
if (ret) {
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
": error setting RTS (%d)\n", ret);
return;
}
}
/*
* For NDIS we assume that we are using sync PPP frames, for COM async.
* This driver uses NDIS mode too. We don't bother with translation
* from async -> sync PPP.
*/
info_packet = alloc_ctrl_packet(sizeof(struct ipw_setup_info_packet),
ADDR_SETUP_PROT,
TL_PROTOCOLID_SETUP,
TL_SETUP_SIGNO_INFO_MSG);
if (!info_packet)
goto exit_nomem;
info_packet->header.length = sizeof(struct tl_setup_info_msg);
info_packet->body.driver_type = NDISWAN_DRIVER;
info_packet->body.major_version = NDISWAN_DRIVER_MAJOR_VERSION;
info_packet->body.minor_version = NDISWAN_DRIVER_MINOR_VERSION;
send_packet(hw, PRIO_SETUP, &info_packet->header);
/* Initialization is now complete, so we clear the 'to_setup' flag */
hw->to_setup = 0;
return;
exit_nomem:
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
": not enough memory to alloc control packet\n");
hw->to_setup = -1;
}
static void handle_setup_get_version_rsp(struct ipw_hardware *hw,
unsigned char vers_no)
{
del_timer(&hw->setup_timer);
hw->initializing = 0;
printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": card is ready.\n");
if (vers_no == TL_SETUP_VERSION)
__handle_setup_get_version_rsp(hw);
else
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
": invalid hardware version no %u\n",
(unsigned int) vers_no);
}
static void ipw_send_setup_packet(struct ipw_hardware *hw)
{
struct ipw_setup_get_version_query_packet *ver_packet;
ver_packet = alloc_ctrl_packet(
sizeof(struct ipw_setup_get_version_query_packet),
ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
TL_SETUP_SIGNO_GET_VERSION_QRY);
if (!ver_packet)
return;
ver_packet->header.length = sizeof(struct tl_setup_get_version_qry);
/*
* Response is handled in handle_received_SETUP_packet
*/
send_packet(hw, PRIO_SETUP, &ver_packet->header);
}
static void handle_received_SETUP_packet(struct ipw_hardware *hw,
unsigned int address,
const unsigned char *data, int len,
int is_last)
{
const union ipw_setup_rx_msg *rx_msg = (const union ipw_setup_rx_msg *) data;
if (address != ADDR_SETUP_PROT) {
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": setup packet has bad address %d\n", address);
return;
}
switch (rx_msg->sig_no) {
case TL_SETUP_SIGNO_GET_VERSION_RSP:
if (hw->to_setup)
handle_setup_get_version_rsp(hw,
rx_msg->version_rsp_msg.version);
break;
case TL_SETUP_SIGNO_OPEN_MSG:
if (ipwireless_debug) {
unsigned int channel_idx = rx_msg->open_msg.port_no - 1;
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": OPEN_MSG [channel %u] reply received\n",
channel_idx);
}
break;
case TL_SETUP_SIGNO_INFO_MSG_ACK:
if (ipwireless_debug)
printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
": card successfully configured as NDISWAN\n");
break;
case TL_SETUP_SIGNO_REBOOT_MSG:
if (hw->to_setup)
printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
": Setup not completed - ignoring reboot msg\n");
else {
struct ipw_setup_reboot_msg_ack *packet;
printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
": Acknowledging REBOOT message\n");
packet = alloc_ctrl_packet(
sizeof(struct ipw_setup_reboot_msg_ack),
ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
TL_SETUP_SIGNO_REBOOT_MSG_ACK);
if (!packet) {
pr_err(IPWIRELESS_PCCARD_NAME
": Not enough memory to send reboot packet");
break;
}
packet->header.length =
sizeof(struct TlSetupRebootMsgAck);
send_packet(hw, PRIO_SETUP, &packet->header);
if (hw->reboot_callback)
hw->reboot_callback(hw->reboot_callback_data);
}
break;
default:
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": unknown setup message %u received\n",
(unsigned int) rx_msg->sig_no);
}
}
static void do_close_hardware(struct ipw_hardware *hw)
{
unsigned int irqn;
if (hw->hw_version == HW_VERSION_1) {
/* Disable TX and RX interrupts. */
outw(0, hw->base_port + IOIER);
/* Acknowledge any outstanding interrupt requests */
irqn = inw(hw->base_port + IOIR);
if (irqn & IR_TXINTR)
outw(IR_TXINTR, hw->base_port + IOIR);
if (irqn & IR_RXINTR)
outw(IR_RXINTR, hw->base_port + IOIR);
synchronize_irq(hw->irq);
}
}
struct ipw_hardware *ipwireless_hardware_create(void)
{
int i;
struct ipw_hardware *hw =
kzalloc(sizeof(struct ipw_hardware), GFP_KERNEL);
if (!hw)
return NULL;
hw->irq = -1;
hw->initializing = 1;
hw->tx_ready = 1;
hw->rx_bytes_queued = 0;
hw->rx_pool_size = 0;
hw->last_memtx_serial = (unsigned short) 0xffff;
for (i = 0; i < NL_NUM_OF_PRIORITIES; i++)
INIT_LIST_HEAD(&hw->tx_queue[i]);
INIT_LIST_HEAD(&hw->rx_queue);
INIT_LIST_HEAD(&hw->rx_pool);
spin_lock_init(&hw->lock);
tasklet_setup(&hw->tasklet, ipwireless_do_tasklet);
INIT_WORK(&hw->work_rx, ipw_receive_data_work);
timer_setup(&hw->setup_timer, ipwireless_setup_timer, 0);
return hw;
}
void ipwireless_init_hardware_v1(struct ipw_hardware *hw,
unsigned int base_port,
void __iomem *attr_memory,
void __iomem *common_memory,
int is_v2_card,
void (*reboot_callback) (void *data),
void *reboot_callback_data)
{
if (hw->removed) {
hw->removed = 0;
enable_irq(hw->irq);
}
hw->base_port = base_port;
hw->hw_version = (is_v2_card ? HW_VERSION_2 : HW_VERSION_1);
hw->ll_mtu = (hw->hw_version == HW_VERSION_1 ? LL_MTU_V1 : LL_MTU_V2);
hw->memregs_CCR = (struct MEMCCR __iomem *)
((unsigned short __iomem *) attr_memory + 0x200);
hw->memory_info_regs = (struct MEMINFREG __iomem *) common_memory;
hw->memreg_tx = &hw->memory_info_regs->memreg_tx_new;
hw->reboot_callback = reboot_callback;
hw->reboot_callback_data = reboot_callback_data;
}
void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw)
{
hw->initializing = 1;
hw->init_loops = 0;
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": waiting for card to start up...\n");
ipwireless_setup_timer(&hw->setup_timer);
}
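/*
 * Setup timer callback: re-send the version query until the card
 * answers. On V2 cards it falls back from the TX2 register to the old
 * TX register after TL_SETUP_MAX_VERSION_QRY attempts, and gives up
 * after the same number of attempts on the fallback register.
 */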
static void ipwireless_setup_timer(struct timer_list *t)
{
struct ipw_hardware *hw = from_timer(hw, t, setup_timer);
hw->init_loops++;
if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY &&
hw->hw_version == HW_VERSION_2 &&
hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": failed to startup using TX2, trying TX\n");
hw->memreg_tx = &hw->memory_info_regs->memreg_tx_old;
hw->init_loops = 0;
}
/* Give up after a certain number of retries */
if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY) {
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": card failed to start up!\n");
hw->initializing = 0;
} else {
/* Do not attempt to write to the board if it is not present. */
if (is_card_present(hw)) {
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
hw->to_setup = 1;
hw->tx_ready = 1;
spin_unlock_irqrestore(&hw->lock, flags);
tasklet_schedule(&hw->tasklet);
}
mod_timer(&hw->setup_timer,
jiffies + msecs_to_jiffies(TL_SETUP_VERSION_QRY_TMO));
}
}
/*
* Stop any interrupts from executing so that, once this function returns,
* other layers of the driver can be sure they won't get any more callbacks.
* It must therefore be called from process context.
*/
void ipwireless_stop_interrupts(struct ipw_hardware *hw)
{
if (!hw->shutting_down) {
/* Tell everyone we are going down. */
hw->shutting_down = 1;
del_timer(&hw->setup_timer);
/* Prevent the hardware from sending any more interrupts */
do_close_hardware(hw);
}
}
void ipwireless_hardware_free(struct ipw_hardware *hw)
{
int i;
struct ipw_rx_packet *rp, *rq;
struct ipw_tx_packet *tp, *tq;
ipwireless_stop_interrupts(hw);
flush_work(&hw->work_rx);
for (i = 0; i < NL_NUM_OF_ADDRESSES; i++)
kfree(hw->packet_assembler[i]);
for (i = 0; i < NL_NUM_OF_PRIORITIES; i++)
list_for_each_entry_safe(tp, tq, &hw->tx_queue[i], queue) {
list_del(&tp->queue);
kfree(tp);
}
list_for_each_entry_safe(rp, rq, &hw->rx_queue, queue) {
list_del(&rp->queue);
kfree(rp);
}
list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) {
list_del(&rp->queue);
kfree(rp);
}
kfree(hw);
}
/*
* Associate the specified network with this hardware, so it will receive events
* from it.
*/
void ipwireless_associate_network(struct ipw_hardware *hw,
struct ipw_network *network)
{
hw->network = network;
}
| linux-master | drivers/tty/ipwireless/hardware.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IPWireless 3G PCMCIA Network Driver
*
* Original code
* by Stephen Blackheath <[email protected]>,
* Ben Martel <[email protected]>
*
* Copyrighted as follows:
* Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
*
* Various driver changes and rewrites, port to new kernels
* Copyright (C) 2006-2007 Jiri Kosina
*
* Misc code cleanups and updates
* Copyright (C) 2007 David Sterba
*/
#include "hardware.h"
#include "network.h"
#include "main.h"
#include "tty.h"
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/device_id.h>
#include <pcmcia/ss.h>
#include <pcmcia/ds.h>
static const struct pcmcia_device_id ipw_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0100),
PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0200),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, ipw_ids);
static void ipwireless_detach(struct pcmcia_device *link);
/*
* Module params
*/
/* Debug mode: more verbose, print sent/recv bytes */
int ipwireless_debug;
int ipwireless_loopback;
int ipwireless_out_queue = 10;
module_param_named(debug, ipwireless_debug, int, 0);
module_param_named(loopback, ipwireless_loopback, int, 0);
module_param_named(out_queue, ipwireless_out_queue, int, 0);
MODULE_PARM_DESC(debug, "switch on debug messages [0]");
MODULE_PARM_DESC(loopback,
"debug: enable ras_raw channel [0]");
MODULE_PARM_DESC(out_queue, "debug: set size of outgoing PPP queue [10]");
/* Executes in process context. */
static void signalled_reboot_work(struct work_struct *work_reboot)
{
struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev,
work_reboot);
struct pcmcia_device *link = ipw->link;
pcmcia_reset_card(link->socket);
}
static void signalled_reboot_callback(void *callback_data)
{
struct ipw_dev *ipw = (struct ipw_dev *) callback_data;
/* Delegate to process context. */
schedule_work(&ipw->work_reboot);
}
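/*
 * Called by pcmcia_loop_config() for each configuration option: claim
 * the I/O range, map the common and attribute memory windows, and
 * detect V2 cards by their 0x100-byte common memory window.
 */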
static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
{
struct ipw_dev *ipw = priv_data;
int ret;
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
/* 0x40 causes it to generate level mode interrupts. */
/* 0x04 enables IREQ pin. */
p_dev->config_index |= 0x44;
p_dev->io_lines = 16;
ret = pcmcia_request_io(p_dev);
if (ret)
return ret;
if (!request_region(p_dev->resource[0]->start,
resource_size(p_dev->resource[0]),
IPWIRELESS_PCCARD_NAME)) {
ret = -EBUSY;
goto exit;
}
p_dev->resource[2]->flags |=
WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
ret = pcmcia_request_window(p_dev, p_dev->resource[2], 0);
if (ret != 0)
goto exit1;
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
if (ret != 0)
goto exit1;
ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
ipw->common_memory = ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
if (!ipw->common_memory) {
ret = -ENOMEM;
goto exit1;
}
if (!request_mem_region(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]),
IPWIRELESS_PCCARD_NAME)) {
ret = -EBUSY;
goto exit2;
}
p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
WIN_ENABLE;
p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
if (ret != 0)
goto exit3;
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
if (ret != 0)
goto exit3;
ipw->attr_memory = ioremap(p_dev->resource[3]->start,
resource_size(p_dev->resource[3]));
if (!ipw->attr_memory) {
ret = -ENOMEM;
goto exit3;
}
if (!request_mem_region(p_dev->resource[3]->start,
resource_size(p_dev->resource[3]),
IPWIRELESS_PCCARD_NAME)) {
ret = -EBUSY;
goto exit4;
}
return 0;
exit4:
iounmap(ipw->attr_memory);
exit3:
release_mem_region(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
exit2:
iounmap(ipw->common_memory);
exit1:
release_region(p_dev->resource[0]->start,
resource_size(p_dev->resource[0]));
exit:
pcmcia_disable_device(p_dev);
return ret;
}
static int config_ipwireless(struct ipw_dev *ipw)
{
struct pcmcia_device *link = ipw->link;
int ret = 0;
ipw->is_v2_card = 0;
link->config_flags |= CONF_AUTO_SET_IO | CONF_AUTO_SET_IOMEM |
CONF_ENABLE_IRQ;
ret = pcmcia_loop_config(link, ipwireless_probe, ipw);
if (ret != 0)
return ret;
INIT_WORK(&ipw->work_reboot, signalled_reboot_work);
ipwireless_init_hardware_v1(ipw->hardware, link->resource[0]->start,
ipw->attr_memory, ipw->common_memory,
ipw->is_v2_card, signalled_reboot_callback,
ipw);
ret = pcmcia_request_irq(link, ipwireless_interrupt);
if (ret != 0)
goto exit;
printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n",
ipw->is_v2_card ? "V2/V3" : "V1");
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": I/O ports %pR, irq %d\n", link->resource[0],
(unsigned int) link->irq);
if (ipw->attr_memory && ipw->common_memory)
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": attr memory %pR, common memory %pR\n",
link->resource[3],
link->resource[2]);
ipw->network = ipwireless_network_create(ipw->hardware);
if (!ipw->network)
goto exit;
ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network);
if (!ipw->tty)
goto exit;
ipwireless_init_hardware_v2_v3(ipw->hardware);
/*
* Do the RequestConfiguration last, because it enables interrupts.
* Then we don't get any interrupts before we're ready for them.
*/
ret = pcmcia_enable_device(link);
if (ret != 0)
goto exit;
return 0;
exit:
if (ipw->common_memory) {
release_mem_region(link->resource[2]->start,
resource_size(link->resource[2]));
iounmap(ipw->common_memory);
}
if (ipw->attr_memory) {
release_mem_region(link->resource[3]->start,
resource_size(link->resource[3]));
iounmap(ipw->attr_memory);
}
pcmcia_disable_device(link);
return -1;
}
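/* Undo the resource claims made in ipwireless_probe(). */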
static void release_ipwireless(struct ipw_dev *ipw)
{
release_region(ipw->link->resource[0]->start,
resource_size(ipw->link->resource[0]));
if (ipw->common_memory) {
release_mem_region(ipw->link->resource[2]->start,
resource_size(ipw->link->resource[2]));
iounmap(ipw->common_memory);
}
if (ipw->attr_memory) {
release_mem_region(ipw->link->resource[3]->start,
resource_size(ipw->link->resource[3]));
iounmap(ipw->attr_memory);
}
pcmcia_disable_device(ipw->link);
}
/*
* ipwireless_attach() creates an "instance" of the driver, allocating
* local data structures for one device (one interface). The device
* is registered with Card Services.
*
* The pcmcia_device structure is initialized, but we don't actually
* configure the card at this point -- we wait until we receive a
* card insertion event.
*/
static int ipwireless_attach(struct pcmcia_device *link)
{
struct ipw_dev *ipw;
int ret;
ipw = kzalloc(sizeof(struct ipw_dev), GFP_KERNEL);
if (!ipw)
return -ENOMEM;
ipw->link = link;
link->priv = ipw;
ipw->hardware = ipwireless_hardware_create();
if (!ipw->hardware) {
kfree(ipw);
return -ENOMEM;
}
/* config_ipwireless() configures the card and enables interrupts last */
ret = config_ipwireless(ipw);
if (ret != 0) {
ipwireless_detach(link);
return ret;
}
return 0;
}
/*
* This deletes a driver "instance". The device is de-registered with
* Card Services. If it has been released, all local data structures
* are freed. Otherwise, the structures will be freed when the device
* is released.
*/
static void ipwireless_detach(struct pcmcia_device *link)
{
struct ipw_dev *ipw = link->priv;
release_ipwireless(ipw);
if (ipw->tty != NULL)
ipwireless_tty_free(ipw->tty);
if (ipw->network != NULL)
ipwireless_network_free(ipw->network);
if (ipw->hardware != NULL)
ipwireless_hardware_free(ipw->hardware);
kfree(ipw);
}
static struct pcmcia_driver me = {
.owner = THIS_MODULE,
.probe = ipwireless_attach,
.remove = ipwireless_detach,
.name = IPWIRELESS_PCCARD_NAME,
.id_table = ipw_ids
};
/*
* Module insertion: initialisation of the module.
* Register the card with cardmgr...
*/
static int __init init_ipwireless(void)
{
int ret;
ret = ipwireless_tty_init();
if (ret != 0)
return ret;
ret = pcmcia_register_driver(&me);
if (ret != 0)
ipwireless_tty_release();
return ret;
}
/*
* Module removal
*/
static void __exit exit_ipwireless(void)
{
pcmcia_unregister_driver(&me);
ipwireless_tty_release();
}
module_init(init_ipwireless);
module_exit(exit_ipwireless);
MODULE_AUTHOR(IPWIRELESS_PCMCIA_AUTHOR);
MODULE_DESCRIPTION(IPWIRELESS_PCCARD_NAME " " IPWIRELESS_PCMCIA_VERSION);
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/ipwireless/main.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IPWireless 3G PCMCIA Network Driver
*
* Original code
* by Stephen Blackheath <[email protected]>,
* Ben Martel <[email protected]>
*
* Copyrighted as follows:
* Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
*
* Various driver changes and rewrites, port to new kernels
* Copyright (C) 2006-2007 Jiri Kosina
*
* Misc code cleanups and updates
* Copyright (C) 2007 David Sterba
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ppp_defs.h>
#include <linux/if.h>
#include <linux/ppp-ioctl.h>
#include <linux/sched.h>
#include <linux/serial.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/uaccess.h>
#include "tty.h"
#include "network.h"
#include "hardware.h"
#include "main.h"
#define IPWIRELESS_PCMCIA_START (0)
#define IPWIRELESS_PCMCIA_MINORS (24)
#define IPWIRELESS_PCMCIA_MINOR_RANGE (8)
#define TTYTYPE_MODEM (0)
#define TTYTYPE_MONITOR (1)
#define TTYTYPE_RAS_RAW (2)
struct ipw_tty {
struct tty_port port;
int index;
struct ipw_hardware *hardware;
unsigned int channel_idx;
unsigned int secondary_channel_idx;
int tty_type;
struct ipw_network *network;
unsigned int control_lines;
struct mutex ipw_tty_mutex;
int tx_bytes_queued;
};
static struct ipw_tty *ttys[IPWIRELESS_PCMCIA_MINORS];
static struct tty_driver *ipw_tty_driver;
static char *tty_type_name(int tty_type)
{
static char *channel_names[] = {
"modem",
"monitor",
"RAS-raw"
};
return channel_names[tty_type];
}
static struct ipw_tty *get_tty(int index)
{
/*
* The 'ras_raw' channel is only available when 'loopback' mode
* is enabled.
* Its minor numbers start at 16 (_RANGE * _RAS_RAW).
*/
if (!ipwireless_loopback && index >=
IPWIRELESS_PCMCIA_MINOR_RANGE * TTYTYPE_RAS_RAW)
return NULL;
return ttys[index];
}
static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
{
struct ipw_tty *tty = get_tty(linux_tty->index);
if (!tty)
return -ENODEV;
mutex_lock(&tty->ipw_tty_mutex);
if (tty->port.count == 0)
tty->tx_bytes_queued = 0;
tty->port.count++;
tty->port.tty = linux_tty;
linux_tty->driver_data = tty;
if (tty->tty_type == TTYTYPE_MODEM)
ipwireless_ppp_open(tty->network);
mutex_unlock(&tty->ipw_tty_mutex);
return 0;
}
static void do_ipw_close(struct ipw_tty *tty)
{
tty->port.count--;
if (tty->port.count == 0) {
struct tty_struct *linux_tty = tty->port.tty;
if (linux_tty != NULL) {
tty->port.tty = NULL;
linux_tty->driver_data = NULL;
if (tty->tty_type == TTYTYPE_MODEM)
ipwireless_ppp_close(tty->network);
}
}
}
static void ipw_hangup(struct tty_struct *linux_tty)
{
struct ipw_tty *tty = linux_tty->driver_data;
if (!tty)
return;
mutex_lock(&tty->ipw_tty_mutex);
if (tty->port.count == 0) {
mutex_unlock(&tty->ipw_tty_mutex);
return;
}
do_ipw_close(tty);
mutex_unlock(&tty->ipw_tty_mutex);
}
static void ipw_close(struct tty_struct *linux_tty, struct file *filp)
{
ipw_hangup(linux_tty);
}
/* Take data received from hardware, and send it out the tty */
void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
unsigned int length)
{
int work = 0;
mutex_lock(&tty->ipw_tty_mutex);
if (!tty->port.count) {
mutex_unlock(&tty->ipw_tty_mutex);
return;
}
mutex_unlock(&tty->ipw_tty_mutex);
work = tty_insert_flip_string(&tty->port, data, length);
if (work != length)
printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
": %d chars not inserted to flip buffer!\n",
length - work);
if (work)
tty_flip_buffer_push(&tty->port);
}
static void ipw_write_packet_sent_callback(void *callback_data,
unsigned int packet_length)
{
struct ipw_tty *tty = callback_data;
/*
* Packet has been sent, so we subtract the number of bytes from our
* tally of outstanding TX bytes.
*/
tty->tx_bytes_queued -= packet_length;
}
static ssize_t ipw_write(struct tty_struct *linux_tty, const u8 *buf,
size_t count)
{
struct ipw_tty *tty = linux_tty->driver_data;
int room, ret;
if (!tty)
return -ENODEV;
mutex_lock(&tty->ipw_tty_mutex);
if (!tty->port.count) {
mutex_unlock(&tty->ipw_tty_mutex);
return -EINVAL;
}
room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
if (room < 0)
room = 0;
/* Don't allow caller to write any more than we have room for */
if (count > room)
count = room;
if (count == 0) {
mutex_unlock(&tty->ipw_tty_mutex);
return 0;
}
ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
buf, count,
ipw_write_packet_sent_callback, tty);
if (ret < 0) {
mutex_unlock(&tty->ipw_tty_mutex);
return 0;
}
tty->tx_bytes_queued += count;
mutex_unlock(&tty->ipw_tty_mutex);
return count;
}
static unsigned int ipw_write_room(struct tty_struct *linux_tty)
{
struct ipw_tty *tty = linux_tty->driver_data;
int room;
/* FIXME: Exactly how is the tty object locked here .. */
if (!tty)
return 0;
if (!tty->port.count)
return 0;
room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
if (room < 0)
room = 0;
return room;
}
static int ipwireless_get_serial_info(struct tty_struct *linux_tty,
struct serial_struct *ss)
{
struct ipw_tty *tty = linux_tty->driver_data;
if (!tty)
return -ENODEV;
if (!tty->port.count)
return -EINVAL;
ss->type = PORT_UNKNOWN;
ss->line = tty->index;
ss->baud_base = 115200;
return 0;
}
static int ipwireless_set_serial_info(struct tty_struct *linux_tty,
struct serial_struct *ss)
{
return 0; /* Keeps the PCMCIA scripts happy. */
}
static unsigned int ipw_chars_in_buffer(struct tty_struct *linux_tty)
{
struct ipw_tty *tty = linux_tty->driver_data;
if (!tty)
return 0;
if (!tty->port.count)
return 0;
return tty->tx_bytes_queued;
}
static int get_control_lines(struct ipw_tty *tty)
{
unsigned int my = tty->control_lines;
unsigned int out = 0;
if (my & IPW_CONTROL_LINE_RTS)
out |= TIOCM_RTS;
if (my & IPW_CONTROL_LINE_DTR)
out |= TIOCM_DTR;
if (my & IPW_CONTROL_LINE_CTS)
out |= TIOCM_CTS;
if (my & IPW_CONTROL_LINE_DSR)
out |= TIOCM_DSR;
if (my & IPW_CONTROL_LINE_DCD)
out |= TIOCM_CD;
return out;
}
static int set_control_lines(struct ipw_tty *tty, unsigned int set,
unsigned int clear)
{
int ret;
if (set & TIOCM_RTS) {
ret = ipwireless_set_RTS(tty->hardware, tty->channel_idx, 1);
if (ret)
return ret;
if (tty->secondary_channel_idx != -1) {
ret = ipwireless_set_RTS(tty->hardware,
tty->secondary_channel_idx, 1);
if (ret)
return ret;
}
}
if (set & TIOCM_DTR) {
ret = ipwireless_set_DTR(tty->hardware, tty->channel_idx, 1);
if (ret)
return ret;
if (tty->secondary_channel_idx != -1) {
ret = ipwireless_set_DTR(tty->hardware,
tty->secondary_channel_idx, 1);
if (ret)
return ret;
}
}
if (clear & TIOCM_RTS) {
ret = ipwireless_set_RTS(tty->hardware, tty->channel_idx, 0);
if (ret)
return ret;
if (tty->secondary_channel_idx != -1) {
ret = ipwireless_set_RTS(tty->hardware,
tty->secondary_channel_idx, 0);
if (ret)
return ret;
}
}
if (clear & TIOCM_DTR) {
ret = ipwireless_set_DTR(tty->hardware, tty->channel_idx, 0);
if (tty->secondary_channel_idx != -1) {
ret = ipwireless_set_DTR(tty->hardware,
tty->secondary_channel_idx, 0);
if (ret)
return ret;
}
}
return 0;
}
static int ipw_tiocmget(struct tty_struct *linux_tty)
{
struct ipw_tty *tty = linux_tty->driver_data;
/* FIXME: Exactly how is the tty object locked here .. */
if (!tty)
return -ENODEV;
if (!tty->port.count)
return -EINVAL;
return get_control_lines(tty);
}
static int
ipw_tiocmset(struct tty_struct *linux_tty,
unsigned int set, unsigned int clear)
{
struct ipw_tty *tty = linux_tty->driver_data;
/* FIXME: Exactly how is the tty object locked here .. */
if (!tty)
return -ENODEV;
if (!tty->port.count)
return -EINVAL;
return set_control_lines(tty, set, clear);
}
static int ipw_ioctl(struct tty_struct *linux_tty,
unsigned int cmd, unsigned long arg)
{
struct ipw_tty *tty = linux_tty->driver_data;
if (!tty)
return -ENODEV;
if (!tty->port.count)
return -EINVAL;
/* FIXME: Exactly how is the tty object locked here .. */
if (tty->tty_type == TTYTYPE_MODEM) {
switch (cmd) {
case PPPIOCGCHAN:
{
int chan = ipwireless_ppp_channel_index(
tty->network);
if (chan < 0)
return -ENODEV;
if (put_user(chan, (int __user *) arg))
return -EFAULT;
}
return 0;
case PPPIOCGUNIT:
{
int unit = ipwireless_ppp_unit_number(
tty->network);
if (unit < 0)
return -ENODEV;
if (put_user(unit, (int __user *) arg))
return -EFAULT;
}
return 0;
case FIONREAD:
{
int val = 0;
if (put_user(val, (int __user *) arg))
return -EFAULT;
}
return 0;
case TCFLSH:
return tty_perform_flush(linux_tty, arg);
}
}
return -ENOIOCTLCMD;
}
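/*
 * Illustrative userspace sketch (not part of this driver; the device node
 * name is hypothetical): a PPP daemon can query the channel number that the
 * modem tty exposes through the PPPIOCGCHAN ioctl handled above.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ppp-ioctl.h>
 *
 *	int chan, fd = open("/dev/ttyIPWp0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, PPPIOCGCHAN, &chan) == 0)
 *		printf("PPP channel %d\n", chan);
 */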
static int add_tty(int j,
struct ipw_hardware *hardware,
struct ipw_network *network, int channel_idx,
int secondary_channel_idx, int tty_type)
{
ttys[j] = kzalloc(sizeof(struct ipw_tty), GFP_KERNEL);
if (!ttys[j])
return -ENOMEM;
ttys[j]->index = j;
ttys[j]->hardware = hardware;
ttys[j]->channel_idx = channel_idx;
ttys[j]->secondary_channel_idx = secondary_channel_idx;
ttys[j]->network = network;
ttys[j]->tty_type = tty_type;
mutex_init(&ttys[j]->ipw_tty_mutex);
tty_port_init(&ttys[j]->port);
tty_port_register_device(&ttys[j]->port, ipw_tty_driver, j, NULL);
ipwireless_associate_network_tty(network, channel_idx, ttys[j]);
if (secondary_channel_idx != -1)
ipwireless_associate_network_tty(network,
secondary_channel_idx,
ttys[j]);
/* check if we provide raw device (if loopback is enabled) */
if (get_tty(j))
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": registering %s device ttyIPWp%d\n",
tty_type_name(tty_type), j);
return 0;
}
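/*
 * Each card instance gets three minors spaced IPWIRELESS_PCMCIA_MINOR_RANGE
 * apart: the modem tty at minor i, the monitor tty at
 * i + IPWIRELESS_PCMCIA_MINOR_RANGE, and the raw RAS tty at
 * i + 2 * IPWIRELESS_PCMCIA_MINOR_RANGE, as laid out by the add_tty()
 * calls below.
 */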
struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
struct ipw_network *network)
{
int i, j;
for (i = 0; i < IPWIRELESS_PCMCIA_MINOR_RANGE; i++) {
int allfree = 1;
for (j = i; j < IPWIRELESS_PCMCIA_MINORS;
j += IPWIRELESS_PCMCIA_MINOR_RANGE)
if (ttys[j] != NULL) {
allfree = 0;
break;
}
if (allfree) {
j = i;
if (add_tty(j, hardware, network,
IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS,
TTYTYPE_MODEM))
return NULL;
j += IPWIRELESS_PCMCIA_MINOR_RANGE;
if (add_tty(j, hardware, network,
IPW_CHANNEL_DIALLER, -1,
TTYTYPE_MONITOR))
return NULL;
j += IPWIRELESS_PCMCIA_MINOR_RANGE;
if (add_tty(j, hardware, network,
IPW_CHANNEL_RAS, -1,
TTYTYPE_RAS_RAW))
return NULL;
return ttys[i];
}
}
return NULL;
}
/*
* Must be called before ipwireless_network_free().
*/
void ipwireless_tty_free(struct ipw_tty *tty)
{
int j;
struct ipw_network *network = ttys[tty->index]->network;
for (j = tty->index; j < IPWIRELESS_PCMCIA_MINORS;
j += IPWIRELESS_PCMCIA_MINOR_RANGE) {
struct ipw_tty *ttyj = ttys[j];
if (ttyj) {
mutex_lock(&ttyj->ipw_tty_mutex);
if (get_tty(j))
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": deregistering %s device ttyIPWp%d\n",
tty_type_name(ttyj->tty_type), j);
if (ttyj->port.tty != NULL) {
mutex_unlock(&ttyj->ipw_tty_mutex);
tty_vhangup(ttyj->port.tty);
/* FIXME: Exactly how is the tty object locked here
against a parallel ioctl etc */
/* FIXME2: hangup does not mean all processes
* are gone */
mutex_lock(&ttyj->ipw_tty_mutex);
}
while (ttyj->port.count)
do_ipw_close(ttyj);
ipwireless_disassociate_network_ttys(network,
ttyj->channel_idx);
tty_unregister_device(ipw_tty_driver, j);
tty_port_destroy(&ttyj->port);
ttys[j] = NULL;
mutex_unlock(&ttyj->ipw_tty_mutex);
kfree(ttyj);
}
}
}
static const struct tty_operations tty_ops = {
.open = ipw_open,
.close = ipw_close,
.hangup = ipw_hangup,
.write = ipw_write,
.write_room = ipw_write_room,
.ioctl = ipw_ioctl,
.chars_in_buffer = ipw_chars_in_buffer,
.tiocmget = ipw_tiocmget,
.tiocmset = ipw_tiocmset,
.set_serial = ipwireless_set_serial_info,
.get_serial = ipwireless_get_serial_info,
};
int ipwireless_tty_init(void)
{
int result;
ipw_tty_driver = tty_alloc_driver(IPWIRELESS_PCMCIA_MINORS,
TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(ipw_tty_driver))
return PTR_ERR(ipw_tty_driver);
ipw_tty_driver->driver_name = IPWIRELESS_PCCARD_NAME;
ipw_tty_driver->name = "ttyIPWp";
ipw_tty_driver->major = 0;
ipw_tty_driver->minor_start = IPWIRELESS_PCMCIA_START;
ipw_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
ipw_tty_driver->subtype = SERIAL_TYPE_NORMAL;
ipw_tty_driver->init_termios = tty_std_termios;
ipw_tty_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
ipw_tty_driver->init_termios.c_ispeed = 9600;
ipw_tty_driver->init_termios.c_ospeed = 9600;
tty_set_operations(ipw_tty_driver, &tty_ops);
result = tty_register_driver(ipw_tty_driver);
if (result) {
printk(KERN_ERR IPWIRELESS_PCCARD_NAME
": failed to register tty driver\n");
tty_driver_kref_put(ipw_tty_driver);
return result;
}
return 0;
}
void ipwireless_tty_release(void)
{
tty_unregister_driver(ipw_tty_driver);
tty_driver_kref_put(ipw_tty_driver);
}
int ipwireless_tty_is_modem(struct ipw_tty *tty)
{
return tty->tty_type == TTYTYPE_MODEM;
}
void
ipwireless_tty_notify_control_line_change(struct ipw_tty *tty,
unsigned int channel_idx,
unsigned int control_lines,
unsigned int changed_mask)
{
unsigned int old_control_lines = tty->control_lines;
tty->control_lines = (tty->control_lines & ~changed_mask)
| (control_lines & changed_mask);
/*
* If DCD is de-asserted, we close the tty so pppd can tell that we
* have gone offline.
*/
if ((old_control_lines & IPW_CONTROL_LINE_DCD)
&& !(tty->control_lines & IPW_CONTROL_LINE_DCD)
&& tty->port.tty) {
tty_hangup(tty->port.tty);
}
}
| linux-master | drivers/tty/ipwireless/tty.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <[email protected]>
*
* Based on drivers/spmi/spmi.c:
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*/
#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/slab.h>
#include <linux/platform_data/x86/apple.h>
static bool is_registered;
static DEFINE_IDA(ctrl_ida);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int len;
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
if (len != -ENODEV)
return len;
return of_device_modalias(dev, buf, PAGE_SIZE);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *serdev_device_attrs[] = {
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(serdev_device);
static int serdev_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
int rc;
/* TODO: platform modalias */
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
return of_device_uevent_modalias(dev, env);
}
static void serdev_device_release(struct device *dev)
{
struct serdev_device *serdev = to_serdev_device(dev);
kfree(serdev);
}
static const struct device_type serdev_device_type = {
.groups = serdev_device_groups,
.uevent = serdev_device_uevent,
.release = serdev_device_release,
};
static bool is_serdev_device(const struct device *dev)
{
return dev->type == &serdev_device_type;
}
static void serdev_ctrl_release(struct device *dev)
{
struct serdev_controller *ctrl = to_serdev_controller(dev);
ida_simple_remove(&ctrl_ida, ctrl->nr);
kfree(ctrl);
}
static const struct device_type serdev_ctrl_type = {
.release = serdev_ctrl_release,
};
static int serdev_device_match(struct device *dev, struct device_driver *drv)
{
if (!is_serdev_device(dev))
return 0;
/* TODO: platform matching */
if (acpi_driver_match_device(dev, drv))
return 1;
return of_driver_match_device(dev, drv);
}
/**
* serdev_device_add() - add a device previously constructed via serdev_device_alloc()
* @serdev: serdev_device to be added
*/
int serdev_device_add(struct serdev_device *serdev)
{
struct serdev_controller *ctrl = serdev->ctrl;
struct device *parent = serdev->dev.parent;
int err;
dev_set_name(&serdev->dev, "%s-%d", dev_name(parent), serdev->nr);
/* Only a single slave device is currently supported. */
if (ctrl->serdev) {
dev_err(&serdev->dev, "controller busy\n");
return -EBUSY;
}
ctrl->serdev = serdev;
err = device_add(&serdev->dev);
if (err < 0) {
dev_err(&serdev->dev, "Can't add %s, status %pe\n",
dev_name(&serdev->dev), ERR_PTR(err));
goto err_clear_serdev;
}
dev_dbg(&serdev->dev, "device %s registered\n", dev_name(&serdev->dev));
return 0;
err_clear_serdev:
ctrl->serdev = NULL;
return err;
}
EXPORT_SYMBOL_GPL(serdev_device_add);
/**
* serdev_device_remove(): remove a serdev device
* @serdev: serdev_device to be removed
*/
void serdev_device_remove(struct serdev_device *serdev)
{
struct serdev_controller *ctrl = serdev->ctrl;
device_unregister(&serdev->dev);
ctrl->serdev = NULL;
}
EXPORT_SYMBOL_GPL(serdev_device_remove);
int serdev_device_open(struct serdev_device *serdev)
{
struct serdev_controller *ctrl = serdev->ctrl;
int ret;
if (!ctrl || !ctrl->ops->open)
return -EINVAL;
ret = ctrl->ops->open(ctrl);
if (ret)
return ret;
ret = pm_runtime_get_sync(&ctrl->dev);
if (ret < 0) {
pm_runtime_put_noidle(&ctrl->dev);
goto err_close;
}
return 0;
err_close:
if (ctrl->ops->close)
ctrl->ops->close(ctrl);
return ret;
}
EXPORT_SYMBOL_GPL(serdev_device_open);
void serdev_device_close(struct serdev_device *serdev)
{
struct serdev_controller *ctrl = serdev->ctrl;
if (!ctrl || !ctrl->ops->close)
return;
pm_runtime_put(&ctrl->dev);
ctrl->ops->close(ctrl);
}
EXPORT_SYMBOL_GPL(serdev_device_close);
static void devm_serdev_device_release(struct device *dev, void *dr)
{
serdev_device_close(*(struct serdev_device **)dr);
}
int devm_serdev_device_open(struct device *dev, struct serdev_device *serdev)
{
struct serdev_device **dr;
int ret;
dr = devres_alloc(devm_serdev_device_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
ret = serdev_device_open(serdev);
if (ret) {
devres_free(dr);
return ret;
}
*dr = serdev;
devres_add(dev, dr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_serdev_device_open);
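/*
 * Illustrative sketch (hypothetical driver; error handling trimmed): a
 * serdev client typically opens the port from probe via the devres helper
 * above, so the port is closed automatically on unbind or probe failure.
 *
 *	static int demo_probe(struct serdev_device *serdev)
 *	{
 *		int ret = devm_serdev_device_open(&serdev->dev, serdev);
 *
 *		if (ret)
 *			return ret;
 *		serdev_device_set_baudrate(serdev, 115200);
 *		serdev_device_set_flow_control(serdev, false);
 *		return 0;
 *	}
 */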
void serdev_device_write_wakeup(struct serdev_device *serdev)
{
complete(&serdev->write_comp);
}
EXPORT_SYMBOL_GPL(serdev_device_write_wakeup);
/**
* serdev_device_write_buf() - write data asynchronously
* @serdev: serdev device
* @buf: data to be written
* @count: number of bytes to write
*
* Write data to the device asynchronously.
*
* Note that any accepted data has only been buffered by the controller; use
* serdev_device_wait_until_sent() to make sure the controller write buffer
* has actually been emptied.
*
* Return: The number of bytes written (less than count if not enough room in
* the write buffer), or a negative errno on errors.
*/
int serdev_device_write_buf(struct serdev_device *serdev,
const unsigned char *buf, size_t count)
{
struct serdev_controller *ctrl = serdev->ctrl;
if (!ctrl || !ctrl->ops->write_buf)
return -EINVAL;
return ctrl->ops->write_buf(ctrl, buf, count);
}
EXPORT_SYMBOL_GPL(serdev_device_write_buf);
/**
* serdev_device_write() - write data synchronously
* @serdev: serdev device
* @buf: data to be written
* @count: number of bytes to write
* @timeout: timeout in jiffies, or 0 to wait indefinitely
*
* Write data to the device synchronously by repeatedly calling the
* controller's write_buf() callback until it has accepted all data (unless
* interrupted by a timeout or a signal).
*
* Note that any accepted data has only been buffered by the controller; use
* serdev_device_wait_until_sent() to make sure the controller write buffer
* has actually been emptied.
*
* Note that this function depends on serdev_device_write_wakeup() being
* called in the serdev driver write_wakeup() callback.
*
* Return: The number of bytes written (less than count if interrupted),
* -ETIMEDOUT or -ERESTARTSYS if interrupted before any bytes were written, or
* a negative errno on errors.
*/
int serdev_device_write(struct serdev_device *serdev,
const unsigned char *buf, size_t count,
long timeout)
{
struct serdev_controller *ctrl = serdev->ctrl;
int written = 0;
int ret;
if (!ctrl || !ctrl->ops->write_buf || !serdev->ops->write_wakeup)
return -EINVAL;
if (timeout == 0)
timeout = MAX_SCHEDULE_TIMEOUT;
mutex_lock(&serdev->write_lock);
do {
reinit_completion(&serdev->write_comp);
ret = ctrl->ops->write_buf(ctrl, buf, count);
if (ret < 0)
break;
written += ret;
buf += ret;
count -= ret;
if (count == 0)
break;
timeout = wait_for_completion_interruptible_timeout(&serdev->write_comp,
timeout);
} while (timeout > 0);
mutex_unlock(&serdev->write_lock);
if (ret < 0)
return ret;
if (timeout <= 0 && written == 0) {
if (timeout == -ERESTARTSYS)
return -ERESTARTSYS;
else
return -ETIMEDOUT;
}
return written;
}
EXPORT_SYMBOL_GPL(serdev_device_write);
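/*
 * Illustrative sketch (buffer contents are arbitrary): sending a short
 * command synchronously with a one-second bound on the wait, then making
 * sure the controller has actually drained its buffer.
 *
 *	const u8 cmd[] = { 0x01, 0x02, 0x03 };
 *	int ret = serdev_device_write(serdev, cmd, sizeof(cmd),
 *				      msecs_to_jiffies(1000));
 *
 *	if (ret < (int)sizeof(cmd))
 *		return ret < 0 ? ret : -EIO;
 *	serdev_device_wait_until_sent(serdev, 0);
 */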
void serdev_device_write_flush(struct serdev_device *serdev)
{
struct serdev_controller *ctrl = serdev->ctrl;
if (!ctrl || !ctrl->ops->write_flush)
return;
ctrl->ops->write_flush(ctrl);
}
EXPORT_SYMBOL_GPL(serdev_device_write_flush);
int serdev_device_write_room(struct serdev_device *serdev)
{
struct serdev_controller *ctrl = serdev->ctrl;
if (!ctrl || !ctrl->ops->write_room)
return 0;
return serdev->ctrl->ops->write_room(ctrl);
}
EXPORT_SYMBOL_GPL(serdev_device_write_room);
unsigned int serdev_device_set_baudrate(struct serdev_device *serdev, unsigned int speed)
{
struct serdev_controller *ctrl = serdev->ctrl;
if (!ctrl || !ctrl->ops->set_baudrate)
return 0;
return ctrl->ops->set_baudrate(ctrl, speed);
}
EXPORT_SYMBOL_GPL(serdev_device_set_baudrate);
void serdev_device_set_flow_control(struct serdev_device *serdev, bool enable)
{
struct serdev_controller *ctrl = serdev->ctrl;
if (!ctrl || !ctrl->ops->set_flow_control)
return;
ctrl->ops->set_flow_control(ctrl, enable);
}
EXPORT_SYMBOL_GPL(serdev_device_set_flow_control);
int serdev_device_set_parity(struct serdev_device *serdev,
enum serdev_parity parity)
{
struct serdev_controller *ctrl = serdev->ctrl;
if (!ctrl || !ctrl->ops->set_parity)
return -EOPNOTSUPP;
return ctrl->ops->set_parity(ctrl, parity);
}
EXPORT_SYMBOL_GPL(serdev_device_set_parity);
void serdev_device_wait_until_sent(struct serdev_device *serdev, long timeout)
{
struct serdev_controller *ctrl = serdev->ctrl;
if (!ctrl || !ctrl->ops->wait_until_sent)
return;
ctrl->ops->wait_until_sent(ctrl, timeout);
}
EXPORT_SYMBOL_GPL(serdev_device_wait_until_sent);
int serdev_device_get_tiocm(struct serdev_device *serdev)
{
struct serdev_controller *ctrl = serdev->ctrl;
if (!ctrl || !ctrl->ops->get_tiocm)
return -EOPNOTSUPP;
return ctrl->ops->get_tiocm(ctrl);
}
EXPORT_SYMBOL_GPL(serdev_device_get_tiocm);
int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear)
{
struct serdev_controller *ctrl = serdev->ctrl;
if (!ctrl || !ctrl->ops->set_tiocm)
return -EOPNOTSUPP;
return ctrl->ops->set_tiocm(ctrl, set, clear);
}
EXPORT_SYMBOL_GPL(serdev_device_set_tiocm);
int serdev_device_break_ctl(struct serdev_device *serdev, int break_state)
{
struct serdev_controller *ctrl = serdev->ctrl;
if (!ctrl || !ctrl->ops->break_ctl)
return -EOPNOTSUPP;
return ctrl->ops->break_ctl(ctrl, break_state);
}
EXPORT_SYMBOL_GPL(serdev_device_break_ctl);
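/*
 * Illustrative sketch: the modem-control helpers above take and return the
 * standard TIOCM_* bits, e.g. asserting DTR/RTS and then checking carrier:
 *
 *	int status;
 *
 *	serdev_device_set_tiocm(serdev, TIOCM_DTR | TIOCM_RTS, 0);
 *	status = serdev_device_get_tiocm(serdev);
 *	if (status >= 0 && (status & TIOCM_CD))
 *		;	// carrier detected
 */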
static int serdev_drv_probe(struct device *dev)
{
const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
int ret;
ret = dev_pm_domain_attach(dev, true);
if (ret)
return ret;
ret = sdrv->probe(to_serdev_device(dev));
if (ret)
dev_pm_domain_detach(dev, true);
return ret;
}
static void serdev_drv_remove(struct device *dev)
{
const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
if (sdrv->remove)
sdrv->remove(to_serdev_device(dev));
dev_pm_domain_detach(dev, true);
}
static struct bus_type serdev_bus_type = {
.name = "serial",
.match = serdev_device_match,
.probe = serdev_drv_probe,
.remove = serdev_drv_remove,
};
/**
* serdev_device_alloc() - Allocate a new serdev device
* @ctrl: associated controller
*
* Caller is responsible for either calling serdev_device_add() to add the
* newly allocated device, or calling serdev_device_put() to discard it.
*/
struct serdev_device *serdev_device_alloc(struct serdev_controller *ctrl)
{
struct serdev_device *serdev;
serdev = kzalloc(sizeof(*serdev), GFP_KERNEL);
if (!serdev)
return NULL;
serdev->ctrl = ctrl;
device_initialize(&serdev->dev);
serdev->dev.parent = &ctrl->dev;
serdev->dev.bus = &serdev_bus_type;
serdev->dev.type = &serdev_device_type;
init_completion(&serdev->write_comp);
mutex_init(&serdev->write_lock);
return serdev;
}
EXPORT_SYMBOL_GPL(serdev_device_alloc);
/**
* serdev_controller_alloc() - Allocate a new serdev controller
* @parent: parent device
* @size: size of private data
*
* Caller is responsible for either calling serdev_controller_add() to add the
* newly allocated controller, or calling serdev_controller_put() to discard it.
* The allocated private data region may be accessed via
* serdev_controller_get_drvdata().
*/
struct serdev_controller *serdev_controller_alloc(struct device *parent,
size_t size)
{
struct serdev_controller *ctrl;
int id;
if (WARN_ON(!parent))
return NULL;
ctrl = kzalloc(sizeof(*ctrl) + size, GFP_KERNEL);
if (!ctrl)
return NULL;
id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
dev_err(parent,
"unable to allocate serdev controller identifier.\n");
goto err_free;
}
ctrl->nr = id;
device_initialize(&ctrl->dev);
ctrl->dev.type = &serdev_ctrl_type;
ctrl->dev.bus = &serdev_bus_type;
ctrl->dev.parent = parent;
ctrl->dev.of_node = parent->of_node;
serdev_controller_set_drvdata(ctrl, &ctrl[1]);
dev_set_name(&ctrl->dev, "serial%d", id);
pm_runtime_no_callbacks(&ctrl->dev);
pm_suspend_ignore_children(&ctrl->dev, true);
dev_dbg(&ctrl->dev, "allocated controller 0x%p id %d\n", ctrl, id);
return ctrl;
err_free:
kfree(ctrl);
return NULL;
}
EXPORT_SYMBOL_GPL(serdev_controller_alloc);
static int of_serdev_register_devices(struct serdev_controller *ctrl)
{
struct device_node *node;
struct serdev_device *serdev = NULL;
int err;
bool found = false;
for_each_available_child_of_node(ctrl->dev.of_node, node) {
if (!of_get_property(node, "compatible", NULL))
continue;
dev_dbg(&ctrl->dev, "adding child %pOF\n", node);
serdev = serdev_device_alloc(ctrl);
if (!serdev)
continue;
device_set_node(&serdev->dev, of_fwnode_handle(node));
err = serdev_device_add(serdev);
if (err) {
dev_err(&serdev->dev,
"failure adding device. status %pe\n",
ERR_PTR(err));
serdev_device_put(serdev);
} else
found = true;
}
if (!found)
return -ENODEV;
return 0;
}
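/*
 * Illustrative device-tree sketch (the compatible string and property are
 * hypothetical): a serdev slave is described as a child node of its UART
 * controller, which is exactly what the loop above enumerates.
 *
 *	&uart1 {
 *		bluetooth {
 *			compatible = "vendor,demo-bt";
 *			max-speed = <115200>;
 *		};
 *	};
 */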
#ifdef CONFIG_ACPI
#define SERDEV_ACPI_MAX_SCAN_DEPTH 32
struct acpi_serdev_lookup {
acpi_handle device_handle;
acpi_handle controller_handle;
int n;
int index;
};
/**
* serdev_acpi_get_uart_resource - Gets UARTSerialBus resource if type matches
* @ares: ACPI resource
* @uart: Pointer to UARTSerialBus resource will be returned here
*
* Checks if the given ACPI resource is of type UARTSerialBus.
* If so, a pointer to it is returned to the caller.
*
* Return: True if resource type is of UARTSerialBus, otherwise false.
*/
bool serdev_acpi_get_uart_resource(struct acpi_resource *ares,
struct acpi_resource_uart_serialbus **uart)
{
struct acpi_resource_uart_serialbus *sb;
if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
return false;
sb = &ares->data.uart_serial_bus;
if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_UART)
return false;
*uart = sb;
return true;
}
EXPORT_SYMBOL_GPL(serdev_acpi_get_uart_resource);
static int acpi_serdev_parse_resource(struct acpi_resource *ares, void *data)
{
struct acpi_serdev_lookup *lookup = data;
struct acpi_resource_uart_serialbus *sb;
acpi_status status;
if (!serdev_acpi_get_uart_resource(ares, &sb))
return 1;
if (lookup->index != -1 && lookup->n++ != lookup->index)
return 1;
status = acpi_get_handle(lookup->device_handle,
sb->resource_source.string_ptr,
&lookup->controller_handle);
if (ACPI_FAILURE(status))
return 1;
/*
* NOTE: Ideally, we would also want to retrieve other properties here,
* once setting them before opening the device is supported by serdev.
*/
return 1;
}
static int acpi_serdev_do_lookup(struct acpi_device *adev,
struct acpi_serdev_lookup *lookup)
{
struct list_head resource_list;
int ret;
lookup->device_handle = acpi_device_handle(adev);
lookup->controller_handle = NULL;
lookup->n = 0;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
acpi_serdev_parse_resource, lookup);
acpi_dev_free_resource_list(&resource_list);
if (ret < 0)
return -EINVAL;
return 0;
}
static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
struct acpi_device *adev)
{
struct acpi_serdev_lookup lookup;
int ret;
if (acpi_bus_get_status(adev) || !adev->status.present)
return -EINVAL;
/* Look for UARTSerialBusV2 resource */
lookup.index = -1; // we only care for the last device
ret = acpi_serdev_do_lookup(adev, &lookup);
if (ret)
return ret;
/*
* Apple machines provide an empty resource template, so on those
* machines just look for immediate children with a "baud" property
* (from the _DSM method) instead.
*/
if (!lookup.controller_handle && x86_apple_machine &&
!acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, NULL))
acpi_get_parent(adev->handle, &lookup.controller_handle);
/* Make sure controller and ResourceSource handle match */
if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle)
return -ENODEV;
return 0;
}
static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
struct acpi_device *adev)
{
struct serdev_device *serdev;
int err;
serdev = serdev_device_alloc(ctrl);
if (!serdev) {
dev_err(&ctrl->dev, "failed to allocate serdev device for %s\n",
dev_name(&adev->dev));
return AE_NO_MEMORY;
}
ACPI_COMPANION_SET(&serdev->dev, adev);
acpi_device_set_enumerated(adev);
err = serdev_device_add(serdev);
if (err) {
dev_err(&serdev->dev,
"failure adding ACPI serdev device. status %pe\n",
ERR_PTR(err));
serdev_device_put(serdev);
}
return AE_OK;
}
static const struct acpi_device_id serdev_acpi_devices_blacklist[] = {
{ "INT3511", 0 },
{ "INT3512", 0 },
{ },
};
static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
void *data, void **return_value)
{
struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct serdev_controller *ctrl = data;
if (!adev || acpi_device_enumerated(adev))
return AE_OK;
/* Skip if blacklisted */
if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist))
return AE_OK;
if (acpi_serdev_check_resources(ctrl, adev))
return AE_OK;
return acpi_serdev_register_device(ctrl, adev);
}
static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
{
acpi_status status;
bool skip;
int ret;
if (!has_acpi_companion(ctrl->dev.parent))
return -ENODEV;
/*
* Skip registration on boards where the ACPI tables are known to
* contain buggy devices. Note serdev_controller_add() must still
* succeed in this case, so that the proper serdev devices can be
* added "manually" later.
*/
ret = acpi_quirk_skip_serdev_enumeration(ctrl->dev.parent, &skip);
if (ret)
return ret;
if (skip)
return 0;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
SERDEV_ACPI_MAX_SCAN_DEPTH,
acpi_serdev_add_device, NULL, ctrl, NULL);
if (ACPI_FAILURE(status))
dev_warn(&ctrl->dev, "failed to enumerate serdev slaves\n");
if (!ctrl->serdev)
return -ENODEV;
return 0;
}
#else
static inline int acpi_serdev_register_devices(struct serdev_controller *ctrl)
{
return -ENODEV;
}
#endif /* CONFIG_ACPI */
/**
* serdev_controller_add() - Add a serdev controller
* @ctrl: controller to be registered.
*
* Register a controller previously allocated via serdev_controller_alloc() with
* the serdev core.
*/
int serdev_controller_add(struct serdev_controller *ctrl)
{
int ret_of, ret_acpi, ret;
/* Can't register until after driver model init */
if (WARN_ON(!is_registered))
return -EAGAIN;
ret = device_add(&ctrl->dev);
if (ret)
return ret;
pm_runtime_enable(&ctrl->dev);
ret_of = of_serdev_register_devices(ctrl);
ret_acpi = acpi_serdev_register_devices(ctrl);
if (ret_of && ret_acpi) {
dev_dbg(&ctrl->dev, "no devices registered: of:%pe acpi:%pe\n",
ERR_PTR(ret_of), ERR_PTR(ret_acpi));
ret = -ENODEV;
goto err_rpm_disable;
}
dev_dbg(&ctrl->dev, "serdev%d registered: dev:%p\n",
ctrl->nr, &ctrl->dev);
return 0;
err_rpm_disable:
pm_runtime_disable(&ctrl->dev);
device_del(&ctrl->dev);
return ret;
};
EXPORT_SYMBOL_GPL(serdev_controller_add);
/* Remove a device associated with a controller */
static int serdev_remove_device(struct device *dev, void *data)
{
struct serdev_device *serdev = to_serdev_device(dev);
if (dev->type == &serdev_device_type)
serdev_device_remove(serdev);
return 0;
}
/**
* serdev_controller_remove(): remove a serdev controller
* @ctrl: controller to remove
*
* Remove a serdev controller. Caller is responsible for calling
* serdev_controller_put() to discard the allocated controller.
*/
void serdev_controller_remove(struct serdev_controller *ctrl)
{
if (!ctrl)
return;
device_for_each_child(&ctrl->dev, NULL, serdev_remove_device);
pm_runtime_disable(&ctrl->dev);
device_del(&ctrl->dev);
}
EXPORT_SYMBOL_GPL(serdev_controller_remove);
/**
* __serdev_device_driver_register() - Register client driver with serdev core
* @sdrv: client driver to be associated with client-device.
* @owner: client driver owner to set.
*
* This API will register the client driver with the serdev framework.
* It is typically called from the driver's module-init function.
*/
int __serdev_device_driver_register(struct serdev_device_driver *sdrv, struct module *owner)
{
sdrv->driver.bus = &serdev_bus_type;
sdrv->driver.owner = owner;
/* force drivers to async probe so I/O is possible in probe */
sdrv->driver.probe_type = PROBE_PREFER_ASYNCHRONOUS;
return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
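/*
 * Illustrative sketch (all names hypothetical): a minimal serdev client
 * driver normally registers through the module_serdev_device_driver()
 * helper, which ends up in __serdev_device_driver_register() above.
 *
 *	static const struct of_device_id demo_of_match[] = {
 *		{ .compatible = "vendor,demo-dev" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, demo_of_match);
 *
 *	static struct serdev_device_driver demo_driver = {
 *		.probe = demo_probe,
 *		.driver = {
 *			.name = "demo-dev",
 *			.of_match_table = demo_of_match,
 *		},
 *	};
 *	module_serdev_device_driver(demo_driver);
 */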
static void __exit serdev_exit(void)
{
bus_unregister(&serdev_bus_type);
ida_destroy(&ctrl_ida);
}
module_exit(serdev_exit);
static int __init serdev_init(void)
{
int ret;
ret = bus_register(&serdev_bus_type);
if (ret)
return ret;
is_registered = true;
return 0;
}
/* Must be before serial drivers register */
postcore_initcall(serdev_init);
MODULE_AUTHOR("Rob Herring <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Serial attached device bus");
| linux-master | drivers/tty/serdev/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/serdev.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/poll.h>
#define SERPORT_ACTIVE 1
struct serport {
struct tty_port *port;
struct tty_struct *tty;
struct tty_driver *tty_drv;
int tty_idx;
unsigned long flags;
};
/*
* Callback functions from the tty port.
*/
static size_t ttyport_receive_buf(struct tty_port *port, const u8 *cp,
const u8 *fp, size_t count)
{
struct serdev_controller *ctrl = port->client_data;
struct serport *serport = serdev_controller_get_drvdata(ctrl);
int ret;
if (!test_bit(SERPORT_ACTIVE, &serport->flags))
return 0;
ret = serdev_controller_receive_buf(ctrl, cp, count);
dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count,
"receive_buf returns %d (count = %zu)\n",
ret, count);
if (ret < 0)
return 0;
else if (ret > count)
return count;
return ret;
}
static void ttyport_write_wakeup(struct tty_port *port)
{
struct serdev_controller *ctrl = port->client_data;
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty;
tty = tty_port_tty_get(port);
if (!tty)
return;
if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) &&
test_bit(SERPORT_ACTIVE, &serport->flags))
serdev_controller_write_wakeup(ctrl);
/* Wake up any tty_wait_until_sent() */
wake_up_interruptible(&tty->write_wait);
tty_kref_put(tty);
}
static const struct tty_port_client_operations client_ops = {
.receive_buf = ttyport_receive_buf,
.write_wakeup = ttyport_write_wakeup,
};
/*
* Callback functions from the serdev core.
*/
static int ttyport_write_buf(struct serdev_controller *ctrl, const unsigned char *data, size_t len)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
if (!test_bit(SERPORT_ACTIVE, &serport->flags))
return 0;
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
return tty->ops->write(serport->tty, data, len);
}
static void ttyport_write_flush(struct serdev_controller *ctrl)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
tty_driver_flush_buffer(tty);
}
static int ttyport_write_room(struct serdev_controller *ctrl)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
return tty_write_room(tty);
}
static int ttyport_open(struct serdev_controller *ctrl)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty;
struct ktermios ktermios;
int ret;
tty = tty_init_dev(serport->tty_drv, serport->tty_idx);
if (IS_ERR(tty))
return PTR_ERR(tty);
serport->tty = tty;
if (!tty->ops->open || !tty->ops->close) {
ret = -ENODEV;
goto err_unlock;
}
ret = tty->ops->open(serport->tty, NULL);
if (ret)
goto err_close;
tty_unlock(serport->tty);
/* Bring the UART into a known 8 bits no parity hw fc state */
ktermios = tty->termios;
ktermios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP |
INLCR | IGNCR | ICRNL | IXON);
ktermios.c_oflag &= ~OPOST;
ktermios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
ktermios.c_cflag &= ~(CSIZE | PARENB);
ktermios.c_cflag |= CS8;
ktermios.c_cflag |= CRTSCTS;
/* Hangups are not supported so make sure to ignore carrier detect. */
ktermios.c_cflag |= CLOCAL;
tty_set_termios(tty, &ktermios);
set_bit(SERPORT_ACTIVE, &serport->flags);
return 0;
err_close:
tty->ops->close(tty, NULL);
err_unlock:
tty_unlock(tty);
tty_release_struct(tty, serport->tty_idx);
return ret;
}
static void ttyport_close(struct serdev_controller *ctrl)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
clear_bit(SERPORT_ACTIVE, &serport->flags);
tty_lock(tty);
if (tty->ops->close)
tty->ops->close(tty, NULL);
tty_unlock(tty);
tty_release_struct(tty, serport->tty_idx);
}
static unsigned int ttyport_set_baudrate(struct serdev_controller *ctrl, unsigned int speed)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
struct ktermios ktermios = tty->termios;
ktermios.c_cflag &= ~CBAUD;
tty_termios_encode_baud_rate(&ktermios, speed, speed);
/* tty_set_termios() return value is not checked as it is always 0 */
tty_set_termios(tty, &ktermios);
return ktermios.c_ospeed;
}
static void ttyport_set_flow_control(struct serdev_controller *ctrl, bool enable)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
struct ktermios ktermios = tty->termios;
if (enable)
ktermios.c_cflag |= CRTSCTS;
else
ktermios.c_cflag &= ~CRTSCTS;
tty_set_termios(tty, &ktermios);
}
static int ttyport_set_parity(struct serdev_controller *ctrl,
enum serdev_parity parity)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
struct ktermios ktermios = tty->termios;
ktermios.c_cflag &= ~(PARENB | PARODD | CMSPAR);
if (parity != SERDEV_PARITY_NONE) {
ktermios.c_cflag |= PARENB;
if (parity == SERDEV_PARITY_ODD)
ktermios.c_cflag |= PARODD;
}
tty_set_termios(tty, &ktermios);
if ((tty->termios.c_cflag & (PARENB | PARODD | CMSPAR)) !=
(ktermios.c_cflag & (PARENB | PARODD | CMSPAR)))
return -EINVAL;
return 0;
}
static void ttyport_wait_until_sent(struct serdev_controller *ctrl, long timeout)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
tty_wait_until_sent(tty, timeout);
}
static int ttyport_get_tiocm(struct serdev_controller *ctrl)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
if (!tty->ops->tiocmget)
return -EOPNOTSUPP;
return tty->ops->tiocmget(tty);
}
static int ttyport_set_tiocm(struct serdev_controller *ctrl, unsigned int set, unsigned int clear)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
if (!tty->ops->tiocmset)
return -EOPNOTSUPP;
return tty->ops->tiocmset(tty, set, clear);
}
static int ttyport_break_ctl(struct serdev_controller *ctrl, unsigned int break_state)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
if (!tty->ops->break_ctl)
return -EOPNOTSUPP;
return tty->ops->break_ctl(tty, break_state);
}
static const struct serdev_controller_ops ctrl_ops = {
.write_buf = ttyport_write_buf,
.write_flush = ttyport_write_flush,
.write_room = ttyport_write_room,
.open = ttyport_open,
.close = ttyport_close,
.set_flow_control = ttyport_set_flow_control,
.set_parity = ttyport_set_parity,
.set_baudrate = ttyport_set_baudrate,
.wait_until_sent = ttyport_wait_until_sent,
.get_tiocm = ttyport_get_tiocm,
.set_tiocm = ttyport_set_tiocm,
.break_ctl = ttyport_break_ctl,
};
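/*
 * These entry points are used by the tty core: tty_port_register_device_serdev()
 * tries to bind a serdev controller on top of a tty port via
 * serdev_tty_port_register(), and tears it down again through
 * serdev_tty_port_unregister() when the port is unregistered.
 */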
struct device *serdev_tty_port_register(struct tty_port *port,
struct device *parent,
struct tty_driver *drv, int idx)
{
struct serdev_controller *ctrl;
struct serport *serport;
int ret;
if (!port || !drv || !parent)
return ERR_PTR(-ENODEV);
ctrl = serdev_controller_alloc(parent, sizeof(struct serport));
if (!ctrl)
return ERR_PTR(-ENOMEM);
serport = serdev_controller_get_drvdata(ctrl);
serport->port = port;
serport->tty_idx = idx;
serport->tty_drv = drv;
ctrl->ops = &ctrl_ops;
port->client_ops = &client_ops;
port->client_data = ctrl;
ret = serdev_controller_add(ctrl);
if (ret)
goto err_reset_data;
dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx);
return &ctrl->dev;
err_reset_data:
port->client_data = NULL;
port->client_ops = &tty_port_default_client_ops;
serdev_controller_put(ctrl);
return ERR_PTR(ret);
}
int serdev_tty_port_unregister(struct tty_port *port)
{
struct serdev_controller *ctrl = port->client_data;
struct serport *serport = serdev_controller_get_drvdata(ctrl);
if (!serport)
return -ENODEV;
serdev_controller_remove(ctrl);
port->client_data = NULL;
port->client_ops = &tty_port_default_client_ops;
serdev_controller_put(ctrl);
return 0;
}
| linux-master | drivers/tty/serdev/serdev-ttyport.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2010, 2014, 2022 The Linux Foundation. All rights reserved. */
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kfifo.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <asm/dcc.h>
#include <asm/processor.h>
#include "hvc_console.h"
/* DCC Status Bits */
#define DCC_STATUS_RX (1 << 30)
#define DCC_STATUS_TX (1 << 29)
#define DCC_INBUF_SIZE 128
#define DCC_OUTBUF_SIZE 1024
/* Lock to serialize access to DCC fifo */
static DEFINE_SPINLOCK(dcc_lock);
static DEFINE_KFIFO(inbuf, unsigned char, DCC_INBUF_SIZE);
static DEFINE_KFIFO(outbuf, unsigned char, DCC_OUTBUF_SIZE);
static void dcc_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
while (__dcc_getstatus() & DCC_STATUS_TX)
cpu_relax();
__dcc_putchar(ch);
}
static void dcc_early_write(struct console *con, const char *s, unsigned n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, dcc_uart_console_putchar);
}
static int __init dcc_early_console_setup(struct earlycon_device *device,
const char *opt)
{
device->con->write = dcc_early_write;
return 0;
}
EARLYCON_DECLARE(dcc, dcc_early_console_setup);
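/*
 * The EARLYCON_DECLARE() above makes the DCC usable as an early console by
 * passing "earlycon=dcc" on the kernel command line.
 */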
static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
{
int i;
for (i = 0; i < count; i++) {
while (__dcc_getstatus() & DCC_STATUS_TX)
cpu_relax();
__dcc_putchar(buf[i]);
}
return count;
}
static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
{
int i;
for (i = 0; i < count; ++i)
if (__dcc_getstatus() & DCC_STATUS_RX)
buf[i] = __dcc_getchar();
else
break;
return i;
}
/*
* Check if the DCC is enabled. If CONFIG_HVC_DCC_SERIALIZE_SMP is enabled,
* then we assume that this function will be called first on core0. That way,
* dcc_core0_available will be true only if it's available on core0.
*/
static bool hvc_dcc_check(void)
{
unsigned long time = jiffies + (HZ / 10);
static bool dcc_core0_available;
/*
* If we're not on core 0, but we previously confirmed that DCC is
* active, then just return true.
*/
int cpu = get_cpu();
if (IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP) && cpu && dcc_core0_available) {
put_cpu();
return true;
}
put_cpu();
/* Write a test character to check if it is handled */
__dcc_putchar('\n');
while (time_is_after_jiffies(time)) {
if (!(__dcc_getstatus() & DCC_STATUS_TX)) {
dcc_core0_available = true;
return true;
}
}
return false;
}
/*
* Workqueue function that writes the output FIFO to the DCC on core 0.
*/
static void dcc_put_work(struct work_struct *work)
{
unsigned char ch;
unsigned long irqflags;
spin_lock_irqsave(&dcc_lock, irqflags);
/* While there's data in the output FIFO, write it to the DCC */
while (kfifo_get(&outbuf, &ch))
hvc_dcc_put_chars(0, &ch, 1);
/* While we're at it, check for any input characters */
while (!kfifo_is_full(&inbuf)) {
if (!hvc_dcc_get_chars(0, &ch, 1))
break;
kfifo_put(&inbuf, ch);
}
spin_unlock_irqrestore(&dcc_lock, irqflags);
}
static DECLARE_WORK(dcc_pwork, dcc_put_work);
/*
* Workqueue function that reads characters from DCC and puts them into the
* input FIFO.
*/
static void dcc_get_work(struct work_struct *work)
{
unsigned char ch;
unsigned long irqflags;
/*
* Read characters from DCC and put them into the input FIFO, as
* long as there is room and we have characters to read.
*/
spin_lock_irqsave(&dcc_lock, irqflags);
while (!kfifo_is_full(&inbuf)) {
if (!hvc_dcc_get_chars(0, &ch, 1))
break;
kfifo_put(&inbuf, ch);
}
spin_unlock_irqrestore(&dcc_lock, irqflags);
}
static DECLARE_WORK(dcc_gwork, dcc_get_work);
/*
* Write characters directly to the DCC if we're on core 0 and the FIFO
* is empty, or write them to the FIFO if we're not.
*/
static int hvc_dcc0_put_chars(u32 vt, const char *buf, int count)
{
int len;
unsigned long irqflags;
if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
return hvc_dcc_put_chars(vt, buf, count);
spin_lock_irqsave(&dcc_lock, irqflags);
if (smp_processor_id() || (!kfifo_is_empty(&outbuf))) {
len = kfifo_in(&outbuf, buf, count);
spin_unlock_irqrestore(&dcc_lock, irqflags);
/*
* We just push data to the output FIFO, so schedule the
* workqueue that will actually write that data to DCC.
* CPU hotplug is disabled in dcc_init so CPU0 cannot be
* offlined after the cpu online check.
*/
if (cpu_online(0))
schedule_work_on(0, &dcc_pwork);
return len;
}
/*
* If we're already on core 0, and the FIFO is empty, then just
* write the data to DCC.
*/
len = hvc_dcc_put_chars(vt, buf, count);
spin_unlock_irqrestore(&dcc_lock, irqflags);
return len;
}
/*
* Read characters directly from the DCC if we're on core 0 and the FIFO
* is empty, or read them from the FIFO if we're not.
*/
static int hvc_dcc0_get_chars(u32 vt, char *buf, int count)
{
int len;
unsigned long irqflags;
if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
return hvc_dcc_get_chars(vt, buf, count);
spin_lock_irqsave(&dcc_lock, irqflags);
if (smp_processor_id() || (!kfifo_is_empty(&inbuf))) {
len = kfifo_out(&inbuf, buf, count);
spin_unlock_irqrestore(&dcc_lock, irqflags);
/*
* If the FIFO was empty, there may be characters in the DCC
* that we haven't read yet. Schedule a workqueue to fill
* the input FIFO, so that the next time this function is
* called, we'll have data. CPU hotplug is disabled in dcc_init
* so CPU0 cannot be offlined after the cpu online check.
*/
if (!len && cpu_online(0))
schedule_work_on(0, &dcc_gwork);
return len;
}
/*
* If we're already on core 0, and the FIFO is empty, then just
* read the data from DCC.
*/
len = hvc_dcc_get_chars(vt, buf, count);
spin_unlock_irqrestore(&dcc_lock, irqflags);
return len;
}
static const struct hv_ops hvc_dcc_get_put_ops = {
.get_chars = hvc_dcc0_get_chars,
.put_chars = hvc_dcc0_put_chars,
};
static int __init hvc_dcc_console_init(void)
{
int ret;
if (!hvc_dcc_check())
return -ENODEV;
/* Returns -1 if error */
ret = hvc_instantiate(0, 0, &hvc_dcc_get_put_ops);
return ret < 0 ? -ENODEV : 0;
}
console_initcall(hvc_dcc_console_init);
static int __init hvc_dcc_init(void)
{
struct hvc_struct *p;
if (!hvc_dcc_check())
return -ENODEV;
if (IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP)) {
pr_warn("\n");
pr_warn("********************************************************************\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("** **\n");
pr_warn("** HVC_DCC_SERIALIZE_SMP SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
pr_warn("** **\n");
pr_warn("** This means that this is a DEBUG kernel and unsafe for **\n");
pr_warn("** production use and has important feature like CPU hotplug **\n");
pr_warn("** disabled. **\n");
pr_warn("** **\n");
pr_warn("** If you see this message and you are not debugging the **\n");
pr_warn("** kernel, report this immediately to your vendor! **\n");
pr_warn("** **\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("********************************************************************\n");
cpu_hotplug_disable();
}
p = hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
return PTR_ERR_OR_ZERO(p);
}
device_initcall(hvc_dcc_init);
| linux-master | drivers/tty/hvc/hvc_dcc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* udbg interface to hvc_console.c
*
* (C) Copyright David Gibson, IBM Corporation 2008.
*/
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <asm/udbg.h>
#include "hvc_console.h"
static struct hvc_struct *hvc_udbg_dev;
static int hvc_udbg_put(uint32_t vtermno, const char *buf, int count)
{
int i;
for (i = 0; i < count && udbg_putc; i++)
udbg_putc(buf[i]);
return i;
}
static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
{
int i, c;
if (!udbg_getc_poll)
return 0;
for (i = 0; i < count; i++) {
if ((c = udbg_getc_poll()) == -1)
break;
buf[i] = c;
}
return i;
}
static const struct hv_ops hvc_udbg_ops = {
.get_chars = hvc_udbg_get,
.put_chars = hvc_udbg_put,
};
static int __init hvc_udbg_init(void)
{
struct hvc_struct *hp;
if (!udbg_putc)
return -ENODEV;
BUG_ON(hvc_udbg_dev);
hp = hvc_alloc(0, 0, &hvc_udbg_ops, 16);
if (IS_ERR(hp))
return PTR_ERR(hp);
hvc_udbg_dev = hp;
return 0;
}
device_initcall(hvc_udbg_init);
static int __init hvc_udbg_console_init(void)
{
if (!udbg_putc)
return -ENODEV;
hvc_instantiate(0, 0, &hvc_udbg_ops);
add_preferred_console("hvc", 0, NULL);
return 0;
}
console_initcall(hvc_udbg_console_init);
| linux-master | drivers/tty/hvc/hvc_udbg.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <asm/hvsi.h>
#include "hvc_console.h"
static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
{
packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
/* Assumed to always succeed; it does in practice */
return pv->put_chars(pv->termno, (char *)packet, packet->len);
}
static void hvsi_start_handshake(struct hvsi_priv *pv)
{
struct hvsi_query q;
/* Reset state */
pv->established = 0;
atomic_set(&pv->seqno, 0);
pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
/* Send version query */
q.hdr.type = VS_QUERY_PACKET_HEADER;
q.hdr.len = sizeof(struct hvsi_query);
q.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
hvsi_send_packet(pv, &q.hdr);
}
static int hvsi_send_close(struct hvsi_priv *pv)
{
struct hvsi_control ctrl;
pv->established = 0;
ctrl.hdr.type = VS_CONTROL_PACKET_HEADER;
ctrl.hdr.len = sizeof(struct hvsi_control);
ctrl.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);
return hvsi_send_packet(pv, &ctrl.hdr);
}
static void hvsi_cd_change(struct hvsi_priv *pv, int cd)
{
if (cd)
pv->mctrl |= TIOCM_CD;
else {
pv->mctrl &= ~TIOCM_CD;
/* We copy the existing hvsi driver semantics
* here which are to trigger a hangup when
* we get a carrier loss.
* Closing our connection to the server will
* do just that.
*/
if (!pv->is_console && pv->opened) {
pr_devel("HVSI@%x Carrier lost, hanging up !\n",
pv->termno);
hvsi_send_close(pv);
}
}
}
static void hvsi_got_control(struct hvsi_priv *pv)
{
struct hvsi_control *pkt = (struct hvsi_control *)pv->inbuf;
switch (be16_to_cpu(pkt->verb)) {
case VSV_CLOSE_PROTOCOL:
/* We restart the handshaking */
hvsi_start_handshake(pv);
break;
case VSV_MODEM_CTL_UPDATE:
/* Transition of carrier detect */
hvsi_cd_change(pv, be32_to_cpu(pkt->word) & HVSI_TSCD);
break;
}
}
static void hvsi_got_query(struct hvsi_priv *pv)
{
struct hvsi_query *pkt = (struct hvsi_query *)pv->inbuf;
struct hvsi_query_response r;
/* We only handle version queries */
if (be16_to_cpu(pkt->verb) != VSV_SEND_VERSION_NUMBER)
return;
pr_devel("HVSI@%x: Got version query, sending response...\n",
pv->termno);
/* Send version response */
r.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
r.hdr.len = sizeof(struct hvsi_query_response);
r.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
r.u.version = HVSI_VERSION;
r.query_seqno = pkt->hdr.seqno;
hvsi_send_packet(pv, &r.hdr);
/* Assume protocol is open now */
pv->established = 1;
}
static void hvsi_got_response(struct hvsi_priv *pv)
{
struct hvsi_query_response *r =
(struct hvsi_query_response *)pv->inbuf;
switch (r->verb) {
case VSV_SEND_MODEM_CTL_STATUS:
hvsi_cd_change(pv, be32_to_cpu(r->u.mctrl_word) & HVSI_TSCD);
pv->mctrl_update = 1;
break;
}
}
static int hvsi_check_packet(struct hvsi_priv *pv)
{
u8 len, type;
/* Check header validity. If it's invalid, we ditch
* the whole buffer and hope we eventually resync
*/
if (pv->inbuf[0] < 0xfc) {
pv->inbuf_len = pv->inbuf_pktlen = 0;
return 0;
}
type = pv->inbuf[0];
len = pv->inbuf[1];
/* Packet incomplete? */
if (pv->inbuf_len < len)
return 0;
pr_devel("HVSI@%x: Got packet type %x len %d bytes:\n",
pv->termno, type, len);
/* We have a packet, yay! Handle it */
switch (type) {
case VS_DATA_PACKET_HEADER:
pv->inbuf_pktlen = len - 4;
pv->inbuf_cur = 4;
return 1;
case VS_CONTROL_PACKET_HEADER:
hvsi_got_control(pv);
break;
case VS_QUERY_PACKET_HEADER:
hvsi_got_query(pv);
break;
case VS_QUERY_RESPONSE_PACKET_HEADER:
hvsi_got_response(pv);
break;
}
/* Swallow packet and retry */
pv->inbuf_len -= len;
memmove(pv->inbuf, &pv->inbuf[len], pv->inbuf_len);
return 1;
}
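/*
 * For reference, the wire format handled above (see struct hvsi_header in
 * asm/hvsi.h): byte 0 is the packet type (valid types are >= 0xfc), byte 1
 * the total packet length including the 4-byte header, and bytes 2-3 a
 * sequence number. A data packet thus carries len - 4 payload bytes
 * starting at offset 4, which is what the VS_DATA_PACKET_HEADER case sets
 * up in inbuf_pktlen and inbuf_cur.
 */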
static int hvsi_get_packet(struct hvsi_priv *pv)
{
/* If we have room in the buffer, ask HV for more */
if (pv->inbuf_len < HVSI_INBUF_SIZE)
pv->inbuf_len += pv->get_chars(pv->termno,
&pv->inbuf[pv->inbuf_len],
HVSI_INBUF_SIZE - pv->inbuf_len);
/*
* If we have at least 4 bytes in the buffer, check for
* a full packet and retry
*/
if (pv->inbuf_len >= 4)
return hvsi_check_packet(pv);
return 0;
}
int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count)
{
unsigned int tries, read = 0;
if (WARN_ON(!pv))
return -ENXIO;
/* If we aren't open, don't do anything in order to avoid races
* with connection establishment. The hvc core will call this
* before we have returned from notifier_add(), and we need to
* avoid multiple users playing with the receive buffer
*/
if (!pv->opened)
return 0;
/* We try twice, once with what data we have and once more
* after we try to fetch some more from the hypervisor
*/
for (tries = 1; count && tries < 2; tries++) {
/* Consume existing data packet */
if (pv->inbuf_pktlen) {
unsigned int l = min(count, (int)pv->inbuf_pktlen);
memcpy(&buf[read], &pv->inbuf[pv->inbuf_cur], l);
pv->inbuf_cur += l;
pv->inbuf_pktlen -= l;
count -= l;
read += l;
}
if (count == 0)
break;
/* Data packet fully consumed, move down remaining data */
if (pv->inbuf_cur) {
pv->inbuf_len -= pv->inbuf_cur;
memmove(pv->inbuf, &pv->inbuf[pv->inbuf_cur],
pv->inbuf_len);
pv->inbuf_cur = 0;
}
/* Try to get another packet */
if (hvsi_get_packet(pv))
tries--;
}
if (!pv->established) {
pr_devel("HVSI@%x: returning -EPIPE\n", pv->termno);
return -EPIPE;
}
return read;
}
int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count)
{
struct hvsi_data dp;
int rc, adjcount = min(count, HVSI_MAX_OUTGOING_DATA);
if (WARN_ON(!pv))
return -ENODEV;
dp.hdr.type = VS_DATA_PACKET_HEADER;
dp.hdr.len = adjcount + sizeof(struct hvsi_header);
memcpy(dp.data, buf, adjcount);
rc = hvsi_send_packet(pv, &dp.hdr);
if (rc <= 0)
return rc;
return adjcount;
}
static void maybe_msleep(unsigned long ms)
{
/* During early boot, IRQs are disabled, use mdelay */
if (irqs_disabled())
mdelay(ms);
else
msleep(ms);
}
int hvsilib_read_mctrl(struct hvsi_priv *pv)
{
struct hvsi_query q;
int rc, timeout;
pr_devel("HVSI@%x: Querying modem control status...\n",
pv->termno);
pv->mctrl_update = 0;
q.hdr.type = VS_QUERY_PACKET_HEADER;
q.hdr.len = sizeof(struct hvsi_query);
q.verb = cpu_to_be16(VSV_SEND_MODEM_CTL_STATUS);
rc = hvsi_send_packet(pv, &q.hdr);
if (rc <= 0) {
pr_devel("HVSI@%x: Error %d...\n", pv->termno, rc);
return rc;
}
/* Try for up to 200ms */
for (timeout = 0; timeout < 20; timeout++) {
if (!pv->established)
return -ENXIO;
if (pv->mctrl_update)
return 0;
if (!hvsi_get_packet(pv))
maybe_msleep(10);
}
return -EIO;
}
int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr)
{
struct hvsi_control ctrl;
unsigned short mctrl;
mctrl = pv->mctrl;
if (dtr)
mctrl |= TIOCM_DTR;
else
mctrl &= ~TIOCM_DTR;
if (mctrl == pv->mctrl)
return 0;
pv->mctrl = mctrl;
pr_devel("HVSI@%x: %s DTR...\n", pv->termno,
dtr ? "Setting" : "Clearing");
ctrl.hdr.type = VS_CONTROL_PACKET_HEADER;
ctrl.hdr.len = sizeof(struct hvsi_control);
ctrl.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
ctrl.mask = cpu_to_be32(HVSI_TSDTR);
ctrl.word = cpu_to_be32(dtr ? HVSI_TSDTR : 0);
return hvsi_send_packet(pv, &ctrl.hdr);
}
void hvsilib_establish(struct hvsi_priv *pv)
{
int timeout;
pr_devel("HVSI@%x: Establishing...\n", pv->termno);
/* Try for up to 200ms; a packet that starts the process
* may already be waiting for us...
*/
for (timeout = 0; timeout < 20; timeout++) {
if (pv->established)
goto established;
if (!hvsi_get_packet(pv))
maybe_msleep(10);
}
/* Failed, send a close connection packet just
* in case
*/
pr_devel("HVSI@%x: ... sending close\n", pv->termno);
hvsi_send_close(pv);
/* Then restart handshake */
pr_devel("HVSI@%x: ... restarting handshake\n", pv->termno);
hvsi_start_handshake(pv);
pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno);
/* Try for up to 400ms */
for (timeout = 0; timeout < 40; timeout++) {
if (pv->established)
goto established;
if (!hvsi_get_packet(pv))
maybe_msleep(10);
}
if (!pv->established) {
pr_devel("HVSI@%x: Timeout handshaking, giving up !\n",
pv->termno);
return;
}
established:
/* Query modem control lines */
pr_devel("HVSI@%x: ... established, reading mctrl\n", pv->termno);
hvsilib_read_mctrl(pv);
/* Set our own DTR */
pr_devel("HVSI@%x: ... setting mctrl\n", pv->termno);
hvsilib_write_mctrl(pv, 1);
/* Set the opened flag so reads are allowed */
wmb();
pv->opened = 1;
}
int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp)
{
pr_devel("HVSI@%x: open !\n", pv->termno);
/* Keep track of the tty data structure */
pv->tty = tty_port_tty_get(&hp->port);
hvsilib_establish(pv);
return 0;
}
void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp)
{
unsigned long flags;
pr_devel("HVSI@%x: close !\n", pv->termno);
if (!pv->is_console) {
pr_devel("HVSI@%x: Not a console, tearing down\n",
pv->termno);
/* Clear opened, synchronize with khvcd */
spin_lock_irqsave(&hp->lock, flags);
pv->opened = 0;
spin_unlock_irqrestore(&hp->lock, flags);
/* Clear our own DTR */
if (!pv->tty || (pv->tty->termios.c_cflag & HUPCL))
hvsilib_write_mctrl(pv, 0);
/* Tear down the connection */
hvsi_send_close(pv);
}
tty_kref_put(pv->tty);
pv->tty = NULL;
}
void hvsilib_init(struct hvsi_priv *pv,
int (*get_chars)(uint32_t termno, char *buf, int count),
int (*put_chars)(uint32_t termno, const char *buf,
int count),
int termno, int is_console)
{
memset(pv, 0, sizeof(*pv));
pv->get_chars = get_chars;
pv->put_chars = put_chars;
pv->termno = termno;
pv->is_console = is_console;
}
| linux-master | drivers/tty/hvc/hvsi_lib.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* IBM RTAS driver interface to hvc_console.c
*
* (C) Copyright IBM Corporation 2001-2005
* (C) Copyright Red Hat, Inc. 2005
*
* Author(s): Maximino Augilar <IBM STI Design Center>
* : Ryan S. Arnold <[email protected]>
* : Utz Bacher <[email protected]>
* : David Woodhouse <[email protected]>
*
* inspired by drivers/char/hvc_console.c
* written by Anton Blanchard and Paul Mackerras
*/
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <asm/irq.h>
#include <asm/rtas.h>
#include "hvc_console.h"
#define hvc_rtas_cookie 0x67781e15
static struct hvc_struct *hvc_rtas_dev;
static int rtascons_put_char_token = RTAS_UNKNOWN_SERVICE;
static int rtascons_get_char_token = RTAS_UNKNOWN_SERVICE;
static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf,
int count)
{
int i;
for (i = 0; i < count; i++) {
if (rtas_call(rtascons_put_char_token, 1, 1, NULL, buf[i]))
break;
}
return i;
}
static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
{
int i, c;
for (i = 0; i < count; i++) {
if (rtas_call(rtascons_get_char_token, 0, 2, &c))
break;
buf[i] = c;
}
return i;
}
static const struct hv_ops hvc_rtas_get_put_ops = {
.get_chars = hvc_rtas_read_console,
.put_chars = hvc_rtas_write_console,
};
static int __init hvc_rtas_init(void)
{
struct hvc_struct *hp;
if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
rtascons_put_char_token = rtas_token("put-term-char");
if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
return -EIO;
if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
rtascons_get_char_token = rtas_token("get-term-char");
if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
return -EIO;
BUG_ON(hvc_rtas_dev);
/* Allocate an hvc_struct for the console device we instantiated
* earlier. Save off hp so that we can return it on exit */
hp = hvc_alloc(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops, 16);
if (IS_ERR(hp))
return PTR_ERR(hp);
hvc_rtas_dev = hp;
return 0;
}
device_initcall(hvc_rtas_init);
/* This happens prior to module init; there is no tty at this time. */
static int __init hvc_rtas_console_init(void)
{
rtascons_put_char_token = rtas_token("put-term-char");
if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
return -EIO;
rtascons_get_char_token = rtas_token("get-term-char");
if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
return -EIO;
hvc_instantiate(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops);
add_preferred_console("hvc", 0, NULL);
return 0;
}
console_initcall(hvc_rtas_console_init);
| linux-master | drivers/tty/hvc/hvc_rtas.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* xen console driver interface to hvc_console.c
*
* (c) 2007 Gerd Hoffmann <[email protected]>
*/
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/hvm.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/events.h>
#include <xen/interface/io/console.h>
#include <xen/interface/sched.h>
#include <xen/hvc-console.h>
#include <xen/xenbus.h>
#include "hvc_console.h"
#define HVC_COOKIE 0x58656e /* "Xen" in hex */
struct xencons_info {
struct list_head list;
struct xenbus_device *xbdev;
struct xencons_interface *intf;
unsigned int evtchn;
XENCONS_RING_IDX out_cons;
unsigned int out_cons_same;
struct hvc_struct *hvc;
int irq;
int vtermno;
grant_ref_t gntref;
spinlock_t ring_lock;
};
static LIST_HEAD(xenconsoles);
static DEFINE_SPINLOCK(xencons_lock);
/* ------------------------------------------------------------------ */
static struct xencons_info *vtermno_to_xencons(int vtermno)
{
struct xencons_info *entry, *ret = NULL;
unsigned long flags;
spin_lock_irqsave(&xencons_lock, flags);
if (list_empty(&xenconsoles)) {
spin_unlock_irqrestore(&xencons_lock, flags);
return NULL;
}
list_for_each_entry(entry, &xenconsoles, list) {
if (entry->vtermno == vtermno) {
ret = entry;
break;
}
}
spin_unlock_irqrestore(&xencons_lock, flags);
return ret;
}
static inline int xenbus_devid_to_vtermno(int devid)
{
return devid + HVC_COOKIE;
}
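/*
 * Worked example (illustrative): xenbus node "console/1" yields devid 1,
 * so its vterm number is HVC_COOKIE + 1 = 0x58656f. devid 0 is the
 * primary console, which xencons_probe() below rejects because it is
 * handled by xen_hvc_init() with vterm number HVC_COOKIE itself.
 */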
static inline void notify_daemon(struct xencons_info *cons)
{
/* Use evtchn: this is called early, before irq is set up. */
notify_remote_via_evtchn(cons->evtchn);
}
static int __write_console(struct xencons_info *xencons,
const char *data, int len)
{
XENCONS_RING_IDX cons, prod;
struct xencons_interface *intf = xencons->intf;
int sent = 0;
unsigned long flags;
spin_lock_irqsave(&xencons->ring_lock, flags);
cons = intf->out_cons;
prod = intf->out_prod;
mb(); /* update queue values before going on */
if ((prod - cons) > sizeof(intf->out)) {
spin_unlock_irqrestore(&xencons->ring_lock, flags);
pr_err_once("xencons: Illegal ring page indices\n");
return -EINVAL;
}
while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
wmb(); /* write ring before updating pointer */
intf->out_prod = prod;
spin_unlock_irqrestore(&xencons->ring_lock, flags);
if (sent)
notify_daemon(xencons);
return sent;
}
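/*
 * Worked example of the free-running index scheme above (illustrative,
 * assuming the usual 2048-byte out ring): cons = 2046 and prod = 2050
 * give prod - cons = 4 pending bytes even though both indices have grown
 * past the ring size, because XENCONS_RING_IDX subtraction wraps.
 * MASK_XENCONS_IDX(2050, intf->out) then selects slot 2050 & 2047 = 2,
 * so the indices never need to be reset.
 */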
static int domU_write_console(uint32_t vtermno, const char *data, int len)
{
int ret = len;
struct xencons_info *cons = vtermno_to_xencons(vtermno);
if (cons == NULL)
return -EINVAL;
/*
* Make sure the whole buffer is emitted, polling if
* necessary. We don't ever want to rely on the hvc daemon
* because the most interesting console output is when the
* kernel is crippled.
*/
while (len) {
int sent = __write_console(cons, data, len);
if (sent < 0)
return sent;
data += sent;
len -= sent;
if (unlikely(len))
HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
}
return ret;
}
static int domU_read_console(uint32_t vtermno, char *buf, int len)
{
struct xencons_interface *intf;
XENCONS_RING_IDX cons, prod;
int recv = 0;
struct xencons_info *xencons = vtermno_to_xencons(vtermno);
unsigned int eoiflag = 0;
unsigned long flags;
if (xencons == NULL)
return -EINVAL;
intf = xencons->intf;
spin_lock_irqsave(&xencons->ring_lock, flags);
cons = intf->in_cons;
prod = intf->in_prod;
mb(); /* get pointers before reading ring */
if ((prod - cons) > sizeof(intf->in)) {
spin_unlock_irqrestore(&xencons->ring_lock, flags);
pr_err_once("xencons: Illegal ring page indices\n");
return -EINVAL;
}
while (cons != prod && recv < len)
buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
mb(); /* read ring before consuming */
intf->in_cons = cons;
/*
* When to mark interrupt having been spurious:
* - there was no new data to be read, and
* - the backend did not consume some output bytes, and
* - the previous round with no read data didn't see consumed bytes
* (we might have a race with an interrupt being in flight while
* updating xencons->out_cons, so account for that by allowing one
* round without any visible reason)
*/
if (intf->out_cons != xencons->out_cons) {
xencons->out_cons = intf->out_cons;
xencons->out_cons_same = 0;
}
if (!recv && xencons->out_cons_same++ > 1) {
eoiflag = XEN_EOI_FLAG_SPURIOUS;
}
spin_unlock_irqrestore(&xencons->ring_lock, flags);
if (recv) {
notify_daemon(xencons);
}
xen_irq_lateeoi(xencons->irq, eoiflag);
return recv;
}
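/*
 * Worked example of the spurious-EOI heuristic above (illustrative):
 * starting from out_cons_same == 0, the first two rounds with recv == 0
 * and no backend progress only bump the counter (0 -> 1 -> 2); on the
 * third such round the pre-increment value 2 satisfies "> 1" and the EOI
 * carries XEN_EOI_FLAG_SPURIOUS. Any received data or movement of
 * intf->out_cons resets the counter.
 */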
static const struct hv_ops domU_hvc_ops = {
.get_chars = domU_read_console,
.put_chars = domU_write_console,
.notifier_add = notifier_add_irq,
.notifier_del = notifier_del_irq,
.notifier_hangup = notifier_hangup_irq,
};
static int dom0_read_console(uint32_t vtermno, char *buf, int len)
{
return HYPERVISOR_console_io(CONSOLEIO_read, len, buf);
}
/*
* Either for a dom0 to write to the system console, or a domU with a
* debug version of Xen
*/
static int dom0_write_console(uint32_t vtermno, const char *str, int len)
{
int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
if (rc < 0)
return rc;
return len;
}
static const struct hv_ops dom0_hvc_ops = {
.get_chars = dom0_read_console,
.put_chars = dom0_write_console,
.notifier_add = notifier_add_irq,
.notifier_del = notifier_del_irq,
.notifier_hangup = notifier_hangup_irq,
};
static int xen_hvm_console_init(void)
{
int r;
uint64_t v = 0;
unsigned long gfn, flags;
struct xencons_info *info;
if (!xen_hvm_domain())
return -ENODEV;
info = vtermno_to_xencons(HVC_COOKIE);
if (!info) {
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
spin_lock_init(&info->ring_lock);
} else if (info->intf != NULL) {
/* already configured */
return 0;
}
/*
* If the toolstack (or the hypervisor) hasn't set these values, the
* default value is 0. Even though gfn = 0 and evtchn = 0 are
* theoretically correct values, in practice they never are and they
* mean that a legacy toolstack hasn't initialized the pv console correctly.
*/
r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
if (r < 0 || v == 0)
goto err;
info->evtchn = v;
v = 0;
r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
if (r < 0 || v == 0)
goto err;
gfn = v;
info->intf = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
if (info->intf == NULL)
goto err;
info->vtermno = HVC_COOKIE;
spin_lock_irqsave(&xencons_lock, flags);
list_add_tail(&info->list, &xenconsoles);
spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
err:
kfree(info);
return -ENODEV;
}
static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
{
spin_lock_init(&info->ring_lock);
info->evtchn = xen_start_info->console.domU.evtchn;
/* GFN == MFN for PV guest */
info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
info->vtermno = vtermno;
list_add_tail(&info->list, &xenconsoles);
return 0;
}
static int xen_pv_console_init(void)
{
struct xencons_info *info;
unsigned long flags;
if (!xen_pv_domain())
return -ENODEV;
if (!xen_start_info->console.domU.evtchn)
return -ENODEV;
info = vtermno_to_xencons(HVC_COOKIE);
if (!info) {
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
} else if (info->intf != NULL) {
/* already configured */
return 0;
}
spin_lock_irqsave(&xencons_lock, flags);
xencons_info_pv_init(info, HVC_COOKIE);
spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
}
static int xen_initial_domain_console_init(void)
{
struct xencons_info *info;
unsigned long flags;
if (!xen_initial_domain())
return -ENODEV;
info = vtermno_to_xencons(HVC_COOKIE);
if (!info) {
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
spin_lock_init(&info->ring_lock);
}
info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
info->vtermno = HVC_COOKIE;
spin_lock_irqsave(&xencons_lock, flags);
list_add_tail(&info->list, &xenconsoles);
spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
}
static void xen_console_update_evtchn(struct xencons_info *info)
{
if (xen_hvm_domain()) {
uint64_t v = 0;
int err;
err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
if (!err && v)
info->evtchn = v;
} else
info->evtchn = xen_start_info->console.domU.evtchn;
}
void xen_console_resume(void)
{
struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
if (info != NULL && info->irq) {
if (!xen_initial_domain())
xen_console_update_evtchn(info);
rebind_evtchn_irq(info->evtchn, info->irq);
}
}
#ifdef CONFIG_HVC_XEN_FRONTEND
static void xencons_disconnect_backend(struct xencons_info *info)
{
if (info->irq > 0)
unbind_from_irqhandler(info->irq, NULL);
info->irq = 0;
if (info->evtchn > 0)
xenbus_free_evtchn(info->xbdev, info->evtchn);
info->evtchn = 0;
if (info->gntref > 0)
gnttab_free_grant_references(info->gntref);
info->gntref = 0;
if (info->hvc != NULL)
hvc_remove(info->hvc);
info->hvc = NULL;
}
static void xencons_free(struct xencons_info *info)
{
free_page((unsigned long)info->intf);
info->intf = NULL;
info->vtermno = 0;
kfree(info);
}
static int xen_console_remove(struct xencons_info *info)
{
unsigned long flags;
xencons_disconnect_backend(info);
spin_lock_irqsave(&xencons_lock, flags);
list_del(&info->list);
spin_unlock_irqrestore(&xencons_lock, flags);
if (info->xbdev != NULL)
xencons_free(info);
else {
if (xen_hvm_domain())
iounmap(info->intf);
kfree(info);
}
return 0;
}
static void xencons_remove(struct xenbus_device *dev)
{
xen_console_remove(dev_get_drvdata(&dev->dev));
}
static int xencons_connect_backend(struct xenbus_device *dev,
struct xencons_info *info)
{
int ret, evtchn, devid, ref, irq;
struct xenbus_transaction xbt;
grant_ref_t gref_head;
ret = xenbus_alloc_evtchn(dev, &evtchn);
if (ret)
return ret;
info->evtchn = evtchn;
irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
if (irq < 0)
return irq;
info->irq = irq;
devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
info->hvc = hvc_alloc(xenbus_devid_to_vtermno(devid),
irq, &domU_hvc_ops, 256);
if (IS_ERR(info->hvc))
return PTR_ERR(info->hvc);
ret = gnttab_alloc_grant_references(1, &gref_head);
if (ret < 0)
return ret;
info->gntref = gref_head;
ref = gnttab_claim_grant_reference(&gref_head);
if (ref < 0)
return ref;
gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
virt_to_gfn(info->intf), 0);
again:
ret = xenbus_transaction_start(&xbt);
if (ret) {
xenbus_dev_fatal(dev, ret, "starting transaction");
return ret;
}
ret = xenbus_printf(xbt, dev->nodename, "ring-ref", "%d", ref);
if (ret)
goto error_xenbus;
ret = xenbus_printf(xbt, dev->nodename, "port", "%u",
evtchn);
if (ret)
goto error_xenbus;
ret = xenbus_transaction_end(xbt, 0);
if (ret) {
if (ret == -EAGAIN)
goto again;
xenbus_dev_fatal(dev, ret, "completing transaction");
return ret;
}
xenbus_switch_state(dev, XenbusStateInitialised);
return 0;
error_xenbus:
xenbus_transaction_end(xbt, 1);
xenbus_dev_fatal(dev, ret, "writing xenstore");
return ret;
}
static int xencons_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int ret, devid;
struct xencons_info *info;
unsigned long flags;
devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
if (devid == 0)
return -ENODEV;
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
spin_lock_init(&info->ring_lock);
dev_set_drvdata(&dev->dev, info);
info->xbdev = dev;
info->vtermno = xenbus_devid_to_vtermno(devid);
info->intf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (!info->intf)
goto error_nomem;
ret = xencons_connect_backend(dev, info);
if (ret < 0)
goto error;
spin_lock_irqsave(&xencons_lock, flags);
list_add_tail(&info->list, &xenconsoles);
spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
error_nomem:
ret = -ENOMEM;
xenbus_dev_fatal(dev, ret, "allocating device memory");
error:
xencons_disconnect_backend(info);
xencons_free(info);
return ret;
}
static int xencons_resume(struct xenbus_device *dev)
{
struct xencons_info *info = dev_get_drvdata(&dev->dev);
xencons_disconnect_backend(info);
memset(info->intf, 0, XEN_PAGE_SIZE);
return xencons_connect_backend(dev, info);
}
static void xencons_backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
switch (backend_state) {
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateInitialising:
case XenbusStateInitialised:
case XenbusStateUnknown:
break;
case XenbusStateInitWait:
break;
case XenbusStateConnected:
xenbus_switch_state(dev, XenbusStateConnected);
break;
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
}
}
static const struct xenbus_device_id xencons_ids[] = {
{ "console" },
{ "" }
};
static struct xenbus_driver xencons_driver = {
.name = "xenconsole",
.ids = xencons_ids,
.probe = xencons_probe,
.remove = xencons_remove,
.resume = xencons_resume,
.otherend_changed = xencons_backend_changed,
.not_essential = true,
};
#endif /* CONFIG_HVC_XEN_FRONTEND */
static int __init xen_hvc_init(void)
{
int r;
struct xencons_info *info;
const struct hv_ops *ops;
if (!xen_domain())
return -ENODEV;
if (xen_initial_domain()) {
ops = &dom0_hvc_ops;
r = xen_initial_domain_console_init();
if (r < 0)
return r;
info = vtermno_to_xencons(HVC_COOKIE);
} else {
ops = &domU_hvc_ops;
if (xen_hvm_domain())
r = xen_hvm_console_init();
else
r = xen_pv_console_init();
if (r < 0)
return r;
info = vtermno_to_xencons(HVC_COOKIE);
info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
}
if (info->irq < 0)
info->irq = 0; /* NO_IRQ */
else
irq_set_noprobe(info->irq);
info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256);
if (IS_ERR(info->hvc)) {
unsigned long flags;
r = PTR_ERR(info->hvc);
spin_lock_irqsave(&xencons_lock, flags);
list_del(&info->list);
spin_unlock_irqrestore(&xencons_lock, flags);
if (info->irq)
unbind_from_irqhandler(info->irq, NULL);
kfree(info);
return r;
}
r = 0;
#ifdef CONFIG_HVC_XEN_FRONTEND
r = xenbus_register_frontend(&xencons_driver);
#endif
return r;
}
device_initcall(xen_hvc_init);
static int xen_cons_init(void)
{
const struct hv_ops *ops;
if (!xen_domain())
return 0;
if (xen_initial_domain())
ops = &dom0_hvc_ops;
else {
int r;
ops = &domU_hvc_ops;
if (xen_hvm_domain())
r = xen_hvm_console_init();
else
r = xen_pv_console_init();
if (r < 0)
return r;
}
hvc_instantiate(HVC_COOKIE, 0, ops);
return 0;
}
console_initcall(xen_cons_init);
#ifdef CONFIG_X86
static void xen_hvm_early_write(uint32_t vtermno, const char *str, int len)
{
if (xen_cpuid_base())
outsb(0xe9, str, len);
}
#else
static void xen_hvm_early_write(uint32_t vtermno, const char *str, int len) { }
#endif
#ifdef CONFIG_EARLY_PRINTK
static int __init xenboot_console_setup(struct console *console, char *string)
{
static struct xencons_info xenboot;
if (xen_initial_domain() || !xen_pv_domain())
return 0;
return xencons_info_pv_init(&xenboot, 0);
}
static void xenboot_write_console(struct console *console, const char *string,
unsigned len)
{
unsigned int linelen, off = 0;
const char *pos;
if (dom0_write_console(0, string, len) >= 0)
return;
if (!xen_pv_domain()) {
xen_hvm_early_write(0, string, len);
return;
}
if (domU_write_console(0, "(early) ", 8) < 0)
return;
while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
linelen = pos - (string + off);
if (off + linelen > len)
break;
domU_write_console(0, string+off, linelen);
domU_write_console(0, "\r\n", 2);
off += linelen + 1;
}
if (off < len)
domU_write_console(0, string+off, len-off);
}
struct console xenboot_console = {
.name = "xenboot",
.write = xenboot_write_console,
.setup = xenboot_console_setup,
.flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
.index = -1,
};
#endif /* CONFIG_EARLY_PRINTK */
void xen_raw_console_write(const char *str)
{
ssize_t len = strlen(str);
int rc = 0;
if (xen_domain()) {
rc = dom0_write_console(0, str, len);
if (rc != -ENOSYS || !xen_hvm_domain())
return;
}
xen_hvm_early_write(0, str, len);
}
void xen_raw_printk(const char *fmt, ...)
{
static char buf[512];
va_list ap;
va_start(ap, fmt);
vsnprintf(buf, sizeof(buf), fmt, ap);
va_end(ap);
xen_raw_console_write(buf);
}
static void xenboot_earlycon_write(struct console *console,
const char *string,
unsigned len)
{
dom0_write_console(0, string, len);
}
static int __init xenboot_earlycon_setup(struct earlycon_device *device,
const char *opt)
{
device->con->write = xenboot_earlycon_write;
return 0;
}
EARLYCON_DECLARE(xenboot, xenboot_earlycon_setup);
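/*
 * Usage note (standard earlycon convention, assumed here): booting with
 * "earlycon=xenboot" on the kernel command line selects the setup hook
 * above, after which early writes go straight to the hypervisor via
 * dom0_write_console().
 */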
| linux-master | drivers/tty/hvc/hvc_xen.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2001,2008
*
* This file contains the IRQ specific code for hvc_console
*
*/
#include <linux/interrupt.h>
#include "hvc_console.h"
static irqreturn_t hvc_handle_interrupt(int irq, void *dev_instance)
{
/* if hvc_poll() requests a repoll, kick the hvc kernel thread */
if (hvc_poll(dev_instance))
hvc_kick();
/*
* We're safe to always return IRQ_HANDLED as the hvc kernel thread will
* iterate through each hvc_struct.
*/
return IRQ_HANDLED;
}
/*
* For IRQ based systems these callbacks can be used
*/
int notifier_add_irq(struct hvc_struct *hp, int irq)
{
int rc;
if (!irq) {
hp->irq_requested = 0;
return 0;
}
rc = request_irq(irq, hvc_handle_interrupt, hp->flags,
"hvc_console", hp);
if (!rc)
hp->irq_requested = 1;
return rc;
}
void notifier_del_irq(struct hvc_struct *hp, int irq)
{
if (!hp->irq_requested)
return;
free_irq(irq, hp);
hp->irq_requested = 0;
}
void notifier_hangup_irq(struct hvc_struct *hp, int irq)
{
notifier_del_irq(hp, irq);
}
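/*
 * Wiring sketch (mirrors existing users such as the Xen console's
 * domU_hvc_ops; my_get_chars/my_put_chars are hypothetical):
 *
 * static const struct hv_ops example_ops = {
 * .get_chars = my_get_chars,
 * .put_chars = my_put_chars,
 * .notifier_add = notifier_add_irq,
 * .notifier_del = notifier_del_irq,
 * .notifier_hangup = notifier_hangup_irq,
 * };
 *
 * hvc_alloc() is then called with the backend's IRQ so that
 * notifier_add_irq() can request it on first open.
 */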
| linux-master | drivers/tty/hvc/hvc_irq.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* IBM eServer Hypervisor Virtual Console Server Device Driver
* Copyright (C) 2003, 2004 IBM Corp.
* Ryan S. Arnold ([email protected])
*
* Author(s) : Ryan S. Arnold <[email protected]>
*
* This is the device driver for the IBM Hypervisor Virtual Console Server,
* "hvcs". The IBM hvcs provides a tty driver interface to allow Linux
* user space applications access to the system consoles of logically
* partitioned operating systems, e.g. Linux, running on the same partitioned
* Power5 ppc64 system. Physical hardware consoles per partition are not
* practical on this hardware so system consoles are accessed by this driver
* using inter-partition firmware interfaces to virtual terminal devices.
*
* A vty is known to the HMC as a "virtual serial server adapter". It is a
* virtual terminal device that is created by firmware upon partition creation
* to act as a partitioned OS's console device.
*
* Firmware dynamically (via hotplug) exposes vty-servers to a running ppc64
* Linux system upon their creation by the HMC or their exposure during boot.
* The non-user interactive backend of this driver is implemented as a vio
* device driver so that it can receive notification of vty-server lifetimes
* after it registers with the vio bus to handle vty-server probe and remove
* callbacks.
*
* Many vty-servers can be configured to connect to one vty, but a vty can
* only be actively connected to by a single vty-server, in any manner, at one
* time. If the HMC is currently hosting the console for a target Linux
* partition; attempts to open the tty device to the partition's console using
* the hvcs on any partition will return -EBUSY with every open attempt until
* the HMC frees the connection between its vty-server and the desired
* partition's vty device. Conversely, a vty-server may only be connected to
* a single vty at one time even though it may have several configured vty
* partner possibilities.
*
* Firmware does not provide notification of vty partner changes to this
* driver. This means that an HMC Super Admin may add or remove partner vtys
* from a vty-server's partner list but the changes will not be signaled to
* the vty-server. Firmware only notifies the driver when a vty-server is
* added or removed from the system. To compensate for this deficiency, this
* driver implements a sysfs update attribute which provides a method for
* rescanning partner information upon a user's request.
*
* Each vty-server, prior to being exposed to this driver is reference counted
* using the 2.6 Linux kernel kref construct.
*
* For direction on installation and usage of this driver please reference
* Documentation/powerpc/hvcs.rst.
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/hvconsole.h>
#include <asm/hvcserver.h>
#include <linux/uaccess.h>
#include <linux/termios_internal.h>
#include <asm/vio.h>
/*
* 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,..).
* Removed braces around single statements following conditionals. Removed '=
* 0' after static int declarations since these default to zero. Removed
* list_for_each_safe() and replaced with list_for_each_entry() in
* hvcs_get_by_index(). The 'safe' version is unneeded now that the driver is
* using spinlocks. Changed spin_lock_irqsave() to spin_lock() when locking
* hvcs_structs_lock and hvcs_pi_lock since these are not touched in an int
* handler. Initialized hvcs_structs_lock and hvcs_pi_lock to
* SPIN_LOCK_UNLOCKED at declaration time rather than in hvcs_module_init().
* Added spin_lock around list_del() in destroy_hvcs_struct() to protect the
* list traversals from a deletion. Removed '= NULL' from pointer declaration
* statements since they are initialized NULL by default. Removed wmb()
* instances from hvcs_try_write(). They probably aren't needed with locking in
* place. Added check and cleanup for hvcs_pi_buff = kmalloc() in
* hvcs_module_init(). Exposed hvcs_struct.index via a sysfs attribute so that
* the coupling between /dev/hvcs* and a vty-server can be automatically
* determined. Moved kobject_put() in hvcs_open outside of the
* spin_unlock_irqrestore().
*
* 1.3.1 -> 1.3.2 Changed method for determining hvcs_struct->index and had it
* align with how the tty layer always assigns the lowest index available. This
* change resulted in a list of ints that denotes which indexes are available.
* Device additions and removals use the new hvcs_get_index() and
* hvcs_return_index() helper functions. The list is created with
* hvcs_alloc_index_list() and it is destroyed with hvcs_free_index_list().
* Without these fixes hotplug vty-server adapter support goes crazy with this
* driver if the user removes a vty-server adapter. Moved free_irq() outside of
* the hvcs_final_close() function in order to get it out of the spinlock.
* Rearranged hvcs_close(). Cleaned up some printks and did some housekeeping
* on the changelog. Removed local CLC_LENGTH and used HVCS_CLC_LENGTH from
* arch/powerpc/include/asm/hvcserver.h
*
* 1.3.2 -> 1.3.3 Replaced yield() in hvcs_close() with tty_wait_until_sent() to
* prevent possible lockup with realtime scheduling as similarly pointed out by
* akpm in hvc_console. Changed resulted in the removal of hvcs_final_close()
* to reorder cleanup operations and prevent discarding of pending data during
* an hvcs_close(). Removed spinlock protection of hvcs_struct data members in
* hvcs_write_room() and hvcs_chars_in_buffer() because they aren't needed.
*/
#define HVCS_DRIVER_VERSION "1.3.3"
MODULE_AUTHOR("Ryan S. Arnold <[email protected]>");
MODULE_DESCRIPTION("IBM hvcs (Hypervisor Virtual Console Server) Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(HVCS_DRIVER_VERSION);
/*
* Wait this long per iteration while trying to push buffered data to the
* hypervisor before allowing the tty to complete a close operation.
*/
#define HVCS_CLOSE_WAIT (HZ/100) /* 10 ms (1/100 of a second) */
/*
* Since the Linux TTY code does not currently (2-04-2004) support dynamic
* addition of tty derived devices and we shouldn't allocate thousands of
* tty_device pointers when the number of vty-server & vty partner connections
* will most often be much lower than this, we'll arbitrarily allocate
* HVCS_DEFAULT_SERVER_ADAPTERS tty_structs and cdev's by default when we
* register the tty_driver. This can be overridden using an insmod parameter.
*/
#define HVCS_DEFAULT_SERVER_ADAPTERS 64
/*
* The user can't insmod with more than HVCS_MAX_SERVER_ADAPTERS hvcs device
* nodes as a sanity check. Theoretically there can be over 1 Billion
* vty-server & vty partner connections.
*/
#define HVCS_MAX_SERVER_ADAPTERS 1024
/*
* We let Linux assign us a major number and we start the minors at zero. There
* is no intuitive mapping between minor number and the target vty-server
* adapter except that each new vty-server adapter is always assigned to the
* smallest minor number available.
*/
#define HVCS_MINOR_START 0
/*
* The hcall interface involves putting 8 chars into each of two registers.
* We load up those 2 registers (in arch/powerpc/platforms/pseries/hvconsole.c)
* by casting char[16] to long[2]. It would work without __ALIGNED__, but
* would be slightly slower, because an unaligned load is slower than an
* aligned one.
*/
#define __ALIGNED__ __attribute__((__aligned__(8)))
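/*
 * Illustrative sketch of the cast described above (the real cast lives
 * in arch/powerpc/platforms/pseries/hvconsole.c, not here):
 *
 * char buf[16] __ALIGNED__;
 * unsigned long *lbuf = (unsigned long *)buf;
 * // lbuf[0] and lbuf[1] are the two 8-byte register images
 *
 * The 8-byte alignment guarantees that both loads are aligned.
 */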
/*
* How much data can firmware send with each hvc_put_chars()? Maybe this
* should be moved into an architecture specific area.
*/
#define HVCS_BUFF_LEN 16
/*
* This is the maximum amount of data we'll accept from the user in a
* single hvcs_write() call, as a sanity check.
*/
#define HVCS_MAX_FROM_USER 4096
/*
* Be careful when adding flags to this line discipline. Don't add anything
* that will cause echoing or we'll go into a recursive loop echoing chars back
* and forth with the console drivers.
*/
static const struct ktermios hvcs_tty_termios = {
.c_iflag = IGNBRK | IGNPAR,
.c_oflag = OPOST,
.c_cflag = B38400 | CS8 | CREAD | HUPCL,
.c_cc = INIT_C_CC,
.c_ispeed = 38400,
.c_ospeed = 38400
};
/*
* This value is used to take the place of a command line parameter when the
* module is inserted. It starts as -1 and stays as such if the user doesn't
* specify a module insmod parameter. If they DO specify one then it is set to
* the value of the integer passed in.
*/
static int hvcs_parm_num_devs = -1;
module_param(hvcs_parm_num_devs, int, 0);
static const char hvcs_driver_name[] = "hvcs";
static const char hvcs_device_node[] = "hvcs";
/* Status of partner info rescan triggered via sysfs. */
static int hvcs_rescan_status;
static struct tty_driver *hvcs_tty_driver;
/*
* In order to be somewhat sane this driver always associates the hvcs_struct
* index element with the numerically equal tty->index. This means that a
* hotplugged vty-server adapter will always map to the lowest index valued
* device node. If vty-servers were hotplug removed from the system and then
* new ones added the new vty-server may have the largest slot number of all
* the vty-server adapters in the partition but it may have the lowest dev node
* index of all the adapters due to the hole left by the hotplug removed
* adapter. There are a set of functions provided to get the lowest index for
* a new device as well as return the index to the list. This list is allocated
* with a number of elements equal to the number of device nodes requested when
* the module was inserted.
*/
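/*
 * Worked example (illustrative): with adapters on indexes 0-3, hot
 * removing index 1 leaves a hole; the next hotplug add calls
 * hvcs_get_index(), which hands back 1 (the lowest free slot), so the
 * new adapter appears as /dev/hvcs1 even if its slot number is the
 * highest in the partition.
 */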
static int *hvcs_index_list;
/*
* How large is the list? This is kept for traversal since the list is
* dynamically created.
*/
static int hvcs_index_count;
/*
* Used by the khvcsd to pick up I/O operations when the kernel_thread is
* already awake but potentially shifted to TASK_INTERRUPTIBLE state.
*/
static int hvcs_kicked;
/*
* Used by the kthread construct for task operations like waking the sleeping
* thread and stopping the kthread.
*/
static struct task_struct *hvcs_task;
/*
* We allocate this for the use of all of the hvcs_structs when they fetch
* partner info.
*/
static unsigned long *hvcs_pi_buff;
/* Only allow one hvcs_struct to use the hvcs_pi_buff at a time. */
static DEFINE_SPINLOCK(hvcs_pi_lock);
/* One vty-server per hvcs_struct */
struct hvcs_struct {
struct tty_port port;
spinlock_t lock;
/*
* This index identifies this hvcs device as the complement to a
* specific tty index.
*/
unsigned int index;
/*
* Used to tell the driver kernel_thread what operations need to take
* place upon this hvcs_struct instance.
*/
int todo_mask;
/*
* This buffer is required so that when hvcs_write_room() reports room
* for HVCS_BUFF_LEN characters, the driver really will buffer the full
* HVCS_BUFF_LEN characters if need be. This is essential for opost
* writes, since they do no high-level buffering of their own and expect
* the driver to accept what it committed to accepting
* [e.g. tab to space conversions in n_tty.c opost()].
*/
char buffer[HVCS_BUFF_LEN];
int chars_in_buffer;
/*
* Any variable below is valid before a tty is connected and
* stays valid after the tty is disconnected. These shouldn't be
* whacked until the kobject refcount reaches zero though some entries
* may be changed via sysfs initiatives.
*/
int connected; /* is the vty-server currently connected to a vty? */
uint32_t p_unit_address; /* partner unit address */
uint32_t p_partition_ID; /* partner partition ID */
char p_location_code[HVCS_CLC_LENGTH + 1]; /* CLC + Null Term */
struct list_head next; /* list management */
struct vio_dev *vdev;
struct completion *destroyed;
};
static LIST_HEAD(hvcs_structs);
static DEFINE_SPINLOCK(hvcs_structs_lock);
static DEFINE_MUTEX(hvcs_init_mutex);
static int hvcs_get_pi(struct hvcs_struct *hvcsd);
static int hvcs_rescan_devices_list(void);
static void hvcs_partner_free(struct hvcs_struct *hvcsd);
static int hvcs_initialize(void);
#define HVCS_SCHED_READ 0x00000001
#define HVCS_QUICK_READ 0x00000002
#define HVCS_TRY_WRITE 0x00000004
#define HVCS_READ_MASK (HVCS_SCHED_READ | HVCS_QUICK_READ)
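/*
 * Illustrative lifecycle of these bits: the interrupt handler and
 * hvcs_unthrottle() set HVCS_SCHED_READ, hvcs_write() sets
 * HVCS_TRY_WRITE when hvc_put_chars() returns 0 (firmware busy),
 * hvcs_io() clears the read bits once data has been pulled in and sets
 * HVCS_QUICK_READ when it actually read something, and khvcsd yield()s
 * instead of sleeping while HVCS_TRY_WRITE or HVCS_QUICK_READ is
 * pending.
 */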
static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
{
return dev_get_drvdata(&viod->dev);
}
/* The sysfs interface for the driver and devices */
static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
int retval;
spin_lock_irqsave(&hvcsd->lock, flags);
retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return retval;
}
static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
int retval;
spin_lock_irqsave(&hvcsd->lock, flags);
retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return retval;
}
static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf,
size_t count)
{
/*
* Don't need this feature at the present time because firmware doesn't
* yet support multiple partners.
*/
printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
return -EPERM;
}
static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
int retval;
spin_lock_irqsave(&hvcsd->lock, flags);
retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return retval;
}
static DEVICE_ATTR(current_vty,
S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t count)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
/* writing a '0' to this sysfs entry will result in the disconnect. */
if (simple_strtol(buf, NULL, 0) != 0)
return -EINVAL;
spin_lock_irqsave(&hvcsd->lock, flags);
if (hvcsd->port.count > 0) {
spin_unlock_irqrestore(&hvcsd->lock, flags);
printk(KERN_INFO "HVCS: vterm state unchanged. "
"The hvcs device node is still in use.\n");
return -EPERM;
}
if (hvcsd->connected == 0) {
spin_unlock_irqrestore(&hvcsd->lock, flags);
printk(KERN_INFO "HVCS: vterm state unchanged. The"
" vty-server is not connected to a vty.\n");
return -EPERM;
}
hvcs_partner_free(hvcsd);
printk(KERN_INFO "HVCS: Closed vty-server@%X and"
" partner vty@%X:%d connection.\n",
hvcsd->vdev->unit_address,
hvcsd->p_unit_address,
(uint32_t)hvcsd->p_partition_ID);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return count;
}
static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
int retval;
spin_lock_irqsave(&hvcsd->lock, flags);
retval = sprintf(buf, "%d\n", hvcsd->connected);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return retval;
}
static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
hvcs_vterm_state_show, hvcs_vterm_state_store);
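/*
 * Usage note (sysfs path layout assumed; see
 * Documentation/powerpc/hvcs.rst): "echo 0 >
 * /sys/bus/vio/devices/<unit_address>/vterm_state" force-frees the
 * connection, and is refused with -EPERM while the corresponding
 * /dev/hvcs* node is still open.
 */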
static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
int retval;
spin_lock_irqsave(&hvcsd->lock, flags);
retval = sprintf(buf, "%d\n", hvcsd->index);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return retval;
}
static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
static struct attribute *hvcs_dev_attrs[] = {
&dev_attr_partner_vtys.attr,
&dev_attr_partner_clcs.attr,
&dev_attr_current_vty.attr,
&dev_attr_vterm_state.attr,
&dev_attr_index.attr,
NULL,
};
ATTRIBUTE_GROUPS(hvcs_dev);
static ssize_t rescan_show(struct device_driver *ddp, char *buf)
{
/* A 1 means it is updating, a 0 means it is done updating */
return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
}
static ssize_t rescan_store(struct device_driver *ddp, const char * buf,
size_t count)
{
if ((simple_strtol(buf, NULL, 0) != 1)
&& (hvcs_rescan_status != 0))
return -EINVAL;
hvcs_rescan_status = 1;
printk(KERN_INFO "HVCS: rescanning partner info for all"
" vty-servers.\n");
hvcs_rescan_devices_list();
hvcs_rescan_status = 0;
return count;
}
static DRIVER_ATTR_RW(rescan);
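/*
 * Usage note (sysfs path layout assumed): writing 1 to
 * /sys/bus/vio/drivers/hvcs/rescan triggers hvcs_rescan_devices_list();
 * reading the attribute back reports 1 while an update is in flight.
 */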
static struct attribute *hvcs_attrs[] = {
&driver_attr_rescan.attr,
NULL,
};
ATTRIBUTE_GROUPS(hvcs);
static void hvcs_kick(void)
{
hvcs_kicked = 1;
wmb();
wake_up_process(hvcs_task);
}
static void hvcs_unthrottle(struct tty_struct *tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&hvcsd->lock, flags);
hvcsd->todo_mask |= HVCS_SCHED_READ;
spin_unlock_irqrestore(&hvcsd->lock, flags);
hvcs_kick();
}
static void hvcs_throttle(struct tty_struct *tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&hvcsd->lock, flags);
vio_disable_interrupts(hvcsd->vdev);
spin_unlock_irqrestore(&hvcsd->lock, flags);
}
/*
* If the device is being removed we don't have to worry about this
* interrupt handler taking any further interrupts: they are disabled
* first, which means the hvcs_struct is always valid in this handler.
*/
static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance)
{
struct hvcs_struct *hvcsd = dev_instance;
spin_lock(&hvcsd->lock);
vio_disable_interrupts(hvcsd->vdev);
hvcsd->todo_mask |= HVCS_SCHED_READ;
spin_unlock(&hvcsd->lock);
hvcs_kick();
return IRQ_HANDLED;
}
/* This function must be called with the hvcsd->lock held */
static void hvcs_try_write(struct hvcs_struct *hvcsd)
{
uint32_t unit_address = hvcsd->vdev->unit_address;
struct tty_struct *tty = hvcsd->port.tty;
int sent;
if (hvcsd->todo_mask & HVCS_TRY_WRITE) {
/* won't send partial writes */
sent = hvc_put_chars(unit_address,
&hvcsd->buffer[0],
hvcsd->chars_in_buffer );
if (sent > 0) {
hvcsd->chars_in_buffer = 0;
/* wmb(); */
hvcsd->todo_mask &= ~(HVCS_TRY_WRITE);
/* wmb(); */
/*
* We are still obligated to deliver the data to the
* hypervisor even if the tty has been closed because
* we committed to delivering it. But don't try to wake
* a non-existent tty.
*/
if (tty) {
tty_wakeup(tty);
}
}
}
}
static int hvcs_io(struct hvcs_struct *hvcsd)
{
uint32_t unit_address;
struct tty_struct *tty;
char buf[HVCS_BUFF_LEN] __ALIGNED__;
unsigned long flags;
int got = 0;
spin_lock_irqsave(&hvcsd->lock, flags);
unit_address = hvcsd->vdev->unit_address;
tty = hvcsd->port.tty;
hvcs_try_write(hvcsd);
if (!tty || tty_throttled(tty)) {
hvcsd->todo_mask &= ~(HVCS_READ_MASK);
goto bail;
} else if (!(hvcsd->todo_mask & (HVCS_READ_MASK)))
goto bail;
/* remove the read masks */
hvcsd->todo_mask &= ~(HVCS_READ_MASK);
if (tty_buffer_request_room(&hvcsd->port, HVCS_BUFF_LEN) >= HVCS_BUFF_LEN) {
got = hvc_get_chars(unit_address,
&buf[0],
HVCS_BUFF_LEN);
tty_insert_flip_string(&hvcsd->port, buf, got);
}
/* Give the TTY time to process the data we just sent. */
if (got)
hvcsd->todo_mask |= HVCS_QUICK_READ;
spin_unlock_irqrestore(&hvcsd->lock, flags);
/* This is synch -- FIXME :js: it is not! */
if (got)
tty_flip_buffer_push(&hvcsd->port);
else {
/* Do this _after_ the flip_buffer_push */
spin_lock_irqsave(&hvcsd->lock, flags);
vio_enable_interrupts(hvcsd->vdev);
spin_unlock_irqrestore(&hvcsd->lock, flags);
}
return hvcsd->todo_mask;
bail:
spin_unlock_irqrestore(&hvcsd->lock, flags);
return hvcsd->todo_mask;
}
static int khvcsd(void *unused)
{
struct hvcs_struct *hvcsd;
int hvcs_todo_mask;
__set_current_state(TASK_RUNNING);
do {
hvcs_todo_mask = 0;
hvcs_kicked = 0;
wmb();
spin_lock(&hvcs_structs_lock);
list_for_each_entry(hvcsd, &hvcs_structs, next) {
hvcs_todo_mask |= hvcs_io(hvcsd);
}
spin_unlock(&hvcs_structs_lock);
/*
* If any of the hvcs adapters want to try a write or quick read
* don't schedule(), yield a smidgen then execute the hvcs_io
* thread again for those that want the write.
*/
if (hvcs_todo_mask & (HVCS_TRY_WRITE | HVCS_QUICK_READ)) {
yield();
continue;
}
set_current_state(TASK_INTERRUPTIBLE);
if (!hvcs_kicked)
schedule();
__set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
return 0;
}
static const struct vio_device_id hvcs_driver_table[] = {
{"serial-server", "hvterm2"},
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, hvcs_driver_table);
static void hvcs_return_index(int index)
{
/* Paranoia check */
if (!hvcs_index_list)
return;
if (index < 0 || index >= hvcs_index_count)
return;
hvcs_index_list[index] = -1;
}
static void hvcs_destruct_port(struct tty_port *p)
{
struct hvcs_struct *hvcsd = container_of(p, struct hvcs_struct, port);
struct vio_dev *vdev;
struct completion *comp;
unsigned long flags;
spin_lock(&hvcs_structs_lock);
spin_lock_irqsave(&hvcsd->lock, flags);
comp = hvcsd->destroyed;
/* the list_del poisons the pointers */
list_del(&(hvcsd->next));
if (hvcsd->connected == 1) {
hvcs_partner_free(hvcsd);
printk(KERN_INFO "HVCS: Closed vty-server@%X and"
" partner vty@%X:%d connection.\n",
hvcsd->vdev->unit_address,
hvcsd->p_unit_address,
(uint32_t)hvcsd->p_partition_ID);
}
printk(KERN_INFO "HVCS: Destroyed hvcs_struct for vty-server@%X.\n",
hvcsd->vdev->unit_address);
vdev = hvcsd->vdev;
hvcsd->vdev = NULL;
hvcsd->p_unit_address = 0;
hvcsd->p_partition_ID = 0;
hvcsd->destroyed = NULL;
hvcs_return_index(hvcsd->index);
memset(&hvcsd->p_location_code[0], 0x00, HVCS_CLC_LENGTH + 1);
spin_unlock_irqrestore(&hvcsd->lock, flags);
spin_unlock(&hvcs_structs_lock);
kfree(hvcsd);
if (comp)
complete(comp);
}
static const struct tty_port_operations hvcs_port_ops = {
.destruct = hvcs_destruct_port,
};
static int hvcs_get_index(void)
{
int i;
/* Paranoia check */
if (!hvcs_index_list) {
printk(KERN_ERR "HVCS: hvcs_index_list NOT valid!.\n");
return -EFAULT;
}
/* Find the numerically lowest first free index. */
for(i = 0; i < hvcs_index_count; i++) {
if (hvcs_index_list[i] == -1) {
hvcs_index_list[i] = 0;
return i;
}
}
return -1;
}
static int hvcs_probe(
struct vio_dev *dev,
const struct vio_device_id *id)
{
struct hvcs_struct *hvcsd;
int index, rc;
if (!dev || !id) {
printk(KERN_ERR "HVCS: probed with invalid parameter.\n");
return -EPERM;
}
/* Make sure we are properly initialized */
rc = hvcs_initialize();
if (rc) {
pr_err("HVCS: Failed to initialize core driver.\n");
return rc;
}
/* early to avoid cleanup on failure */
index = hvcs_get_index();
if (index < 0) {
return -EFAULT;
}
hvcsd = kzalloc(sizeof(*hvcsd), GFP_KERNEL);
if (!hvcsd) {
hvcs_return_index(index);
return -ENOMEM;
}
tty_port_init(&hvcsd->port);
hvcsd->port.ops = &hvcs_port_ops;
spin_lock_init(&hvcsd->lock);
hvcsd->vdev = dev;
dev_set_drvdata(&dev->dev, hvcsd);
hvcsd->index = index;
/* hvcsd->index = ++hvcs_struct_count; */
hvcsd->chars_in_buffer = 0;
hvcsd->todo_mask = 0;
hvcsd->connected = 0;
/*
* This will populate the hvcs_struct's partner info fields for the
* first time.
*/
if (hvcs_get_pi(hvcsd)) {
printk(KERN_ERR "HVCS: Failed to fetch partner"
" info for vty-server@%X on device probe.\n",
hvcsd->vdev->unit_address);
}
/*
* If a user app opens a tty that corresponds to this vty-server before
* the hvcs_struct has been added to the devices list then the user app
* will get -ENODEV.
*/
spin_lock(&hvcs_structs_lock);
list_add_tail(&(hvcsd->next), &hvcs_structs);
spin_unlock(&hvcs_structs_lock);
printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address);
/*
* DON'T enable interrupts here because there is no user to receive the
* data.
*/
return 0;
}
static void hvcs_remove(struct vio_dev *dev)
{
struct hvcs_struct *hvcsd = dev_get_drvdata(&dev->dev);
DECLARE_COMPLETION_ONSTACK(comp);
unsigned long flags;
struct tty_struct *tty;
/* By this time the vty-server won't be getting any more interrupts */
spin_lock_irqsave(&hvcsd->lock, flags);
hvcsd->destroyed = ∁
tty = tty_port_tty_get(&hvcsd->port);
spin_unlock_irqrestore(&hvcsd->lock, flags);
/*
* The tty should always be valid at this time unless a
* simultaneous tty close already cleaned up the hvcs_struct.
*/
if (tty) {
tty_vhangup(tty);
tty_kref_put(tty);
}
tty_port_put(&hvcsd->port);
wait_for_completion(&comp);
printk(KERN_INFO "HVCS: vty-server@%X removed from the"
" vio bus.\n", dev->unit_address);
}
static struct vio_driver hvcs_vio_driver = {
.id_table = hvcs_driver_table,
.probe = hvcs_probe,
.remove = hvcs_remove,
.name = hvcs_driver_name,
.driver = {
.groups = hvcs_groups,
.dev_groups = hvcs_dev_groups,
},
};
/* Only called from hvcs_get_pi please */
static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
{
hvcsd->p_unit_address = pi->unit_address;
hvcsd->p_partition_ID = pi->partition_ID;
/* copy the null-term char too */
strscpy(hvcsd->p_location_code, pi->location_code,
sizeof(hvcsd->p_location_code));
}
/*
* Traverse the list and add the partner info that is found to the hvcs_struct
* struct entry. NOTE: At this time I know that partner info will return a
* single entry but in the future there may be multiple partner info entries per
* vty-server and you'll want to zero out that list and reset it. If for some
* reason you have an old version of this driver but there IS more than one
* partner info then hvcsd->p_* will hold the last partner info data from the
* firmware query. A good way to update this code would be to replace the three
* partner info fields in hvcs_struct with a list of hvcs_partner_info
* instances.
*
* This function must be called with the hvcsd->lock held.
*/
static int hvcs_get_pi(struct hvcs_struct *hvcsd)
{
struct hvcs_partner_info *pi;
uint32_t unit_address = hvcsd->vdev->unit_address;
struct list_head head;
int retval;
spin_lock(&hvcs_pi_lock);
if (!hvcs_pi_buff) {
spin_unlock(&hvcs_pi_lock);
return -EFAULT;
}
retval = hvcs_get_partner_info(unit_address, &head, hvcs_pi_buff);
spin_unlock(&hvcs_pi_lock);
if (retval) {
printk(KERN_ERR "HVCS: Failed to fetch partner"
" info for vty-server@%x.\n", unit_address);
return retval;
}
/* nixes the values if the partner vty went away */
hvcsd->p_unit_address = 0;
hvcsd->p_partition_ID = 0;
list_for_each_entry(pi, &head, node)
hvcs_set_pi(pi, hvcsd);
hvcs_free_partner_info(&head);
return 0;
}
/*
* This function is executed by the driver "rescan" sysfs entry. It shouldn't
* be executed elsewhere, in order to prevent deadlock issues.
*/
static int hvcs_rescan_devices_list(void)
{
struct hvcs_struct *hvcsd;
unsigned long flags;
spin_lock(&hvcs_structs_lock);
list_for_each_entry(hvcsd, &hvcs_structs, next) {
spin_lock_irqsave(&hvcsd->lock, flags);
hvcs_get_pi(hvcsd);
spin_unlock_irqrestore(&hvcsd->lock, flags);
}
spin_unlock(&hvcs_structs_lock);
return 0;
}
/*
* Farm this off into its own function because it could be more complex once
* multiple partners support is added. This function should be called with
* the hvcsd->lock held.
*/
static int hvcs_has_pi(struct hvcs_struct *hvcsd)
{
if ((!hvcsd->p_unit_address) || (!hvcsd->p_partition_ID))
return 0;
return 1;
}
/*
* NOTE: It is possible that the super admin removed a partner vty and then
* added a different vty as the new partner.
*
* This function must be called with the hvcsd->lock held.
*/
static int hvcs_partner_connect(struct hvcs_struct *hvcsd)
{
int retval;
unsigned int unit_address = hvcsd->vdev->unit_address;
/*
* If there wasn't any pi when the device was added it doesn't mean
* there isn't any now. This driver isn't notified when a new partner
* vty is added to a vty-server so we discover changes on our own.
* Please see comments in hvcs_register_connection() for justification
* of this bizarre code.
*/
retval = hvcs_register_connection(unit_address,
hvcsd->p_partition_ID,
hvcsd->p_unit_address);
if (!retval) {
hvcsd->connected = 1;
return 0;
} else if (retval != -EINVAL)
return retval;
/*
* As per the spec re-get the pi and try again if -EINVAL after the
* first connection attempt.
*/
if (hvcs_get_pi(hvcsd))
return -ENOMEM;
if (!hvcs_has_pi(hvcsd))
return -ENODEV;
retval = hvcs_register_connection(unit_address,
hvcsd->p_partition_ID,
hvcsd->p_unit_address);
if (retval != -EINVAL) {
hvcsd->connected = 1;
return retval;
}
/*
* -EBUSY is the most likely scenario, though the vty could have been
* removed, or there really could be an hcall error due to the parameter
* data; thanks to ambiguous firmware return codes we can't really
* tell.
*/
printk(KERN_INFO "HVCS: vty-server or partner"
" vty is busy. Try again later.\n");
return -EBUSY;
}
/* This function must be called with the hvcsd->lock held */
static void hvcs_partner_free(struct hvcs_struct *hvcsd)
{
int retval;
do {
retval = hvcs_free_connection(hvcsd->vdev->unit_address);
} while (retval == -EBUSY);
hvcsd->connected = 0;
}
/* This helper function must be called WITHOUT the hvcsd->lock held */
static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
unsigned int irq, struct vio_dev *vdev)
{
unsigned long flags;
int rc;
/*
* It is possible that the vty-server was removed between the time that
* the conn was registered and now.
*/
rc = request_irq(irq, &hvcs_handle_interrupt, 0, "ibmhvcs", hvcsd);
if (!rc) {
/*
* It is possible the vty-server was removed after the irq was
* requested but before we have time to enable interrupts.
*/
if (vio_enable_interrupts(vdev) == H_SUCCESS)
return 0;
else {
printk(KERN_ERR "HVCS: int enable failed for"
" vty-server@%X.\n", unit_address);
free_irq(irq, hvcsd);
}
} else
printk(KERN_ERR "HVCS: irq req failed for"
" vty-server@%X.\n", unit_address);
spin_lock_irqsave(&hvcsd->lock, flags);
hvcs_partner_free(hvcsd);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return rc;
}
/*
* This always increments the kref ref count if the call is successful.
* Please remember to dec when you are done with the instance.
*
* NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when
* calling this function or you will get deadlock.
*/
static struct hvcs_struct *hvcs_get_by_index(int index)
{
struct hvcs_struct *hvcsd;
unsigned long flags;
spin_lock(&hvcs_structs_lock);
list_for_each_entry(hvcsd, &hvcs_structs, next) {
spin_lock_irqsave(&hvcsd->lock, flags);
if (hvcsd->index == index) {
tty_port_get(&hvcsd->port);
spin_unlock_irqrestore(&hvcsd->lock, flags);
spin_unlock(&hvcs_structs_lock);
return hvcsd;
}
spin_unlock_irqrestore(&hvcsd->lock, flags);
}
spin_unlock(&hvcs_structs_lock);
return NULL;
}
static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct hvcs_struct *hvcsd;
struct vio_dev *vdev;
unsigned long unit_address, flags;
unsigned int irq;
int retval;
/*
* Is there a vty-server that shares the same index?
* This function takes a tty_port reference that the caller must drop.
*/
hvcsd = hvcs_get_by_index(tty->index);
if (!hvcsd) {
printk(KERN_WARNING "HVCS: open failed, no device associated"
" with tty->index %d.\n", tty->index);
return -ENODEV;
}
spin_lock_irqsave(&hvcsd->lock, flags);
if (hvcsd->connected == 0) {
retval = hvcs_partner_connect(hvcsd);
if (retval) {
spin_unlock_irqrestore(&hvcsd->lock, flags);
printk(KERN_WARNING "HVCS: partner connect failed.\n");
goto err_put;
}
}
hvcsd->port.count = 0;
hvcsd->port.tty = tty;
tty->driver_data = hvcsd;
memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
/*
* Save these in the spinlock for the enable operations that need them
* outside of the spinlock.
*/
irq = hvcsd->vdev->irq;
vdev = hvcsd->vdev;
unit_address = hvcsd->vdev->unit_address;
hvcsd->todo_mask |= HVCS_SCHED_READ;
spin_unlock_irqrestore(&hvcsd->lock, flags);
/*
* This must be done outside of the spinlock because it requests irqs
* and will grab the spinlock and free the connection if it fails.
*/
retval = hvcs_enable_device(hvcsd, unit_address, irq, vdev);
if (retval) {
printk(KERN_WARNING "HVCS: enable device failed.\n");
goto err_put;
}
retval = tty_port_install(&hvcsd->port, driver, tty);
if (retval)
goto err_irq;
return 0;
err_irq:
spin_lock_irqsave(&hvcsd->lock, flags);
vio_disable_interrupts(hvcsd->vdev);
spin_unlock_irqrestore(&hvcsd->lock, flags);
free_irq(irq, hvcsd);
err_put:
tty_port_put(&hvcsd->port);
return retval;
}
/*
* This is invoked via the tty_open interface when a user app connects to the
* /dev node.
*/
static int hvcs_open(struct tty_struct *tty, struct file *filp)
{
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&hvcsd->lock, flags);
hvcsd->port.count++;
hvcsd->todo_mask |= HVCS_SCHED_READ;
spin_unlock_irqrestore(&hvcsd->lock, flags);
hvcs_kick();
printk(KERN_INFO "HVCS: vty-server@%X connection opened.\n",
hvcsd->vdev->unit_address );
return 0;
}
static void hvcs_close(struct tty_struct *tty, struct file *filp)
{
struct hvcs_struct *hvcsd;
unsigned long flags;
int irq;
/*
* Is someone trying to close the file associated with this device after
* we have hung up? If so, tty->driver_data won't be valid.
*/
if (tty_hung_up_p(filp))
return;
/*
* No driver_data means that this close was probably issued after a
* failed hvcs_open by the tty layer's release_dev() api and we can just
* exit cleanly.
*/
if (!tty->driver_data)
return;
hvcsd = tty->driver_data;
spin_lock_irqsave(&hvcsd->lock, flags);
if (hvcsd->port.count == 0) {
spin_unlock_irqrestore(&hvcsd->lock, flags);
return;
} else if (--hvcsd->port.count == 0) {
vio_disable_interrupts(hvcsd->vdev);
/*
* NULL this early so that the kernel_thread doesn't try to
* execute any operations on the TTY even though it is obligated
* to deliver any pending I/O to the hypervisor.
*/
hvcsd->port.tty = NULL;
irq = hvcsd->vdev->irq;
spin_unlock_irqrestore(&hvcsd->lock, flags);
tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
free_irq(irq, hvcsd);
return;
} else if (hvcsd->port.count < 0) {
printk(KERN_ERR "HVCS: vty-server@%X open_count: %d is mismanaged.\n",
hvcsd->vdev->unit_address, hvcsd->port.count);
}
spin_unlock_irqrestore(&hvcsd->lock, flags);
}
static void hvcs_cleanup(struct tty_struct * tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
/*
* This line is important because it tells hvcs_open that this
* device needs to be re-configured the next time hvcs_open is
* called.
*/
tty->driver_data = NULL;
tty_port_put(&hvcsd->port);
}
static void hvcs_hangup(struct tty_struct * tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned long flags;
int irq;
spin_lock_irqsave(&hvcsd->lock, flags);
/*
* Don't kref put inside the spinlock because the destruction
* callback may use the spinlock and it may get called before the
* spinlock has been released.
*/
vio_disable_interrupts(hvcsd->vdev);
hvcsd->todo_mask = 0;
hvcsd->port.tty = NULL;
hvcsd->port.count = 0;
/* This will drop any buffered data on the floor which is OK in a hangup
* scenario. */
memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
hvcsd->chars_in_buffer = 0;
irq = hvcsd->vdev->irq;
spin_unlock_irqrestore(&hvcsd->lock, flags);
free_irq(irq, hvcsd);
}
/*
* NOTE: This is almost always from_user since user level apps interact with the
* /dev nodes. I'm trusting that if hvcs_write gets called and interrupted by
* hvcs_remove (which removes the target device and executes tty_hangup()) that
* tty_hangup will allow hvcs_write time to complete execution before it
* terminates our device.
*/
static ssize_t hvcs_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned int unit_address;
const unsigned char *charbuf;
unsigned long flags;
size_t total_sent = 0;
size_t tosend = 0;
int result = 0;
/*
* If they don't check the return code off of their open they may
* attempt this even if there is no connected device.
*/
if (!hvcsd)
return -ENODEV;
/* Reasonable size to prevent user level flooding */
if (count > HVCS_MAX_FROM_USER) {
printk(KERN_WARNING "HVCS write: count being truncated to"
" HVCS_MAX_FROM_USER.\n");
count = HVCS_MAX_FROM_USER;
}
charbuf = buf;
spin_lock_irqsave(&hvcsd->lock, flags);
/*
* Somehow an open succeeded but the device was removed or the
* connection terminated between the vty-server and partner vty during
* the middle of a write operation? This is a crummy place to do this
* but we want to keep it all in the spinlock.
*/
if (hvcsd->port.count <= 0) {
spin_unlock_irqrestore(&hvcsd->lock, flags);
return -ENODEV;
}
unit_address = hvcsd->vdev->unit_address;
while (count > 0) {
tosend = min_t(size_t, count,
(HVCS_BUFF_LEN - hvcsd->chars_in_buffer));
/*
* No more space, this probably means that the last call to
* hvcs_write() didn't succeed and the buffer was filled up.
*/
if (!tosend)
break;
memcpy(&hvcsd->buffer[hvcsd->chars_in_buffer],
&charbuf[total_sent],
tosend);
hvcsd->chars_in_buffer += tosend;
result = 0;
/*
* If this is true then we don't want to try writing to the
* hypervisor because that is the kernel_threads job now. We'll
* just add to the buffer.
*/
if (!(hvcsd->todo_mask & HVCS_TRY_WRITE))
/* won't send partial writes */
result = hvc_put_chars(unit_address,
&hvcsd->buffer[0],
hvcsd->chars_in_buffer);
/*
* Since we know we have enough room in hvcsd->buffer for
* tosend we record that it was sent regardless of whether the
* hypervisor actually took it because we have it buffered.
*/
total_sent+=tosend;
count-=tosend;
if (result == 0) {
hvcsd->todo_mask |= HVCS_TRY_WRITE;
hvcs_kick();
break;
}
hvcsd->chars_in_buffer = 0;
/*
* Test after the chars_in_buffer reset otherwise this could
* deadlock our writes if hvc_put_chars fails.
*/
if (result < 0)
break;
}
spin_unlock_irqrestore(&hvcsd->lock, flags);
if (result == -1)
return -EIO;
else
return total_sent;
}
/*
* This is really asking how much can we guarantee that we can send or that we
* absolutely WILL BUFFER if we can't send it. This driver MUST honor the
* return value, hence the reason for hvcs_struct buffering.
*/
static unsigned int hvcs_write_room(struct tty_struct *tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
if (!hvcsd || hvcsd->port.count <= 0)
return 0;
return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
}
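/*
 * Worked example of the contract above (illustrative): if 10 of the 16
 * buffer bytes are pending, hvcs_write_room() reports 6, and a
 * subsequent hvcs_write() of up to 6 bytes is guaranteed to be buffered
 * even if hvc_put_chars() refuses the data; this is why the opost path
 * in n_tty.c can trust the returned count.
 */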
static unsigned int hvcs_chars_in_buffer(struct tty_struct *tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
return hvcsd->chars_in_buffer;
}
static const struct tty_operations hvcs_ops = {
.install = hvcs_install,
.open = hvcs_open,
.close = hvcs_close,
.cleanup = hvcs_cleanup,
.hangup = hvcs_hangup,
.write = hvcs_write,
.write_room = hvcs_write_room,
.chars_in_buffer = hvcs_chars_in_buffer,
.unthrottle = hvcs_unthrottle,
.throttle = hvcs_throttle,
};
static int hvcs_alloc_index_list(int n)
{
int i;
hvcs_index_list = kmalloc_array(n, sizeof(*hvcs_index_list),
GFP_KERNEL);
if (!hvcs_index_list)
return -ENOMEM;
hvcs_index_count = n;
for (i = 0; i < hvcs_index_count; i++)
hvcs_index_list[i] = -1;
return 0;
}
static void hvcs_free_index_list(void)
{
/* Paranoia check to be thorough. */
kfree(hvcs_index_list);
hvcs_index_list = NULL;
hvcs_index_count = 0;
}
static int hvcs_initialize(void)
{
int rc, num_ttys_to_alloc;
mutex_lock(&hvcs_init_mutex);
if (hvcs_task) {
mutex_unlock(&hvcs_init_mutex);
return 0;
}
/* Has the user specified an overload with an insmod param? */
if (hvcs_parm_num_devs <= 0 ||
(hvcs_parm_num_devs > HVCS_MAX_SERVER_ADAPTERS)) {
num_ttys_to_alloc = HVCS_DEFAULT_SERVER_ADAPTERS;
} else
num_ttys_to_alloc = hvcs_parm_num_devs;
hvcs_tty_driver = tty_alloc_driver(num_ttys_to_alloc,
TTY_DRIVER_REAL_RAW);
if (IS_ERR(hvcs_tty_driver)) {
mutex_unlock(&hvcs_init_mutex);
return PTR_ERR(hvcs_tty_driver);
}
if (hvcs_alloc_index_list(num_ttys_to_alloc)) {
rc = -ENOMEM;
goto index_fail;
}
hvcs_tty_driver->driver_name = hvcs_driver_name;
hvcs_tty_driver->name = hvcs_device_node;
/*
* We'll let the system assign us a major number, indicated by leaving
* it blank.
*/
hvcs_tty_driver->minor_start = HVCS_MINOR_START;
hvcs_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
/*
* We roll our own so that we DON'T ECHO. We can't echo because the
* device we are connecting to already echoes by default and this would
* throw us into a horrible recursive echo-echo-echo loop.
*/
hvcs_tty_driver->init_termios = hvcs_tty_termios;
tty_set_operations(hvcs_tty_driver, &hvcs_ops);
/*
* The following call will result in sysfs entries that denote the
* dynamically assigned major and minor numbers for our devices.
*/
if (tty_register_driver(hvcs_tty_driver)) {
printk(KERN_ERR "HVCS: registration as a tty driver failed.\n");
rc = -EIO;
goto register_fail;
}
hvcs_pi_buff = (unsigned long *) __get_free_page(GFP_KERNEL);
if (!hvcs_pi_buff) {
rc = -ENOMEM;
goto buff_alloc_fail;
}
hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
if (IS_ERR(hvcs_task)) {
printk(KERN_ERR "HVCS: khvcsd creation failed.\n");
rc = -EIO;
goto kthread_fail;
}
mutex_unlock(&hvcs_init_mutex);
return 0;
kthread_fail:
free_page((unsigned long)hvcs_pi_buff);
buff_alloc_fail:
tty_unregister_driver(hvcs_tty_driver);
register_fail:
hvcs_free_index_list();
index_fail:
tty_driver_kref_put(hvcs_tty_driver);
hvcs_tty_driver = NULL;
mutex_unlock(&hvcs_init_mutex);
return rc;
}
static int __init hvcs_module_init(void)
{
int rc = vio_register_driver(&hvcs_vio_driver);
if (rc) {
printk(KERN_ERR "HVCS: can't register vio driver\n");
return rc;
}
pr_info("HVCS: Driver registered.\n");
return 0;
}
static void __exit hvcs_module_exit(void)
{
/*
* This driver receives hvcs_remove callbacks for each device upon
* module removal.
*/
vio_unregister_driver(&hvcs_vio_driver);
if (!hvcs_task)
return;
/*
* This synchronous operation will wake the khvcsd kthread if it is
* asleep and will return when khvcsd has terminated.
*/
kthread_stop(hvcs_task);
spin_lock(&hvcs_pi_lock);
free_page((unsigned long)hvcs_pi_buff);
hvcs_pi_buff = NULL;
spin_unlock(&hvcs_pi_lock);
tty_unregister_driver(hvcs_tty_driver);
hvcs_free_index_list();
tty_driver_kref_put(hvcs_tty_driver);
printk(KERN_INFO "HVCS: driver module removed.\n");
}
module_init(hvcs_module_init);
module_exit(hvcs_module_exit);
| linux-master | drivers/tty/hvc/hvcs.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2001 Anton Blanchard <[email protected]>, IBM
* Copyright (C) 2001 Paul Mackerras <[email protected]>, IBM
* Copyright (C) 2004 Benjamin Herrenschmidt <[email protected]>, IBM Corp.
* Copyright (C) 2004 IBM Corporation
*
* Additional Author(s):
* Ryan S. Arnold <[email protected]>
*/
#include <linux/console.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kbd_kern.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/major.h>
#include <linux/atomic.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/serial_core.h>
#include <linux/uaccess.h>
#include "hvc_console.h"
#define HVC_MAJOR 229
#define HVC_MINOR 0
/*
* Wait this long per iteration while trying to push buffered data to the
* hypervisor before allowing the tty to complete a close operation.
*/
#define HVC_CLOSE_WAIT (HZ/100) /* 1/100th of a second */
/*
* These sizes are most efficient for vio, because they are the
* native transfer size. We could make them selectable in the
* future to better deal with backends that want other buffer sizes.
*/
#define N_OUTBUF 16
#define N_INBUF 16
#define __ALIGNED__ __attribute__((__aligned__(L1_CACHE_BYTES)))
static struct tty_driver *hvc_driver;
static struct task_struct *hvc_task;
/* Picks up late kicks after list walk but before schedule() */
static int hvc_kicked;
/* hvc_init is triggered from hvc_alloc, i.e. only when actually used */
static atomic_t hvc_needs_init __read_mostly = ATOMIC_INIT(-1);
static int hvc_init(void);
#ifdef CONFIG_MAGIC_SYSRQ
static int sysrq_pressed;
#endif
/* dynamic list of hvc_struct instances */
static LIST_HEAD(hvc_structs);
/*
* Protect the list of hvc_struct instances from inserts and removals during
* list traversal.
*/
static DEFINE_MUTEX(hvc_structs_mutex);
/*
* This value is used to assign a tty->index value to a hvc_struct based
 * upon order of exposure via hvc_probe(), when we cannot match it to
* a console candidate registered with hvc_instantiate().
*/
static int last_hvc = -1;
/*
* Do not call this function with either the hvc_structs_mutex or the hvc_struct
* lock held. If successful, this function increments the kref reference
* count against the target hvc_struct so it should be released when finished.
*/
static struct hvc_struct *hvc_get_by_index(int index)
{
struct hvc_struct *hp;
unsigned long flags;
mutex_lock(&hvc_structs_mutex);
list_for_each_entry(hp, &hvc_structs, next) {
spin_lock_irqsave(&hp->lock, flags);
if (hp->index == index) {
tty_port_get(&hp->port);
spin_unlock_irqrestore(&hp->lock, flags);
mutex_unlock(&hvc_structs_mutex);
return hp;
}
spin_unlock_irqrestore(&hp->lock, flags);
}
hp = NULL;
mutex_unlock(&hvc_structs_mutex);
return hp;
}
static int __hvc_flush(const struct hv_ops *ops, uint32_t vtermno, bool wait)
{
if (wait)
might_sleep();
if (ops->flush)
return ops->flush(vtermno, wait);
return 0;
}
static int hvc_console_flush(const struct hv_ops *ops, uint32_t vtermno)
{
return __hvc_flush(ops, vtermno, false);
}
/*
* Wait for the console to flush before writing more to it. This sleeps.
*/
static int hvc_flush(struct hvc_struct *hp)
{
return __hvc_flush(hp->ops, hp->vtermno, true);
}
/*
* Initial console vtermnos for console API usage prior to full console
* initialization. Any vty adapter outside this range will not have usable
* console interfaces but can still be used as a tty device. This has to be
* static because kmalloc will not work during early console init.
*/
static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
{[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
/*
* Console APIs, NOT TTY. These APIs are available immediately when
* hvc_console_setup() finds adapters.
*/
static void hvc_console_print(struct console *co, const char *b,
unsigned count)
{
char c[N_OUTBUF] __ALIGNED__;
unsigned i = 0, n = 0;
int r, donecr = 0, index = co->index;
/* Console access attempt outside of acceptable console range. */
if (index >= MAX_NR_HVC_CONSOLES)
return;
/* This console adapter was removed so it is not usable. */
if (vtermnos[index] == -1)
return;
while (count > 0 || i > 0) {
if (count > 0 && i < sizeof(c)) {
if (b[n] == '\n' && !donecr) {
c[i++] = '\r';
donecr = 1;
} else {
c[i++] = b[n++];
donecr = 0;
--count;
}
} else {
r = cons_ops[index]->put_chars(vtermnos[index], c, i);
if (r <= 0) {
/* throw away characters on error
* but spin in case of -EAGAIN */
if (r != -EAGAIN) {
i = 0;
} else {
hvc_console_flush(cons_ops[index],
vtermnos[index]);
}
} else if (r > 0) {
i -= r;
if (i > 0)
memmove(c, c+r, i);
}
}
}
hvc_console_flush(cons_ops[index], vtermnos[index]);
}
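/*
 * Worked example (illustrative): the loop above performs LF -> CRLF
 * translation while draining through the small aligned staging buffer, so a
 * printk of "ok\n" reaches the hypervisor as the 4 bytes "ok\r\n". Partial
 * writes are handled by the memmove() that shifts unsent bytes to the front
 * of c[] before the next put_chars() attempt.
 */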
static struct tty_driver *hvc_console_device(struct console *c, int *index)
{
if (vtermnos[c->index] == -1)
return NULL;
*index = c->index;
return hvc_driver;
}
static int hvc_console_setup(struct console *co, char *options)
{
if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
return -ENODEV;
if (vtermnos[co->index] == -1)
return -ENODEV;
return 0;
}
static struct console hvc_console = {
.name = "hvc",
.write = hvc_console_print,
.device = hvc_console_device,
.setup = hvc_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
};
/*
* Early console initialization. Precedes driver initialization.
*
* (1) we are first, and the user specified another driver
* -- index will remain -1
* (2) we are first and the user specified no driver
* -- index will be set to 0, then we will fail setup.
* (3) we are first and the user specified our driver
* -- index will be set to user specified driver, and we will fail
* (4) we are after driver, and this initcall will register us
* -- if the user didn't specify a driver then the console will match
*
* Note that for cases 2 and 3, we will match later when the io driver
* calls hvc_instantiate() and call register again.
*/
static int __init hvc_console_init(void)
{
register_console(&hvc_console);
return 0;
}
console_initcall(hvc_console_init);
/* Callback when the kobject ref count reaches zero. */
static void hvc_port_destruct(struct tty_port *port)
{
struct hvc_struct *hp = container_of(port, struct hvc_struct, port);
unsigned long flags;
mutex_lock(&hvc_structs_mutex);
spin_lock_irqsave(&hp->lock, flags);
list_del(&(hp->next));
spin_unlock_irqrestore(&hp->lock, flags);
mutex_unlock(&hvc_structs_mutex);
kfree(hp);
}
static void hvc_check_console(int index)
{
/* Already registered, bail out */
if (console_is_registered(&hvc_console))
return;
/* If this index is what the user requested, then register
* now (setup won't fail at this point). It's ok to just
* call register again if previously .setup failed.
*/
if (index == hvc_console.index)
register_console(&hvc_console);
}
/*
* hvc_instantiate() is an early console discovery method which locates
 * consoles prior to the vio subsystem discovering them. Hotplugged
* vty adapters do NOT get an hvc_instantiate() callback since they
* appear after early console init.
*/
int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
{
struct hvc_struct *hp;
if (index < 0 || index >= MAX_NR_HVC_CONSOLES)
return -1;
if (vtermnos[index] != -1)
return -1;
/* make sure no tty has been registered in this index */
hp = hvc_get_by_index(index);
if (hp) {
tty_port_put(&hp->port);
return -1;
}
vtermnos[index] = vtermno;
cons_ops[index] = ops;
/* check if we need to re-register the kernel console */
hvc_check_console(index);
return 0;
}
EXPORT_SYMBOL_GPL(hvc_instantiate);
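/*
 * Usage sketch (illustrative only; my_get_chars/my_put_chars are
 * hypothetical): a console backend calls hvc_instantiate() from a
 * console_initcall so that "console=hvc0" works before the tty layer is up:
 *
 *	static const struct hv_ops my_ops = {
 *		.get_chars = my_get_chars,
 *		.put_chars = my_put_chars,
 *	};
 *
 *	static int __init my_console_init(void)
 *	{
 *		hvc_instantiate(0, 0, &my_ops);
 *		return 0;
 *	}
 *
 * hvc_riscv_sbi.c further down is a complete minimal backend of this shape.
 */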
/* Wake the sleeping khvcd */
void hvc_kick(void)
{
hvc_kicked = 1;
wake_up_process(hvc_task);
}
EXPORT_SYMBOL_GPL(hvc_kick);
static void hvc_unthrottle(struct tty_struct *tty)
{
hvc_kick();
}
static int hvc_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct hvc_struct *hp;
int rc;
/* Auto increments kref reference if found. */
hp = hvc_get_by_index(tty->index);
if (!hp)
return -ENODEV;
tty->driver_data = hp;
rc = tty_port_install(&hp->port, driver, tty);
if (rc)
tty_port_put(&hp->port);
return rc;
}
/*
* The TTY interface won't be used until after the vio layer has exposed the vty
* adapter to the kernel.
*/
static int hvc_open(struct tty_struct *tty, struct file * filp)
{
struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&hp->port.lock, flags);
/* Check and then increment for fast path open. */
if (hp->port.count++ > 0) {
spin_unlock_irqrestore(&hp->port.lock, flags);
hvc_kick();
return 0;
} /* else count == 0 */
spin_unlock_irqrestore(&hp->port.lock, flags);
tty_port_tty_set(&hp->port, tty);
if (hp->ops->notifier_add)
rc = hp->ops->notifier_add(hp, hp->data);
/*
* If the notifier fails we return an error. The tty layer
* will call hvc_close() after a failed open but we don't want to clean
* up there so we'll clean up here and clear out the previously set
* tty fields and return the kref reference.
*/
if (rc) {
printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
} else {
/* We are ready... raise DTR/RTS */
if (C_BAUD(tty))
if (hp->ops->dtr_rts)
hp->ops->dtr_rts(hp, true);
tty_port_set_initialized(&hp->port, true);
}
/* Force wakeup of the polling thread */
hvc_kick();
return rc;
}
static void hvc_close(struct tty_struct *tty, struct file * filp)
{
struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
if (tty_hung_up_p(filp))
return;
spin_lock_irqsave(&hp->port.lock, flags);
if (--hp->port.count == 0) {
spin_unlock_irqrestore(&hp->port.lock, flags);
/* We are done with the tty pointer now. */
tty_port_tty_set(&hp->port, NULL);
if (!tty_port_initialized(&hp->port))
return;
if (C_HUPCL(tty))
if (hp->ops->dtr_rts)
hp->ops->dtr_rts(hp, false);
if (hp->ops->notifier_del)
hp->ops->notifier_del(hp, hp->data);
/* cancel pending tty resize work */
cancel_work_sync(&hp->tty_resize);
/*
* Chain calls chars_in_buffer() and returns immediately if
* there is no buffered data otherwise sleeps on a wait queue
* waking periodically to check chars_in_buffer().
*/
tty_wait_until_sent(tty, HVC_CLOSE_WAIT);
tty_port_set_initialized(&hp->port, false);
} else {
if (hp->port.count < 0)
printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
hp->vtermno, hp->port.count);
spin_unlock_irqrestore(&hp->port.lock, flags);
}
}
static void hvc_cleanup(struct tty_struct *tty)
{
struct hvc_struct *hp = tty->driver_data;
tty_port_put(&hp->port);
}
static void hvc_hangup(struct tty_struct *tty)
{
struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
if (!hp)
return;
/* cancel pending tty resize work */
cancel_work_sync(&hp->tty_resize);
spin_lock_irqsave(&hp->port.lock, flags);
/*
* The N_TTY line discipline has problems such that in a close vs
* open->hangup case this can be called after the final close so prevent
* that from happening for now.
*/
if (hp->port.count <= 0) {
spin_unlock_irqrestore(&hp->port.lock, flags);
return;
}
hp->port.count = 0;
spin_unlock_irqrestore(&hp->port.lock, flags);
tty_port_tty_set(&hp->port, NULL);
hp->n_outbuf = 0;
if (hp->ops->notifier_hangup)
hp->ops->notifier_hangup(hp, hp->data);
}
/*
* Push buffered characters whether they were just recently buffered or waiting
* on a blocked hypervisor. Call this function with hp->lock held.
*/
static int hvc_push(struct hvc_struct *hp)
{
int n;
n = hp->ops->put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf);
if (n <= 0) {
if (n == 0 || n == -EAGAIN) {
hp->do_wakeup = 1;
return 0;
}
/* throw away output on error; this happens when
there is no session connected to the vterm. */
hp->n_outbuf = 0;
} else
hp->n_outbuf -= n;
if (hp->n_outbuf > 0)
memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf);
else
hp->do_wakeup = 1;
return n;
}
static ssize_t hvc_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
size_t rsize, written = 0;
/* This write was probably executed during a tty close. */
if (!hp)
return -EPIPE;
/* FIXME what's this (unprotected) check for? */
if (hp->port.count <= 0)
return -EIO;
while (count > 0) {
int ret = 0;
spin_lock_irqsave(&hp->lock, flags);
rsize = hp->outbuf_size - hp->n_outbuf;
if (rsize) {
if (rsize > count)
rsize = count;
memcpy(hp->outbuf + hp->n_outbuf, buf, rsize);
count -= rsize;
buf += rsize;
hp->n_outbuf += rsize;
written += rsize;
}
if (hp->n_outbuf > 0)
ret = hvc_push(hp);
spin_unlock_irqrestore(&hp->lock, flags);
if (!ret)
break;
if (count) {
if (hp->n_outbuf > 0)
hvc_flush(hp);
cond_resched();
}
}
/*
* Racy, but harmless, kick thread if there is still pending data.
*/
if (hp->n_outbuf)
hvc_kick();
return written;
}
/**
* hvc_set_winsz() - Resize the hvc tty terminal window.
* @work: work structure.
*
* The routine shall not be called within an atomic context because it
* might sleep.
*
* Locking: hp->lock
*/
static void hvc_set_winsz(struct work_struct *work)
{
struct hvc_struct *hp;
unsigned long hvc_flags;
struct tty_struct *tty;
struct winsize ws;
hp = container_of(work, struct hvc_struct, tty_resize);
tty = tty_port_tty_get(&hp->port);
if (!tty)
return;
spin_lock_irqsave(&hp->lock, hvc_flags);
ws = hp->ws;
spin_unlock_irqrestore(&hp->lock, hvc_flags);
tty_do_resize(tty, &ws);
tty_kref_put(tty);
}
/*
* This is actually a contract between the driver and the tty layer outlining
* how much write room the driver can guarantee will be sent OR BUFFERED. This
* driver MUST honor the return value.
*/
static unsigned int hvc_write_room(struct tty_struct *tty)
{
struct hvc_struct *hp = tty->driver_data;
if (!hp)
return 0;
return hp->outbuf_size - hp->n_outbuf;
}
static unsigned int hvc_chars_in_buffer(struct tty_struct *tty)
{
struct hvc_struct *hp = tty->driver_data;
if (!hp)
return 0;
return hp->n_outbuf;
}
/*
 * The timeout varies between the MIN and MAX values defined here. During
 * console activity we use MIN_TIMEOUT (10 ms). When the console is idle, we
 * increase the timeout on each pass through msleep until we reach the max.
 * This may be noticeable as a brief delay (one second on average) before the
 * console responds to input after a period of inactivity.
*/
#define MIN_TIMEOUT (10)
#define MAX_TIMEOUT (2000)
static u32 timeout = MIN_TIMEOUT;
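/*
 * Backoff sketch: khvcd below grows the timeout by (timeout >> 6) + 1 on
 * each idle pass, so starting from MIN_TIMEOUT the sequence is 10, 11, 12,
 * ... (plus 1 ms per pass while timeout < 64), then roughly +1.6% per pass
 * until it is capped at MAX_TIMEOUT (2000 ms).
 */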
/*
* Maximum number of bytes to get from the console driver if hvc_poll is
* called from driver (and can't sleep). Any more than this and we break
* and start polling with khvcd. This value was derived from an OpenBMC
* console with the OPAL driver that results in about 0.25ms interrupts off
* latency.
*/
#define HVC_ATOMIC_READ_MAX 128
#define HVC_POLL_READ 0x00000001
#define HVC_POLL_WRITE 0x00000002
static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
{
struct tty_struct *tty;
int i, n, count, poll_mask = 0;
char buf[N_INBUF] __ALIGNED__;
unsigned long flags;
int read_total = 0;
int written_total = 0;
spin_lock_irqsave(&hp->lock, flags);
/* Push pending writes */
if (hp->n_outbuf > 0)
written_total = hvc_push(hp);
/* Reschedule us if still some write pending */
if (hp->n_outbuf > 0) {
poll_mask |= HVC_POLL_WRITE;
/* If hvc_push() was not able to write, sleep a few msecs */
timeout = (written_total) ? 0 : MIN_TIMEOUT;
}
if (may_sleep) {
spin_unlock_irqrestore(&hp->lock, flags);
cond_resched();
spin_lock_irqsave(&hp->lock, flags);
}
/* No tty attached, just skip */
tty = tty_port_tty_get(&hp->port);
if (tty == NULL)
goto bail;
/* Now check if we can get data (are we throttled ?) */
if (tty_throttled(tty))
goto out;
/* If we aren't notifier driven and aren't throttled, we always
* request a reschedule
*/
if (!hp->irq_requested)
poll_mask |= HVC_POLL_READ;
read_again:
/* Read data if any */
count = tty_buffer_request_room(&hp->port, N_INBUF);
/* If flip is full, just reschedule a later read */
if (count == 0) {
poll_mask |= HVC_POLL_READ;
goto out;
}
n = hp->ops->get_chars(hp->vtermno, buf, count);
if (n <= 0) {
/* Hangup the tty when disconnected from host */
if (n == -EPIPE) {
spin_unlock_irqrestore(&hp->lock, flags);
tty_hangup(tty);
spin_lock_irqsave(&hp->lock, flags);
} else if ( n == -EAGAIN ) {
/*
* Some back-ends can only ensure a certain min
* num of bytes read, which may be > 'count'.
* Let the tty clear the flip buff to make room.
*/
poll_mask |= HVC_POLL_READ;
}
goto out;
}
for (i = 0; i < n; ++i) {
#ifdef CONFIG_MAGIC_SYSRQ
if (hp->index == hvc_console.index) {
/* Handle the SysRq Hack */
/* XXX should support a sequence */
if (buf[i] == '\x0f') { /* ^O */
/* if ^O is pressed again, reset
* sysrq_pressed and flip ^O char */
sysrq_pressed = !sysrq_pressed;
if (sysrq_pressed)
continue;
} else if (sysrq_pressed) {
handle_sysrq(buf[i]);
sysrq_pressed = 0;
continue;
}
}
#endif /* CONFIG_MAGIC_SYSRQ */
tty_insert_flip_char(&hp->port, buf[i], 0);
}
read_total += n;
if (may_sleep) {
/* Keep going until the flip is full */
spin_unlock_irqrestore(&hp->lock, flags);
cond_resched();
spin_lock_irqsave(&hp->lock, flags);
goto read_again;
} else if (read_total < HVC_ATOMIC_READ_MAX) {
		/*
		 * In atomic context, keep reading only while under the
		 * HVC_ATOMIC_READ_MAX budget; larger reads fall through
		 * and are deferred to khvcd.
		 */
goto read_again;
}
/*
* Latency break, schedule another poll immediately.
*/
poll_mask |= HVC_POLL_READ;
out:
/* Wakeup write queue if necessary */
if (hp->do_wakeup) {
hp->do_wakeup = 0;
tty_wakeup(tty);
}
bail:
spin_unlock_irqrestore(&hp->lock, flags);
if (read_total) {
/* Activity is occurring, so reset the polling backoff value to
a minimum for performance. */
timeout = MIN_TIMEOUT;
tty_flip_buffer_push(&hp->port);
}
tty_kref_put(tty);
return poll_mask;
}
int hvc_poll(struct hvc_struct *hp)
{
return __hvc_poll(hp, false);
}
EXPORT_SYMBOL_GPL(hvc_poll);
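/*
 * Usage sketch (illustrative; my_console_irq is hypothetical): an
 * interrupt-driven backend typically calls hvc_poll() from its irq handler
 * and kicks khvcd when more work is pending:
 *
 *	static irqreturn_t my_console_irq(int irq, void *dev_instance)
 *	{
 *		if (hvc_poll(dev_instance))
 *			hvc_kick();
 *		return IRQ_HANDLED;
 *	}
 */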
/**
* __hvc_resize() - Update terminal window size information.
* @hp: HVC console pointer
* @ws: Terminal window size structure
*
* Stores the specified window size information in the hvc structure of @hp.
 * The function schedules the tty resize update.
 *
 * Locking: The function MUST be called holding hp->lock.
*/
void __hvc_resize(struct hvc_struct *hp, struct winsize ws)
{
hp->ws = ws;
schedule_work(&hp->tty_resize);
}
EXPORT_SYMBOL_GPL(__hvc_resize);
/*
* This kthread is either polling or interrupt driven. This is determined by
 * calling hvc_poll(), which determines whether a console adapter supports
 * interrupts.
*/
static int khvcd(void *unused)
{
int poll_mask;
struct hvc_struct *hp;
set_freezable();
do {
poll_mask = 0;
hvc_kicked = 0;
try_to_freeze();
wmb();
if (!cpus_are_in_xmon()) {
mutex_lock(&hvc_structs_mutex);
list_for_each_entry(hp, &hvc_structs, next) {
poll_mask |= __hvc_poll(hp, true);
cond_resched();
}
mutex_unlock(&hvc_structs_mutex);
} else
poll_mask |= HVC_POLL_READ;
if (hvc_kicked)
continue;
set_current_state(TASK_INTERRUPTIBLE);
if (!hvc_kicked) {
if (poll_mask == 0)
schedule();
else {
unsigned long j_timeout;
if (timeout < MAX_TIMEOUT)
timeout += (timeout >> 6) + 1;
/*
* We don't use msleep_interruptible otherwise
* "kick" will fail to wake us up
*/
j_timeout = msecs_to_jiffies(timeout) + 1;
schedule_timeout_interruptible(j_timeout);
}
}
__set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
return 0;
}
static int hvc_tiocmget(struct tty_struct *tty)
{
struct hvc_struct *hp = tty->driver_data;
if (!hp || !hp->ops->tiocmget)
return -EINVAL;
return hp->ops->tiocmget(hp);
}
static int hvc_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct hvc_struct *hp = tty->driver_data;
if (!hp || !hp->ops->tiocmset)
return -EINVAL;
return hp->ops->tiocmset(hp, set, clear);
}
#ifdef CONFIG_CONSOLE_POLL
static int hvc_poll_init(struct tty_driver *driver, int line, char *options)
{
return 0;
}
static int hvc_poll_get_char(struct tty_driver *driver, int line)
{
struct tty_struct *tty = driver->ttys[0];
struct hvc_struct *hp = tty->driver_data;
int n;
char ch;
n = hp->ops->get_chars(hp->vtermno, &ch, 1);
if (n <= 0)
return NO_POLL_CHAR;
return ch;
}
static void hvc_poll_put_char(struct tty_driver *driver, int line, char ch)
{
struct tty_struct *tty = driver->ttys[0];
struct hvc_struct *hp = tty->driver_data;
int n;
do {
n = hp->ops->put_chars(hp->vtermno, &ch, 1);
} while (n <= 0);
}
#endif
static const struct tty_operations hvc_ops = {
.install = hvc_install,
.open = hvc_open,
.close = hvc_close,
.cleanup = hvc_cleanup,
.write = hvc_write,
.hangup = hvc_hangup,
.unthrottle = hvc_unthrottle,
.write_room = hvc_write_room,
.chars_in_buffer = hvc_chars_in_buffer,
.tiocmget = hvc_tiocmget,
.tiocmset = hvc_tiocmset,
#ifdef CONFIG_CONSOLE_POLL
.poll_init = hvc_poll_init,
.poll_get_char = hvc_poll_get_char,
.poll_put_char = hvc_poll_put_char,
#endif
};
static const struct tty_port_operations hvc_port_ops = {
.destruct = hvc_port_destruct,
};
struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
const struct hv_ops *ops,
int outbuf_size)
{
struct hvc_struct *hp;
int i;
/* We wait until a driver actually comes along */
if (atomic_inc_not_zero(&hvc_needs_init)) {
int err = hvc_init();
if (err)
return ERR_PTR(err);
}
hp = kzalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size,
GFP_KERNEL);
if (!hp)
return ERR_PTR(-ENOMEM);
hp->vtermno = vtermno;
hp->data = data;
hp->ops = ops;
hp->outbuf_size = outbuf_size;
hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
tty_port_init(&hp->port);
hp->port.ops = &hvc_port_ops;
INIT_WORK(&hp->tty_resize, hvc_set_winsz);
spin_lock_init(&hp->lock);
mutex_lock(&hvc_structs_mutex);
/*
* find index to use:
* see if this vterm id matches one registered for console.
*/
for (i=0; i < MAX_NR_HVC_CONSOLES; i++)
if (vtermnos[i] == hp->vtermno &&
cons_ops[i] == hp->ops)
break;
if (i >= MAX_NR_HVC_CONSOLES) {
/* find 'empty' slot for console */
for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) {
}
/* no matching slot, just use a counter */
if (i == MAX_NR_HVC_CONSOLES)
i = ++last_hvc + MAX_NR_HVC_CONSOLES;
}
hp->index = i;
if (i < MAX_NR_HVC_CONSOLES) {
cons_ops[i] = ops;
vtermnos[i] = vtermno;
}
list_add_tail(&(hp->next), &hvc_structs);
mutex_unlock(&hvc_structs_mutex);
/* check if we need to re-register the kernel console */
hvc_check_console(i);
return hp;
}
EXPORT_SYMBOL_GPL(hvc_alloc);
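/*
 * Usage sketch (illustrative; my_ops and the vio probe context are
 * hypothetical): a backend allocates one hvc_struct per discovered vty
 * adapter, typically from its probe callback:
 *
 *	hp = hvc_alloc(vtermno, vdev->irq, &my_ops, PAGE_SIZE);
 *	if (IS_ERR(hp))
 *		return PTR_ERR(hp);
 *	dev_set_drvdata(&vdev->dev, hp);
 *
 * The outbuf_size argument sizes the per-port write buffer that backs
 * hvc_write_room() above.
 */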
int hvc_remove(struct hvc_struct *hp)
{
unsigned long flags;
struct tty_struct *tty;
tty = tty_port_tty_get(&hp->port);
console_lock();
spin_lock_irqsave(&hp->lock, flags);
if (hp->index < MAX_NR_HVC_CONSOLES) {
vtermnos[hp->index] = -1;
cons_ops[hp->index] = NULL;
}
/* Don't whack hp->irq because tty_hangup() will need to free the irq. */
spin_unlock_irqrestore(&hp->lock, flags);
console_unlock();
/*
* We 'put' the instance that was grabbed when the kref instance
* was initialized using kref_init(). Let the last holder of this
* kref cause it to be removed, which will probably be the tty_vhangup
* below.
*/
tty_port_put(&hp->port);
/*
* This function call will auto chain call hvc_hangup.
*/
if (tty) {
tty_vhangup(tty);
tty_kref_put(tty);
}
return 0;
}
EXPORT_SYMBOL_GPL(hvc_remove);
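/*
 * Usage sketch (illustrative; my_remove is hypothetical): the matching
 * device-remove callback hands the port back with hvc_remove():
 *
 *	static void my_remove(struct vio_dev *vdev)
 *	{
 *		struct hvc_struct *hp = dev_get_drvdata(&vdev->dev);
 *
 *		hvc_remove(hp);
 *	}
 */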
/* Driver initialization: called as soon as someone uses hvc_alloc(). */
static int hvc_init(void)
{
struct tty_driver *drv;
int err;
/* We need more than hvc_count adapters due to hotplug additions. */
drv = tty_alloc_driver(HVC_ALLOC_TTY_ADAPTERS, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_RESET_TERMIOS);
if (IS_ERR(drv)) {
err = PTR_ERR(drv);
goto out;
}
drv->driver_name = "hvc";
drv->name = "hvc";
drv->major = HVC_MAJOR;
drv->minor_start = HVC_MINOR;
drv->type = TTY_DRIVER_TYPE_SYSTEM;
drv->init_termios = tty_std_termios;
tty_set_operations(drv, &hvc_ops);
/* Always start the kthread because there can be hotplug vty adapters
* added later. */
hvc_task = kthread_run(khvcd, NULL, "khvcd");
if (IS_ERR(hvc_task)) {
printk(KERN_ERR "Couldn't create kthread for console.\n");
err = PTR_ERR(hvc_task);
goto put_tty;
}
err = tty_register_driver(drv);
if (err) {
printk(KERN_ERR "Couldn't register hvc console driver\n");
goto stop_thread;
}
/*
* Make sure tty is fully registered before allowing it to be
* found by hvc_console_device.
*/
smp_mb();
hvc_driver = drv;
return 0;
stop_thread:
kthread_stop(hvc_task);
hvc_task = NULL;
put_tty:
tty_driver_kref_put(drv);
out:
return err;
}
| linux-master | drivers/tty/hvc/hvc_console.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2004 Hollis Blanchard <[email protected]>, IBM
*/
/* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS
* and the service processor on IBM pSeries servers. On these servers, there
* are no serial ports under the OS's control, and sometimes there is no other
* console available either. However, the service processor has two standard
* serial ports, so this over-complicated protocol allows the OS to control
* those ports by proxy.
*
 * Besides data, the protocol supports the reading/writing of the serial
* port's DTR line, and the reading of the CD line. This is to allow the OS to
* control a modem attached to the service processor's serial port. Note that
* the OS cannot change the speed of the port through this protocol.
*/
#undef DEBUG
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/kernel.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
#include <linux/uaccess.h>
#include <asm/vio.h>
#include <asm/param.h>
#include <asm/hvsi.h>
#define HVSI_MAJOR 229
#define HVSI_MINOR 128
#define MAX_NR_HVSI_CONSOLES 4
#define HVSI_TIMEOUT (5*HZ)
#define HVSI_VERSION 1
#define HVSI_MAX_PACKET 256
#define HVSI_MAX_READ 16
#define HVSI_MAX_OUTGOING_DATA 12
#define N_OUTBUF 12
/*
* we pass data via two 8-byte registers, so we would like our char arrays
* properly aligned for those loads.
*/
#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
struct hvsi_struct {
struct tty_port port;
struct delayed_work writer;
struct work_struct handshaker;
wait_queue_head_t emptyq; /* woken when outbuf is emptied */
wait_queue_head_t stateq; /* woken when HVSI state changes */
spinlock_t lock;
int index;
uint8_t throttle_buf[128];
uint8_t outbuf[N_OUTBUF]; /* to implement write_room and chars_in_buffer */
/* inbuf is for packet reassembly. leave a little room for leftovers. */
uint8_t inbuf[HVSI_MAX_PACKET + HVSI_MAX_READ];
uint8_t *inbuf_end;
int n_throttle;
int n_outbuf;
uint32_t vtermno;
uint32_t virq;
atomic_t seqno; /* HVSI packet sequence number */
uint16_t mctrl;
uint8_t state; /* HVSI protocol state */
uint8_t flags;
#ifdef CONFIG_MAGIC_SYSRQ
uint8_t sysrq;
#endif /* CONFIG_MAGIC_SYSRQ */
};
static struct hvsi_struct hvsi_ports[MAX_NR_HVSI_CONSOLES];
static struct tty_driver *hvsi_driver;
static int hvsi_count;
static int (*hvsi_wait)(struct hvsi_struct *hp, int state);
enum HVSI_PROTOCOL_STATE {
HVSI_CLOSED,
HVSI_WAIT_FOR_VER_RESPONSE,
HVSI_WAIT_FOR_VER_QUERY,
HVSI_OPEN,
HVSI_WAIT_FOR_MCTRL_RESPONSE,
HVSI_FSP_DIED,
};
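/*
 * Handshake flow sketch (derived from the handlers below):
 *
 *	HVSI_CLOSED
 *	  -> HVSI_WAIT_FOR_VER_RESPONSE	(hvsi_handshake sends version query)
 *	  -> HVSI_WAIT_FOR_VER_QUERY	(hvsi_recv_response on FSP's answer)
 *	  -> HVSI_OPEN			(hvsi_recv_query answers FSP's query)
 *
 * HVSI_OPEN <-> HVSI_WAIT_FOR_MCTRL_RESPONSE via hvsi_get_mctrl() and
 * hvsi_recv_response(). HVSI_FSP_DIED is entered from hvsi_push() when a
 * write fails with -EIO.
 */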
#define HVSI_CONSOLE 0x1
static inline int is_console(struct hvsi_struct *hp)
{
return hp->flags & HVSI_CONSOLE;
}
static inline int is_open(struct hvsi_struct *hp)
{
/* if we're waiting for an mctrl then we're already open */
return (hp->state == HVSI_OPEN)
|| (hp->state == HVSI_WAIT_FOR_MCTRL_RESPONSE);
}
static inline void print_state(struct hvsi_struct *hp)
{
#ifdef DEBUG
static const char *state_names[] = {
"HVSI_CLOSED",
"HVSI_WAIT_FOR_VER_RESPONSE",
"HVSI_WAIT_FOR_VER_QUERY",
"HVSI_OPEN",
"HVSI_WAIT_FOR_MCTRL_RESPONSE",
"HVSI_FSP_DIED",
};
const char *name = (hp->state < ARRAY_SIZE(state_names))
? state_names[hp->state] : "UNKNOWN";
pr_debug("hvsi%i: state = %s\n", hp->index, name);
#endif /* DEBUG */
}
static inline void __set_state(struct hvsi_struct *hp, int state)
{
hp->state = state;
print_state(hp);
wake_up_all(&hp->stateq);
}
static inline void set_state(struct hvsi_struct *hp, int state)
{
unsigned long flags;
spin_lock_irqsave(&hp->lock, flags);
__set_state(hp, state);
spin_unlock_irqrestore(&hp->lock, flags);
}
static inline int len_packet(const uint8_t *packet)
{
return (int)((struct hvsi_header *)packet)->len;
}
static inline int is_header(const uint8_t *packet)
{
struct hvsi_header *header = (struct hvsi_header *)packet;
return header->type >= VS_QUERY_RESPONSE_PACKET_HEADER;
}
static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet)
{
if (hp->inbuf_end < packet + sizeof(struct hvsi_header))
return 0; /* don't even have the packet header */
if (hp->inbuf_end < (packet + len_packet(packet)))
return 0; /* don't have the rest of the packet */
return 1;
}
/* shift remaining bytes in packetbuf down */
static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
{
int remaining = (int)(hp->inbuf_end - read_to);
pr_debug("%s: %i chars remain\n", __func__, remaining);
if (read_to != hp->inbuf)
memmove(hp->inbuf, read_to, remaining);
hp->inbuf_end = hp->inbuf + remaining;
}
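/*
 * Buffer layout sketch: after the packet walk in hvsi_load_chunk() below,
 * inbuf may end with a partial packet, e.g.
 *
 *	inbuf: [ packet A ][ packet B ][ first 3 bytes of C
 *	                               ^read_to          ^inbuf_end
 *
 * compact_inbuf() moves those leftover bytes to the front so the remainder
 * of C can be appended by the next hvsi_read().
 */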
#ifdef DEBUG
#define dbg_dump_packet(packet) dump_packet(packet)
#define dbg_dump_hex(data, len) dump_hex(data, len)
#else
#define dbg_dump_packet(packet) do { } while (0)
#define dbg_dump_hex(data, len) do { } while (0)
#endif
static void dump_hex(const uint8_t *data, int len)
{
int i;
printk(" ");
for (i=0; i < len; i++)
printk("%.2x", data[i]);
printk("\n ");
for (i=0; i < len; i++) {
if (isprint(data[i]))
printk("%c", data[i]);
else
printk(".");
}
printk("\n");
}
static void dump_packet(uint8_t *packet)
{
struct hvsi_header *header = (struct hvsi_header *)packet;
printk("type 0x%x, len %i, seqno %i:\n", header->type, header->len,
header->seqno);
dump_hex(packet, header->len);
}
static int hvsi_read(struct hvsi_struct *hp, char *buf, int count)
{
unsigned long got;
got = hvc_get_chars(hp->vtermno, buf, count);
return got;
}
static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet,
struct tty_struct *tty, struct hvsi_struct **to_handshake)
{
struct hvsi_control *header = (struct hvsi_control *)packet;
switch (be16_to_cpu(header->verb)) {
case VSV_MODEM_CTL_UPDATE:
if ((be32_to_cpu(header->word) & HVSI_TSCD) == 0) {
/* CD went away; no more connection */
pr_debug("hvsi%i: CD dropped\n", hp->index);
			hp->mctrl &= ~TIOCM_CD;
if (tty && !C_CLOCAL(tty))
tty_hangup(tty);
}
break;
case VSV_CLOSE_PROTOCOL:
pr_debug("hvsi%i: service processor came back\n", hp->index);
if (hp->state != HVSI_CLOSED) {
*to_handshake = hp;
}
break;
default:
printk(KERN_WARNING "hvsi%i: unknown HVSI control packet: ",
hp->index);
dump_packet(packet);
break;
}
}
static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
{
struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;
uint32_t mctrl_word;
switch (hp->state) {
case HVSI_WAIT_FOR_VER_RESPONSE:
__set_state(hp, HVSI_WAIT_FOR_VER_QUERY);
break;
case HVSI_WAIT_FOR_MCTRL_RESPONSE:
hp->mctrl = 0;
mctrl_word = be32_to_cpu(resp->u.mctrl_word);
if (mctrl_word & HVSI_TSDTR)
hp->mctrl |= TIOCM_DTR;
if (mctrl_word & HVSI_TSCD)
hp->mctrl |= TIOCM_CD;
__set_state(hp, HVSI_OPEN);
break;
default:
printk(KERN_ERR "hvsi%i: unexpected query response: ", hp->index);
dump_packet(packet);
break;
}
}
/* respond to service processor's version query */
static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
{
struct hvsi_query_response packet __ALIGNED__;
int wrote;
packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
packet.hdr.len = sizeof(struct hvsi_query_response);
packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
packet.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
packet.u.version = HVSI_VERSION;
packet.query_seqno = cpu_to_be16(query_seqno+1);
pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
if (wrote != packet.hdr.len) {
printk(KERN_ERR "hvsi%i: couldn't send query response!\n",
hp->index);
return -EIO;
}
return 0;
}
static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
{
struct hvsi_query *query = (struct hvsi_query *)packet;
switch (hp->state) {
case HVSI_WAIT_FOR_VER_QUERY:
hvsi_version_respond(hp, be16_to_cpu(query->hdr.seqno));
__set_state(hp, HVSI_OPEN);
break;
default:
printk(KERN_ERR "hvsi%i: unexpected query: ", hp->index);
dump_packet(packet);
break;
}
}
static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
{
int i;
for (i=0; i < len; i++) {
char c = buf[i];
#ifdef CONFIG_MAGIC_SYSRQ
if (c == '\0') {
hp->sysrq = 1;
continue;
} else if (hp->sysrq) {
handle_sysrq(c);
hp->sysrq = 0;
continue;
}
#endif /* CONFIG_MAGIC_SYSRQ */
tty_insert_flip_char(&hp->port, c, 0);
}
}
/*
* We could get 252 bytes of data at once here. But the tty layer only
* throttles us at TTY_THRESHOLD_THROTTLE (128) bytes, so we could overflow
* it. Accordingly we won't send more than 128 bytes at a time to the flip
* buffer, which will give the tty buffer a chance to throttle us. Should the
* value of TTY_THRESHOLD_THROTTLE change in n_tty.c, this code should be
* revisited.
*/
#define TTY_THRESHOLD_THROTTLE 128
static bool hvsi_recv_data(struct hvsi_struct *hp, const uint8_t *packet)
{
const struct hvsi_header *header = (const struct hvsi_header *)packet;
const uint8_t *data = packet + sizeof(struct hvsi_header);
int datalen = header->len - sizeof(struct hvsi_header);
int overflow = datalen - TTY_THRESHOLD_THROTTLE;
pr_debug("queueing %i chars '%.*s'\n", datalen, datalen, data);
if (datalen == 0)
return false;
if (overflow > 0) {
pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__);
datalen = TTY_THRESHOLD_THROTTLE;
}
hvsi_insert_chars(hp, data, datalen);
if (overflow > 0) {
/*
* we still have more data to deliver, so we need to save off the
* overflow and send it later
*/
pr_debug("%s: deferring overflow\n", __func__);
memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow);
hp->n_throttle = overflow;
}
return true;
}
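/*
 * Worked example: a full 252-byte data payload is split into 128 bytes
 * pushed to the flip buffer now and 124 bytes parked in throttle_buf; the
 * parked remainder is delivered by hvsi_send_overflow() once the tty
 * unthrottles, or directly from the irq handler if the tty never throttled.
 */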
/*
* Returns true/false indicating data successfully read from hypervisor.
* Used both to get packets for tty connections and to advance the state
* machine during console handshaking (in which case tty = NULL and we ignore
* incoming data).
*/
static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct *tty,
struct hvsi_struct **handshake)
{
uint8_t *packet = hp->inbuf;
int chunklen;
bool flip = false;
*handshake = NULL;
chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
if (chunklen == 0) {
pr_debug("%s: 0-length read\n", __func__);
return 0;
}
pr_debug("%s: got %i bytes\n", __func__, chunklen);
dbg_dump_hex(hp->inbuf_end, chunklen);
hp->inbuf_end += chunklen;
/* handle all completed packets */
while ((packet < hp->inbuf_end) && got_packet(hp, packet)) {
struct hvsi_header *header = (struct hvsi_header *)packet;
if (!is_header(packet)) {
printk(KERN_ERR "hvsi%i: got malformed packet\n", hp->index);
/* skip bytes until we find a header or run out of data */
while ((packet < hp->inbuf_end) && (!is_header(packet)))
packet++;
continue;
}
pr_debug("%s: handling %i-byte packet\n", __func__,
len_packet(packet));
dbg_dump_packet(packet);
switch (header->type) {
case VS_DATA_PACKET_HEADER:
if (!is_open(hp))
break;
flip = hvsi_recv_data(hp, packet);
break;
case VS_CONTROL_PACKET_HEADER:
hvsi_recv_control(hp, packet, tty, handshake);
break;
case VS_QUERY_RESPONSE_PACKET_HEADER:
hvsi_recv_response(hp, packet);
break;
case VS_QUERY_PACKET_HEADER:
hvsi_recv_query(hp, packet);
break;
default:
printk(KERN_ERR "hvsi%i: unknown HVSI packet type 0x%x\n",
hp->index, header->type);
dump_packet(packet);
break;
}
packet += len_packet(packet);
if (*handshake) {
pr_debug("%s: handshake\n", __func__);
break;
}
}
compact_inbuf(hp, packet);
if (flip)
tty_flip_buffer_push(&hp->port);
return 1;
}
static void hvsi_send_overflow(struct hvsi_struct *hp)
{
pr_debug("%s: delivering %i bytes overflow\n", __func__,
hp->n_throttle);
hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
hp->n_throttle = 0;
}
/*
* must get all pending data because we only get an irq on empty->non-empty
* transition
*/
static irqreturn_t hvsi_interrupt(int irq, void *arg)
{
struct hvsi_struct *hp = (struct hvsi_struct *)arg;
struct hvsi_struct *handshake;
struct tty_struct *tty;
unsigned long flags;
int again = 1;
pr_debug("%s\n", __func__);
tty = tty_port_tty_get(&hp->port);
while (again) {
spin_lock_irqsave(&hp->lock, flags);
again = hvsi_load_chunk(hp, tty, &handshake);
spin_unlock_irqrestore(&hp->lock, flags);
if (handshake) {
pr_debug("hvsi%i: attempting re-handshake\n", handshake->index);
schedule_work(&handshake->handshaker);
}
}
spin_lock_irqsave(&hp->lock, flags);
if (tty && hp->n_throttle && !tty_throttled(tty)) {
/* we weren't hung up and we weren't throttled, so we can
* deliver the rest now */
hvsi_send_overflow(hp);
tty_flip_buffer_push(&hp->port);
}
spin_unlock_irqrestore(&hp->lock, flags);
tty_kref_put(tty);
return IRQ_HANDLED;
}
/* for boot console, before the irq handler is running */
static int __init poll_for_state(struct hvsi_struct *hp, int state)
{
unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;
for (;;) {
hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */
if (hp->state == state)
return 0;
mdelay(5);
if (time_after(jiffies, end_jiffies))
return -EIO;
}
}
/* wait for irq handler to change our state */
static int wait_for_state(struct hvsi_struct *hp, int state)
{
int ret = 0;
if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT))
ret = -EIO;
return ret;
}
static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
{
struct hvsi_query packet __ALIGNED__;
int wrote;
packet.hdr.type = VS_QUERY_PACKET_HEADER;
packet.hdr.len = sizeof(struct hvsi_query);
packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
packet.verb = cpu_to_be16(verb);
pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
if (wrote != packet.hdr.len) {
printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index,
wrote);
return -EIO;
}
return 0;
}
static int hvsi_get_mctrl(struct hvsi_struct *hp)
{
int ret;
set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE);
hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS);
ret = hvsi_wait(hp, HVSI_OPEN);
if (ret < 0) {
printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index);
set_state(hp, HVSI_OPEN);
return ret;
}
pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl);
return 0;
}
/* note that we can only set DTR */
static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
{
struct hvsi_control packet __ALIGNED__;
int wrote;
packet.hdr.type = VS_CONTROL_PACKET_HEADER;
packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
packet.hdr.len = sizeof(struct hvsi_control);
packet.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
packet.mask = cpu_to_be32(HVSI_TSDTR);
if (mctrl & TIOCM_DTR)
packet.word = cpu_to_be32(HVSI_TSDTR);
pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
if (wrote != packet.hdr.len) {
printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index);
return -EIO;
}
return 0;
}
static void hvsi_drain_input(struct hvsi_struct *hp)
{
uint8_t buf[HVSI_MAX_READ] __ALIGNED__;
unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;
	while (time_before(jiffies, end_jiffies))
if (0 == hvsi_read(hp, buf, HVSI_MAX_READ))
break;
}
static int hvsi_handshake(struct hvsi_struct *hp)
{
int ret;
/*
* We could have a CLOSE or other data waiting for us before we even try
* to open; try to throw it all away so we don't get confused. (CLOSE
* is the first message sent up the pipe when the FSP comes online. We
* need to distinguish between "it came up a while ago and we're the first
* user" and "it was just reset before it saw our handshake packet".)
*/
hvsi_drain_input(hp);
set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE);
ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER);
if (ret < 0) {
printk(KERN_ERR "hvsi%i: couldn't send version query\n", hp->index);
return ret;
}
ret = hvsi_wait(hp, HVSI_OPEN);
if (ret < 0)
return ret;
return 0;
}
static void hvsi_handshaker(struct work_struct *work)
{
struct hvsi_struct *hp =
container_of(work, struct hvsi_struct, handshaker);
if (hvsi_handshake(hp) >= 0)
return;
printk(KERN_ERR "hvsi%i: re-handshaking failed\n", hp->index);
if (is_console(hp)) {
/*
* ttys will re-attempt the handshake via hvsi_open, but
* the console will not.
*/
printk(KERN_ERR "hvsi%i: lost console!\n", hp->index);
}
}
static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
{
struct hvsi_data packet __ALIGNED__;
int ret;
BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
packet.hdr.type = VS_DATA_PACKET_HEADER;
packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
packet.hdr.len = count + sizeof(struct hvsi_header);
memcpy(&packet.data, buf, count);
ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
if (ret == packet.hdr.len) {
/* return the number of chars written, not the packet length */
return count;
}
return ret; /* return any errors */
}
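/*
 * Framing sketch: each data packet is a 4-byte hvsi_header (type, len,
 * 16-bit seqno) followed by at most HVSI_MAX_OUTGOING_DATA (12) payload
 * bytes, so a full packet is exactly 16 bytes -- one pair of the 8-byte
 * registers behind hvc_put_chars(). Writing "hi" thus transfers a 6-byte
 * packet (4 header + 2 data) but reports 2 chars written.
 */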
static void hvsi_close_protocol(struct hvsi_struct *hp)
{
struct hvsi_control packet __ALIGNED__;
packet.hdr.type = VS_CONTROL_PACKET_HEADER;
packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
packet.hdr.len = 6;
packet.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);
pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
}
static int hvsi_open(struct tty_struct *tty, struct file *filp)
{
struct hvsi_struct *hp;
unsigned long flags;
int ret;
pr_debug("%s\n", __func__);
hp = &hvsi_ports[tty->index];
tty->driver_data = hp;
mb();
if (hp->state == HVSI_FSP_DIED)
return -EIO;
tty_port_tty_set(&hp->port, tty);
spin_lock_irqsave(&hp->lock, flags);
hp->port.count++;
atomic_set(&hp->seqno, 0);
h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
spin_unlock_irqrestore(&hp->lock, flags);
if (is_console(hp))
return 0; /* this has already been handshaked as the console */
ret = hvsi_handshake(hp);
if (ret < 0) {
printk(KERN_ERR "%s: HVSI handshaking failed\n", tty->name);
return ret;
}
ret = hvsi_get_mctrl(hp);
if (ret < 0) {
printk(KERN_ERR "%s: couldn't get initial modem flags\n", tty->name);
return ret;
}
ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
if (ret < 0) {
printk(KERN_ERR "%s: couldn't set DTR\n", tty->name);
return ret;
}
return 0;
}
/* wait for hvsi_write_worker to empty hp->outbuf */
static void hvsi_flush_output(struct hvsi_struct *hp)
{
wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT);
/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
cancel_delayed_work_sync(&hp->writer);
flush_work(&hp->handshaker);
/*
* it's also possible that our timeout expired and hvsi_write_worker
* didn't manage to push outbuf. poof.
*/
hp->n_outbuf = 0;
}
static void hvsi_close(struct tty_struct *tty, struct file *filp)
{
struct hvsi_struct *hp = tty->driver_data;
unsigned long flags;
pr_debug("%s\n", __func__);
if (tty_hung_up_p(filp))
return;
spin_lock_irqsave(&hp->lock, flags);
if (--hp->port.count == 0) {
tty_port_tty_set(&hp->port, NULL);
hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
/* only close down connection if it is not the console */
if (!is_console(hp)) {
h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); /* no more irqs */
__set_state(hp, HVSI_CLOSED);
/*
* any data delivered to the tty layer after this will be
* discarded (except for XON/XOFF)
*/
tty->closing = 1;
spin_unlock_irqrestore(&hp->lock, flags);
/* let any existing irq handlers finish. no more will start. */
synchronize_irq(hp->virq);
/* hvsi_write_worker will re-schedule until outbuf is empty. */
hvsi_flush_output(hp);
/* tell FSP to stop sending data */
hvsi_close_protocol(hp);
/*
* drain anything FSP is still in the middle of sending, and let
* hvsi_handshake drain the rest on the next open.
*/
hvsi_drain_input(hp);
spin_lock_irqsave(&hp->lock, flags);
}
} else if (hp->port.count < 0)
printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
hp - hvsi_ports, hp->port.count);
spin_unlock_irqrestore(&hp->lock, flags);
}
static void hvsi_hangup(struct tty_struct *tty)
{
struct hvsi_struct *hp = tty->driver_data;
unsigned long flags;
pr_debug("%s\n", __func__);
tty_port_tty_set(&hp->port, NULL);
spin_lock_irqsave(&hp->lock, flags);
hp->port.count = 0;
hp->n_outbuf = 0;
spin_unlock_irqrestore(&hp->lock, flags);
}
/* called with hp->lock held */
static void hvsi_push(struct hvsi_struct *hp)
{
int n;
if (hp->n_outbuf <= 0)
return;
n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
if (n > 0) {
/* success */
pr_debug("%s: wrote %i chars\n", __func__, n);
hp->n_outbuf = 0;
} else if (n == -EIO) {
__set_state(hp, HVSI_FSP_DIED);
printk(KERN_ERR "hvsi%i: service processor died\n", hp->index);
}
}
/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
static void hvsi_write_worker(struct work_struct *work)
{
struct hvsi_struct *hp =
container_of(work, struct hvsi_struct, writer.work);
unsigned long flags;
#ifdef DEBUG
static long start_j = 0;
if (start_j == 0)
start_j = jiffies;
#endif /* DEBUG */
spin_lock_irqsave(&hp->lock, flags);
pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);
if (!is_open(hp)) {
/*
* We could have a non-open connection if the service processor died
* while we were busily scheduling ourselves. In that case, it could
* be minutes before the service processor comes back, so only try
* again once a second.
*/
schedule_delayed_work(&hp->writer, HZ);
goto out;
}
hvsi_push(hp);
if (hp->n_outbuf > 0)
schedule_delayed_work(&hp->writer, 10);
else {
#ifdef DEBUG
pr_debug("%s: outbuf emptied after %li jiffies\n", __func__,
jiffies - start_j);
start_j = 0;
#endif /* DEBUG */
wake_up_all(&hp->emptyq);
tty_port_tty_wakeup(&hp->port);
}
out:
spin_unlock_irqrestore(&hp->lock, flags);
}
static unsigned int hvsi_write_room(struct tty_struct *tty)
{
struct hvsi_struct *hp = tty->driver_data;
return N_OUTBUF - hp->n_outbuf;
}
static unsigned int hvsi_chars_in_buffer(struct tty_struct *tty)
{
struct hvsi_struct *hp = tty->driver_data;
return hp->n_outbuf;
}
static ssize_t hvsi_write(struct tty_struct *tty, const u8 *source,
size_t count)
{
struct hvsi_struct *hp = tty->driver_data;
unsigned long flags;
size_t total = 0;
size_t origcount = count;
spin_lock_irqsave(&hp->lock, flags);
pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);
if (!is_open(hp)) {
/* we're either closing or not yet open; don't accept data */
pr_debug("%s: not open\n", __func__);
goto out;
}
/*
* when the hypervisor buffer (16K) fills, data will stay in hp->outbuf
* and hvsi_write_worker will be scheduled. subsequent hvsi_write() calls
* will see there is no room in outbuf and return.
*/
while ((count > 0) && (hvsi_write_room(tty) > 0)) {
size_t chunksize = min_t(size_t, count, hvsi_write_room(tty));
BUG_ON(hp->n_outbuf < 0);
memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
hp->n_outbuf += chunksize;
total += chunksize;
source += chunksize;
count -= chunksize;
hvsi_push(hp);
}
if (hp->n_outbuf > 0) {
/*
* we weren't able to write it all to the hypervisor.
* schedule another push attempt.
*/
schedule_delayed_work(&hp->writer, 10);
}
out:
spin_unlock_irqrestore(&hp->lock, flags);
if (total != origcount)
pr_debug("%s: wanted %zu, only wrote %zu\n", __func__,
origcount, total);
return total;
}
/*
* I have never seen throttle or unthrottle called, so this little throttle
* buffering scheme may or may not work.
*/
static void hvsi_throttle(struct tty_struct *tty)
{
struct hvsi_struct *hp = tty->driver_data;
pr_debug("%s\n", __func__);
h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
}
static void hvsi_unthrottle(struct tty_struct *tty)
{
struct hvsi_struct *hp = tty->driver_data;
unsigned long flags;
pr_debug("%s\n", __func__);
spin_lock_irqsave(&hp->lock, flags);
if (hp->n_throttle) {
hvsi_send_overflow(hp);
tty_flip_buffer_push(&hp->port);
}
spin_unlock_irqrestore(&hp->lock, flags);
h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
}
static int hvsi_tiocmget(struct tty_struct *tty)
{
struct hvsi_struct *hp = tty->driver_data;
hvsi_get_mctrl(hp);
return hp->mctrl;
}
static int hvsi_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct hvsi_struct *hp = tty->driver_data;
unsigned long flags;
uint16_t new_mctrl;
/* we can only alter DTR */
clear &= TIOCM_DTR;
set &= TIOCM_DTR;
spin_lock_irqsave(&hp->lock, flags);
new_mctrl = (hp->mctrl & ~clear) | set;
if (hp->mctrl != new_mctrl) {
hvsi_set_mctrl(hp, new_mctrl);
hp->mctrl = new_mctrl;
}
spin_unlock_irqrestore(&hp->lock, flags);
return 0;
}
static const struct tty_operations hvsi_ops = {
.open = hvsi_open,
.close = hvsi_close,
.write = hvsi_write,
.hangup = hvsi_hangup,
.write_room = hvsi_write_room,
.chars_in_buffer = hvsi_chars_in_buffer,
.throttle = hvsi_throttle,
.unthrottle = hvsi_unthrottle,
.tiocmget = hvsi_tiocmget,
.tiocmset = hvsi_tiocmset,
};
static int __init hvsi_init(void)
{
struct tty_driver *driver;
int i, ret;
driver = tty_alloc_driver(hvsi_count, TTY_DRIVER_REAL_RAW);
if (IS_ERR(driver))
return PTR_ERR(driver);
driver->driver_name = "hvsi";
driver->name = "hvsi";
driver->major = HVSI_MAJOR;
driver->minor_start = HVSI_MINOR;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->init_termios = tty_std_termios;
driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
driver->init_termios.c_ispeed = 9600;
driver->init_termios.c_ospeed = 9600;
tty_set_operations(driver, &hvsi_ops);
	for (i = 0; i < hvsi_count; i++) {
		struct hvsi_struct *hp = &hvsi_ports[i];
		int ret;
tty_port_link_device(&hp->port, driver, i);
ret = request_irq(hp->virq, hvsi_interrupt, 0, "hvsi", hp);
if (ret)
printk(KERN_ERR "HVSI: couldn't reserve irq 0x%x (error %i)\n",
hp->virq, ret);
}
hvsi_wait = wait_for_state; /* irqs active now */
ret = tty_register_driver(driver);
if (ret) {
pr_err("Couldn't register hvsi console driver\n");
goto err_free_irq;
}
hvsi_driver = driver;
printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);
return 0;
err_free_irq:
hvsi_wait = poll_for_state;
for (i = 0; i < hvsi_count; i++) {
struct hvsi_struct *hp = &hvsi_ports[i];
free_irq(hp->virq, hp);
}
tty_driver_kref_put(driver);
return ret;
}
device_initcall(hvsi_init);
/***** console (not tty) code: *****/
static void hvsi_console_print(struct console *console, const char *buf,
unsigned int count)
{
struct hvsi_struct *hp = &hvsi_ports[console->index];
char c[HVSI_MAX_OUTGOING_DATA] __ALIGNED__;
unsigned int i = 0, n = 0;
int ret, donecr = 0;
mb();
if (!is_open(hp))
return;
/*
* ugh, we have to translate LF -> CRLF ourselves, in place.
* copied from hvc_console.c:
*/
while (count > 0 || i > 0) {
if (count > 0 && i < sizeof(c)) {
if (buf[n] == '\n' && !donecr) {
c[i++] = '\r';
donecr = 1;
} else {
c[i++] = buf[n++];
donecr = 0;
--count;
}
} else {
ret = hvsi_put_chars(hp, c, i);
			if (ret < 0)
				i = 0;
			else
				i -= ret;
}
}
}
static struct tty_driver *hvsi_console_device(struct console *console,
int *index)
{
*index = console->index;
return hvsi_driver;
}
static int __init hvsi_console_setup(struct console *console, char *options)
{
struct hvsi_struct *hp;
int ret;
if (console->index < 0 || console->index >= hvsi_count)
return -EINVAL;
hp = &hvsi_ports[console->index];
/* give the FSP a chance to change the baud rate when we re-open */
hvsi_close_protocol(hp);
ret = hvsi_handshake(hp);
if (ret < 0)
return ret;
ret = hvsi_get_mctrl(hp);
if (ret < 0)
return ret;
ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
if (ret < 0)
return ret;
hp->flags |= HVSI_CONSOLE;
return 0;
}
static struct console hvsi_console = {
.name = "hvsi",
.write = hvsi_console_print,
.device = hvsi_console_device,
.setup = hvsi_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
};
static int __init hvsi_console_init(void)
{
struct device_node *vty;
hvsi_wait = poll_for_state; /* no irqs yet; must poll */
/* search device tree for vty nodes */
for_each_compatible_node(vty, "serial", "hvterm-protocol") {
struct hvsi_struct *hp;
const __be32 *vtermno, *irq;
vtermno = of_get_property(vty, "reg", NULL);
irq = of_get_property(vty, "interrupts", NULL);
if (!vtermno || !irq)
continue;
if (hvsi_count >= MAX_NR_HVSI_CONSOLES) {
of_node_put(vty);
break;
}
hp = &hvsi_ports[hvsi_count];
INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
INIT_WORK(&hp->handshaker, hvsi_handshaker);
init_waitqueue_head(&hp->emptyq);
init_waitqueue_head(&hp->stateq);
spin_lock_init(&hp->lock);
tty_port_init(&hp->port);
hp->index = hvsi_count;
hp->inbuf_end = hp->inbuf;
hp->state = HVSI_CLOSED;
hp->vtermno = be32_to_cpup(vtermno);
hp->virq = irq_create_mapping(NULL, be32_to_cpup(irq));
if (hp->virq == 0) {
printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
__func__, be32_to_cpup(irq));
tty_port_destroy(&hp->port);
continue;
}
hvsi_count++;
}
if (hvsi_count)
register_console(&hvsi_console);
return 0;
}
console_initcall(hvsi_console_init);
| linux-master | drivers/tty/hvc/hvsi.c |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2008 David Gibson, IBM Corporation
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*/
#include <linux/console.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <asm/sbi.h>
#include "hvc_console.h"
static int hvc_sbi_tty_put(uint32_t vtermno, const char *buf, int count)
{
int i;
for (i = 0; i < count; i++)
sbi_console_putchar(buf[i]);
return i;
}
static int hvc_sbi_tty_get(uint32_t vtermno, char *buf, int count)
{
int i, c;
for (i = 0; i < count; i++) {
c = sbi_console_getchar();
if (c < 0)
break;
buf[i] = c;
}
return i;
}
static const struct hv_ops hvc_sbi_ops = {
.get_chars = hvc_sbi_tty_get,
.put_chars = hvc_sbi_tty_put,
};
static int __init hvc_sbi_init(void)
{
return PTR_ERR_OR_ZERO(hvc_alloc(0, 0, &hvc_sbi_ops, 16));
}
device_initcall(hvc_sbi_init);
static int __init hvc_sbi_console_init(void)
{
hvc_instantiate(0, 0, &hvc_sbi_ops);
return 0;
}
console_initcall(hvc_sbi_console_init);
| linux-master | drivers/tty/hvc/hvc_riscv_sbi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* z/VM IUCV hypervisor console (HVC) device driver
*
* This HVC device driver provides terminal access using
* z/VM IUCV communication paths.
*
* Copyright IBM Corp. 2008, 2013
*
* Author(s): Hendrik Brueckner <[email protected]>
*/
#define KMSG_COMPONENT "hvc_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>
#include "hvc_console.h"
/* General device driver settings */
#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer) / 4)
/* IUCV TTY message */
#define MSG_VERSION 0x02 /* Message version */
#define MSG_TYPE_ERROR 0x01 /* Error message */
#define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */
#define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */
#define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */
#define MSG_TYPE_DATA 0x10 /* Terminal data */
struct iucv_tty_msg {
u8 version; /* Message version */
u8 type; /* Message type */
#define MSG_MAX_DATALEN ((u16)(~0))
u16 datalen; /* Payload length */
u8 data[]; /* Payload buffer */
} __attribute__((packed));
#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
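/*
 * Worked example: the packed iucv_tty_msg header is 4 bytes (version,
 * type and the 16-bit datalen), so MSG_SIZE(s) evaluates to s + 4;
 * e.g. a 256-byte payload gives an IUCV message length of 260.
 */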
enum iucv_state_t {
IUCV_DISCONN = 0,
IUCV_CONNECTED = 1,
IUCV_SEVERED = 2,
};
enum tty_state_t {
TTY_CLOSED = 0,
TTY_OPENED = 1,
};
struct hvc_iucv_private {
struct hvc_struct *hvc; /* HVC struct reference */
u8 srv_name[8]; /* IUCV service name (ebcdic) */
unsigned char is_console; /* Linux console usage flag */
enum iucv_state_t iucv_state; /* IUCV connection status */
enum tty_state_t tty_state; /* TTY status */
struct iucv_path *path; /* IUCV path pointer */
spinlock_t lock; /* hvc_iucv_private lock */
#define SNDBUF_SIZE (PAGE_SIZE) /* must be < MSG_MAX_DATALEN */
void *sndbuf; /* send buffer */
size_t sndbuf_len; /* length of send buffer */
#define QUEUE_SNDBUF_DELAY (HZ / 25)
struct delayed_work sndbuf_work; /* work: send iucv msg(s) */
wait_queue_head_t sndbuf_waitq; /* wait for send completion */
struct list_head tty_outqueue; /* outgoing IUCV messages */
struct list_head tty_inqueue; /* incoming IUCV messages */
struct device *dev; /* device structure */
u8 info_path[16]; /* IUCV path info (dev attr) */
};
struct iucv_tty_buffer {
struct list_head list; /* list pointer */
struct iucv_message msg; /* store an IUCV message */
size_t offset; /* data buffer offset */
struct iucv_tty_msg *mbuf; /* buffer to store input/output data */
};
/* IUCV callback handler */
static int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;
/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX (0)
/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER (500)
#define FILTER_WILDCARD_CHAR '*'
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);
/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;
/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
.path_pending = hvc_iucv_path_pending,
.path_severed = hvc_iucv_path_severed,
.message_complete = hvc_iucv_msg_complete,
.message_pending = hvc_iucv_msg_pending,
};
/**
* hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
* @num: The HVC virtual terminal number (vtermno)
*
* This function returns the struct hvc_iucv_private instance that corresponds
* to the HVC virtual terminal number specified as parameter @num.
*/
static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
	if (num >= hvc_iucv_devices)
return NULL;
return hvc_iucv_table[num];
}
/**
* alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
* @size: Size of the internal buffer used to store data.
* @flags: Memory allocation flags passed to mempool.
*
* This function allocates a new struct iucv_tty_buffer element and, optionally,
* allocates an internal data buffer with the specified size @size.
* The internal data buffer is always allocated with GFP_DMA which is
* required for receiving and sending data with IUCV.
 * Note: The total message size is the internal buffer size plus the
 * fixed members of the iucv_tty_msg structure (see MSG_SIZE()).
* The function returns NULL if memory allocation has failed.
*/
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
struct iucv_tty_buffer *bufp;
bufp = mempool_alloc(hvc_iucv_mempool, flags);
if (!bufp)
return NULL;
memset(bufp, 0, sizeof(*bufp));
if (size > 0) {
bufp->msg.length = MSG_SIZE(size);
bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
if (!bufp->mbuf) {
mempool_free(bufp, hvc_iucv_mempool);
return NULL;
}
bufp->mbuf->version = MSG_VERSION;
bufp->mbuf->type = MSG_TYPE_DATA;
bufp->mbuf->datalen = (u16) size;
}
return bufp;
}
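/*
 * Typical pairing of alloc_tty_buffer() with destroy_tty_buffer() below,
 * as used throughout this driver. A minimal sketch; GFP_ATOMIC is
 * assumed because most callers run under priv->lock:
 *
 *	struct iucv_tty_buffer *sb;
 *
 *	sb = alloc_tty_buffer(len, GFP_ATOMIC);
 *	if (!sb)
 *		return -ENOMEM;
 *	memcpy(sb->mbuf->data, src, len);	// fill the payload
 *	// ... send or queue sb ...
 *	destroy_tty_buffer(sb);			// kfree() + mempool_free()
 */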
/**
* destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
* @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
*/
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
kfree(bufp->mbuf);
mempool_free(bufp, hvc_iucv_mempool);
}
/**
* destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
* @list: List containing struct iucv_tty_buffer elements.
*/
static void destroy_tty_buffer_list(struct list_head *list)
{
struct iucv_tty_buffer *ent, *next;
list_for_each_entry_safe(ent, next, list, list) {
list_del(&ent->list);
destroy_tty_buffer(ent);
}
}
/**
* hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
* @priv: Pointer to struct hvc_iucv_private
* @buf: HVC buffer for writing received terminal data.
* @count: HVC buffer size.
* @has_more_data: Pointer to an int variable.
*
* The function picks up pending messages from the input queue and receives
* the message data that is then written to the specified buffer @buf.
* If the buffer size @count is less than the data message size, the
* message is kept on the input queue and @has_more_data is set to 1.
* If all message data has been written, the message is removed from
* the input queue.
*
* The function returns the number of bytes written to the terminal, zero if
* there are no pending data messages available or if there is no established
* IUCV path.
* If the IUCV path has been severed, then -EPIPE is returned to cause a
* hang up (that is issued by the HVC layer).
*/
static int hvc_iucv_write(struct hvc_iucv_private *priv,
char *buf, int count, int *has_more_data)
{
struct iucv_tty_buffer *rb;
int written;
int rc;
/* immediately return if there is no IUCV connection */
if (priv->iucv_state == IUCV_DISCONN)
return 0;
/* if the IUCV path has been severed, return -EPIPE to inform the
* HVC layer to hang up the tty device. */
if (priv->iucv_state == IUCV_SEVERED)
return -EPIPE;
/* check if there are pending messages */
if (list_empty(&priv->tty_inqueue))
return 0;
/* receive an iucv message and flip data to the tty (ldisc) */
rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
written = 0;
if (!rb->mbuf) { /* message not yet received ... */
/* allocate mem to store msg data; if no memory is available
* then leave the buffer on the list and re-try later */
rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
if (!rb->mbuf)
return -ENOMEM;
rc = __iucv_message_receive(priv->path, &rb->msg, 0,
rb->mbuf, rb->msg.length, NULL);
switch (rc) {
case 0: /* Successful */
break;
case 2: /* No message found */
case 9: /* Message purged */
break;
default:
written = -EIO;
}
/* remove buffer if an error has occurred or received data
* is not correct */
if (rc || (rb->mbuf->version != MSG_VERSION) ||
(rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
goto out_remove_buffer;
}
switch (rb->mbuf->type) {
case MSG_TYPE_DATA:
written = min_t(int, rb->mbuf->datalen - rb->offset, count);
memcpy(buf, rb->mbuf->data + rb->offset, written);
if (written < (rb->mbuf->datalen - rb->offset)) {
rb->offset += written;
*has_more_data = 1;
goto out_written;
}
break;
case MSG_TYPE_WINSIZE:
if (rb->mbuf->datalen != sizeof(struct winsize))
break;
/* The caller must ensure that the hvc is locked, which
* is the case when called from hvc_iucv_get_chars() */
__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
break;
case MSG_TYPE_ERROR: /* ignored ... */
case MSG_TYPE_TERMENV: /* ignored ... */
case MSG_TYPE_TERMIOS: /* ignored ... */
break;
}
out_remove_buffer:
list_del(&rb->list);
destroy_tty_buffer(rb);
*has_more_data = !list_empty(&priv->tty_inqueue);
out_written:
return written;
}
/**
* hvc_iucv_get_chars() - HVC get_chars operation.
* @vtermno: HVC virtual terminal number.
* @buf: Pointer to a buffer to store data
* @count: Size of buffer available for writing
*
* The HVC thread calls this method to read characters from the back-end.
* If an IUCV communication path has been established, pending IUCV messages
* are received and data is copied into buffer @buf up to @count bytes.
*
* Locking: The routine gets called under an irqsave() spinlock; and
* the routine locks the struct hvc_iucv_private->lock to call
* helper functions.
*/
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
int written;
int has_more_data;
if (count <= 0)
return 0;
if (!priv)
return -ENODEV;
spin_lock(&priv->lock);
has_more_data = 0;
written = hvc_iucv_write(priv, buf, count, &has_more_data);
spin_unlock(&priv->lock);
/* if there are still messages on the queue... schedule another run */
if (has_more_data)
hvc_kick();
return written;
}
/**
* hvc_iucv_queue() - Buffer terminal data for sending.
* @priv: Pointer to struct hvc_iucv_private instance.
* @buf: Buffer containing data to send.
* @count: Size of buffer and amount of data to send.
*
* The function queues data for sending. To actually send the buffered data,
* a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that have been buffered.
*
* If the device is not connected, data is ignored and the function returns
* @count.
* If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
* (that can be passed to HVC layer to cause a tty hangup).
*/
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
int count)
{
size_t len;
if (priv->iucv_state == IUCV_DISCONN)
return count; /* ignore data */
if (priv->iucv_state == IUCV_SEVERED)
return -EPIPE;
len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
if (!len)
return 0;
memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
priv->sndbuf_len += len;
if (priv->iucv_state == IUCV_CONNECTED)
schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
return len;
}
/**
* hvc_iucv_send() - Send an IUCV message containing terminal data.
* @priv: Pointer to struct hvc_iucv_private instance.
*
* If an IUCV communication path has been established, the buffered output data
* is sent via an IUCV message and the number of bytes sent is returned.
* Returns 0 if there is no established IUCV communication path or
 * -EPIPE if an existing IUCV communication path has been severed.
*/
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
struct iucv_tty_buffer *sb;
int rc, len;
if (priv->iucv_state == IUCV_SEVERED)
return -EPIPE;
if (priv->iucv_state == IUCV_DISCONN)
return -EIO;
if (!priv->sndbuf_len)
return 0;
/* allocate internal buffer to store msg data and also compute total
* message length */
sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
if (!sb)
return -ENOMEM;
memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
sb->mbuf->datalen = (u16) priv->sndbuf_len;
sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
list_add_tail(&sb->list, &priv->tty_outqueue);
rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
(void *) sb->mbuf, sb->msg.length);
if (rc) {
/* drop the message here; however we might want to handle
* 0x03 (msg limit reached) by trying again... */
list_del(&sb->list);
destroy_tty_buffer(sb);
}
len = priv->sndbuf_len;
priv->sndbuf_len = 0;
return len;
}
/**
* hvc_iucv_sndbuf_work() - Send buffered data over IUCV
* @work: Work structure.
*
* This work queue function sends buffered output data over IUCV and,
* if not all buffered data could be sent, reschedules itself.
*/
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
struct hvc_iucv_private *priv;
priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
spin_lock_bh(&priv->lock);
hvc_iucv_send(priv);
spin_unlock_bh(&priv->lock);
}
/**
* hvc_iucv_put_chars() - HVC put_chars operation.
* @vtermno: HVC virtual terminal number.
 * @buf: Pointer to a buffer to read data from
* @count: Size of buffer available for reading
*
* The HVC thread calls this method to write characters to the back-end.
* The function calls hvc_iucv_queue() to queue terminal data for sending.
*
* Locking: The method gets called under an irqsave() spinlock; and
* locks struct hvc_iucv_private->lock.
*/
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
int queued;
if (count <= 0)
return 0;
if (!priv)
return -ENODEV;
spin_lock(&priv->lock);
queued = hvc_iucv_queue(priv, buf, count);
spin_unlock(&priv->lock);
return queued;
}
/**
* hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
* @hp: Pointer to the HVC device (struct hvc_struct)
 * @id: Additional data (originally passed to hvc_alloc): the index of a struct
* hvc_iucv_private instance.
*
* The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
* instance that is derived from @id. Always returns 0.
*
* Locking: struct hvc_iucv_private->lock, spin_lock_bh
*/
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
struct hvc_iucv_private *priv;
priv = hvc_iucv_get_private(id);
if (!priv)
return 0;
spin_lock_bh(&priv->lock);
priv->tty_state = TTY_OPENED;
spin_unlock_bh(&priv->lock);
return 0;
}
/**
* hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
* @priv: Pointer to the struct hvc_iucv_private instance.
*/
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
destroy_tty_buffer_list(&priv->tty_outqueue);
destroy_tty_buffer_list(&priv->tty_inqueue);
priv->tty_state = TTY_CLOSED;
priv->iucv_state = IUCV_DISCONN;
priv->sndbuf_len = 0;
}
/**
* tty_outqueue_empty() - Test if the tty outq is empty
* @priv: Pointer to struct hvc_iucv_private instance.
*/
static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
{
int rc;
spin_lock_bh(&priv->lock);
rc = list_empty(&priv->tty_outqueue);
spin_unlock_bh(&priv->lock);
return rc;
}
/**
* flush_sndbuf_sync() - Flush send buffer and wait for completion
* @priv: Pointer to struct hvc_iucv_private instance.
*
* The routine cancels a pending sndbuf work, calls hvc_iucv_send()
* to flush any buffered terminal output data and waits for completion.
*/
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
int sync_wait;
cancel_delayed_work_sync(&priv->sndbuf_work);
spin_lock_bh(&priv->lock);
hvc_iucv_send(priv); /* force sending buffered data */
sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
spin_unlock_bh(&priv->lock);
if (sync_wait)
wait_event_timeout(priv->sndbuf_waitq,
tty_outqueue_empty(priv), HZ/10);
}
/**
* hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
* @priv: Pointer to hvc_iucv_private structure
*
* This routine severs an existing IUCV communication path and hangs
* up the underlying HVC terminal device.
* The hang-up occurs only if an IUCV communication path is established;
* otherwise there is no need to hang up the terminal device.
*
* The IUCV HVC hang-up is separated into two steps:
* 1. After the IUCV path has been severed, the iucv_state is set to
* IUCV_SEVERED.
* 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
* IUCV_SEVERED state causes the tty hang-up in the HVC layer.
*
* If the tty has not yet been opened, clean up the hvc_iucv_private
* structure to allow re-connects.
* If the tty has been opened, let get_chars() return -EPIPE to signal
* the HVC layer to hang up the tty and, if so, wake up the HVC thread
* to call get_chars()...
*
* Special notes on hanging up a HVC terminal instantiated as console:
* Hang-up: 1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
* 2. do_tty_hangup() calls tty->ops->close() for console_filp
* => no hangup notifier is called by HVC (default)
 * 3. hvc_close() returns because of tty_hung_up_p(filp)
* => no delete notifier is called!
 * Finally, the back-end is not notified; thus, the tty session is
 * kept active (TTY_OPENED) to be ready for re-connects.
*
* Locking: spin_lock(&priv->lock) w/o disabling bh
*/
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
struct iucv_path *path;
path = NULL;
spin_lock(&priv->lock);
if (priv->iucv_state == IUCV_CONNECTED) {
path = priv->path;
priv->path = NULL;
priv->iucv_state = IUCV_SEVERED;
if (priv->tty_state == TTY_CLOSED)
hvc_iucv_cleanup(priv);
else
/* console is special (see above) */
if (priv->is_console) {
hvc_iucv_cleanup(priv);
priv->tty_state = TTY_OPENED;
} else
hvc_kick();
}
spin_unlock(&priv->lock);
/* finally sever path (outside of priv->lock due to lock ordering) */
if (path) {
iucv_path_sever(path, NULL);
iucv_path_free(path);
}
}
/**
* hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
* @hp: Pointer to the HVC device (struct hvc_struct)
* @id: Additional data (originally passed to hvc_alloc):
 * the index of a struct hvc_iucv_private instance.
*
* This routine notifies the HVC back-end that a tty hangup (carrier loss,
* virtual or otherwise) has occurred.
* The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
* to keep an existing IUCV communication path established.
* (Background: vhangup() is called from user space (by getty or login) to
* disable writing to the tty by other applications).
* If the tty has been opened and an established IUCV path has been severed
* (we caused the tty hangup), the function calls hvc_iucv_cleanup().
*
* Locking: struct hvc_iucv_private->lock
*/
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
struct hvc_iucv_private *priv;
priv = hvc_iucv_get_private(id);
if (!priv)
return;
flush_sndbuf_sync(priv);
spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourselves (from the iucv
	 * path_severed callback [IUCV_SEVERED]), we have to clean up
* our structure and to set state to TTY_CLOSED.
* If the tty was hung up otherwise (e.g. vhangup()), then we
* ignore this hangup and keep an established IUCV path open...
* (...the reason is that we are not able to connect back to the
* client if we disconnect on hang up) */
priv->tty_state = TTY_CLOSED;
if (priv->iucv_state == IUCV_SEVERED)
hvc_iucv_cleanup(priv);
spin_unlock_bh(&priv->lock);
}
/**
* hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
 * @hp: Pointer to the HVC device (struct hvc_struct)
* @active: True to raise or false to lower DTR/RTS lines
*
* This routine notifies the HVC back-end to raise or lower DTR/RTS
* lines. Raising DTR/RTS is ignored. Lowering DTR/RTS indicates to
* drop the IUCV connection (similar to hang up the modem).
*/
static void hvc_iucv_dtr_rts(struct hvc_struct *hp, bool active)
{
struct hvc_iucv_private *priv;
struct iucv_path *path;
/* Raising the DTR/RTS is ignored as IUCV connections can be
	 * established at any time.
*/
if (active)
return;
priv = hvc_iucv_get_private(hp->vtermno);
if (!priv)
return;
/* Lowering the DTR/RTS lines disconnects an established IUCV
* connection.
*/
flush_sndbuf_sync(priv);
spin_lock_bh(&priv->lock);
path = priv->path; /* save reference to IUCV path */
priv->path = NULL;
priv->iucv_state = IUCV_DISCONN;
spin_unlock_bh(&priv->lock);
/* Sever IUCV path outside of priv->lock due to lock ordering of:
* priv->lock <--> iucv_table_lock */
if (path) {
iucv_path_sever(path, NULL);
iucv_path_free(path);
}
}
/**
* hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
* @hp: Pointer to the HVC device (struct hvc_struct)
* @id: Additional data (originally passed to hvc_alloc):
 * the index of a struct hvc_iucv_private instance.
*
* This routine notifies the HVC back-end that the last tty device fd has been
* closed. The function cleans up tty resources. The clean-up of the IUCV
* connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
* control setting.
*
* Locking: struct hvc_iucv_private->lock
*/
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
struct hvc_iucv_private *priv;
priv = hvc_iucv_get_private(id);
if (!priv)
return;
flush_sndbuf_sync(priv);
spin_lock_bh(&priv->lock);
destroy_tty_buffer_list(&priv->tty_outqueue);
destroy_tty_buffer_list(&priv->tty_inqueue);
priv->tty_state = TTY_CLOSED;
priv->sndbuf_len = 0;
spin_unlock_bh(&priv->lock);
}
/**
* hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
* @ipvmid: Originating z/VM user ID (right padded with blanks)
*
* Returns 0 if the z/VM user ID that is specified with @ipvmid is permitted to
* connect, otherwise non-zero.
*/
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
const char *wildcard, *filter_entry;
size_t i, len;
/* Note: default policy is ACCEPT if no filter is set */
if (!hvc_iucv_filter_size)
return 0;
for (i = 0; i < hvc_iucv_filter_size; i++) {
filter_entry = hvc_iucv_filter + (8 * i);
/* If a filter entry contains the filter wildcard character,
* reduce the length to match the leading portion of the user
* ID only (wildcard match). Characters following the wildcard
* are ignored.
*/
wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
len = (wildcard) ? wildcard - filter_entry : 8;
if (0 == memcmp(ipvmid, filter_entry, len))
return 0;
}
return 1;
}
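/*
 * Wildcard semantics by example (all user IDs below are made up): an
 * entry of "LNX*" matches any ID that begins with "LNX", because only
 * the characters before the '*' take part in the memcmp() above.
 *
 *	filter entry	ipvmid		result
 *	"LNXUSER1"	"LNXUSER1"	accepted (exact match)
 *	"LNX*"		"LNXTS42 "	accepted (prefix match)
 *	"LNX*"		"ZVMADMIN"	refused
 */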
/**
* hvc_iucv_path_pending() - IUCV handler to process a connection request.
* @path: Pending path (struct iucv_path)
* @ipvmid: z/VM system identifier of originator
* @ipuser: User specified data for this path
* (AF_IUCV: port/service name and originator port)
*
* The function uses the @ipuser data to determine if the pending path belongs
* to a terminal managed by this device driver.
* If the path belongs to this driver, ensure that the terminal is not accessed
* multiple times (only one connection to a terminal is allowed).
* If the terminal is not yet connected, the pending path is accepted and is
* associated to the appropriate struct hvc_iucv_private instance.
*
 * Returns 0 if @path belongs to a terminal managed by this device driver;
* otherwise returns -ENODEV in order to dispatch this path to other handlers.
*
* Locking: struct hvc_iucv_private->lock
*/
static int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
u8 *ipuser)
{
struct hvc_iucv_private *priv, *tmp;
	u8 wildcard[9] = "lnxhvc  ";	/* service name blank-padded to 8 chars */
int i, rc, find_unused;
u8 nuser_data[16];
u8 vm_user_id[9];
ASCEBC(wildcard, sizeof(wildcard));
find_unused = !memcmp(wildcard, ipuser, 8);
/* First, check if the pending path request is managed by this
* IUCV handler:
* - find a disconnected device if ipuser contains the wildcard
* - find the device that matches the terminal ID in ipuser
*/
priv = NULL;
for (i = 0; i < hvc_iucv_devices; i++) {
tmp = hvc_iucv_table[i];
if (!tmp)
continue;
if (find_unused) {
spin_lock(&tmp->lock);
if (tmp->iucv_state == IUCV_DISCONN)
priv = tmp;
spin_unlock(&tmp->lock);
} else if (!memcmp(tmp->srv_name, ipuser, 8))
priv = tmp;
if (priv)
break;
}
if (!priv)
return -ENODEV;
/* Enforce that ipvmid is allowed to connect to us */
read_lock(&hvc_iucv_filter_lock);
rc = hvc_iucv_filter_connreq(ipvmid);
read_unlock(&hvc_iucv_filter_lock);
if (rc) {
iucv_path_sever(path, ipuser);
iucv_path_free(path);
memcpy(vm_user_id, ipvmid, 8);
vm_user_id[8] = 0;
pr_info("A connection request from z/VM user ID %s "
"was refused\n", vm_user_id);
return 0;
}
spin_lock(&priv->lock);
/* If the terminal is already connected or being severed, then sever
* this path to enforce that there is only ONE established communication
* path per terminal. */
if (priv->iucv_state != IUCV_DISCONN) {
iucv_path_sever(path, ipuser);
iucv_path_free(path);
goto out_path_handled;
}
/* accept path */
memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */
memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */
path->msglim = 0xffff; /* IUCV MSGLIMIT */
path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */
rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
if (rc) {
iucv_path_sever(path, ipuser);
iucv_path_free(path);
goto out_path_handled;
}
priv->path = path;
priv->iucv_state = IUCV_CONNECTED;
/* store path information */
memcpy(priv->info_path, ipvmid, 8);
memcpy(priv->info_path + 8, ipuser + 8, 8);
/* flush buffered output data... */
schedule_delayed_work(&priv->sndbuf_work, 5);
out_path_handled:
spin_unlock(&priv->lock);
return 0;
}
/**
* hvc_iucv_path_severed() - IUCV handler to process a path sever.
* @path: Pending path (struct iucv_path)
* @ipuser: User specified data for this path
* (AF_IUCV: port/service name and originator port)
*
* This function calls the hvc_iucv_hangup() function for the
* respective IUCV HVC terminal.
*
* Locking: struct hvc_iucv_private->lock
*/
static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
struct hvc_iucv_private *priv = path->private;
hvc_iucv_hangup(priv);
}
/**
* hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
* @path: Pending path (struct iucv_path)
* @msg: Pointer to the IUCV message
*
* The function puts an incoming message on the input queue for later
* processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
* If the tty has not yet been opened, the message is rejected.
*
* Locking: struct hvc_iucv_private->lock
*/
static void hvc_iucv_msg_pending(struct iucv_path *path,
struct iucv_message *msg)
{
struct hvc_iucv_private *priv = path->private;
struct iucv_tty_buffer *rb;
/* reject messages that exceed max size of iucv_tty_msg->datalen */
if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
iucv_message_reject(path, msg);
return;
}
spin_lock(&priv->lock);
/* reject messages if tty has not yet been opened */
if (priv->tty_state == TTY_CLOSED) {
iucv_message_reject(path, msg);
goto unlock_return;
}
/* allocate tty buffer to save iucv msg only */
rb = alloc_tty_buffer(0, GFP_ATOMIC);
if (!rb) {
iucv_message_reject(path, msg);
goto unlock_return; /* -ENOMEM */
}
rb->msg = *msg;
list_add_tail(&rb->list, &priv->tty_inqueue);
hvc_kick(); /* wake up hvc thread */
unlock_return:
spin_unlock(&priv->lock);
}
/**
* hvc_iucv_msg_complete() - IUCV handler to process message completion
* @path: Pending path (struct iucv_path)
* @msg: Pointer to the IUCV message
*
* The function is called upon completion of message delivery to remove the
* message from the outqueue. Additional delivery information can be found
* msg->audit: rejected messages (0x040000 (IPADRJCT)), and
* purged messages (0x010000 (IPADPGNR)).
*
* Locking: struct hvc_iucv_private->lock
*/
static void hvc_iucv_msg_complete(struct iucv_path *path,
struct iucv_message *msg)
{
struct hvc_iucv_private *priv = path->private;
struct iucv_tty_buffer *ent, *next;
LIST_HEAD(list_remove);
spin_lock(&priv->lock);
list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
if (ent->msg.id == msg->id) {
list_move(&ent->list, &list_remove);
break;
}
wake_up(&priv->sndbuf_waitq);
spin_unlock(&priv->lock);
destroy_tty_buffer_list(&list_remove);
}
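/*
 * If delivery failures ever needed to be told apart here, the audit
 * codes quoted above could be tested as sketched below. This is an
 * illustration only; the current code deliberately frees the buffer
 * regardless of the delivery outcome:
 *
 *	if (msg->audit & 0x040000)		// IPADRJCT: rejected
 *		pr_debug("tty msg %u was rejected\n", msg->id);
 *	else if (msg->audit & 0x010000)		// IPADPGNR: purged
 *		pr_debug("tty msg %u was purged\n", msg->id);
 */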
static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hvc_iucv_private *priv = dev_get_drvdata(dev);
size_t len;
len = sizeof(priv->srv_name);
memcpy(buf, priv->srv_name, len);
EBCASC(buf, len);
buf[len++] = '\n';
return len;
}
static ssize_t hvc_iucv_dev_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hvc_iucv_private *priv = dev_get_drvdata(dev);
return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
}
static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hvc_iucv_private *priv = dev_get_drvdata(dev);
char vmid[9], ipuser[9];
memset(vmid, 0, sizeof(vmid));
memset(ipuser, 0, sizeof(ipuser));
spin_lock_bh(&priv->lock);
if (priv->iucv_state == IUCV_CONNECTED) {
memcpy(vmid, priv->info_path, 8);
memcpy(ipuser, priv->info_path + 8, 8);
}
spin_unlock_bh(&priv->lock);
EBCASC(ipuser, 8);
return sprintf(buf, "%s:%s\n", vmid, ipuser);
}
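/*
 * Reading the "peer" attribute thus yields "<vmid>:<ipuser>", for
 * example "LNXUSER1:lnxhvc0 " while connected (the user ID is invented
 * for illustration) and a bare ":" while disconnected.
 */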
/* HVC operations */
static const struct hv_ops hvc_iucv_ops = {
.get_chars = hvc_iucv_get_chars,
.put_chars = hvc_iucv_put_chars,
.notifier_add = hvc_iucv_notifier_add,
.notifier_del = hvc_iucv_notifier_del,
.notifier_hangup = hvc_iucv_notifier_hangup,
.dtr_rts = hvc_iucv_dtr_rts,
};
/* IUCV HVC device attributes */
static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
static struct attribute *hvc_iucv_dev_attrs[] = {
&dev_attr_termid.attr,
&dev_attr_state.attr,
&dev_attr_peer.attr,
NULL,
};
static struct attribute_group hvc_iucv_dev_attr_group = {
.attrs = hvc_iucv_dev_attrs,
};
static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
&hvc_iucv_dev_attr_group,
NULL,
};
/**
* hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
* @id: hvc_iucv_table index
* @is_console: Flag if the instance is used as Linux console
*
* This function allocates a new hvc_iucv_private structure and stores
* the instance in hvc_iucv_table at index @id.
* Returns 0 on success; otherwise non-zero.
*/
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
struct hvc_iucv_private *priv;
char name[9];
int rc;
priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
INIT_LIST_HEAD(&priv->tty_outqueue);
INIT_LIST_HEAD(&priv->tty_inqueue);
INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
init_waitqueue_head(&priv->sndbuf_waitq);
priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
if (!priv->sndbuf) {
kfree(priv);
return -ENOMEM;
}
/* set console flag */
priv->is_console = is_console;
/* allocate hvc device */
priv->hvc = hvc_alloc(id, /* PAGE_SIZE */
id, &hvc_iucv_ops, 256);
if (IS_ERR(priv->hvc)) {
rc = PTR_ERR(priv->hvc);
goto out_error_hvc;
}
/* notify HVC thread instead of using polling */
priv->hvc->irq_requested = 1;
/* setup iucv related information */
snprintf(name, 9, "lnxhvc%-2d", id);
memcpy(priv->srv_name, name, 8);
ASCEBC(priv->srv_name, 8);
/* create and setup device */
priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
if (!priv->dev) {
rc = -ENOMEM;
goto out_error_dev;
}
dev_set_name(priv->dev, "hvc_iucv%d", id);
dev_set_drvdata(priv->dev, priv);
priv->dev->bus = &iucv_bus;
priv->dev->parent = iucv_root;
priv->dev->groups = hvc_iucv_dev_attr_groups;
priv->dev->release = (void (*)(struct device *)) kfree;
rc = device_register(priv->dev);
if (rc) {
put_device(priv->dev);
goto out_error_dev;
}
hvc_iucv_table[id] = priv;
return 0;
out_error_dev:
hvc_remove(priv->hvc);
out_error_hvc:
free_page((unsigned long) priv->sndbuf);
kfree(priv);
return rc;
}
/**
 * hvc_iucv_destroy() - Destroy and free a hvc_iucv_private instance
 * @priv: Pointer to the struct hvc_iucv_private instance to release.
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
hvc_remove(priv->hvc);
device_unregister(priv->dev);
free_page((unsigned long) priv->sndbuf);
kfree(priv);
}
/**
 * hvc_iucv_parse_filter() - Parse a single z/VM user ID from a filter string
 * @filter: String containing a comma-separated list of z/VM user IDs
 * @dest: Location to store the parsed z/VM user ID (blank-padded to 8 chars)
 *
 * Returns a pointer to the remainder of @filter, or ERR_PTR(-EINVAL) if an
 * entry is empty, longer than 8 characters, or consists of the wildcard
 * character only.
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
const char *nextdelim, *residual;
size_t len;
nextdelim = strchr(filter, ',');
if (nextdelim) {
len = nextdelim - filter;
residual = nextdelim + 1;
} else {
len = strlen(filter);
residual = filter + len;
}
if (len == 0)
return ERR_PTR(-EINVAL);
/* check for '\n' (if called from sysfs) */
if (filter[len - 1] == '\n')
len--;
/* prohibit filter entries containing the wildcard character only */
if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
return ERR_PTR(-EINVAL);
if (len > 8)
return ERR_PTR(-EINVAL);
/* pad with blanks and save upper case version of user ID */
memset(dest, ' ', 8);
while (len--)
dest[len] = toupper(filter[len]);
return residual;
}
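/*
 * Example walk-through with made-up IDs: for "lnxts01,lnxts02\n", the
 * first call stores "LNXTS01 " (upper-cased and blank-padded to eight
 * characters) in @dest and returns a pointer to "lnxts02\n"; the second
 * call strips the trailing newline, stores "LNXTS02 " and returns the
 * empty string, which ends the loop in hvc_iucv_setup_filter().
 */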
/**
* hvc_iucv_setup_filter() - Set up z/VM user ID filter
* @filter: String consisting of a comma-separated list of z/VM user IDs
*
* The function parses the @filter string and creates an array containing
* the list of z/VM user ID filter entries.
* Return code 0 means success, -EINVAL if the filter is syntactically
* incorrect, -ENOMEM if there was not enough memory to allocate the
* filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
*/
static int hvc_iucv_setup_filter(const char *val)
{
const char *residual;
int err;
size_t size, count;
void *array, *old_filter;
count = strlen(val);
if (count == 0 || (count == 1 && val[0] == '\n')) {
size = 0;
array = NULL;
goto out_replace_filter; /* clear filter */
}
/* count user IDs in order to allocate sufficient memory */
size = 1;
residual = val;
while ((residual = strchr(residual, ',')) != NULL) {
residual++;
size++;
}
/* check if the specified list exceeds the filter limit */
if (size > MAX_VMID_FILTER)
return -ENOSPC;
array = kcalloc(size, 8, GFP_KERNEL);
if (!array)
return -ENOMEM;
count = size;
residual = val;
while (*residual && count) {
residual = hvc_iucv_parse_filter(residual,
array + ((size - count) * 8));
if (IS_ERR(residual)) {
err = PTR_ERR(residual);
kfree(array);
goto out_err;
}
count--;
}
out_replace_filter:
write_lock_bh(&hvc_iucv_filter_lock);
old_filter = hvc_iucv_filter;
hvc_iucv_filter_size = size;
hvc_iucv_filter = array;
write_unlock_bh(&hvc_iucv_filter_lock);
kfree(old_filter);
err = 0;
out_err:
return err;
}
/**
* param_set_vmidfilter() - Set z/VM user ID filter parameter
* @val: String consisting of a comma-separated list of z/VM user IDs
* @kp: Kernel parameter pointing to hvc_iucv_filter array
*
* The function sets up the z/VM user ID filter specified as comma-separated
* list of user IDs in @val.
* Note: If it is called early in the boot process, @val is stored and
* parsed later in hvc_iucv_init().
*/
static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
int rc;
if (!MACHINE_IS_VM || !hvc_iucv_devices)
return -ENODEV;
if (!val)
return -EINVAL;
rc = 0;
if (slab_is_available())
rc = hvc_iucv_setup_filter(val);
else
hvc_iucv_filter_string = val; /* defer... */
return rc;
}
/**
* param_get_vmidfilter() - Get z/VM user ID filter
* @buffer: Buffer to store z/VM user ID filter,
 * (the buffer is assumed to be PAGE_SIZE bytes)
* @kp: Kernel parameter pointing to the hvc_iucv_filter array
*
* The function stores the filter as a comma-separated list of z/VM user IDs
* in @buffer. Typically, sysfs routines call this function for attr show.
*/
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
int rc;
size_t index, len;
void *start, *end;
if (!MACHINE_IS_VM || !hvc_iucv_devices)
return -ENODEV;
rc = 0;
read_lock_bh(&hvc_iucv_filter_lock);
for (index = 0; index < hvc_iucv_filter_size; index++) {
start = hvc_iucv_filter + (8 * index);
end = memchr(start, ' ', 8);
len = (end) ? end - start : 8;
memcpy(buffer + rc, start, len);
rc += len;
buffer[rc++] = ',';
}
read_unlock_bh(&hvc_iucv_filter_lock);
if (rc)
buffer[--rc] = '\0'; /* replace last comma and update rc */
return rc;
}
#define param_check_vmidfilter(name, p) __param_check(name, p, void)
static const struct kernel_param_ops param_ops_vmidfilter = {
.set = param_set_vmidfilter,
.get = param_get_vmidfilter,
};
/**
* hvc_iucv_init() - z/VM IUCV HVC device driver initialization
*/
static int __init hvc_iucv_init(void)
{
int rc;
unsigned int i;
if (!hvc_iucv_devices)
return -ENODEV;
if (!MACHINE_IS_VM) {
pr_notice("The z/VM IUCV HVC device driver cannot "
"be used without z/VM\n");
rc = -ENODEV;
goto out_error;
}
if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
pr_err("%lu is not a valid value for the hvc_iucv= "
"kernel parameter\n", hvc_iucv_devices);
rc = -EINVAL;
goto out_error;
}
/* parse hvc_iucv_allow string and create z/VM user ID filter list */
if (hvc_iucv_filter_string) {
rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
switch (rc) {
case 0:
break;
case -ENOMEM:
pr_err("Allocating memory failed with "
"reason code=%d\n", 3);
goto out_error;
case -EINVAL:
pr_err("hvc_iucv_allow= does not specify a valid "
"z/VM user ID list\n");
goto out_error;
case -ENOSPC:
pr_err("hvc_iucv_allow= specifies too many "
"z/VM user IDs\n");
goto out_error;
default:
goto out_error;
}
}
hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
sizeof(struct iucv_tty_buffer),
0, 0, NULL);
if (!hvc_iucv_buffer_cache) {
pr_err("Allocating memory failed with reason code=%d\n", 1);
rc = -ENOMEM;
goto out_error;
}
hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
hvc_iucv_buffer_cache);
if (!hvc_iucv_mempool) {
pr_err("Allocating memory failed with reason code=%d\n", 2);
kmem_cache_destroy(hvc_iucv_buffer_cache);
rc = -ENOMEM;
goto out_error;
}
/* register the first terminal device as console
* (must be done before allocating hvc terminal devices) */
rc = hvc_instantiate(0, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
if (rc) {
pr_err("Registering HVC terminal device as "
"Linux console failed\n");
goto out_error_memory;
}
/* allocate hvc_iucv_private structs */
for (i = 0; i < hvc_iucv_devices; i++) {
rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
if (rc) {
pr_err("Creating a new HVC terminal device "
"failed with error code=%d\n", rc);
goto out_error_hvc;
}
}
/* register IUCV callback handler */
rc = iucv_register(&hvc_iucv_handler, 0);
if (rc) {
pr_err("Registering IUCV handlers failed with error code=%d\n",
rc);
goto out_error_hvc;
}
return 0;
out_error_hvc:
for (i = 0; i < hvc_iucv_devices; i++)
if (hvc_iucv_table[i])
hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
mempool_destroy(hvc_iucv_mempool);
kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
kfree(hvc_iucv_filter);
hvc_iucv_devices = 0; /* ensure that we do not provide any device */
return rc;
}
/**
* hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
* @val: Parameter value (numeric)
*/
static int __init hvc_iucv_config(char *val)
{
if (kstrtoul(val, 10, &hvc_iucv_devices))
pr_warn("hvc_iucv= invalid parameter value '%s'\n", val);
return 1;
}
device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
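/*
 * Putting the two parameters together, a kernel command line such as
 *
 *	hvc_iucv=4 hvc_iucv_allow=lnxuser1,lnxts*
 *
 * (user IDs invented for illustration) provides four terminal devices
 * and only accepts connections from LNXUSER1 or any ID starting with
 * "LNXTS".
 */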
| linux-master | drivers/tty/hvc/hvc_iucv.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* vio driver interface to hvc_console.c
*
 * This code was moved here to allow the remaining code to be reused as a
 * generic, polling-mode, semi-reliable-transport driver core for the
 * console and tty subsystems.
*
*
* Copyright (C) 2001 Anton Blanchard <[email protected]>, IBM
* Copyright (C) 2001 Paul Mackerras <[email protected]>, IBM
* Copyright (C) 2004 Benjamin Herrenschmidt <[email protected]>, IBM Corp.
* Copyright (C) 2004 IBM Corporation
*
* Additional Author(s):
* Ryan S. Arnold <[email protected]>
*
* TODO:
*
* - handle error in sending hvsi protocol packets
 * - retry negotiation on subsequent sends?
*/
#undef DEBUG
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/of.h>
#include <asm/hvconsole.h>
#include <asm/vio.h>
#include <asm/hvsi.h>
#include <asm/udbg.h>
#include <asm/machdep.h>
#include "hvc_console.h"
static const char hvc_driver_name[] = "hvc_console";
static const struct vio_device_id hvc_driver_table[] = {
{"serial", "hvterm1"},
#ifndef HVC_OLD_HVSI
{"serial", "hvterm-protocol"},
#endif
{ "", "" }
};
typedef enum hv_protocol {
HV_PROTOCOL_RAW,
HV_PROTOCOL_HVSI
} hv_protocol_t;
struct hvterm_priv {
u32 termno; /* HV term number */
hv_protocol_t proto; /* Raw data or HVSI packets */
struct hvsi_priv hvsi; /* HVSI specific data */
spinlock_t buf_lock;
char buf[SIZE_VIO_GET_CHARS];
int left;
int offset;
};
static struct hvterm_priv *hvterm_privs[MAX_NR_HVC_CONSOLES];
/* For early boot console */
static struct hvterm_priv hvterm_priv0;
static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
unsigned long i;
unsigned long flags;
int got;
if (WARN_ON(!pv))
return 0;
spin_lock_irqsave(&pv->buf_lock, flags);
if (pv->left == 0) {
pv->offset = 0;
pv->left = hvc_get_chars(pv->termno, pv->buf, count);
/*
* Work around a HV bug where it gives us a null
* after every \r. -- paulus
*/
for (i = 1; i < pv->left; ++i) {
if (pv->buf[i] == 0 && pv->buf[i-1] == '\r') {
--pv->left;
if (i < pv->left) {
memmove(&pv->buf[i], &pv->buf[i+1],
pv->left - i);
}
}
}
}
got = min(count, pv->left);
memcpy(buf, &pv->buf[pv->offset], got);
pv->offset += got;
pv->left -= got;
spin_unlock_irqrestore(&pv->buf_lock, flags);
return got;
}
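/*
 * Example of the workaround above: if the hypervisor returns the three
 * characters { '\r', '\0', 'x' }, the loop drops the NUL following the
 * '\r' and memmove() closes the gap, so the caller sees { '\r', 'x' }
 * and pv->left ends up as 2.
 */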
/**
 * hvterm_raw_put_chars() - Send characters to firmware for given vterm adapter
* @vtermno: The virtual terminal number.
* @buf: The characters to send. Because of the underlying hypercall in
* hvc_put_chars(), this buffer must be at least 16 bytes long, even if
* you are sending fewer chars.
* @count: number of chars to send.
*/
static int hvterm_raw_put_chars(uint32_t vtermno, const char *buf, int count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
if (WARN_ON(!pv))
return 0;
return hvc_put_chars(pv->termno, buf, count);
}
static const struct hv_ops hvterm_raw_ops = {
.get_chars = hvterm_raw_get_chars,
.put_chars = hvterm_raw_put_chars,
.notifier_add = notifier_add_irq,
.notifier_del = notifier_del_irq,
.notifier_hangup = notifier_hangup_irq,
};
static int hvterm_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
if (WARN_ON(!pv))
return 0;
return hvsilib_get_chars(&pv->hvsi, buf, count);
}
static int hvterm_hvsi_put_chars(uint32_t vtermno, const char *buf, int count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
if (WARN_ON(!pv))
return 0;
return hvsilib_put_chars(&pv->hvsi, buf, count);
}
static int hvterm_hvsi_open(struct hvc_struct *hp, int data)
{
struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
int rc;
pr_devel("HVSI@%x: open !\n", pv->termno);
rc = notifier_add_irq(hp, data);
if (rc)
return rc;
return hvsilib_open(&pv->hvsi, hp);
}
static void hvterm_hvsi_close(struct hvc_struct *hp, int data)
{
struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
pr_devel("HVSI@%x: do close !\n", pv->termno);
hvsilib_close(&pv->hvsi, hp);
notifier_del_irq(hp, data);
}
static void hvterm_hvsi_hangup(struct hvc_struct *hp, int data)
{
struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
pr_devel("HVSI@%x: do hangup !\n", pv->termno);
hvsilib_close(&pv->hvsi, hp);
notifier_hangup_irq(hp, data);
}
static int hvterm_hvsi_tiocmget(struct hvc_struct *hp)
{
struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
if (!pv)
return -EINVAL;
return pv->hvsi.mctrl;
}
static int hvterm_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set,
unsigned int clear)
{
struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
pr_devel("HVSI@%x: Set modem control, set=%x,clr=%x\n",
pv->termno, set, clear);
if (set & TIOCM_DTR)
hvsilib_write_mctrl(&pv->hvsi, 1);
else if (clear & TIOCM_DTR)
hvsilib_write_mctrl(&pv->hvsi, 0);
return 0;
}
static const struct hv_ops hvterm_hvsi_ops = {
.get_chars = hvterm_hvsi_get_chars,
.put_chars = hvterm_hvsi_put_chars,
.notifier_add = hvterm_hvsi_open,
.notifier_del = hvterm_hvsi_close,
.notifier_hangup = hvterm_hvsi_hangup,
.tiocmget = hvterm_hvsi_tiocmget,
.tiocmset = hvterm_hvsi_tiocmset,
};
static void udbg_hvc_putc(char c)
{
int count = -1;
unsigned char bounce_buffer[16];
if (!hvterm_privs[0])
return;
if (c == '\n')
udbg_hvc_putc('\r');
do {
switch(hvterm_privs[0]->proto) {
case HV_PROTOCOL_RAW:
/*
* hvterm_raw_put_chars requires at least a 16-byte
* buffer, so go via the bounce buffer
*/
bounce_buffer[0] = c;
count = hvterm_raw_put_chars(0, bounce_buffer, 1);
break;
case HV_PROTOCOL_HVSI:
count = hvterm_hvsi_put_chars(0, &c, 1);
break;
}
} while (count == 0 || count == -EAGAIN);
}
static int udbg_hvc_getc_poll(void)
{
int rc = 0;
char c;
if (!hvterm_privs[0])
return -1;
switch(hvterm_privs[0]->proto) {
case HV_PROTOCOL_RAW:
rc = hvterm_raw_get_chars(0, &c, 1);
break;
case HV_PROTOCOL_HVSI:
rc = hvterm_hvsi_get_chars(0, &c, 1);
break;
}
if (!rc)
return -1;
return c;
}
static int udbg_hvc_getc(void)
{
int ch;
if (!hvterm_privs[0])
return -1;
for (;;) {
ch = udbg_hvc_getc_poll();
if (ch == -1) {
/* This shouldn't be needed...but... */
volatile unsigned long delay;
for (delay=0; delay < 2000000; delay++)
;
} else {
return ch;
}
}
}
static int hvc_vio_probe(struct vio_dev *vdev,
const struct vio_device_id *id)
{
const struct hv_ops *ops;
struct hvc_struct *hp;
struct hvterm_priv *pv;
hv_protocol_t proto;
int i, termno = -1;
/* probed with invalid parameters. */
if (!vdev || !id)
return -EPERM;
if (of_device_is_compatible(vdev->dev.of_node, "hvterm1")) {
proto = HV_PROTOCOL_RAW;
ops = &hvterm_raw_ops;
} else if (of_device_is_compatible(vdev->dev.of_node, "hvterm-protocol")) {
proto = HV_PROTOCOL_HVSI;
ops = &hvterm_hvsi_ops;
} else {
pr_err("hvc_vio: Unknown protocol for %pOF\n", vdev->dev.of_node);
return -ENXIO;
}
pr_devel("hvc_vio_probe() device %pOF, using %s protocol\n",
vdev->dev.of_node,
proto == HV_PROTOCOL_RAW ? "raw" : "hvsi");
/* Is it our boot one ? */
if (hvterm_privs[0] == &hvterm_priv0 &&
vdev->unit_address == hvterm_priv0.termno) {
pv = hvterm_privs[0];
termno = 0;
pr_devel("->boot console, using termno 0\n");
}
/* nope, allocate a new one */
else {
for (i = 0; i < MAX_NR_HVC_CONSOLES && termno < 0; i++)
if (!hvterm_privs[i])
termno = i;
pr_devel("->non-boot console, using termno %d\n", termno);
if (termno < 0)
return -ENODEV;
pv = kzalloc(sizeof(struct hvterm_priv), GFP_KERNEL);
if (!pv)
return -ENOMEM;
pv->termno = vdev->unit_address;
pv->proto = proto;
spin_lock_init(&pv->buf_lock);
hvterm_privs[termno] = pv;
hvsilib_init(&pv->hvsi, hvc_get_chars, hvc_put_chars,
pv->termno, 0);
}
hp = hvc_alloc(termno, vdev->irq, ops, MAX_VIO_PUT_CHARS);
if (IS_ERR(hp))
return PTR_ERR(hp);
dev_set_drvdata(&vdev->dev, hp);
/* register udbg if it's not there already for console 0 */
if (hp->index == 0 && !udbg_putc) {
udbg_putc = udbg_hvc_putc;
udbg_getc = udbg_hvc_getc;
udbg_getc_poll = udbg_hvc_getc_poll;
}
return 0;
}
static struct vio_driver hvc_vio_driver = {
.id_table = hvc_driver_table,
.probe = hvc_vio_probe,
.name = hvc_driver_name,
.driver = {
.suppress_bind_attrs = true,
},
};
static int __init hvc_vio_init(void)
{
int rc;
/* Register as a vio device to receive callbacks */
rc = vio_register_driver(&hvc_vio_driver);
return rc;
}
device_initcall(hvc_vio_init); /* after drivers/tty/hvc/hvc_console.c */
void __init hvc_vio_init_early(void)
{
const __be32 *termno;
const struct hv_ops *ops;
/* find the boot console from /chosen/stdout */
/* Check if it's a virtual terminal */
if (!of_node_name_prefix(of_stdout, "vty"))
return;
termno = of_get_property(of_stdout, "reg", NULL);
if (termno == NULL)
return;
hvterm_priv0.termno = of_read_number(termno, 1);
spin_lock_init(&hvterm_priv0.buf_lock);
hvterm_privs[0] = &hvterm_priv0;
/* Check the protocol */
if (of_device_is_compatible(of_stdout, "hvterm1")) {
hvterm_priv0.proto = HV_PROTOCOL_RAW;
ops = &hvterm_raw_ops;
}
else if (of_device_is_compatible(of_stdout, "hvterm-protocol")) {
hvterm_priv0.proto = HV_PROTOCOL_HVSI;
ops = &hvterm_hvsi_ops;
hvsilib_init(&hvterm_priv0.hvsi, hvc_get_chars, hvc_put_chars,
hvterm_priv0.termno, 1);
/* HVSI, perform the handshake now */
hvsilib_establish(&hvterm_priv0.hvsi);
} else
return;
udbg_putc = udbg_hvc_putc;
udbg_getc = udbg_hvc_getc;
udbg_getc_poll = udbg_hvc_getc_poll;
#ifdef HVC_OLD_HVSI
/* When using the old HVSI driver don't register the HVC
* backend for HVSI, only do udbg
*/
if (hvterm_priv0.proto == HV_PROTOCOL_HVSI)
return;
#endif
/* Check whether the user has requested a different console. */
if (!strstr(boot_command_line, "console="))
add_preferred_console("hvc", 0, NULL);
hvc_instantiate(0, 0, ops);
}
/* call this from early_init() for a working debug console on
* vterm capable LPAR machines
*/
#ifdef CONFIG_PPC_EARLY_DEBUG_LPAR
void __init udbg_init_debug_lpar(void)
{
/*
* If we're running as a hypervisor then we definitely can't call the
* hypervisor to print debug output (we *are* the hypervisor), so don't
* register if we detect that MSR_HV=1.
*/
if (mfmsr() & MSR_HV)
return;
hvterm_privs[0] = &hvterm_priv0;
hvterm_priv0.termno = 0;
hvterm_priv0.proto = HV_PROTOCOL_RAW;
spin_lock_init(&hvterm_priv0.buf_lock);
udbg_putc = udbg_hvc_putc;
udbg_getc = udbg_hvc_getc;
udbg_getc_poll = udbg_hvc_getc_poll;
}
#endif /* CONFIG_PPC_EARLY_DEBUG_LPAR */
#ifdef CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI
void __init udbg_init_debug_lpar_hvsi(void)
{
/* See comment above in udbg_init_debug_lpar() */
if (mfmsr() & MSR_HV)
return;
hvterm_privs[0] = &hvterm_priv0;
hvterm_priv0.termno = CONFIG_PPC_EARLY_DEBUG_HVSI_VTERMNO;
hvterm_priv0.proto = HV_PROTOCOL_HVSI;
spin_lock_init(&hvterm_priv0.buf_lock);
udbg_putc = udbg_hvc_putc;
udbg_getc = udbg_hvc_getc;
udbg_getc_poll = udbg_hvc_getc_poll;
hvsilib_init(&hvterm_priv0.hvsi, hvc_get_chars, hvc_put_chars,
hvterm_priv0.termno, 1);
hvsilib_establish(&hvterm_priv0.hvsi);
}
#endif /* CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI */
| linux-master | drivers/tty/hvc/hvc_vio.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* opal driver interface to hvc_console.c
*
* Copyright 2011 Benjamin Herrenschmidt <[email protected]>, IBM Corp.
*/
#undef DEBUG
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <asm/hvconsole.h>
#include <asm/firmware.h>
#include <asm/hvsi.h>
#include <asm/udbg.h>
#include <asm/opal.h>
#include "hvc_console.h"
static const char hvc_opal_name[] = "hvc_opal";
static const struct of_device_id hvc_opal_match[] = {
{ .name = "serial", .compatible = "ibm,opal-console-raw" },
{ .name = "serial", .compatible = "ibm,opal-console-hvsi" },
{ },
};
typedef enum hv_protocol {
HV_PROTOCOL_RAW,
HV_PROTOCOL_HVSI
} hv_protocol_t;
struct hvc_opal_priv {
hv_protocol_t proto; /* Raw data or HVSI packets */
struct hvsi_priv hvsi; /* HVSI specific data */
};
static struct hvc_opal_priv *hvc_opal_privs[MAX_NR_HVC_CONSOLES];
/* For early boot console */
static struct hvc_opal_priv hvc_opal_boot_priv;
static u32 hvc_opal_boot_termno;
static const struct hv_ops hvc_opal_raw_ops = {
.get_chars = opal_get_chars,
.put_chars = opal_put_chars,
.flush = opal_flush_chars,
.notifier_add = notifier_add_irq,
.notifier_del = notifier_del_irq,
.notifier_hangup = notifier_hangup_irq,
};
static int hvc_opal_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
{
struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
if (WARN_ON(!pv))
return -ENODEV;
return hvsilib_get_chars(&pv->hvsi, buf, count);
}
static int hvc_opal_hvsi_put_chars(uint32_t vtermno, const char *buf, int count)
{
struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
if (WARN_ON(!pv))
return -ENODEV;
return hvsilib_put_chars(&pv->hvsi, buf, count);
}
static int hvc_opal_hvsi_open(struct hvc_struct *hp, int data)
{
struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
int rc;
pr_devel("HVSI@%x: do open !\n", hp->vtermno);
rc = notifier_add_irq(hp, data);
if (rc)
return rc;
return hvsilib_open(&pv->hvsi, hp);
}
static void hvc_opal_hvsi_close(struct hvc_struct *hp, int data)
{
struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
pr_devel("HVSI@%x: do close !\n", hp->vtermno);
hvsilib_close(&pv->hvsi, hp);
notifier_del_irq(hp, data);
}
static void hvc_opal_hvsi_hangup(struct hvc_struct *hp, int data)
{
struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
pr_devel("HVSI@%x: do hangup !\n", hp->vtermno);
hvsilib_close(&pv->hvsi, hp);
notifier_hangup_irq(hp, data);
}
static int hvc_opal_hvsi_tiocmget(struct hvc_struct *hp)
{
struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
if (!pv)
return -EINVAL;
return pv->hvsi.mctrl;
}
static int hvc_opal_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set,
unsigned int clear)
{
struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
pr_devel("HVSI@%x: Set modem control, set=%x,clr=%x\n",
hp->vtermno, set, clear);
if (set & TIOCM_DTR)
hvsilib_write_mctrl(&pv->hvsi, 1);
else if (clear & TIOCM_DTR)
hvsilib_write_mctrl(&pv->hvsi, 0);
return 0;
}
static const struct hv_ops hvc_opal_hvsi_ops = {
.get_chars = hvc_opal_hvsi_get_chars,
.put_chars = hvc_opal_hvsi_put_chars,
.flush = opal_flush_chars,
.notifier_add = hvc_opal_hvsi_open,
.notifier_del = hvc_opal_hvsi_close,
.notifier_hangup = hvc_opal_hvsi_hangup,
.tiocmget = hvc_opal_hvsi_tiocmget,
.tiocmset = hvc_opal_hvsi_tiocmset,
};
static int hvc_opal_probe(struct platform_device *dev)
{
const struct hv_ops *ops;
struct hvc_struct *hp;
struct hvc_opal_priv *pv;
hv_protocol_t proto;
unsigned int termno, irq, boot = 0;
const __be32 *reg;
if (of_device_is_compatible(dev->dev.of_node, "ibm,opal-console-raw")) {
proto = HV_PROTOCOL_RAW;
ops = &hvc_opal_raw_ops;
} else if (of_device_is_compatible(dev->dev.of_node,
"ibm,opal-console-hvsi")) {
proto = HV_PROTOCOL_HVSI;
ops = &hvc_opal_hvsi_ops;
} else {
pr_err("hvc_opal: Unknown protocol for %pOF\n",
dev->dev.of_node);
return -ENXIO;
}
reg = of_get_property(dev->dev.of_node, "reg", NULL);
termno = reg ? be32_to_cpup(reg) : 0;
/* Is it our boot one ? */
if (hvc_opal_privs[termno] == &hvc_opal_boot_priv) {
pv = hvc_opal_privs[termno];
boot = 1;
} else if (hvc_opal_privs[termno] == NULL) {
pv = kzalloc(sizeof(struct hvc_opal_priv), GFP_KERNEL);
if (!pv)
return -ENOMEM;
pv->proto = proto;
hvc_opal_privs[termno] = pv;
if (proto == HV_PROTOCOL_HVSI) {
/*
* We want put_chars to be atomic to avoid mangling of
* hvsi packets.
*/
hvsilib_init(&pv->hvsi,
opal_get_chars, opal_put_chars_atomic,
termno, 0);
}
		/* Instantiate now to establish a mapping index==vtermno */
hvc_instantiate(termno, termno, ops);
} else {
pr_err("hvc_opal: Device %pOF has duplicate terminal number #%d\n",
dev->dev.of_node, termno);
return -ENXIO;
}
pr_info("hvc%d: %s protocol on %pOF%s\n", termno,
proto == HV_PROTOCOL_RAW ? "raw" : "hvsi",
dev->dev.of_node,
boot ? " (boot console)" : "");
irq = irq_of_parse_and_map(dev->dev.of_node, 0);
if (!irq) {
pr_info("hvc%d: No interrupts property, using OPAL event\n",
termno);
irq = opal_event_request(ilog2(OPAL_EVENT_CONSOLE_INPUT));
}
if (!irq) {
pr_err("hvc_opal: Unable to map interrupt for device %pOF\n",
dev->dev.of_node);
		return -ENODEV;
}
hp = hvc_alloc(termno, irq, ops, MAX_VIO_PUT_CHARS);
if (IS_ERR(hp))
return PTR_ERR(hp);
/* hvc consoles on powernv may need to share a single irq */
hp->flags = IRQF_SHARED;
dev_set_drvdata(&dev->dev, hp);
return 0;
}
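/*
 * For reference, an OPAL console node matched by this probe might look
 * like the sketch below (property values invented for illustration):
 *
 *	serial@0 {
 *		compatible = "ibm,opal-console-raw";
 *		reg = <0x0>;
 *	};
 *
 * "reg" supplies the terminal number used both as hvc index and as
 * vtermno; when no "interrupts" property is present, the probe falls
 * back to the OPAL_EVENT_CONSOLE_INPUT event as shown above.
 */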
static int hvc_opal_remove(struct platform_device *dev)
{
struct hvc_struct *hp = dev_get_drvdata(&dev->dev);
int rc, termno;
termno = hp->vtermno;
rc = hvc_remove(hp);
if (rc == 0) {
if (hvc_opal_privs[termno] != &hvc_opal_boot_priv)
kfree(hvc_opal_privs[termno]);
hvc_opal_privs[termno] = NULL;
}
return rc;
}
static struct platform_driver hvc_opal_driver = {
.probe = hvc_opal_probe,
.remove = hvc_opal_remove,
.driver = {
.name = hvc_opal_name,
.of_match_table = hvc_opal_match,
}
};
static int __init hvc_opal_init(void)
{
if (!firmware_has_feature(FW_FEATURE_OPAL))
return -ENODEV;
/* Register as a vio device to receive callbacks */
return platform_driver_register(&hvc_opal_driver);
}
device_initcall(hvc_opal_init);
static void udbg_opal_putc(char c)
{
unsigned int termno = hvc_opal_boot_termno;
int count = -1;
if (c == '\n')
udbg_opal_putc('\r');
do {
switch(hvc_opal_boot_priv.proto) {
case HV_PROTOCOL_RAW:
count = opal_put_chars(termno, &c, 1);
break;
case HV_PROTOCOL_HVSI:
count = hvc_opal_hvsi_put_chars(termno, &c, 1);
break;
}
		/* This is needed for the console to flush
* when there aren't any interrupts.
*/
opal_flush_console(termno);
} while(count == 0 || count == -EAGAIN);
}
static int udbg_opal_getc_poll(void)
{
unsigned int termno = hvc_opal_boot_termno;
int rc = 0;
char c;
switch(hvc_opal_boot_priv.proto) {
case HV_PROTOCOL_RAW:
rc = opal_get_chars(termno, &c, 1);
break;
case HV_PROTOCOL_HVSI:
rc = hvc_opal_hvsi_get_chars(termno, &c, 1);
break;
}
if (!rc)
return -1;
return c;
}
static int udbg_opal_getc(void)
{
int ch;
for (;;) {
ch = udbg_opal_getc_poll();
if (ch != -1)
return ch;
}
}
static void udbg_init_opal_common(void)
{
udbg_putc = udbg_opal_putc;
udbg_getc = udbg_opal_getc;
udbg_getc_poll = udbg_opal_getc_poll;
}
void __init hvc_opal_init_early(void)
{
struct device_node *stdout_node = of_node_get(of_stdout);
const __be32 *termno;
const struct hv_ops *ops;
u32 index;
/* If the console wasn't in /chosen, try /ibm,opal */
if (!stdout_node) {
struct device_node *opal, *np;
/* Current OPAL takeover doesn't provide the stdout
* path, so we hard wire it
*/
opal = of_find_node_by_path("/ibm,opal/consoles");
if (opal) {
pr_devel("hvc_opal: Found consoles in new location\n");
} else {
opal = of_find_node_by_path("/ibm,opal");
if (opal)
pr_devel("hvc_opal: "
"Found consoles in old location\n");
}
if (!opal)
return;
for_each_child_of_node(opal, np) {
if (of_node_name_eq(np, "serial")) {
stdout_node = np;
break;
}
}
of_node_put(opal);
}
if (!stdout_node)
return;
termno = of_get_property(stdout_node, "reg", NULL);
index = termno ? be32_to_cpup(termno) : 0;
if (index >= MAX_NR_HVC_CONSOLES)
return;
hvc_opal_privs[index] = &hvc_opal_boot_priv;
/* Check the protocol */
if (of_device_is_compatible(stdout_node, "ibm,opal-console-raw")) {
hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW;
ops = &hvc_opal_raw_ops;
pr_devel("hvc_opal: Found RAW console\n");
}
else if (of_device_is_compatible(stdout_node,"ibm,opal-console-hvsi")) {
hvc_opal_boot_priv.proto = HV_PROTOCOL_HVSI;
ops = &hvc_opal_hvsi_ops;
hvsilib_init(&hvc_opal_boot_priv.hvsi,
opal_get_chars, opal_put_chars_atomic,
index, 1);
/* HVSI, perform the handshake now */
hvsilib_establish(&hvc_opal_boot_priv.hvsi);
pr_devel("hvc_opal: Found HVSI console\n");
} else
goto out;
hvc_opal_boot_termno = index;
udbg_init_opal_common();
add_preferred_console("hvc", index, NULL);
hvc_instantiate(index, index, ops);
out:
of_node_put(stdout_node);
}
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
void __init udbg_init_debug_opal_raw(void)
{
u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
hvc_opal_privs[index] = &hvc_opal_boot_priv;
hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW;
hvc_opal_boot_termno = index;
udbg_init_opal_common();
}
#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_RAW */
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI
void __init udbg_init_debug_opal_hvsi(void)
{
u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
hvc_opal_privs[index] = &hvc_opal_boot_priv;
hvc_opal_boot_termno = index;
udbg_init_opal_common();
hvsilib_init(&hvc_opal_boot_priv.hvsi,
opal_get_chars, opal_put_chars_atomic,
index, 1);
hvsilib_establish(&hvc_opal_boot_priv.hvsi);
}
#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI */
| linux-master | drivers/tty/hvc/hvc_opal.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* ***************************************************************************
* Marvell Armada-3700 Serial Driver
* Author: Wilson Ding <[email protected]>
* Copyright (C) 2015 Marvell International Ltd.
* ***************************************************************************
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
/* Register Map */
#define UART_STD_RBR 0x00
#define UART_EXT_RBR 0x18
#define UART_STD_TSH 0x04
#define UART_EXT_TSH 0x1C
#define UART_STD_CTRL1 0x08
#define UART_EXT_CTRL1 0x04
#define CTRL_SOFT_RST BIT(31)
#define CTRL_TXFIFO_RST BIT(15)
#define CTRL_RXFIFO_RST BIT(14)
#define CTRL_SND_BRK_SEQ BIT(11)
#define CTRL_BRK_DET_INT BIT(3)
#define CTRL_FRM_ERR_INT BIT(2)
#define CTRL_PAR_ERR_INT BIT(1)
#define CTRL_OVR_ERR_INT BIT(0)
#define CTRL_BRK_INT (CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | \
CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)
#define UART_STD_CTRL2 UART_STD_CTRL1
#define UART_EXT_CTRL2 0x20
#define CTRL_STD_TX_RDY_INT BIT(5)
#define CTRL_EXT_TX_RDY_INT BIT(6)
#define CTRL_STD_RX_RDY_INT BIT(4)
#define CTRL_EXT_RX_RDY_INT BIT(5)
#define UART_STAT 0x0C
#define STAT_TX_FIFO_EMP BIT(13)
#define STAT_TX_FIFO_FUL BIT(11)
#define STAT_TX_EMP BIT(6)
#define STAT_STD_TX_RDY BIT(5)
#define STAT_EXT_TX_RDY BIT(15)
#define STAT_STD_RX_RDY BIT(4)
#define STAT_EXT_RX_RDY BIT(14)
#define STAT_BRK_DET BIT(3)
#define STAT_FRM_ERR BIT(2)
#define STAT_PAR_ERR BIT(1)
#define STAT_OVR_ERR BIT(0)
#define STAT_BRK_ERR (STAT_BRK_DET | STAT_FRM_ERR \
| STAT_PAR_ERR | STAT_OVR_ERR)
/*
 * The Marvell Armada 3700 Functional Specifications describe bit 21 of the
 * UART Clock Control register as controlling UART1 and bit 20 as controlling
 * UART2. In reality bit 21 controls UART2 and bit 20 controls UART1. This
 * seems to be an error in Marvell's documentation, hence the following
 * CLK_DIS macros are swapped.
 */
#define UART_BRDV 0x10
/* These bits are located in UART1 address space and control UART2 */
#define UART2_CLK_DIS BIT(21)
/* These bits are located in UART1 address space and control UART1 */
#define UART1_CLK_DIS BIT(20)
/* These bits are located in UART1 address space and control both UARTs */
#define CLK_NO_XTAL BIT(19)
#define CLK_TBG_DIV1_SHIFT 15
#define CLK_TBG_DIV1_MASK 0x7
#define CLK_TBG_DIV1_MAX 6
#define CLK_TBG_DIV2_SHIFT 12
#define CLK_TBG_DIV2_MASK 0x7
#define CLK_TBG_DIV2_MAX 6
#define CLK_TBG_SEL_SHIFT 10
#define CLK_TBG_SEL_MASK 0x3
/* These bits are located in both UARTs address space */
#define BRDV_BAUD_MASK 0x3FF
#define BRDV_BAUD_MAX BRDV_BAUD_MASK
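/*
 * Layout of UART_BRDV as encoded by the macros above (a reading aid derived
 * from those definitions, not copied from the datasheet):
 *   [21] UART2 clock disable, [20] UART1 clock disable,
 *   [19] TBG (1) / XTAL (0) select, [17:15] TBG divisor d1,
 *   [14:12] TBG divisor d2, [11:10] TBG source select,
 *   [9:0] baud generator divisor D.
 */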
#define UART_OSAMP 0x14
#define OSAMP_DEFAULT_DIVISOR 16
#define OSAMP_DIVISORS_MASK 0x3F3F3F3F
#define OSAMP_MAX_DIVISOR 63
#define MVEBU_NR_UARTS 2
#define MVEBU_UART_TYPE "mvebu-uart"
#define DRIVER_NAME "mvebu_serial"
enum {
/* Either there is only one summed IRQ... */
UART_IRQ_SUM = 0,
	/* ...or there are two separate IRQs for RX and TX */
UART_RX_IRQ = 0,
UART_TX_IRQ,
UART_IRQ_COUNT
};
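/*
 * Note that UART_IRQ_SUM and UART_RX_IRQ share index 0: with the old
 * single-interrupt binding only irq[0] is populated, while the new binding
 * uses irq[0] for RX and irq[1] for TX.
 */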
/* Diverging register offsets */
struct uart_regs_layout {
unsigned int rbr;
unsigned int tsh;
unsigned int ctrl;
unsigned int intr;
};
/* Diverging flags */
struct uart_flags {
unsigned int ctrl_tx_rdy_int;
unsigned int ctrl_rx_rdy_int;
unsigned int stat_tx_rdy;
unsigned int stat_rx_rdy;
};
/* Driver data, a structure for each UART port */
struct mvebu_uart_driver_data {
bool is_ext;
struct uart_regs_layout regs;
struct uart_flags flags;
};
/* Saved registers during suspend */
struct mvebu_uart_pm_regs {
unsigned int rbr;
unsigned int tsh;
unsigned int ctrl;
unsigned int intr;
unsigned int stat;
unsigned int brdv;
unsigned int osamp;
};
/* MVEBU UART driver structure */
struct mvebu_uart {
struct uart_port *port;
struct clk *clk;
int irq[UART_IRQ_COUNT];
struct mvebu_uart_driver_data *data;
#if defined(CONFIG_PM)
struct mvebu_uart_pm_regs pm_regs;
#endif /* CONFIG_PM */
};
static struct mvebu_uart *to_mvuart(struct uart_port *port)
{
return (struct mvebu_uart *)port->private_data;
}
#define IS_EXTENDED(port) (to_mvuart(port)->data->is_ext)
#define UART_RBR(port) (to_mvuart(port)->data->regs.rbr)
#define UART_TSH(port) (to_mvuart(port)->data->regs.tsh)
#define UART_CTRL(port) (to_mvuart(port)->data->regs.ctrl)
#define UART_INTR(port) (to_mvuart(port)->data->regs.intr)
#define CTRL_TX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_tx_rdy_int)
#define CTRL_RX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_rx_rdy_int)
#define STAT_TX_RDY(port) (to_mvuart(port)->data->flags.stat_tx_rdy)
#define STAT_RX_RDY(port) (to_mvuart(port)->data->flags.stat_rx_rdy)
static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS];
static DEFINE_SPINLOCK(mvebu_uart_lock);
/* Core UART Driver Operations */
static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
{
unsigned long flags;
unsigned int st;
spin_lock_irqsave(&port->lock, flags);
st = readl(port->membase + UART_STAT);
spin_unlock_irqrestore(&port->lock, flags);
return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
}
static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
{
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}
static void mvebu_uart_set_mctrl(struct uart_port *port,
unsigned int mctrl)
{
	/*
	 * Even if we do not support configuring the modem control lines, this
	 * function must be provided to the serial core
	 */
}
static void mvebu_uart_stop_tx(struct uart_port *port)
{
unsigned int ctl = readl(port->membase + UART_INTR(port));
ctl &= ~CTRL_TX_RDY_INT(port);
writel(ctl, port->membase + UART_INTR(port));
}
static void mvebu_uart_start_tx(struct uart_port *port)
{
unsigned int ctl;
struct circ_buf *xmit = &port->state->xmit;
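	/*
	 * On the extended (UART2) variant, push one pending character before
	 * enabling the TX-ready interrupt, presumably to force a FIFO state
	 * change so that the interrupt actually fires.
	 */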
if (IS_EXTENDED(port) && !uart_circ_empty(xmit)) {
writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
uart_xmit_advance(port, 1);
}
ctl = readl(port->membase + UART_INTR(port));
ctl |= CTRL_TX_RDY_INT(port);
writel(ctl, port->membase + UART_INTR(port));
}
static void mvebu_uart_stop_rx(struct uart_port *port)
{
unsigned int ctl;
ctl = readl(port->membase + UART_CTRL(port));
ctl &= ~CTRL_BRK_INT;
writel(ctl, port->membase + UART_CTRL(port));
ctl = readl(port->membase + UART_INTR(port));
ctl &= ~CTRL_RX_RDY_INT(port);
writel(ctl, port->membase + UART_INTR(port));
}
static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
{
unsigned int ctl;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
ctl = readl(port->membase + UART_CTRL(port));
if (brk == -1)
ctl |= CTRL_SND_BRK_SEQ;
else
ctl &= ~CTRL_SND_BRK_SEQ;
writel(ctl, port->membase + UART_CTRL(port));
spin_unlock_irqrestore(&port->lock, flags);
}
static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
{
struct tty_port *tport = &port->state->port;
unsigned char ch = 0;
char flag = 0;
int ret;
do {
if (status & STAT_RX_RDY(port)) {
ch = readl(port->membase + UART_RBR(port));
ch &= 0xff;
flag = TTY_NORMAL;
port->icount.rx++;
if (status & STAT_PAR_ERR)
port->icount.parity++;
}
		/*
		 * For UART2, the error bits are not cleared on buffer read.
		 * This causes an interrupt loop and a system hang.
		 */
if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
ret = readl(port->membase + UART_STAT);
ret |= STAT_BRK_ERR;
writel(ret, port->membase + UART_STAT);
}
if (status & STAT_BRK_DET) {
port->icount.brk++;
status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
if (uart_handle_break(port))
goto ignore_char;
}
if (status & STAT_OVR_ERR)
port->icount.overrun++;
if (status & STAT_FRM_ERR)
port->icount.frame++;
if (uart_handle_sysrq_char(port, ch))
goto ignore_char;
if (status & port->ignore_status_mask & STAT_PAR_ERR)
status &= ~STAT_RX_RDY(port);
status &= port->read_status_mask;
if (status & STAT_PAR_ERR)
flag = TTY_PARITY;
status &= ~port->ignore_status_mask;
if (status & STAT_RX_RDY(port))
tty_insert_flip_char(tport, ch, flag);
if (status & STAT_BRK_DET)
tty_insert_flip_char(tport, 0, TTY_BREAK);
if (status & STAT_FRM_ERR)
tty_insert_flip_char(tport, 0, TTY_FRAME);
if (status & STAT_OVR_ERR)
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
ignore_char:
status = readl(port->membase + UART_STAT);
} while (status & (STAT_RX_RDY(port) | STAT_BRK_DET));
tty_flip_buffer_push(tport);
}
static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status)
{
u8 ch;
uart_port_tx_limited(port, ch, port->fifosize,
!(readl(port->membase + UART_STAT) & STAT_TX_FIFO_FUL),
writel(ch, port->membase + UART_TSH(port)),
({}));
}
static irqreturn_t mvebu_uart_isr(int irq, void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
unsigned int st = readl(port->membase + UART_STAT);
if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
STAT_BRK_DET))
mvebu_uart_rx_chars(port, st);
if (st & STAT_TX_RDY(port))
mvebu_uart_tx_chars(port, st);
return IRQ_HANDLED;
}
static irqreturn_t mvebu_uart_rx_isr(int irq, void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
unsigned int st = readl(port->membase + UART_STAT);
if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
STAT_BRK_DET))
mvebu_uart_rx_chars(port, st);
return IRQ_HANDLED;
}
static irqreturn_t mvebu_uart_tx_isr(int irq, void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
unsigned int st = readl(port->membase + UART_STAT);
if (st & STAT_TX_RDY(port))
mvebu_uart_tx_chars(port, st);
return IRQ_HANDLED;
}
static int mvebu_uart_startup(struct uart_port *port)
{
struct mvebu_uart *mvuart = to_mvuart(port);
unsigned int ctl;
int ret;
writel(CTRL_TXFIFO_RST | CTRL_RXFIFO_RST,
port->membase + UART_CTRL(port));
udelay(1);
/* Clear the error bits of state register before IRQ request */
ret = readl(port->membase + UART_STAT);
ret |= STAT_BRK_ERR;
writel(ret, port->membase + UART_STAT);
writel(CTRL_BRK_INT, port->membase + UART_CTRL(port));
ctl = readl(port->membase + UART_INTR(port));
ctl |= CTRL_RX_RDY_INT(port);
writel(ctl, port->membase + UART_INTR(port));
if (!mvuart->irq[UART_TX_IRQ]) {
/* Old bindings with just one interrupt (UART0 only) */
ret = devm_request_irq(port->dev, mvuart->irq[UART_IRQ_SUM],
mvebu_uart_isr, port->irqflags,
dev_name(port->dev), port);
if (ret) {
dev_err(port->dev, "unable to request IRQ %d\n",
mvuart->irq[UART_IRQ_SUM]);
return ret;
}
} else {
		/* New bindings with an IRQ for RX and TX (both UARTs) */
ret = devm_request_irq(port->dev, mvuart->irq[UART_RX_IRQ],
mvebu_uart_rx_isr, port->irqflags,
dev_name(port->dev), port);
if (ret) {
dev_err(port->dev, "unable to request IRQ %d\n",
mvuart->irq[UART_RX_IRQ]);
return ret;
}
ret = devm_request_irq(port->dev, mvuart->irq[UART_TX_IRQ],
mvebu_uart_tx_isr, port->irqflags,
dev_name(port->dev),
port);
if (ret) {
dev_err(port->dev, "unable to request IRQ %d\n",
mvuart->irq[UART_TX_IRQ]);
devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ],
port);
return ret;
}
}
return 0;
}
static void mvebu_uart_shutdown(struct uart_port *port)
{
struct mvebu_uart *mvuart = to_mvuart(port);
writel(0, port->membase + UART_INTR(port));
if (!mvuart->irq[UART_TX_IRQ]) {
devm_free_irq(port->dev, mvuart->irq[UART_IRQ_SUM], port);
} else {
devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ], port);
devm_free_irq(port->dev, mvuart->irq[UART_TX_IRQ], port);
}
}
static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
{
unsigned int d_divisor, m_divisor;
unsigned long flags;
u32 brdv, osamp;
if (!port->uartclk)
return 0;
/*
	 * The baudrate is derived from the UART clock by means of divisors:
	 * > d1 * d2 ("TBG divisors"): can divide only the TBG clock, from 1 to 6
	 * > D ("baud generator"): can divide the clock from 1 to 1023
	 * > M ("fractional divisor"): allows better accuracy (from 1 to 63)
*
* Exact formulas for calculating baudrate:
*
* with default x16 scheme:
* baudrate = xtal / (d * 16)
* baudrate = tbg / (d1 * d2 * d * 16)
*
* with fractional divisor:
* baudrate = 10 * xtal / (d * (3 * (m1 + m2) + 2 * (m3 + m4)))
* baudrate = 10 * tbg / (d1*d2 * d * (3 * (m1 + m2) + 2 * (m3 + m4)))
*
* Oversampling value:
* osamp = (m1 << 0) | (m2 << 8) | (m3 << 16) | (m4 << 24);
*
* Where m1 controls number of clock cycles per bit for bits 1,2,3;
* m2 for bits 4,5,6; m3 for bits 7,8 and m4 for bits 9,10.
*
* To simplify baudrate setup set all the M prescalers to the same
* value. For baudrates 9600 Bd and higher, it is enough to use the
* default (x16) divisor or fractional divisor with M = 63, so there
* is no need to use real fractional support (where the M prescalers
* are not equal).
*
* When all the M prescalers are zeroed then default (x16) divisor is
* used. Default x16 scheme is more stable than M (fractional divisor),
* so use M only when D divisor is not enough to derive baudrate.
*
* Member port->uartclk is either xtal clock rate or TBG clock rate
* divided by (d1 * d2). So d1 and d2 are already set by the UART clock
* driver (and UART driver itself cannot change them). Moreover they are
* shared between both UARTs.
*/
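	/*
	 * Worked example (illustrative only, assuming a 25 MHz uartclk): for
	 * 1200 Bd the default x16 scheme needs D = 25000000 / (1200 * 16),
	 * i.e. about 1302, which exceeds BRDV_BAUD_MAX. With M = 63 instead,
	 * D = 25000000 / (1200 * 63) rounds to 331 and the achieved rate is
	 * 25000000 / (331 * 63), i.e. about 1199 Bd.
	 */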
m_divisor = OSAMP_DEFAULT_DIVISOR;
d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
if (d_divisor > BRDV_BAUD_MAX) {
/*
* Experiments show that small M divisors are unstable.
* Use maximal possible M = 63 and calculate D divisor.
*/
m_divisor = OSAMP_MAX_DIVISOR;
d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
}
if (d_divisor < 1)
d_divisor = 1;
else if (d_divisor > BRDV_BAUD_MAX)
d_divisor = BRDV_BAUD_MAX;
spin_lock_irqsave(&mvebu_uart_lock, flags);
brdv = readl(port->membase + UART_BRDV);
brdv &= ~BRDV_BAUD_MASK;
brdv |= d_divisor;
writel(brdv, port->membase + UART_BRDV);
spin_unlock_irqrestore(&mvebu_uart_lock, flags);
osamp = readl(port->membase + UART_OSAMP);
osamp &= ~OSAMP_DIVISORS_MASK;
if (m_divisor != OSAMP_DEFAULT_DIVISOR)
osamp |= (m_divisor << 0) | (m_divisor << 8) |
(m_divisor << 16) | (m_divisor << 24);
writel(osamp, port->membase + UART_OSAMP);
return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor);
}
static void mvebu_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, min_baud, max_baud;
spin_lock_irqsave(&port->lock, flags);
port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;
if (termios->c_iflag & INPCK)
port->read_status_mask |= STAT_FRM_ERR | STAT_PAR_ERR;
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |=
STAT_FRM_ERR | STAT_PAR_ERR | STAT_OVR_ERR;
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;
	/*
	 * The maximal divisor is 1023 and the maximal fractional divisor is
	 * 63. Experiments show that baudrates above 1/80 of the parent clock
	 * rate are not stable, so disallow them. If port->uartclk is not
	 * available, then mvebu_uart_baud_rate_set() fails, so the min_baud
	 * and max_baud values do not matter in that case.
	 */
min_baud = DIV_ROUND_UP(port->uartclk, BRDV_BAUD_MAX *
OSAMP_MAX_DIVISOR);
max_baud = port->uartclk / 80;
baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
baud = mvebu_uart_baud_rate_set(port, baud);
	/* In case the baudrate cannot be changed, report the old value */
if (baud == 0 && old)
baud = tty_termios_baud_rate(old);
/* Only the following flag changes are supported */
if (old) {
termios->c_iflag &= INPCK | IGNPAR;
termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
termios->c_cflag &= CREAD | CBAUD;
termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
termios->c_cflag |= CS8;
}
if (baud != 0) {
tty_termios_encode_baud_rate(termios, baud, baud);
uart_update_timeout(port, termios->c_cflag, baud);
}
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *mvebu_uart_type(struct uart_port *port)
{
return MVEBU_UART_TYPE;
}
static void mvebu_uart_release_port(struct uart_port *port)
{
/* Nothing to do here */
}
static int mvebu_uart_request_port(struct uart_port *port)
{
return 0;
}
#ifdef CONFIG_CONSOLE_POLL
static int mvebu_uart_get_poll_char(struct uart_port *port)
{
unsigned int st = readl(port->membase + UART_STAT);
if (!(st & STAT_RX_RDY(port)))
return NO_POLL_CHAR;
return readl(port->membase + UART_RBR(port));
}
static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c)
{
unsigned int st;
for (;;) {
st = readl(port->membase + UART_STAT);
if (!(st & STAT_TX_FIFO_FUL))
break;
udelay(1);
}
writel(c, port->membase + UART_TSH(port));
}
#endif
static const struct uart_ops mvebu_uart_ops = {
.tx_empty = mvebu_uart_tx_empty,
.set_mctrl = mvebu_uart_set_mctrl,
.get_mctrl = mvebu_uart_get_mctrl,
.stop_tx = mvebu_uart_stop_tx,
.start_tx = mvebu_uart_start_tx,
.stop_rx = mvebu_uart_stop_rx,
.break_ctl = mvebu_uart_break_ctl,
.startup = mvebu_uart_startup,
.shutdown = mvebu_uart_shutdown,
.set_termios = mvebu_uart_set_termios,
.type = mvebu_uart_type,
.release_port = mvebu_uart_release_port,
.request_port = mvebu_uart_request_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = mvebu_uart_get_poll_char,
.poll_put_char = mvebu_uart_put_poll_char,
#endif
};
/* Console Driver Operations */
#ifdef CONFIG_SERIAL_MVEBU_CONSOLE
/* Early Console */
static void mvebu_uart_putc(struct uart_port *port, unsigned char c)
{
unsigned int st;
for (;;) {
st = readl(port->membase + UART_STAT);
if (!(st & STAT_TX_FIFO_FUL))
break;
}
	/* At this early stage the DT is not parsed yet; only UART0 can be used */
writel(c, port->membase + UART_STD_TSH);
for (;;) {
st = readl(port->membase + UART_STAT);
if (st & STAT_TX_FIFO_EMP)
break;
}
}
static void mvebu_uart_putc_early_write(struct console *con,
const char *s,
unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, mvebu_uart_putc);
}
static int __init
mvebu_uart_early_console_setup(struct earlycon_device *device,
const char *opt)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = mvebu_uart_putc_early_write;
return 0;
}
EARLYCON_DECLARE(ar3700_uart, mvebu_uart_early_console_setup);
OF_EARLYCON_DECLARE(ar3700_uart, "marvell,armada-3700-uart",
mvebu_uart_early_console_setup);
static void wait_for_xmitr(struct uart_port *port)
{
u32 val;
readl_poll_timeout_atomic(port->membase + UART_STAT, val,
(val & STAT_TX_RDY(port)), 1, 10000);
}
static void wait_for_xmite(struct uart_port *port)
{
u32 val;
readl_poll_timeout_atomic(port->membase + UART_STAT, val,
(val & STAT_TX_EMP), 1, 10000);
}
static void mvebu_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
wait_for_xmitr(port);
writel(ch, port->membase + UART_TSH(port));
}
static void mvebu_uart_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port = &mvebu_uart_ports[co->index];
unsigned long flags;
unsigned int ier, intr, ctl;
int locked = 1;
if (oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
intr = readl(port->membase + UART_INTR(port)) &
(CTRL_RX_RDY_INT(port) | CTRL_TX_RDY_INT(port));
writel(0, port->membase + UART_CTRL(port));
writel(0, port->membase + UART_INTR(port));
uart_console_write(port, s, count, mvebu_uart_console_putchar);
wait_for_xmite(port);
if (ier)
writel(ier, port->membase + UART_CTRL(port));
if (intr) {
ctl = intr | readl(port->membase + UART_INTR(port));
writel(ctl, port->membase + UART_INTR(port));
}
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
}
static int mvebu_uart_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= MVEBU_NR_UARTS)
return -EINVAL;
port = &mvebu_uart_ports[co->index];
if (!port->mapbase || !port->membase) {
pr_debug("console on ttyMV%i not present\n", co->index);
return -ENODEV;
}
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver mvebu_uart_driver;
static struct console mvebu_uart_console = {
.name = "ttyMV",
.write = mvebu_uart_console_write,
.device = uart_console_device,
.setup = mvebu_uart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &mvebu_uart_driver,
};
static int __init mvebu_uart_console_init(void)
{
register_console(&mvebu_uart_console);
return 0;
}
console_initcall(mvebu_uart_console_init);
#endif /* CONFIG_SERIAL_MVEBU_CONSOLE */
static struct uart_driver mvebu_uart_driver = {
.owner = THIS_MODULE,
.driver_name = DRIVER_NAME,
.dev_name = "ttyMV",
.nr = MVEBU_NR_UARTS,
#ifdef CONFIG_SERIAL_MVEBU_CONSOLE
.cons = &mvebu_uart_console,
#endif
};
#if defined(CONFIG_PM)
static int mvebu_uart_suspend(struct device *dev)
{
struct mvebu_uart *mvuart = dev_get_drvdata(dev);
struct uart_port *port = mvuart->port;
unsigned long flags;
uart_suspend_port(&mvebu_uart_driver, port);
mvuart->pm_regs.rbr = readl(port->membase + UART_RBR(port));
mvuart->pm_regs.tsh = readl(port->membase + UART_TSH(port));
mvuart->pm_regs.ctrl = readl(port->membase + UART_CTRL(port));
mvuart->pm_regs.intr = readl(port->membase + UART_INTR(port));
mvuart->pm_regs.stat = readl(port->membase + UART_STAT);
spin_lock_irqsave(&mvebu_uart_lock, flags);
mvuart->pm_regs.brdv = readl(port->membase + UART_BRDV);
spin_unlock_irqrestore(&mvebu_uart_lock, flags);
mvuart->pm_regs.osamp = readl(port->membase + UART_OSAMP);
device_set_wakeup_enable(dev, true);
return 0;
}
static int mvebu_uart_resume(struct device *dev)
{
struct mvebu_uart *mvuart = dev_get_drvdata(dev);
struct uart_port *port = mvuart->port;
unsigned long flags;
writel(mvuart->pm_regs.rbr, port->membase + UART_RBR(port));
writel(mvuart->pm_regs.tsh, port->membase + UART_TSH(port));
writel(mvuart->pm_regs.ctrl, port->membase + UART_CTRL(port));
writel(mvuart->pm_regs.intr, port->membase + UART_INTR(port));
writel(mvuart->pm_regs.stat, port->membase + UART_STAT);
spin_lock_irqsave(&mvebu_uart_lock, flags);
writel(mvuart->pm_regs.brdv, port->membase + UART_BRDV);
spin_unlock_irqrestore(&mvebu_uart_lock, flags);
writel(mvuart->pm_regs.osamp, port->membase + UART_OSAMP);
uart_resume_port(&mvebu_uart_driver, port);
return 0;
}
static const struct dev_pm_ops mvebu_uart_pm_ops = {
.suspend = mvebu_uart_suspend,
.resume = mvebu_uart_resume,
};
#endif /* CONFIG_PM */
static const struct of_device_id mvebu_uart_of_match[];
/* Counter to keep track of each UART port id when not using CONFIG_OF */
static int uart_num_counter;
static int mvebu_uart_probe(struct platform_device *pdev)
{
const struct of_device_id *match = of_match_device(mvebu_uart_of_match,
&pdev->dev);
struct uart_port *port;
struct mvebu_uart *mvuart;
struct resource *reg;
int id, irq;
/* Assume that all UART ports have a DT alias or none has */
id = of_alias_get_id(pdev->dev.of_node, "serial");
if (!pdev->dev.of_node || id < 0)
pdev->id = uart_num_counter++;
else
pdev->id = id;
if (pdev->id >= MVEBU_NR_UARTS) {
dev_err(&pdev->dev, "cannot have more than %d UART ports\n",
MVEBU_NR_UARTS);
return -EINVAL;
}
port = &mvebu_uart_ports[pdev->id];
spin_lock_init(&port->lock);
port->dev = &pdev->dev;
port->type = PORT_MVEBU;
port->ops = &mvebu_uart_ops;
port->regshift = 0;
port->fifosize = 32;
port->iotype = UPIO_MEM32;
port->flags = UPF_FIXED_PORT;
port->line = pdev->id;
	/*
	 * The IRQ number is not stored in struct uart_port because there may
	 * be two of them per port (RX and TX). They live in the driver's own
	 * mvebu_uart structure instead, in the ->irq[] array.
	 */
port->irq = 0;
port->irqflags = 0;
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, ®);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
port->mapbase = reg->start;
mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
GFP_KERNEL);
if (!mvuart)
return -ENOMEM;
/* Get controller data depending on the compatible string */
mvuart->data = (struct mvebu_uart_driver_data *)match->data;
mvuart->port = port;
port->private_data = mvuart;
platform_set_drvdata(pdev, mvuart);
/* Get fixed clock frequency */
mvuart->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(mvuart->clk)) {
if (PTR_ERR(mvuart->clk) == -EPROBE_DEFER)
return PTR_ERR(mvuart->clk);
if (IS_EXTENDED(port)) {
dev_err(&pdev->dev, "unable to get UART clock\n");
return PTR_ERR(mvuart->clk);
}
} else {
if (!clk_prepare_enable(mvuart->clk))
port->uartclk = clk_get_rate(mvuart->clk);
}
/* Manage interrupts */
if (platform_irq_count(pdev) == 1) {
		/* Old bindings: a single unnamed IRQ (UART0 only) */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
mvuart->irq[UART_IRQ_SUM] = irq;
} else {
		/*
		 * New bindings: named interrupts (RX, TX) for both UARTs.
		 * Only the uart-rx and uart-tx interrupts are used; the
		 * uart-sum interrupt of the UART0 port is not.
		 */
irq = platform_get_irq_byname(pdev, "uart-rx");
if (irq < 0)
return irq;
mvuart->irq[UART_RX_IRQ] = irq;
irq = platform_get_irq_byname(pdev, "uart-tx");
if (irq < 0)
return irq;
mvuart->irq[UART_TX_IRQ] = irq;
}
	/* UART Soft Reset */
writel(CTRL_SOFT_RST, port->membase + UART_CTRL(port));
udelay(1);
writel(0, port->membase + UART_CTRL(port));
return uart_add_one_port(&mvebu_uart_driver, port);
}
static struct mvebu_uart_driver_data uart_std_driver_data = {
.is_ext = false,
.regs.rbr = UART_STD_RBR,
.regs.tsh = UART_STD_TSH,
.regs.ctrl = UART_STD_CTRL1,
.regs.intr = UART_STD_CTRL2,
.flags.ctrl_tx_rdy_int = CTRL_STD_TX_RDY_INT,
.flags.ctrl_rx_rdy_int = CTRL_STD_RX_RDY_INT,
.flags.stat_tx_rdy = STAT_STD_TX_RDY,
.flags.stat_rx_rdy = STAT_STD_RX_RDY,
};
static struct mvebu_uart_driver_data uart_ext_driver_data = {
.is_ext = true,
.regs.rbr = UART_EXT_RBR,
.regs.tsh = UART_EXT_TSH,
.regs.ctrl = UART_EXT_CTRL1,
.regs.intr = UART_EXT_CTRL2,
.flags.ctrl_tx_rdy_int = CTRL_EXT_TX_RDY_INT,
.flags.ctrl_rx_rdy_int = CTRL_EXT_RX_RDY_INT,
.flags.stat_tx_rdy = STAT_EXT_TX_RDY,
.flags.stat_rx_rdy = STAT_EXT_RX_RDY,
};
/* Match table for of_platform binding */
static const struct of_device_id mvebu_uart_of_match[] = {
{
.compatible = "marvell,armada-3700-uart",
.data = (void *)&uart_std_driver_data,
},
{
.compatible = "marvell,armada-3700-uart-ext",
.data = (void *)&uart_ext_driver_data,
},
{}
};
static struct platform_driver mvebu_uart_platform_driver = {
.probe = mvebu_uart_probe,
.driver = {
.name = "mvebu-uart",
.of_match_table = of_match_ptr(mvebu_uart_of_match),
.suppress_bind_attrs = true,
#if defined(CONFIG_PM)
.pm = &mvebu_uart_pm_ops,
#endif /* CONFIG_PM */
},
};
/* This code is based on the clk-fixed-factor.c driver, with modifications. */
struct mvebu_uart_clock {
struct clk_hw clk_hw;
int clock_idx;
u32 pm_context_reg1;
u32 pm_context_reg2;
};
struct mvebu_uart_clock_base {
struct mvebu_uart_clock clocks[2];
unsigned int parent_rates[5];
int parent_idx;
unsigned int div;
void __iomem *reg1;
void __iomem *reg2;
bool configured;
};
#define PARENT_CLOCK_XTAL 4
#define to_uart_clock(hw) container_of(hw, struct mvebu_uart_clock, clk_hw)
#define to_uart_clock_base(uart_clock) container_of(uart_clock, \
struct mvebu_uart_clock_base, clocks[uart_clock->clock_idx])
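/*
 * to_uart_clock_base() recovers the shared clock-base structure from either
 * of its two embedded clocks: clock_idx tells container_of() which array
 * slot the clk_hw lives in, so both UART clocks resolve to the same base.
 */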
static int mvebu_uart_clock_prepare(struct clk_hw *hw)
{
struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
struct mvebu_uart_clock_base *uart_clock_base =
to_uart_clock_base(uart_clock);
unsigned int prev_clock_idx, prev_clock_rate, prev_d1d2;
unsigned int parent_clock_idx, parent_clock_rate;
unsigned long flags;
unsigned int d1, d2;
u64 divisor;
u32 val;
	/*
	 * This function just reconfigures the UART Clock Control register
	 * (located in the UART1 address space, where it controls both UART1
	 * and UART2) to the selected UART base clock and recalculates the
	 * current UART1/UART2 divisors in their respective address spaces, so
	 * that the final baudrate is not changed by switching the UART parent
	 * clock. This is required, otherwise the kernel's boot log stops
	 * working: the UART baudrate must not change during this setup. It is
	 * a one-time operation: the first call sets `configured` to true and
	 * subsequent calls are skipped. Because this UART Clock Control
	 * register (UART_BRDV) is shared between the UART1 baudrate function,
	 * the UART1 clock selector and the UART2 clock selector, every access
	 * to UART_BRDV (reg1) needs to be protected by a lock.
	 */
spin_lock_irqsave(&mvebu_uart_lock, flags);
if (uart_clock_base->configured) {
spin_unlock_irqrestore(&mvebu_uart_lock, flags);
return 0;
}
parent_clock_idx = uart_clock_base->parent_idx;
parent_clock_rate = uart_clock_base->parent_rates[parent_clock_idx];
val = readl(uart_clock_base->reg1);
if (uart_clock_base->div > CLK_TBG_DIV1_MAX) {
d1 = CLK_TBG_DIV1_MAX;
d2 = uart_clock_base->div / CLK_TBG_DIV1_MAX;
} else {
d1 = uart_clock_base->div;
d2 = 1;
}
if (val & CLK_NO_XTAL) {
prev_clock_idx = (val >> CLK_TBG_SEL_SHIFT) & CLK_TBG_SEL_MASK;
prev_d1d2 = ((val >> CLK_TBG_DIV1_SHIFT) & CLK_TBG_DIV1_MASK) *
((val >> CLK_TBG_DIV2_SHIFT) & CLK_TBG_DIV2_MASK);
} else {
prev_clock_idx = PARENT_CLOCK_XTAL;
prev_d1d2 = 1;
}
/* Note that uart_clock_base->parent_rates[i] may not be available */
prev_clock_rate = uart_clock_base->parent_rates[prev_clock_idx];
/* Recalculate UART1 divisor so UART1 baudrate does not change */
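	/*
	 * The rescaling keeps the baudrate constant:
	 *   rate_prev / (d1d2_prev * D_prev) == rate_new / (d1 * d2 * D_new)
	 * hence
	 *   D_new = D_prev * rate_new * d1d2_prev / (rate_prev * d1 * d2)
	 * which is what DIV_U64_ROUND_CLOSEST() computes below.
	 */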
if (prev_clock_rate) {
divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
parent_clock_rate * prev_d1d2,
prev_clock_rate * d1 * d2);
if (divisor < 1)
divisor = 1;
else if (divisor > BRDV_BAUD_MAX)
divisor = BRDV_BAUD_MAX;
val = (val & ~BRDV_BAUD_MASK) | divisor;
}
if (parent_clock_idx != PARENT_CLOCK_XTAL) {
/* Do not use XTAL, select TBG clock and TBG d1 * d2 divisors */
val |= CLK_NO_XTAL;
val &= ~(CLK_TBG_DIV1_MASK << CLK_TBG_DIV1_SHIFT);
val |= d1 << CLK_TBG_DIV1_SHIFT;
val &= ~(CLK_TBG_DIV2_MASK << CLK_TBG_DIV2_SHIFT);
val |= d2 << CLK_TBG_DIV2_SHIFT;
val &= ~(CLK_TBG_SEL_MASK << CLK_TBG_SEL_SHIFT);
val |= parent_clock_idx << CLK_TBG_SEL_SHIFT;
} else {
/* Use XTAL, TBG bits are then ignored */
val &= ~CLK_NO_XTAL;
}
writel(val, uart_clock_base->reg1);
/* Recalculate UART2 divisor so UART2 baudrate does not change */
if (prev_clock_rate) {
val = readl(uart_clock_base->reg2);
divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
parent_clock_rate * prev_d1d2,
prev_clock_rate * d1 * d2);
if (divisor < 1)
divisor = 1;
else if (divisor > BRDV_BAUD_MAX)
divisor = BRDV_BAUD_MAX;
val = (val & ~BRDV_BAUD_MASK) | divisor;
writel(val, uart_clock_base->reg2);
}
uart_clock_base->configured = true;
spin_unlock_irqrestore(&mvebu_uart_lock, flags);
return 0;
}
static int mvebu_uart_clock_enable(struct clk_hw *hw)
{
struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
struct mvebu_uart_clock_base *uart_clock_base =
to_uart_clock_base(uart_clock);
unsigned long flags;
u32 val;
spin_lock_irqsave(&mvebu_uart_lock, flags);
val = readl(uart_clock_base->reg1);
if (uart_clock->clock_idx == 0)
val &= ~UART1_CLK_DIS;
else
val &= ~UART2_CLK_DIS;
writel(val, uart_clock_base->reg1);
spin_unlock_irqrestore(&mvebu_uart_lock, flags);
return 0;
}
static void mvebu_uart_clock_disable(struct clk_hw *hw)
{
struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
struct mvebu_uart_clock_base *uart_clock_base =
to_uart_clock_base(uart_clock);
unsigned long flags;
u32 val;
spin_lock_irqsave(&mvebu_uart_lock, flags);
val = readl(uart_clock_base->reg1);
if (uart_clock->clock_idx == 0)
val |= UART1_CLK_DIS;
else
val |= UART2_CLK_DIS;
writel(val, uart_clock_base->reg1);
spin_unlock_irqrestore(&mvebu_uart_lock, flags);
}
static int mvebu_uart_clock_is_enabled(struct clk_hw *hw)
{
struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
struct mvebu_uart_clock_base *uart_clock_base =
to_uart_clock_base(uart_clock);
u32 val;
val = readl(uart_clock_base->reg1);
if (uart_clock->clock_idx == 0)
return !(val & UART1_CLK_DIS);
else
return !(val & UART2_CLK_DIS);
}
static int mvebu_uart_clock_save_context(struct clk_hw *hw)
{
struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
struct mvebu_uart_clock_base *uart_clock_base =
to_uart_clock_base(uart_clock);
unsigned long flags;
spin_lock_irqsave(&mvebu_uart_lock, flags);
uart_clock->pm_context_reg1 = readl(uart_clock_base->reg1);
uart_clock->pm_context_reg2 = readl(uart_clock_base->reg2);
spin_unlock_irqrestore(&mvebu_uart_lock, flags);
return 0;
}
static void mvebu_uart_clock_restore_context(struct clk_hw *hw)
{
struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
struct mvebu_uart_clock_base *uart_clock_base =
to_uart_clock_base(uart_clock);
unsigned long flags;
spin_lock_irqsave(&mvebu_uart_lock, flags);
writel(uart_clock->pm_context_reg1, uart_clock_base->reg1);
writel(uart_clock->pm_context_reg2, uart_clock_base->reg2);
spin_unlock_irqrestore(&mvebu_uart_lock, flags);
}
static unsigned long mvebu_uart_clock_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
struct mvebu_uart_clock_base *uart_clock_base =
to_uart_clock_base(uart_clock);
return parent_rate / uart_clock_base->div;
}
static long mvebu_uart_clock_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
struct mvebu_uart_clock_base *uart_clock_base =
to_uart_clock_base(uart_clock);
return *parent_rate / uart_clock_base->div;
}
static int mvebu_uart_clock_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
/*
* We must report success but we can do so unconditionally because
* mvebu_uart_clock_round_rate returns values that ensure this call is a
* nop.
*/
return 0;
}
static const struct clk_ops mvebu_uart_clock_ops = {
.prepare = mvebu_uart_clock_prepare,
.enable = mvebu_uart_clock_enable,
.disable = mvebu_uart_clock_disable,
.is_enabled = mvebu_uart_clock_is_enabled,
.save_context = mvebu_uart_clock_save_context,
.restore_context = mvebu_uart_clock_restore_context,
.round_rate = mvebu_uart_clock_round_rate,
.set_rate = mvebu_uart_clock_set_rate,
.recalc_rate = mvebu_uart_clock_recalc_rate,
};
static int mvebu_uart_clock_register(struct device *dev,
struct mvebu_uart_clock *uart_clock,
const char *name,
const char *parent_name)
{
struct clk_init_data init = { };
uart_clock->clk_hw.init = &init;
init.name = name;
init.ops = &mvebu_uart_clock_ops;
init.flags = 0;
init.num_parents = 1;
init.parent_names = &parent_name;
return devm_clk_hw_register(dev, &uart_clock->clk_hw);
}
static int mvebu_uart_clock_probe(struct platform_device *pdev)
{
static const char *const uart_clk_names[] = { "uart_1", "uart_2" };
static const char *const parent_clk_names[] = { "TBG-A-P", "TBG-B-P",
"TBG-A-S", "TBG-B-S",
"xtal" };
struct clk *parent_clks[ARRAY_SIZE(parent_clk_names)];
struct mvebu_uart_clock_base *uart_clock_base;
struct clk_hw_onecell_data *hw_clk_data;
struct device *dev = &pdev->dev;
int i, parent_clk_idx, ret;
unsigned long div, rate;
struct resource *res;
unsigned int d1, d2;
BUILD_BUG_ON(ARRAY_SIZE(uart_clk_names) !=
ARRAY_SIZE(uart_clock_base->clocks));
BUILD_BUG_ON(ARRAY_SIZE(parent_clk_names) !=
ARRAY_SIZE(uart_clock_base->parent_rates));
uart_clock_base = devm_kzalloc(dev,
sizeof(*uart_clock_base),
GFP_KERNEL);
if (!uart_clock_base)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "Couldn't get first register\n");
return -ENOENT;
}
/*
* UART Clock Control register (reg1 / UART_BRDV) is in the address
* space of UART1 (standard UART variant), controls parent clock and
* dividers for both UART1 and UART2 and is supplied via DT as the first
* resource. Therefore use ioremap() rather than ioremap_resource() to
* avoid conflicts with UART1 driver. Access to UART_BRDV is protected
* by a lock shared between clock and UART driver.
*/
uart_clock_base->reg1 = devm_ioremap(dev, res->start,
resource_size(res));
if (!uart_clock_base->reg1)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
dev_err(dev, "Couldn't get second register\n");
return -ENOENT;
}
	/*
	 * The UART 2 Baud Rate Divisor register (reg2 / UART_BRDV) is in the
	 * address space of UART2 (extended UART variant), controls only one
	 * UART2-specific divider and is supplied via DT as the second
	 * resource. Therefore use ioremap() rather than ioremap_resource() to
	 * avoid conflicts with the UART2 driver. Access to UART_BRDV is
	 * protected by a lock shared between the clock and UART drivers.
	 */
uart_clock_base->reg2 = devm_ioremap(dev, res->start,
resource_size(res));
if (!uart_clock_base->reg2)
return -ENOMEM;
hw_clk_data = devm_kzalloc(dev,
struct_size(hw_clk_data, hws,
ARRAY_SIZE(uart_clk_names)),
GFP_KERNEL);
if (!hw_clk_data)
return -ENOMEM;
hw_clk_data->num = ARRAY_SIZE(uart_clk_names);
for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
hw_clk_data->hws[i] = &uart_clock_base->clocks[i].clk_hw;
uart_clock_base->clocks[i].clock_idx = i;
}
parent_clk_idx = -1;
for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
parent_clks[i] = devm_clk_get(dev, parent_clk_names[i]);
if (IS_ERR(parent_clks[i])) {
if (PTR_ERR(parent_clks[i]) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_warn(dev, "Couldn't get the parent clock %s: %ld\n",
parent_clk_names[i], PTR_ERR(parent_clks[i]));
continue;
}
ret = clk_prepare_enable(parent_clks[i]);
if (ret) {
dev_warn(dev, "Couldn't enable parent clock %s: %d\n",
parent_clk_names[i], ret);
continue;
}
rate = clk_get_rate(parent_clks[i]);
uart_clock_base->parent_rates[i] = rate;
if (i != PARENT_CLOCK_XTAL) {
/*
* Calculate the smallest TBG d1 and d2 divisors that
* still can provide 9600 baudrate.
*/
d1 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
BRDV_BAUD_MAX);
if (d1 < 1)
d1 = 1;
else if (d1 > CLK_TBG_DIV1_MAX)
d1 = CLK_TBG_DIV1_MAX;
d2 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
BRDV_BAUD_MAX * d1);
if (d2 < 1)
d2 = 1;
else if (d2 > CLK_TBG_DIV2_MAX)
d2 = CLK_TBG_DIV2_MAX;
} else {
/*
* When UART clock uses XTAL clock as a source then it
* is not possible to use d1 and d2 divisors.
*/
d1 = d2 = 1;
}
/* Skip clock source which cannot provide 9600 baudrate */
if (rate > 9600 * OSAMP_MAX_DIVISOR * BRDV_BAUD_MAX * d1 * d2)
continue;
		/*
		 * Choose the TBG clock source with the smallest divisors. Use
		 * the XTAL clock source only if no TBG is available, as XTAL
		 * cannot be used for baudrates higher than 230400.
		 */
if (parent_clk_idx == -1 ||
(i != PARENT_CLOCK_XTAL && div > d1 * d2)) {
parent_clk_idx = i;
div = d1 * d2;
}
}
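	/*
	 * Illustration (hypothetical 1 GHz TBG rate): d1 = DIV_ROUND_UP(10^9,
	 * 9600 * 63 * 1023) = 2 and d2 = 1, giving a 500 MHz divided clock,
	 * which the D and M divisors can then bring down to 9600 Bd.
	 */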
for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
if (i == parent_clk_idx || IS_ERR(parent_clks[i]))
continue;
clk_disable_unprepare(parent_clks[i]);
devm_clk_put(dev, parent_clks[i]);
}
if (parent_clk_idx == -1) {
dev_err(dev, "No usable parent clock\n");
return -ENOENT;
}
uart_clock_base->parent_idx = parent_clk_idx;
uart_clock_base->div = div;
dev_notice(dev, "Using parent clock %s as base UART clock\n",
__clk_get_name(parent_clks[parent_clk_idx]));
for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
ret = mvebu_uart_clock_register(dev,
&uart_clock_base->clocks[i],
uart_clk_names[i],
__clk_get_name(parent_clks[parent_clk_idx]));
if (ret) {
dev_err(dev, "Can't register UART clock %d: %d\n",
i, ret);
return ret;
}
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
hw_clk_data);
}
static const struct of_device_id mvebu_uart_clock_of_match[] = {
{ .compatible = "marvell,armada-3700-uart-clock", },
{ }
};
static struct platform_driver mvebu_uart_clock_platform_driver = {
.probe = mvebu_uart_clock_probe,
.driver = {
.name = "mvebu-uart-clock",
.of_match_table = mvebu_uart_clock_of_match,
},
};
static int __init mvebu_uart_init(void)
{
int ret;
ret = uart_register_driver(&mvebu_uart_driver);
if (ret)
return ret;
ret = platform_driver_register(&mvebu_uart_clock_platform_driver);
if (ret) {
uart_unregister_driver(&mvebu_uart_driver);
return ret;
}
ret = platform_driver_register(&mvebu_uart_platform_driver);
if (ret) {
platform_driver_unregister(&mvebu_uart_clock_platform_driver);
uart_unregister_driver(&mvebu_uart_driver);
return ret;
}
return 0;
}
arch_initcall(mvebu_uart_init);
| linux-master | drivers/tty/serial/mvebu-uart.c |
// SPDX-License-Identifier: GPL-2.0+
/*
*
* Copyright (C) 2008 Christian Pellegrin <[email protected]>
*
 * Notes: the MAX3100 doesn't provide an interrupt on CTS, so we have
 * to use polling for flow control. The TX empty IRQ is unusable, since
 * writing the configuration clears the FIFO buffer and we cannot have
 * this interrupt always asking us for attention.
*
* Example platform data:
static struct plat_max3100 max3100_plat_data = {
.loopback = 0,
.crystal = 0,
.poll_time = 100,
};
static struct spi_board_info spi_board_info[] = {
{
.modalias = "max3100",
.platform_data = &max3100_plat_data,
.irq = IRQ_EINT12,
.max_speed_hz = 5*1000*1000,
.chip_select = 0,
},
};
 * The initial minor number is 209, in the low-density serial port major:
 * mknod /dev/ttyMAX0 c 204 209
*/
#define MAX3100_MAJOR 204
#define MAX3100_MINOR 209
/* 4 MAX3100s should be enough for everyone */
#define MAX_MAX3100 4
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/spi/spi.h>
#include <linux/freezer.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_max3100.h>
#define MAX3100_C (1<<14)
#define MAX3100_D (0<<14)
#define MAX3100_W (1<<15)
#define MAX3100_RX (0<<15)
#define MAX3100_WC (MAX3100_W | MAX3100_C)
#define MAX3100_RC (MAX3100_RX | MAX3100_C)
#define MAX3100_WD (MAX3100_W | MAX3100_D)
#define MAX3100_RD (MAX3100_RX | MAX3100_D)
#define MAX3100_CMD (3 << 14)
#define MAX3100_T (1<<14)
#define MAX3100_R (1<<15)
#define MAX3100_FEN (1<<13)
#define MAX3100_SHDN (1<<12)
#define MAX3100_TM (1<<11)
#define MAX3100_RM (1<<10)
#define MAX3100_PM (1<<9)
#define MAX3100_RAM (1<<8)
#define MAX3100_IR (1<<7)
#define MAX3100_ST (1<<6)
#define MAX3100_PE (1<<5)
#define MAX3100_L (1<<4)
#define MAX3100_BAUD (0xf)
#define MAX3100_TE (1<<10)
#define MAX3100_RAFE (1<<10)
#define MAX3100_RTS (1<<9)
#define MAX3100_CTS (1<<9)
#define MAX3100_PT (1<<8)
#define MAX3100_DATA (0xff)
#define MAX3100_RT (MAX3100_R | MAX3100_T)
#define MAX3100_RTC (MAX3100_RT | MAX3100_CTS | MAX3100_RAFE)
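/*
 * As the macros above encode, bit 15 of a command word selects write (1)
 * versus read (0) and bit 14 selects the configuration (1) versus data (0)
 * register; the remaining bits carry configuration flags or the data byte.
 */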
/* the following simulate a status reg for ignore_status_mask */
#define MAX3100_STATUS_PE 1
#define MAX3100_STATUS_FE 2
#define MAX3100_STATUS_OE 4
struct max3100_port {
struct uart_port port;
struct spi_device *spi;
int cts; /* last CTS received for flow ctrl */
int tx_empty; /* last TX empty bit */
spinlock_t conf_lock; /* shared data */
int conf_commit; /* need to make changes */
	int conf; /* configuration for the MAX3100
* (bits 0-7, bits 8-11 are irqs) */
int rts_commit; /* need to change rts */
int rts; /* rts status */
int baud; /* current baud rate */
int parity; /* keeps track if we should send parity */
#define MAX3100_PARITY_ON 1
#define MAX3100_PARITY_ODD 2
#define MAX3100_7BIT 4
int rx_enabled; /* if we should rx chars */
int irq; /* irq assigned to the max3100 */
int minor; /* minor number */
	int crystal; /* 1 for a 3.6864 MHz crystal, 0 for 1.8432 MHz */
int loopback; /* 1 if we are in loopback mode */
/* for handling irqs: need workqueue since we do spi_sync */
struct workqueue_struct *workqueue;
struct work_struct work;
/* set to 1 to make the workhandler exit as soon as possible */
int force_end_work;
/* need to know we are suspending to avoid deadlock on workqueue */
int suspending;
/* hook for suspending MAX3100 via dedicated pin */
void (*max3100_hw_suspend) (int suspend);
/* poll time (in ms) for ctrl lines */
int poll_time;
/* and its timer */
struct timer_list timer;
};
static struct max3100_port *max3100s[MAX_MAX3100]; /* the chips */
static DEFINE_MUTEX(max3100s_lock); /* race on probe */
static int max3100_do_parity(struct max3100_port *s, u16 c)
{
int parity;
if (s->parity & MAX3100_PARITY_ODD)
parity = 1;
else
parity = 0;
if (s->parity & MAX3100_7BIT)
c &= 0x7f;
else
c &= 0xff;
parity = parity ^ (hweight8(c) & 1);
return parity;
}
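/*
 * Example: with odd parity configured and c = 0x41 (two bits set),
 * max3100_do_parity() returns 1 ^ 0 = 1, so the transmitted word carries
 * an odd total number of ones.
 */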
static int max3100_check_parity(struct max3100_port *s, u16 c)
{
return max3100_do_parity(s, c) == ((c >> 8) & 1);
}
static void max3100_calc_parity(struct max3100_port *s, u16 *c)
{
if (s->parity & MAX3100_7BIT)
*c &= 0x7f;
else
*c &= 0xff;
if (s->parity & MAX3100_PARITY_ON)
*c |= max3100_do_parity(s, *c) << 8;
}
static void max3100_work(struct work_struct *w);
static void max3100_dowork(struct max3100_port *s)
{
if (!s->force_end_work && !freezing(current) && !s->suspending)
queue_work(s->workqueue, &s->work);
}
static void max3100_timeout(struct timer_list *t)
{
struct max3100_port *s = from_timer(s, t, timer);
if (s->port.state) {
max3100_dowork(s);
mod_timer(&s->timer, jiffies + s->poll_time);
}
}
static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
{
struct spi_message message;
u16 etx, erx;
int status;
struct spi_transfer tran = {
.tx_buf = &etx,
.rx_buf = &erx,
.len = 2,
};
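	/*
	 * The MAX3100 expects the 16-bit word MSB first, so encode it
	 * big-endian and let the controller clock the buffer out byte by
	 * byte.
	 */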
etx = cpu_to_be16(tx);
spi_message_init(&message);
spi_message_add_tail(&tran, &message);
status = spi_sync(s->spi, &message);
if (status) {
dev_warn(&s->spi->dev, "error while calling spi_sync\n");
return -EIO;
}
*rx = be16_to_cpu(erx);
s->tx_empty = (*rx & MAX3100_T) > 0;
dev_dbg(&s->spi->dev, "%04x - %04x\n", tx, *rx);
return 0;
}
static int max3100_handlerx(struct max3100_port *s, u16 rx)
{
unsigned int status = 0;
int ret = 0, cts;
u8 ch, flg;
if (rx & MAX3100_R && s->rx_enabled) {
dev_dbg(&s->spi->dev, "%s\n", __func__);
ch = rx & (s->parity & MAX3100_7BIT ? 0x7f : 0xff);
if (rx & MAX3100_RAFE) {
s->port.icount.frame++;
flg = TTY_FRAME;
status |= MAX3100_STATUS_FE;
} else {
if (s->parity & MAX3100_PARITY_ON) {
if (max3100_check_parity(s, rx)) {
s->port.icount.rx++;
flg = TTY_NORMAL;
} else {
s->port.icount.parity++;
flg = TTY_PARITY;
status |= MAX3100_STATUS_PE;
}
} else {
s->port.icount.rx++;
flg = TTY_NORMAL;
}
}
uart_insert_char(&s->port, status, MAX3100_STATUS_OE, ch, flg);
ret = 1;
}
cts = (rx & MAX3100_CTS) > 0;
if (s->cts != cts) {
s->cts = cts;
uart_handle_cts_change(&s->port, cts);
}
return ret;
}
static void max3100_work(struct work_struct *w)
{
struct max3100_port *s = container_of(w, struct max3100_port, work);
int rxchars;
u16 tx, rx;
int conf, cconf, crts;
struct circ_buf *xmit = &s->port.state->xmit;
dev_dbg(&s->spi->dev, "%s\n", __func__);
rxchars = 0;
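	/*
	 * Each pass of the loop commits any pending configuration or RTS
	 * change, reads one word to drain RX and, while the chip reports the
	 * transmit buffer empty, sends the next pending character. Every SPI
	 * transaction also returns receive state, which is fed to
	 * max3100_handlerx().
	 */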
do {
spin_lock(&s->conf_lock);
conf = s->conf;
cconf = s->conf_commit;
s->conf_commit = 0;
crts = s->rts_commit;
s->rts_commit = 0;
spin_unlock(&s->conf_lock);
if (cconf)
max3100_sr(s, MAX3100_WC | conf, &rx);
if (crts) {
max3100_sr(s, MAX3100_WD | MAX3100_TE |
(s->rts ? MAX3100_RTS : 0), &rx);
rxchars += max3100_handlerx(s, rx);
}
max3100_sr(s, MAX3100_RD, &rx);
rxchars += max3100_handlerx(s, rx);
if (rx & MAX3100_T) {
tx = 0xffff;
if (s->port.x_char) {
tx = s->port.x_char;
s->port.icount.tx++;
s->port.x_char = 0;
} else if (!uart_circ_empty(xmit) &&
!uart_tx_stopped(&s->port)) {
tx = xmit->buf[xmit->tail];
uart_xmit_advance(&s->port, 1);
}
if (tx != 0xffff) {
max3100_calc_parity(s, &tx);
tx |= MAX3100_WD | (s->rts ? MAX3100_RTS : 0);
max3100_sr(s, tx, &rx);
rxchars += max3100_handlerx(s, rx);
}
}
if (rxchars > 16) {
tty_flip_buffer_push(&s->port.state->port);
rxchars = 0;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&s->port);
} while (!s->force_end_work &&
!freezing(current) &&
((rx & MAX3100_R) ||
(!uart_circ_empty(xmit) &&
!uart_tx_stopped(&s->port))));
if (rxchars > 0)
tty_flip_buffer_push(&s->port.state->port);
}
static irqreturn_t max3100_irq(int irqno, void *dev_id)
{
struct max3100_port *s = dev_id;
dev_dbg(&s->spi->dev, "%s\n", __func__);
max3100_dowork(s);
return IRQ_HANDLED;
}
static void max3100_enable_ms(struct uart_port *port)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
if (s->poll_time > 0)
mod_timer(&s->timer, jiffies);
dev_dbg(&s->spi->dev, "%s\n", __func__);
}
static void max3100_start_tx(struct uart_port *port)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
dev_dbg(&s->spi->dev, "%s\n", __func__);
max3100_dowork(s);
}
static void max3100_stop_rx(struct uart_port *port)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
dev_dbg(&s->spi->dev, "%s\n", __func__);
s->rx_enabled = 0;
spin_lock(&s->conf_lock);
s->conf &= ~MAX3100_RM;
s->conf_commit = 1;
spin_unlock(&s->conf_lock);
max3100_dowork(s);
}
static unsigned int max3100_tx_empty(struct uart_port *port)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
dev_dbg(&s->spi->dev, "%s\n", __func__);
/* may not be truly up-to-date */
max3100_dowork(s);
return s->tx_empty;
}
static unsigned int max3100_get_mctrl(struct uart_port *port)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
dev_dbg(&s->spi->dev, "%s\n", __func__);
/* may not be truly up-to-date */
max3100_dowork(s);
/* always assert DCD and DSR since these lines are not wired */
return (s->cts ? TIOCM_CTS : 0) | TIOCM_DSR | TIOCM_CAR;
}
static void max3100_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
int rts;
dev_dbg(&s->spi->dev, "%s\n", __func__);
rts = (mctrl & TIOCM_RTS) > 0;
spin_lock(&s->conf_lock);
if (s->rts != rts) {
s->rts = rts;
s->rts_commit = 1;
max3100_dowork(s);
}
spin_unlock(&s->conf_lock);
}
static void
max3100_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
int baud = 0;
unsigned cflag;
u32 param_new, param_mask, parity = 0;
dev_dbg(&s->spi->dev, "%s\n", __func__);
cflag = termios->c_cflag;
param_mask = 0;
baud = tty_termios_baud_rate(termios);
param_new = s->conf & MAX3100_BAUD;
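	/*
	 * The 4-bit BAUD field selects the clock divisor; adding s->crystal
	 * appears to pick the next-larger divisor so that the doubled
	 * 3.6864 MHz crystal still yields the requested rate.
	 */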
switch (baud) {
case 300:
if (s->crystal)
baud = s->baud;
else
param_new = 15;
break;
case 600:
param_new = 14 + s->crystal;
break;
case 1200:
param_new = 13 + s->crystal;
break;
case 2400:
param_new = 12 + s->crystal;
break;
case 4800:
param_new = 11 + s->crystal;
break;
case 9600:
param_new = 10 + s->crystal;
break;
case 19200:
param_new = 9 + s->crystal;
break;
case 38400:
param_new = 8 + s->crystal;
break;
case 57600:
param_new = 1 + s->crystal;
break;
case 115200:
param_new = 0 + s->crystal;
break;
case 230400:
if (s->crystal)
param_new = 0;
else
baud = s->baud;
break;
default:
baud = s->baud;
}
tty_termios_encode_baud_rate(termios, baud, baud);
s->baud = baud;
param_mask |= MAX3100_BAUD;
if ((cflag & CSIZE) == CS8) {
param_new &= ~MAX3100_L;
parity &= ~MAX3100_7BIT;
} else {
param_new |= MAX3100_L;
parity |= MAX3100_7BIT;
cflag = (cflag & ~CSIZE) | CS7;
}
param_mask |= MAX3100_L;
if (cflag & CSTOPB)
param_new |= MAX3100_ST;
else
param_new &= ~MAX3100_ST;
param_mask |= MAX3100_ST;
if (cflag & PARENB) {
param_new |= MAX3100_PE;
parity |= MAX3100_PARITY_ON;
} else {
param_new &= ~MAX3100_PE;
parity &= ~MAX3100_PARITY_ON;
}
param_mask |= MAX3100_PE;
if (cflag & PARODD)
parity |= MAX3100_PARITY_ODD;
else
parity &= ~MAX3100_PARITY_ODD;
/* mask termios capabilities we don't support */
cflag &= ~CMSPAR;
termios->c_cflag = cflag;
s->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
s->port.ignore_status_mask |=
MAX3100_STATUS_PE | MAX3100_STATUS_FE |
MAX3100_STATUS_OE;
if (s->poll_time > 0)
del_timer_sync(&s->timer);
uart_update_timeout(port, termios->c_cflag, baud);
spin_lock(&s->conf_lock);
s->conf = (s->conf & ~param_mask) | (param_new & param_mask);
s->conf_commit = 1;
s->parity = parity;
spin_unlock(&s->conf_lock);
max3100_dowork(s);
if (UART_ENABLE_MS(&s->port, termios->c_cflag))
max3100_enable_ms(&s->port);
}
static void max3100_shutdown(struct uart_port *port)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
dev_dbg(&s->spi->dev, "%s\n", __func__);
if (s->suspending)
return;
s->force_end_work = 1;
if (s->poll_time > 0)
del_timer_sync(&s->timer);
if (s->workqueue) {
destroy_workqueue(s->workqueue);
s->workqueue = NULL;
}
if (s->irq)
free_irq(s->irq, s);
/* set shutdown mode to save power */
if (s->max3100_hw_suspend)
s->max3100_hw_suspend(1);
else {
u16 tx, rx;
tx = MAX3100_WC | MAX3100_SHDN;
max3100_sr(s, tx, &rx);
}
}
static int max3100_startup(struct uart_port *port)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
char b[12];
dev_dbg(&s->spi->dev, "%s\n", __func__);
s->conf = MAX3100_RM;
s->baud = s->crystal ? 230400 : 115200;
s->rx_enabled = 1;
if (s->suspending)
return 0;
s->force_end_work = 0;
s->parity = 0;
s->rts = 0;
sprintf(b, "max3100-%d", s->minor);
s->workqueue = create_freezable_workqueue(b);
if (!s->workqueue) {
dev_warn(&s->spi->dev, "cannot create workqueue\n");
return -EBUSY;
}
INIT_WORK(&s->work, max3100_work);
if (request_irq(s->irq, max3100_irq,
IRQF_TRIGGER_FALLING, "max3100", s) < 0) {
dev_warn(&s->spi->dev, "cannot allocate irq %d\n", s->irq);
s->irq = 0;
destroy_workqueue(s->workqueue);
s->workqueue = NULL;
return -EBUSY;
}
if (s->loopback) {
u16 tx, rx;
tx = 0x4001;
max3100_sr(s, tx, &rx);
}
if (s->max3100_hw_suspend)
s->max3100_hw_suspend(0);
s->conf_commit = 1;
max3100_dowork(s);
/* wait for clock to settle */
msleep(50);
max3100_enable_ms(&s->port);
return 0;
}
static const char *max3100_type(struct uart_port *port)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
dev_dbg(&s->spi->dev, "%s\n", __func__);
return s->port.type == PORT_MAX3100 ? "MAX3100" : NULL;
}
static void max3100_release_port(struct uart_port *port)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
dev_dbg(&s->spi->dev, "%s\n", __func__);
}
static void max3100_config_port(struct uart_port *port, int flags)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
dev_dbg(&s->spi->dev, "%s\n", __func__);
if (flags & UART_CONFIG_TYPE)
s->port.type = PORT_MAX3100;
}
static int max3100_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
int ret = -EINVAL;
dev_dbg(&s->spi->dev, "%s\n", __func__);
if (ser->type == PORT_UNKNOWN || ser->type == PORT_MAX3100)
ret = 0;
return ret;
}
static void max3100_stop_tx(struct uart_port *port)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
dev_dbg(&s->spi->dev, "%s\n", __func__);
}
static int max3100_request_port(struct uart_port *port)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
dev_dbg(&s->spi->dev, "%s\n", __func__);
return 0;
}
static void max3100_break_ctl(struct uart_port *port, int break_state)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
port);
dev_dbg(&s->spi->dev, "%s\n", __func__);
}
static const struct uart_ops max3100_ops = {
.tx_empty = max3100_tx_empty,
.set_mctrl = max3100_set_mctrl,
.get_mctrl = max3100_get_mctrl,
.stop_tx = max3100_stop_tx,
.start_tx = max3100_start_tx,
.stop_rx = max3100_stop_rx,
.enable_ms = max3100_enable_ms,
.break_ctl = max3100_break_ctl,
.startup = max3100_startup,
.shutdown = max3100_shutdown,
.set_termios = max3100_set_termios,
.type = max3100_type,
.release_port = max3100_release_port,
.request_port = max3100_request_port,
.config_port = max3100_config_port,
.verify_port = max3100_verify_port,
};
static struct uart_driver max3100_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "ttyMAX",
.dev_name = "ttyMAX",
.major = MAX3100_MAJOR,
.minor = MAX3100_MINOR,
.nr = MAX_MAX3100,
};
static int uart_driver_registered;
static int max3100_probe(struct spi_device *spi)
{
int i, retval;
struct plat_max3100 *pdata;
u16 tx, rx;
mutex_lock(&max3100s_lock);
if (!uart_driver_registered) {
uart_driver_registered = 1;
retval = uart_register_driver(&max3100_uart_driver);
if (retval) {
printk(KERN_ERR "Couldn't register max3100 uart driver\n");
mutex_unlock(&max3100s_lock);
return retval;
}
}
for (i = 0; i < MAX_MAX3100; i++)
if (!max3100s[i])
break;
if (i == MAX_MAX3100) {
dev_warn(&spi->dev, "too many MAX3100 chips\n");
mutex_unlock(&max3100s_lock);
return -ENOMEM;
}
max3100s[i] = kzalloc(sizeof(struct max3100_port), GFP_KERNEL);
if (!max3100s[i]) {
dev_warn(&spi->dev,
"kmalloc for max3100 structure %d failed!\n", i);
mutex_unlock(&max3100s_lock);
return -ENOMEM;
}
max3100s[i]->spi = spi;
max3100s[i]->irq = spi->irq;
spin_lock_init(&max3100s[i]->conf_lock);
spi_set_drvdata(spi, max3100s[i]);
pdata = dev_get_platdata(&spi->dev);
max3100s[i]->crystal = pdata->crystal;
max3100s[i]->loopback = pdata->loopback;
max3100s[i]->poll_time = msecs_to_jiffies(pdata->poll_time);
if (pdata->poll_time > 0 && max3100s[i]->poll_time == 0)
max3100s[i]->poll_time = 1;
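	/*
	 * msecs_to_jiffies() rounds up, so any non-zero poll_time should
	 * already map to at least one jiffy; the clamp above is kept as a
	 * belt-and-braces guard so that a requested sub-jiffy poll
	 * interval can never silently disable polling.
	 */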
max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
max3100s[i]->minor = i;
timer_setup(&max3100s[i]->timer, max3100_timeout, 0);
dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i);
max3100s[i]->port.irq = max3100s[i]->irq;
max3100s[i]->port.uartclk = max3100s[i]->crystal ? 3686400 : 1843200;
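	/*
	 * The serial core assumes a 16x oversampling clock, so these
	 * uartclk values give maximum rates of 3686400 / 16 = 230400 baud
	 * with the 3.6864 MHz crystal and 1843200 / 16 = 115200 baud
	 * otherwise, matching the defaults picked in max3100_startup().
	 */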
max3100s[i]->port.fifosize = 16;
max3100s[i]->port.ops = &max3100_ops;
max3100s[i]->port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
max3100s[i]->port.line = i;
max3100s[i]->port.type = PORT_MAX3100;
max3100s[i]->port.dev = &spi->dev;
retval = uart_add_one_port(&max3100_uart_driver, &max3100s[i]->port);
if (retval < 0)
dev_warn(&spi->dev,
"uart_add_one_port failed for line %d with error %d\n",
i, retval);
	/* set shutdown mode to save power. Will be woken up on open */
if (max3100s[i]->max3100_hw_suspend)
max3100s[i]->max3100_hw_suspend(1);
else {
tx = MAX3100_WC | MAX3100_SHDN;
max3100_sr(max3100s[i], tx, &rx);
}
mutex_unlock(&max3100s_lock);
return 0;
}
static void max3100_remove(struct spi_device *spi)
{
struct max3100_port *s = spi_get_drvdata(spi);
int i;
mutex_lock(&max3100s_lock);
/* find out the index for the chip we are removing */
for (i = 0; i < MAX_MAX3100; i++)
if (max3100s[i] == s) {
dev_dbg(&spi->dev, "%s: removing port %d\n", __func__, i);
uart_remove_one_port(&max3100_uart_driver, &max3100s[i]->port);
kfree(max3100s[i]);
max3100s[i] = NULL;
break;
}
WARN_ON(i == MAX_MAX3100);
/* check if this is the last chip we have */
for (i = 0; i < MAX_MAX3100; i++)
if (max3100s[i]) {
mutex_unlock(&max3100s_lock);
return;
}
pr_debug("removing max3100 driver\n");
uart_unregister_driver(&max3100_uart_driver);
mutex_unlock(&max3100s_lock);
}
#ifdef CONFIG_PM_SLEEP
static int max3100_suspend(struct device *dev)
{
struct max3100_port *s = dev_get_drvdata(dev);
dev_dbg(&s->spi->dev, "%s\n", __func__);
disable_irq(s->irq);
s->suspending = 1;
uart_suspend_port(&max3100_uart_driver, &s->port);
if (s->max3100_hw_suspend)
s->max3100_hw_suspend(1);
else {
/* no HW suspend, so do SW one */
u16 tx, rx;
tx = MAX3100_WC | MAX3100_SHDN;
max3100_sr(s, tx, &rx);
}
return 0;
}
static int max3100_resume(struct device *dev)
{
struct max3100_port *s = dev_get_drvdata(dev);
dev_dbg(&s->spi->dev, "%s\n", __func__);
if (s->max3100_hw_suspend)
s->max3100_hw_suspend(0);
uart_resume_port(&max3100_uart_driver, &s->port);
s->suspending = 0;
enable_irq(s->irq);
s->conf_commit = 1;
if (s->workqueue)
max3100_dowork(s);
return 0;
}
static SIMPLE_DEV_PM_OPS(max3100_pm_ops, max3100_suspend, max3100_resume);
#define MAX3100_PM_OPS (&max3100_pm_ops)
#else
#define MAX3100_PM_OPS NULL
#endif
static struct spi_driver max3100_driver = {
.driver = {
.name = "max3100",
.pm = MAX3100_PM_OPS,
},
.probe = max3100_probe,
.remove = max3100_remove,
};
module_spi_driver(max3100_driver);
MODULE_DESCRIPTION("MAX3100 driver");
MODULE_AUTHOR("Christian Pellegrin <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:max3100");
| linux-master | drivers/tty/serial/max3100.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* st-asc.c: ST Asynchronous serial controller (ASC) driver
*
* Copyright (C) 2003-2013 STMicroelectronics (R&D) Limited
*/
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/serial_core.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#define DRIVER_NAME "st-asc"
#define ASC_SERIAL_NAME "ttyAS"
#define ASC_FIFO_SIZE 16
#define ASC_MAX_PORTS 8
/* Pinctrl states */
#define DEFAULT 0
#define NO_HW_FLOWCTRL 1
struct asc_port {
struct uart_port port;
struct gpio_desc *rts;
struct clk *clk;
struct pinctrl *pinctrl;
struct pinctrl_state *states[2];
unsigned int hw_flow_control:1;
unsigned int force_m1:1;
};
static struct asc_port asc_ports[ASC_MAX_PORTS];
static struct uart_driver asc_uart_driver;
/*---- UART Register definitions ------------------------------*/
/* Register offsets */
#define ASC_BAUDRATE 0x00
#define ASC_TXBUF 0x04
#define ASC_RXBUF 0x08
#define ASC_CTL 0x0C
#define ASC_INTEN 0x10
#define ASC_STA 0x14
#define ASC_GUARDTIME 0x18
#define ASC_TIMEOUT 0x1C
#define ASC_TXRESET 0x20
#define ASC_RXRESET 0x24
#define ASC_RETRIES 0x28
/* ASC_RXBUF */
#define ASC_RXBUF_PE 0x100
#define ASC_RXBUF_FE 0x200
/*
 * Some of the status bits come from the high bits of the received
 * character and some from the status register. Combine both into a
 * single status word using dummy bits.
 */
#define ASC_RXBUF_DUMMY_RX 0x10000
#define ASC_RXBUF_DUMMY_BE 0x20000
#define ASC_RXBUF_DUMMY_OE 0x40000
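/*
 * The dummy bits sit above the 10-bit RXBUF data so that the character,
 * its frame/parity flags and the synthesized break/overrun conditions
 * can all be folded into one word, letting read_status_mask and
 * ignore_status_mask operate uniformly on a single value.
 */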
/* ASC_CTL */
#define ASC_CTL_MODE_MSK 0x0007
#define ASC_CTL_MODE_8BIT 0x0001
#define ASC_CTL_MODE_7BIT_PAR 0x0003
#define ASC_CTL_MODE_9BIT 0x0004
#define ASC_CTL_MODE_8BIT_WKUP 0x0005
#define ASC_CTL_MODE_8BIT_PAR 0x0007
#define ASC_CTL_STOP_MSK 0x0018
#define ASC_CTL_STOP_HALFBIT 0x0000
#define ASC_CTL_STOP_1BIT 0x0008
#define ASC_CTL_STOP_1_HALFBIT 0x0010
#define ASC_CTL_STOP_2BIT 0x0018
#define ASC_CTL_PARITYODD 0x0020
#define ASC_CTL_LOOPBACK 0x0040
#define ASC_CTL_RUN 0x0080
#define ASC_CTL_RXENABLE 0x0100
#define ASC_CTL_SCENABLE 0x0200
#define ASC_CTL_FIFOENABLE 0x0400
#define ASC_CTL_CTSENABLE 0x0800
#define ASC_CTL_BAUDMODE 0x1000
/* ASC_GUARDTIME */
#define ASC_GUARDTIME_MSK 0x00FF
/* ASC_INTEN */
#define ASC_INTEN_RBE 0x0001
#define ASC_INTEN_TE 0x0002
#define ASC_INTEN_THE 0x0004
#define ASC_INTEN_PE 0x0008
#define ASC_INTEN_FE 0x0010
#define ASC_INTEN_OE 0x0020
#define ASC_INTEN_TNE 0x0040
#define ASC_INTEN_TOI 0x0080
#define ASC_INTEN_RHF 0x0100
/* ASC_RETRIES */
#define ASC_RETRIES_MSK 0x00FF
/* ASC_RXBUF */
#define ASC_RXBUF_MSK 0x03FF
/* ASC_STA */
#define ASC_STA_RBF 0x0001
#define ASC_STA_TE 0x0002
#define ASC_STA_THE 0x0004
#define ASC_STA_PE 0x0008
#define ASC_STA_FE 0x0010
#define ASC_STA_OE 0x0020
#define ASC_STA_TNE 0x0040
#define ASC_STA_TOI 0x0080
#define ASC_STA_RHF 0x0100
#define ASC_STA_TF 0x0200
#define ASC_STA_NKD 0x0400
/* ASC_TIMEOUT */
#define ASC_TIMEOUT_MSK 0x00FF
/* ASC_TXBUF */
#define ASC_TXBUF_MSK 0x01FF
/*---- Inline function definitions ---------------------------*/
static inline struct asc_port *to_asc_port(struct uart_port *port)
{
return container_of(port, struct asc_port, port);
}
static inline u32 asc_in(struct uart_port *port, u32 offset)
{
#ifdef readl_relaxed
return readl_relaxed(port->membase + offset);
#else
return readl(port->membase + offset);
#endif
}
static inline void asc_out(struct uart_port *port, u32 offset, u32 value)
{
#ifdef writel_relaxed
writel_relaxed(value, port->membase + offset);
#else
writel(value, port->membase + offset);
#endif
}
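/*
 * The _relaxed MMIO accessors skip the memory barriers implied by plain
 * readl()/writel(). That should be safe here because these helpers only
 * touch the ASC's own registers and never need ordering against DMA
 * buffers; the #ifdef keeps the driver building on architectures that do
 * not provide the relaxed variants.
 */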
/*
* Some simple utility functions to enable and disable interrupts.
* Note that these need to be called with interrupts disabled.
*/
static inline void asc_disable_tx_interrupts(struct uart_port *port)
{
u32 intenable = asc_in(port, ASC_INTEN) & ~ASC_INTEN_THE;
asc_out(port, ASC_INTEN, intenable);
(void)asc_in(port, ASC_INTEN); /* Defeat bus write posting */
}
static inline void asc_enable_tx_interrupts(struct uart_port *port)
{
u32 intenable = asc_in(port, ASC_INTEN) | ASC_INTEN_THE;
asc_out(port, ASC_INTEN, intenable);
}
static inline void asc_disable_rx_interrupts(struct uart_port *port)
{
u32 intenable = asc_in(port, ASC_INTEN) & ~ASC_INTEN_RBE;
asc_out(port, ASC_INTEN, intenable);
(void)asc_in(port, ASC_INTEN); /* Defeat bus write posting */
}
static inline void asc_enable_rx_interrupts(struct uart_port *port)
{
u32 intenable = asc_in(port, ASC_INTEN) | ASC_INTEN_RBE;
asc_out(port, ASC_INTEN, intenable);
}
static inline u32 asc_txfifo_is_empty(struct uart_port *port)
{
return asc_in(port, ASC_STA) & ASC_STA_TE;
}
static inline u32 asc_txfifo_is_half_empty(struct uart_port *port)
{
return asc_in(port, ASC_STA) & ASC_STA_THE;
}
static inline const char *asc_port_name(struct uart_port *port)
{
return to_platform_device(port->dev)->name;
}
/*----------------------------------------------------------------------*/
/*
* This section contains code to support the use of the ASC as a
* generic serial port.
*/
static inline unsigned asc_hw_txroom(struct uart_port *port)
{
u32 status = asc_in(port, ASC_STA);
if (status & ASC_STA_THE)
return port->fifosize / 2;
else if (!(status & ASC_STA_TF))
return 1;
return 0;
}
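/*
 * With ASC_FIFO_SIZE = 16, a set "TX half empty" flag thus guarantees at
 * least 8 free slots; when only "TX full" is clear we can promise no
 * more than a single slot.
 */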
/*
* Start transmitting chars.
* This is called from both interrupt and task level.
* Either way interrupts are disabled.
*/
static void asc_transmit_chars(struct uart_port *port)
{
u8 ch;
uart_port_tx_limited(port, ch, asc_hw_txroom(port),
true,
asc_out(port, ASC_TXBUF, ch),
({}));
}
static void asc_receive_chars(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
unsigned long status, mode;
unsigned long c = 0;
u8 flag;
bool ignore_pe = false;
/*
* Datasheet states: If the MODE field selects an 8-bit frame then
* this [parity error] bit is undefined. Software should ignore this
* bit when reading 8-bit frames.
*/
mode = asc_in(port, ASC_CTL) & ASC_CTL_MODE_MSK;
if (mode == ASC_CTL_MODE_8BIT || mode == ASC_CTL_MODE_8BIT_PAR)
ignore_pe = true;
if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
pm_wakeup_event(tport->tty->dev, 0);
while ((status = asc_in(port, ASC_STA)) & ASC_STA_RBF) {
c = asc_in(port, ASC_RXBUF) | ASC_RXBUF_DUMMY_RX;
flag = TTY_NORMAL;
port->icount.rx++;
if (status & ASC_STA_OE || c & ASC_RXBUF_FE ||
(c & ASC_RXBUF_PE && !ignore_pe)) {
if (c & ASC_RXBUF_FE) {
if (c == (ASC_RXBUF_FE | ASC_RXBUF_DUMMY_RX)) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
c |= ASC_RXBUF_DUMMY_BE;
} else {
port->icount.frame++;
}
} else if (c & ASC_RXBUF_PE) {
port->icount.parity++;
}
/*
* Reading any data from the RX FIFO clears the
* overflow error condition.
*/
if (status & ASC_STA_OE) {
port->icount.overrun++;
c |= ASC_RXBUF_DUMMY_OE;
}
c &= port->read_status_mask;
if (c & ASC_RXBUF_DUMMY_BE)
flag = TTY_BREAK;
else if (c & ASC_RXBUF_PE)
flag = TTY_PARITY;
else if (c & ASC_RXBUF_FE)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, c & 0xff))
continue;
uart_insert_char(port, c, ASC_RXBUF_DUMMY_OE, c & 0xff, flag);
}
/* Tell the rest of the system the news. New characters! */
tty_flip_buffer_push(tport);
}
static irqreturn_t asc_interrupt(int irq, void *ptr)
{
struct uart_port *port = ptr;
u32 status;
spin_lock(&port->lock);
status = asc_in(port, ASC_STA);
if (status & ASC_STA_RBF) {
/* Receive FIFO not empty */
asc_receive_chars(port);
}
if ((status & ASC_STA_THE) &&
(asc_in(port, ASC_INTEN) & ASC_INTEN_THE)) {
/* Transmitter FIFO at least half empty */
asc_transmit_chars(port);
}
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/
/*
* UART Functions
*/
static unsigned int asc_tx_empty(struct uart_port *port)
{
return asc_txfifo_is_empty(port) ? TIOCSER_TEMT : 0;
}
static void asc_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct asc_port *ascport = to_asc_port(port);
/*
	 * This routine is used for setting signals of: DTR, DCD, CTS and RTS.
* We use ASC's hardware for CTS/RTS when hardware flow-control is
* enabled, however if the RTS line is required for another purpose,
* commonly controlled using HUP from userspace, then we need to toggle
* it manually, using GPIO.
*
* Some boards also have DTR and DCD implemented using PIO pins, code to
* do this should be hooked in here.
*/
if (!ascport->rts)
return;
/* If HW flow-control is enabled, we can't fiddle with the RTS line */
if (asc_in(port, ASC_CTL) & ASC_CTL_CTSENABLE)
return;
gpiod_set_value(ascport->rts, mctrl & TIOCM_RTS);
}
static unsigned int asc_get_mctrl(struct uart_port *port)
{
/*
	 * This routine is used for getting signals of: DTR, DCD, DSR, RI,
* and CTS/RTS
*/
return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}
/* There are probably characters waiting to be transmitted. */
static void asc_start_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
if (!uart_circ_empty(xmit))
asc_enable_tx_interrupts(port);
}
/* Transmit stop */
static void asc_stop_tx(struct uart_port *port)
{
asc_disable_tx_interrupts(port);
}
/* Receive stop */
static void asc_stop_rx(struct uart_port *port)
{
asc_disable_rx_interrupts(port);
}
/* Handle breaks - ignored by us */
static void asc_break_ctl(struct uart_port *port, int break_state)
{
/* Nothing here yet .. */
}
/*
* Enable port for reception.
*/
static int asc_startup(struct uart_port *port)
{
if (request_irq(port->irq, asc_interrupt, 0,
asc_port_name(port), port)) {
dev_err(port->dev, "cannot allocate irq.\n");
return -ENODEV;
}
asc_transmit_chars(port);
asc_enable_rx_interrupts(port);
return 0;
}
static void asc_shutdown(struct uart_port *port)
{
asc_disable_tx_interrupts(port);
asc_disable_rx_interrupts(port);
free_irq(port->irq, port);
}
static void asc_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct asc_port *ascport = to_asc_port(port);
unsigned long flags;
u32 ctl;
switch (state) {
case UART_PM_STATE_ON:
clk_prepare_enable(ascport->clk);
break;
case UART_PM_STATE_OFF:
/*
* Disable the ASC baud rate generator, which is as close as
* we can come to turning it off. Note this is not called with
* the port spinlock held.
*/
spin_lock_irqsave(&port->lock, flags);
ctl = asc_in(port, ASC_CTL) & ~ASC_CTL_RUN;
asc_out(port, ASC_CTL, ctl);
spin_unlock_irqrestore(&port->lock, flags);
clk_disable_unprepare(ascport->clk);
break;
}
}
static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
struct asc_port *ascport = to_asc_port(port);
struct gpio_desc *gpiod;
unsigned int baud;
u32 ctrl_val;
tcflag_t cflag;
unsigned long flags;
/* Update termios to reflect hardware capabilities */
termios->c_cflag &= ~(CMSPAR |
(ascport->hw_flow_control ? 0 : CRTSCTS));
port->uartclk = clk_get_rate(ascport->clk);
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
cflag = termios->c_cflag;
spin_lock_irqsave(&port->lock, flags);
/* read control register */
ctrl_val = asc_in(port, ASC_CTL);
/* stop serial port and reset value */
asc_out(port, ASC_CTL, (ctrl_val & ~ASC_CTL_RUN));
ctrl_val = ASC_CTL_RXENABLE | ASC_CTL_FIFOENABLE;
/* reset fifo rx & tx */
asc_out(port, ASC_TXRESET, 1);
asc_out(port, ASC_RXRESET, 1);
/* set character length */
if ((cflag & CSIZE) == CS7) {
ctrl_val |= ASC_CTL_MODE_7BIT_PAR;
cflag |= PARENB;
} else {
ctrl_val |= (cflag & PARENB) ? ASC_CTL_MODE_8BIT_PAR :
ASC_CTL_MODE_8BIT;
cflag &= ~CSIZE;
cflag |= CS8;
}
termios->c_cflag = cflag;
/* set stop bit */
ctrl_val |= (cflag & CSTOPB) ? ASC_CTL_STOP_2BIT : ASC_CTL_STOP_1BIT;
/* odd parity */
if (cflag & PARODD)
ctrl_val |= ASC_CTL_PARITYODD;
/* hardware flow control */
if ((cflag & CRTSCTS)) {
ctrl_val |= ASC_CTL_CTSENABLE;
/* If flow-control selected, stop handling RTS manually */
if (ascport->rts) {
devm_gpiod_put(port->dev, ascport->rts);
ascport->rts = NULL;
pinctrl_select_state(ascport->pinctrl,
ascport->states[DEFAULT]);
}
} else {
/* If flow-control disabled, it's safe to handle RTS manually */
if (!ascport->rts && ascport->states[NO_HW_FLOWCTRL]) {
pinctrl_select_state(ascport->pinctrl,
ascport->states[NO_HW_FLOWCTRL]);
gpiod = devm_gpiod_get(port->dev, "rts", GPIOD_OUT_LOW);
if (!IS_ERR(gpiod)) {
gpiod_set_consumer_name(gpiod,
port->dev->of_node->name);
ascport->rts = gpiod;
}
}
}
if ((baud < 19200) && !ascport->force_m1) {
asc_out(port, ASC_BAUDRATE, (port->uartclk / (16 * baud)));
} else {
/*
* MODE 1: recommended for high bit rates (above 19.2K)
*
* baudrate * 16 * 2^16
* ASCBaudRate = ------------------------
* inputclock
*
		 * To keep the maths inside 64 bits, we divide the input clock by 16.
*/
u64 dividend = (u64)baud * (1 << 16);
do_div(dividend, port->uartclk / 16);
asc_out(port, ASC_BAUDRATE, dividend);
ctrl_val |= ASC_CTL_BAUDMODE;
}
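	/*
	 * Worked example, assuming a hypothetical 200 MHz input clock:
	 *   MODE 0,   9600 baud: 200000000 / (16 * 9600) = 1302
	 *   MODE 1, 115200 baud: (115200 * 2^16) / (200000000 / 16)
	 *                      = 7549747200 / 12500000 = 603
	 * which matches baudrate * 16 * 2^16 / inputclock to within the
	 * truncation of do_div().
	 */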
uart_update_timeout(port, cflag, baud);
ascport->port.read_status_mask = ASC_RXBUF_DUMMY_OE;
if (termios->c_iflag & INPCK)
ascport->port.read_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
ascport->port.read_status_mask |= ASC_RXBUF_DUMMY_BE;
/*
* Characters to ignore
*/
ascport->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
ascport->port.ignore_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE;
if (termios->c_iflag & IGNBRK) {
ascport->port.ignore_status_mask |= ASC_RXBUF_DUMMY_BE;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
ascport->port.ignore_status_mask |= ASC_RXBUF_DUMMY_OE;
}
/*
* Ignore all characters if CREAD is not set.
*/
if (!(termios->c_cflag & CREAD))
ascport->port.ignore_status_mask |= ASC_RXBUF_DUMMY_RX;
/* Set the timeout */
asc_out(port, ASC_TIMEOUT, 20);
/* write final value and enable port */
asc_out(port, ASC_CTL, (ctrl_val | ASC_CTL_RUN));
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *asc_type(struct uart_port *port)
{
return (port->type == PORT_ASC) ? DRIVER_NAME : NULL;
}
static void asc_release_port(struct uart_port *port)
{
}
static int asc_request_port(struct uart_port *port)
{
return 0;
}
/*
* Called when the port is opened, and UPF_BOOT_AUTOCONF flag is set
* Set type field if successful
*/
static void asc_config_port(struct uart_port *port, int flags)
{
if ((flags & UART_CONFIG_TYPE))
port->type = PORT_ASC;
}
static int
asc_verify_port(struct uart_port *port, struct serial_struct *ser)
{
/* No user changeable parameters */
return -EINVAL;
}
#ifdef CONFIG_CONSOLE_POLL
/*
* Console polling routines for writing and reading from the uart while
* in an interrupt or debug context (i.e. kgdb).
*/
static int asc_get_poll_char(struct uart_port *port)
{
if (!(asc_in(port, ASC_STA) & ASC_STA_RBF))
return NO_POLL_CHAR;
return asc_in(port, ASC_RXBUF);
}
static void asc_put_poll_char(struct uart_port *port, unsigned char c)
{
while (!asc_txfifo_is_half_empty(port))
cpu_relax();
asc_out(port, ASC_TXBUF, c);
}
#endif /* CONFIG_CONSOLE_POLL */
/*---------------------------------------------------------------------*/
static const struct uart_ops asc_uart_ops = {
.tx_empty = asc_tx_empty,
.set_mctrl = asc_set_mctrl,
.get_mctrl = asc_get_mctrl,
.start_tx = asc_start_tx,
.stop_tx = asc_stop_tx,
.stop_rx = asc_stop_rx,
.break_ctl = asc_break_ctl,
.startup = asc_startup,
.shutdown = asc_shutdown,
.set_termios = asc_set_termios,
.type = asc_type,
.release_port = asc_release_port,
.request_port = asc_request_port,
.config_port = asc_config_port,
.verify_port = asc_verify_port,
.pm = asc_pm,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = asc_get_poll_char,
.poll_put_char = asc_put_poll_char,
#endif /* CONFIG_CONSOLE_POLL */
};
static int asc_init_port(struct asc_port *ascport,
struct platform_device *pdev)
{
struct uart_port *port = &ascport->port;
struct resource *res;
int ret;
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &asc_uart_ops;
port->fifosize = ASC_FIFO_SIZE;
port->dev = &pdev->dev;
port->irq = platform_get_irq(pdev, 0);
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ST_ASC_CONSOLE);
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
port->mapbase = res->start;
spin_lock_init(&port->lock);
ascport->clk = devm_clk_get(&pdev->dev, NULL);
if (WARN_ON(IS_ERR(ascport->clk)))
return -EINVAL;
/* ensure that clk rate is correct by enabling the clk */
ret = clk_prepare_enable(ascport->clk);
if (ret)
return ret;
ascport->port.uartclk = clk_get_rate(ascport->clk);
WARN_ON(ascport->port.uartclk == 0);
clk_disable_unprepare(ascport->clk);
ascport->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(ascport->pinctrl)) {
ret = PTR_ERR(ascport->pinctrl);
dev_err(&pdev->dev, "Failed to get Pinctrl: %d\n", ret);
return ret;
}
ascport->states[DEFAULT] =
pinctrl_lookup_state(ascport->pinctrl, "default");
if (IS_ERR(ascport->states[DEFAULT])) {
ret = PTR_ERR(ascport->states[DEFAULT]);
dev_err(&pdev->dev,
"Failed to look up Pinctrl state 'default': %d\n", ret);
return ret;
}
/* "no-hw-flowctrl" state is optional */
ascport->states[NO_HW_FLOWCTRL] =
pinctrl_lookup_state(ascport->pinctrl, "no-hw-flowctrl");
if (IS_ERR(ascport->states[NO_HW_FLOWCTRL]))
ascport->states[NO_HW_FLOWCTRL] = NULL;
return 0;
}
static struct asc_port *asc_of_get_asc_port(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
int id;
if (!np)
return NULL;
id = of_alias_get_id(np, "serial");
if (id < 0)
id = of_alias_get_id(np, ASC_SERIAL_NAME);
if (id < 0)
id = 0;
if (WARN_ON(id >= ASC_MAX_PORTS))
return NULL;
asc_ports[id].hw_flow_control = of_property_read_bool(np,
"uart-has-rtscts");
asc_ports[id].force_m1 = of_property_read_bool(np, "st,force-m1");
asc_ports[id].port.line = id;
asc_ports[id].rts = NULL;
return &asc_ports[id];
}
#ifdef CONFIG_OF
static const struct of_device_id asc_match[] = {
{ .compatible = "st,asc", },
{},
};
MODULE_DEVICE_TABLE(of, asc_match);
#endif
static int asc_serial_probe(struct platform_device *pdev)
{
int ret;
struct asc_port *ascport;
ascport = asc_of_get_asc_port(pdev);
if (!ascport)
return -ENODEV;
ret = asc_init_port(ascport, pdev);
if (ret)
return ret;
ret = uart_add_one_port(&asc_uart_driver, &ascport->port);
if (ret)
return ret;
platform_set_drvdata(pdev, &ascport->port);
return 0;
}
static int asc_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
uart_remove_one_port(&asc_uart_driver, port);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int asc_serial_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
return uart_suspend_port(&asc_uart_driver, port);
}
static int asc_serial_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
return uart_resume_port(&asc_uart_driver, port);
}
#endif /* CONFIG_PM_SLEEP */
/*----------------------------------------------------------------------*/
#ifdef CONFIG_SERIAL_ST_ASC_CONSOLE
static void asc_console_putchar(struct uart_port *port, unsigned char ch)
{
unsigned int timeout = 1000000;
	/* Wait for up to 1 second in case flow control is stopping us. */
while (--timeout && !asc_txfifo_is_half_empty(port))
udelay(1);
asc_out(port, ASC_TXBUF, ch);
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*/
static void asc_console_write(struct console *co, const char *s, unsigned count)
{
struct uart_port *port = &asc_ports[co->index].port;
unsigned long flags;
unsigned long timeout = 1000000;
int locked = 1;
u32 intenable;
if (port->sysrq)
locked = 0; /* asc_interrupt has already claimed the lock */
else if (oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
/*
* Disable interrupts so we don't get the IRQ line bouncing
* up and down while interrupts are disabled.
*/
intenable = asc_in(port, ASC_INTEN);
asc_out(port, ASC_INTEN, 0);
(void)asc_in(port, ASC_INTEN); /* Defeat bus write posting */
uart_console_write(port, s, count, asc_console_putchar);
while (--timeout && !asc_txfifo_is_empty(port))
udelay(1);
asc_out(port, ASC_INTEN, intenable);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
}
static int asc_console_setup(struct console *co, char *options)
{
struct asc_port *ascport;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index >= ASC_MAX_PORTS)
return -ENODEV;
ascport = &asc_ports[co->index];
	/*
	 * This driver does not support early console initialization
	 * (use ARM early printk support instead), so we only expect
	 * this to be called during uart port registration, when the
	 * driver has been probed and the port is already mapped.
	 */
if (ascport->port.mapbase == 0 || ascport->port.membase == NULL)
return -ENXIO;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&ascport->port, co, baud, parity, bits, flow);
}
static struct console asc_console = {
.name = ASC_SERIAL_NAME,
.device = uart_console_device,
.write = asc_console_write,
.setup = asc_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &asc_uart_driver,
};
#define ASC_SERIAL_CONSOLE (&asc_console)
#else
#define ASC_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_ST_ASC_CONSOLE */
static struct uart_driver asc_uart_driver = {
.owner = THIS_MODULE,
.driver_name = DRIVER_NAME,
.dev_name = ASC_SERIAL_NAME,
.major = 0,
.minor = 0,
.nr = ASC_MAX_PORTS,
.cons = ASC_SERIAL_CONSOLE,
};
static const struct dev_pm_ops asc_serial_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(asc_serial_suspend, asc_serial_resume)
};
static struct platform_driver asc_serial_driver = {
.probe = asc_serial_probe,
.remove = asc_serial_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &asc_serial_pm_ops,
.of_match_table = of_match_ptr(asc_match),
},
};
static int __init asc_init(void)
{
int ret;
static const char banner[] __initconst =
KERN_INFO "STMicroelectronics ASC driver initialized\n";
printk(banner);
ret = uart_register_driver(&asc_uart_driver);
if (ret)
return ret;
ret = platform_driver_register(&asc_serial_driver);
if (ret)
uart_unregister_driver(&asc_uart_driver);
return ret;
}
static void __exit asc_exit(void)
{
platform_driver_unregister(&asc_serial_driver);
uart_unregister_driver(&asc_uart_driver);
}
module_init(asc_init);
module_exit(asc_exit);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("STMicroelectronics (R&D) Limited");
MODULE_DESCRIPTION("STMicroelectronics ASC serial port driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/st-asc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Alexey Charkov <[email protected]>
*
* Based on msm_serial.c, which is:
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <[email protected]>
*/
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/err.h>
/*
* UART Register offsets
*/
#define VT8500_URTDR 0x0000 /* Transmit data */
#define VT8500_URRDR 0x0004 /* Receive data */
#define VT8500_URDIV 0x0008 /* Clock/Baud rate divisor */
#define VT8500_URLCR 0x000C /* Line control */
#define VT8500_URICR 0x0010 /* IrDA control */
#define VT8500_URIER 0x0014 /* Interrupt enable */
#define VT8500_URISR 0x0018 /* Interrupt status */
#define VT8500_URUSR 0x001c /* UART status */
#define VT8500_URFCR 0x0020 /* FIFO control */
#define VT8500_URFIDX 0x0024 /* FIFO index */
#define VT8500_URBKR 0x0028 /* Break signal count */
#define VT8500_URTOD 0x002c /* Time out divisor */
#define VT8500_TXFIFO 0x1000 /* Transmit FIFO (16x8) */
#define VT8500_RXFIFO 0x1020 /* Receive FIFO (16x10) */
/*
* Interrupt enable and status bits
*/
#define TXDE (1 << 0) /* Tx Data empty */
#define RXDF (1 << 1) /* Rx Data full */
#define TXFAE (1 << 2) /* Tx FIFO almost empty */
#define TXFE (1 << 3) /* Tx FIFO empty */
#define RXFAF (1 << 4) /* Rx FIFO almost full */
#define RXFF (1 << 5) /* Rx FIFO full */
#define TXUDR (1 << 6) /* Tx underrun */
#define RXOVER (1 << 7) /* Rx overrun */
#define PER (1 << 8) /* Parity error */
#define FER (1 << 9) /* Frame error */
#define TCTS (1 << 10) /* Toggle of CTS */
#define RXTOUT (1 << 11) /* Rx timeout */
#define BKDONE (1 << 12) /* Break signal done */
#define ERR (1 << 13) /* AHB error response */
#define RX_FIFO_INTS (RXFAF | RXFF | RXOVER | PER | FER | RXTOUT)
#define TX_FIFO_INTS (TXFAE | TXFE | TXUDR)
/*
* Line control bits
*/
#define VT8500_TXEN (1 << 0) /* Enable transmit logic */
#define VT8500_RXEN (1 << 1) /* Enable receive logic */
#define VT8500_CS8 (1 << 2) /* 8-bit data length (vs. 7-bit) */
#define VT8500_CSTOPB (1 << 3) /* 2 stop bits (vs. 1) */
#define VT8500_PARENB (1 << 4) /* Enable parity */
#define VT8500_PARODD (1 << 5) /* Odd parity (vs. even) */
#define VT8500_RTS (1 << 6) /* Ready to send */
#define VT8500_LOOPBK (1 << 7) /* Enable internal loopback */
#define VT8500_DMA (1 << 8) /* Enable DMA mode (needs FIFO) */
#define VT8500_BREAK (1 << 9) /* Initiate break signal */
#define VT8500_PSLVERR (1 << 10) /* APB error upon empty RX FIFO read */
#define VT8500_SWRTSCTS (1 << 11) /* Software-controlled RTS/CTS */
/*
* Capability flags (driver-internal)
*/
#define VT8500_HAS_SWRTSCTS_SWITCH (1 << 1)
#define VT8500_RECOMMENDED_CLK 12000000
#define VT8500_OVERSAMPLING_DIVISOR 13
#define VT8500_MAX_PORTS 6
struct vt8500_port {
struct uart_port uart;
char name[16];
struct clk *clk;
unsigned int clk_predivisor;
unsigned int ier;
unsigned int vt8500_uart_flags;
};
/*
* we use this variable to keep track of which ports
* have been allocated as we can't use pdev->id in
* devicetree
*/
static DECLARE_BITMAP(vt8500_ports_in_use, VT8500_MAX_PORTS);
static inline void vt8500_write(struct uart_port *port, unsigned int val,
unsigned int off)
{
writel(val, port->membase + off);
}
static inline unsigned int vt8500_read(struct uart_port *port, unsigned int off)
{
return readl(port->membase + off);
}
static void vt8500_stop_tx(struct uart_port *port)
{
struct vt8500_port *vt8500_port = container_of(port,
struct vt8500_port,
uart);
vt8500_port->ier &= ~TX_FIFO_INTS;
vt8500_write(port, vt8500_port->ier, VT8500_URIER);
}
static void vt8500_stop_rx(struct uart_port *port)
{
struct vt8500_port *vt8500_port = container_of(port,
struct vt8500_port,
uart);
vt8500_port->ier &= ~RX_FIFO_INTS;
vt8500_write(port, vt8500_port->ier, VT8500_URIER);
}
static void vt8500_enable_ms(struct uart_port *port)
{
struct vt8500_port *vt8500_port = container_of(port,
struct vt8500_port,
uart);
vt8500_port->ier |= TCTS;
vt8500_write(port, vt8500_port->ier, VT8500_URIER);
}
static void handle_rx(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
/*
* Handle overrun
*/
if ((vt8500_read(port, VT8500_URISR) & RXOVER)) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
}
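	/*
	 * VT8500_URFIDX appears to pack the TX fill level in bits [4:0]
	 * and the RX fill level in bits [12:8] (inferred from the masks
	 * used here and in vt8500_tx_empty()); the 0x1f00 test below
	 * therefore loops while at least one received byte is queued.
	 */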
/* and now the main RX loop */
while (vt8500_read(port, VT8500_URFIDX) & 0x1f00) {
unsigned int c;
char flag = TTY_NORMAL;
c = readw(port->membase + VT8500_RXFIFO) & 0x3ff;
/* Mask conditions we're ignoring. */
c &= ~port->read_status_mask;
if (c & FER) {
port->icount.frame++;
flag = TTY_FRAME;
} else if (c & PER) {
port->icount.parity++;
flag = TTY_PARITY;
}
port->icount.rx++;
if (!uart_handle_sysrq_char(port, c))
tty_insert_flip_char(tport, c, flag);
}
tty_flip_buffer_push(tport);
}
static unsigned int vt8500_tx_empty(struct uart_port *port)
{
unsigned int idx = vt8500_read(port, VT8500_URFIDX) & 0x1f;
return idx < 16 ? TIOCSER_TEMT : 0;
}
static void handle_tx(struct uart_port *port)
{
u8 ch;
uart_port_tx(port, ch,
vt8500_tx_empty(port),
writeb(ch, port->membase + VT8500_TXFIFO));
}
static void vt8500_start_tx(struct uart_port *port)
{
struct vt8500_port *vt8500_port = container_of(port,
struct vt8500_port,
uart);
vt8500_port->ier &= ~TX_FIFO_INTS;
vt8500_write(port, vt8500_port->ier, VT8500_URIER);
handle_tx(port);
vt8500_port->ier |= TX_FIFO_INTS;
vt8500_write(port, vt8500_port->ier, VT8500_URIER);
}
static void handle_delta_cts(struct uart_port *port)
{
port->icount.cts++;
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
static irqreturn_t vt8500_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
unsigned long isr;
spin_lock(&port->lock);
isr = vt8500_read(port, VT8500_URISR);
/* Acknowledge active status bits */
vt8500_write(port, isr, VT8500_URISR);
if (isr & RX_FIFO_INTS)
handle_rx(port);
if (isr & TX_FIFO_INTS)
handle_tx(port);
if (isr & TCTS)
handle_delta_cts(port);
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
static unsigned int vt8500_get_mctrl(struct uart_port *port)
{
unsigned int usr;
usr = vt8500_read(port, VT8500_URUSR);
if (usr & (1 << 4))
return TIOCM_CTS;
else
return 0;
}
static void vt8500_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int lcr = vt8500_read(port, VT8500_URLCR);
if (mctrl & TIOCM_RTS)
lcr |= VT8500_RTS;
else
lcr &= ~VT8500_RTS;
vt8500_write(port, lcr, VT8500_URLCR);
}
static void vt8500_break_ctl(struct uart_port *port, int break_ctl)
{
if (break_ctl)
vt8500_write(port,
vt8500_read(port, VT8500_URLCR) | VT8500_BREAK,
VT8500_URLCR);
}
static int vt8500_set_baud_rate(struct uart_port *port, unsigned int baud)
{
struct vt8500_port *vt8500_port =
container_of(port, struct vt8500_port, uart);
unsigned long div;
unsigned int loops = 1000;
div = ((vt8500_port->clk_predivisor - 1) & 0xf) << 16;
div |= (uart_get_divisor(port, baud) - 1) & 0x3ff;
/* Effective baud rate */
baud = port->uartclk / 16 / ((div & 0x3ff) + 1);
while ((vt8500_read(port, VT8500_URUSR) & (1 << 5)) && --loops)
cpu_relax();
vt8500_write(port, div, VT8500_URDIV);
/* Break signal timing depends on baud rate, update accordingly */
vt8500_write(port, mult_frac(baud, 4096, 1000000), VT8500_URBKR);
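	/*
	 * The break count written above is presumably in bit periods:
	 * mult_frac(baud, 4096, 1000000) spans a fixed ~4.096 ms window
	 * whatever the line speed, e.g. 115200 * 4096 / 1000000 = 471
	 * at 115200 baud.
	 */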
return baud;
}
static int vt8500_startup(struct uart_port *port)
{
struct vt8500_port *vt8500_port =
container_of(port, struct vt8500_port, uart);
int ret;
snprintf(vt8500_port->name, sizeof(vt8500_port->name),
"vt8500_serial%d", port->line);
ret = request_irq(port->irq, vt8500_irq, IRQF_TRIGGER_HIGH,
vt8500_port->name, port);
if (unlikely(ret))
return ret;
vt8500_write(port, 0x03, VT8500_URLCR); /* enable TX & RX */
return 0;
}
static void vt8500_shutdown(struct uart_port *port)
{
struct vt8500_port *vt8500_port =
container_of(port, struct vt8500_port, uart);
vt8500_port->ier = 0;
/* disable interrupts and FIFOs */
vt8500_write(&vt8500_port->uart, 0, VT8500_URIER);
vt8500_write(&vt8500_port->uart, 0x880, VT8500_URFCR);
free_irq(port->irq, port);
}
static void vt8500_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
struct vt8500_port *vt8500_port =
container_of(port, struct vt8500_port, uart);
unsigned long flags;
unsigned int baud, lcr;
unsigned int loops = 1000;
spin_lock_irqsave(&port->lock, flags);
/* calculate and set baud rate */
baud = uart_get_baud_rate(port, termios, old, 900, 921600);
baud = vt8500_set_baud_rate(port, baud);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
/* calculate parity */
lcr = vt8500_read(&vt8500_port->uart, VT8500_URLCR);
lcr &= ~(VT8500_PARENB | VT8500_PARODD);
if (termios->c_cflag & PARENB) {
lcr |= VT8500_PARENB;
termios->c_cflag &= ~CMSPAR;
if (termios->c_cflag & PARODD)
lcr |= VT8500_PARODD;
}
/* calculate bits per char */
lcr &= ~VT8500_CS8;
switch (termios->c_cflag & CSIZE) {
case CS7:
break;
case CS8:
default:
lcr |= VT8500_CS8;
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= CS8;
break;
}
/* calculate stop bits */
lcr &= ~VT8500_CSTOPB;
if (termios->c_cflag & CSTOPB)
lcr |= VT8500_CSTOPB;
lcr &= ~VT8500_SWRTSCTS;
if (vt8500_port->vt8500_uart_flags & VT8500_HAS_SWRTSCTS_SWITCH)
lcr |= VT8500_SWRTSCTS;
/* set parity, bits per char, and stop bit */
vt8500_write(&vt8500_port->uart, lcr, VT8500_URLCR);
/* Configure status bits to ignore based on termio flags. */
port->read_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->read_status_mask = FER | PER;
uart_update_timeout(port, termios->c_cflag, baud);
/* Reset FIFOs */
vt8500_write(&vt8500_port->uart, 0x88c, VT8500_URFCR);
while ((vt8500_read(&vt8500_port->uart, VT8500_URFCR) & 0xc)
&& --loops)
cpu_relax();
/* Every possible FIFO-related interrupt */
vt8500_port->ier = RX_FIFO_INTS | TX_FIFO_INTS;
/*
* CTS flow control
*/
if (UART_ENABLE_MS(&vt8500_port->uart, termios->c_cflag))
vt8500_port->ier |= TCTS;
vt8500_write(&vt8500_port->uart, 0x881, VT8500_URFCR);
vt8500_write(&vt8500_port->uart, vt8500_port->ier, VT8500_URIER);
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *vt8500_type(struct uart_port *port)
{
struct vt8500_port *vt8500_port =
container_of(port, struct vt8500_port, uart);
return vt8500_port->name;
}
static void vt8500_release_port(struct uart_port *port)
{
}
static int vt8500_request_port(struct uart_port *port)
{
return 0;
}
static void vt8500_config_port(struct uart_port *port, int flags)
{
port->type = PORT_VT8500;
}
static int vt8500_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_VT8500))
return -EINVAL;
if (unlikely(port->irq != ser->irq))
return -EINVAL;
return 0;
}
static struct vt8500_port *vt8500_uart_ports[VT8500_MAX_PORTS];
static struct uart_driver vt8500_uart_driver;
#ifdef CONFIG_SERIAL_VT8500_CONSOLE
static void wait_for_xmitr(struct uart_port *port)
{
unsigned int status, tmout = 10000;
/* Wait up to 10ms for the character(s) to be sent. */
do {
status = vt8500_read(port, VT8500_URFIDX);
if (--tmout == 0)
break;
udelay(1);
} while (status & 0x10);
}
static void vt8500_console_putchar(struct uart_port *port, unsigned char c)
{
wait_for_xmitr(port);
writeb(c, port->membase + VT8500_TXFIFO);
}
static void vt8500_console_write(struct console *co, const char *s,
unsigned int count)
{
struct vt8500_port *vt8500_port = vt8500_uart_ports[co->index];
unsigned long ier;
BUG_ON(co->index < 0 || co->index >= vt8500_uart_driver.nr);
ier = vt8500_read(&vt8500_port->uart, VT8500_URIER);
	vt8500_write(&vt8500_port->uart, 0, VT8500_URIER);
uart_console_write(&vt8500_port->uart, s, count,
vt8500_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and switch back to FIFO
*/
wait_for_xmitr(&vt8500_port->uart);
	vt8500_write(&vt8500_port->uart, ier, VT8500_URIER);
}
static int __init vt8500_console_setup(struct console *co, char *options)
{
struct vt8500_port *vt8500_port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (unlikely(co->index >= vt8500_uart_driver.nr || co->index < 0))
return -ENXIO;
vt8500_port = vt8500_uart_ports[co->index];
if (!vt8500_port)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&vt8500_port->uart,
co, baud, parity, bits, flow);
}
static struct console vt8500_console = {
.name = "ttyWMT",
.write = vt8500_console_write,
.device = uart_console_device,
.setup = vt8500_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &vt8500_uart_driver,
};
#define VT8500_CONSOLE (&vt8500_console)
#else
#define VT8500_CONSOLE NULL
#endif
#ifdef CONFIG_CONSOLE_POLL
static int vt8500_get_poll_char(struct uart_port *port)
{
unsigned int status = vt8500_read(port, VT8500_URFIDX);
if (!(status & 0x1f00))
return NO_POLL_CHAR;
return vt8500_read(port, VT8500_RXFIFO) & 0xff;
}
static void vt8500_put_poll_char(struct uart_port *port, unsigned char c)
{
unsigned int status, tmout = 10000;
do {
status = vt8500_read(port, VT8500_URFIDX);
if (--tmout == 0)
break;
udelay(1);
} while (status & 0x10);
vt8500_write(port, c, VT8500_TXFIFO);
}
#endif
static const struct uart_ops vt8500_uart_pops = {
.tx_empty = vt8500_tx_empty,
.set_mctrl = vt8500_set_mctrl,
.get_mctrl = vt8500_get_mctrl,
.stop_tx = vt8500_stop_tx,
.start_tx = vt8500_start_tx,
.stop_rx = vt8500_stop_rx,
.enable_ms = vt8500_enable_ms,
.break_ctl = vt8500_break_ctl,
.startup = vt8500_startup,
.shutdown = vt8500_shutdown,
.set_termios = vt8500_set_termios,
.type = vt8500_type,
.release_port = vt8500_release_port,
.request_port = vt8500_request_port,
.config_port = vt8500_config_port,
.verify_port = vt8500_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = vt8500_get_poll_char,
.poll_put_char = vt8500_put_poll_char,
#endif
};
static struct uart_driver vt8500_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "vt8500_serial",
.dev_name = "ttyWMT",
.nr = 6,
.cons = VT8500_CONSOLE,
};
static unsigned int vt8500_flags; /* none required so far */
static unsigned int wm8880_flags = VT8500_HAS_SWRTSCTS_SWITCH;
static const struct of_device_id wmt_dt_ids[] = {
{ .compatible = "via,vt8500-uart", .data = &vt8500_flags},
{ .compatible = "wm,wm8880-uart", .data = &wm8880_flags},
{}
};
static int vt8500_serial_probe(struct platform_device *pdev)
{
struct vt8500_port *vt8500_port;
struct resource *mmres;
struct device_node *np = pdev->dev.of_node;
const unsigned int *flags;
int ret;
int port;
int irq;
flags = of_device_get_match_data(&pdev->dev);
if (!flags)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
if (np) {
port = of_alias_get_id(np, "serial");
if (port >= VT8500_MAX_PORTS)
port = -1;
} else {
port = -1;
}
if (port < 0) {
/* calculate the port id */
port = find_first_zero_bit(vt8500_ports_in_use,
VT8500_MAX_PORTS);
}
if (port >= VT8500_MAX_PORTS)
return -ENODEV;
/* reserve the port id */
if (test_and_set_bit(port, vt8500_ports_in_use)) {
/* port already in use - shouldn't really happen */
return -EBUSY;
}
vt8500_port = devm_kzalloc(&pdev->dev, sizeof(struct vt8500_port),
GFP_KERNEL);
if (!vt8500_port)
return -ENOMEM;
vt8500_port->uart.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &mmres);
if (IS_ERR(vt8500_port->uart.membase))
return PTR_ERR(vt8500_port->uart.membase);
vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0);
if (IS_ERR(vt8500_port->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return -EINVAL;
}
ret = clk_prepare_enable(vt8500_port->clk);
if (ret) {
dev_err(&pdev->dev, "failed to enable clock\n");
return ret;
}
vt8500_port->vt8500_uart_flags = *flags;
vt8500_port->clk_predivisor = DIV_ROUND_CLOSEST(
clk_get_rate(vt8500_port->clk),
VT8500_RECOMMENDED_CLK
);
vt8500_port->uart.type = PORT_VT8500;
vt8500_port->uart.iotype = UPIO_MEM;
vt8500_port->uart.mapbase = mmres->start;
vt8500_port->uart.irq = irq;
vt8500_port->uart.fifosize = 16;
vt8500_port->uart.ops = &vt8500_uart_pops;
vt8500_port->uart.line = port;
vt8500_port->uart.dev = &pdev->dev;
vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
vt8500_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_VT8500_CONSOLE);
/* Serial core uses the magic "16" everywhere - adjust for it */
vt8500_port->uart.uartclk = 16 * clk_get_rate(vt8500_port->clk) /
vt8500_port->clk_predivisor /
VT8500_OVERSAMPLING_DIVISOR;
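	/*
	 * Worked example, assuming a hypothetical 24 MHz input clock:
	 * clk_predivisor = DIV_ROUND_CLOSEST(24000000, 12000000) = 2, so
	 * uartclk = 16 * 24000000 / 2 / 13 = 14769230. The serial core
	 * then computes divisors as uartclk / (16 * baud), which lands
	 * back on the hardware's clk / predivisor / 13 / baud ratio.
	 */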
snprintf(vt8500_port->name, sizeof(vt8500_port->name),
"VT8500 UART%d", pdev->id);
vt8500_uart_ports[port] = vt8500_port;
uart_add_one_port(&vt8500_uart_driver, &vt8500_port->uart);
platform_set_drvdata(pdev, vt8500_port);
return 0;
}
static struct platform_driver vt8500_platform_driver = {
.probe = vt8500_serial_probe,
.driver = {
.name = "vt8500_serial",
.of_match_table = wmt_dt_ids,
.suppress_bind_attrs = true,
},
};
static int __init vt8500_serial_init(void)
{
int ret;
ret = uart_register_driver(&vt8500_uart_driver);
if (unlikely(ret))
return ret;
ret = platform_driver_register(&vt8500_platform_driver);
if (unlikely(ret))
uart_unregister_driver(&vt8500_uart_driver);
return ret;
}
device_initcall(vt8500_serial_init);
| linux-master | drivers/tty/serial/vt8500_serial.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ARC On-Chip(fpga) UART Driver
*
* Copyright (C) 2010-2012 Synopsys, Inc. (www.synopsys.com)
*
* vineetg: July 10th 2012
* -Decoupled the driver from arch/arc
* +Using platform_get_resource() for irq/membase (thx to bfin_uart.c)
* +Using early_platform_xxx() for early console (thx to mach-shmobile/xxx)
*
* Vineetg: Aug 21st 2010
 *  -uart_tx_stopped() is not checked in the tty write path as it has
 *   already been taken care of in serial core
*
* Vineetg: Aug 18th 2010
* -New Serial Core based ARC UART driver
 *  -Derived largely from blackfin driver albeit with some major tweaks
*
* TODO:
 *  -check if sysrq works
*/
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
/*************************************
* ARC UART Hardware Specs
************************************/
#define ARC_UART_TX_FIFO_SIZE 1
/*
* UART Register set (this is not a Standards Compliant IP)
* Also each reg is Word aligned, but only 8 bits wide
*/
#define R_ID0 0
#define R_ID1 4
#define R_ID2 8
#define R_ID3 12
#define R_DATA 16
#define R_STS 20
#define R_BAUDL 24
#define R_BAUDH 28
/* Bits for UART Status Reg (R/W) */
#define RXIENB 0x04 /* Receive Interrupt Enable */
#define TXIENB 0x40 /* Transmit Interrupt Enable */
#define RXEMPTY 0x20 /* Receive FIFO Empty: No char received */
#define TXEMPTY 0x80 /* Transmit FIFO Empty, thus char can be written into */
#define RXFULL 0x08 /* Receive FIFO full */
#define RXFULL1 0x10 /* Receive FIFO has space for 1 char (tot space=4) */
#define RXFERR 0x01 /* Frame Error: Stop Bit not detected */
#define RXOERR 0x02 /* OverFlow Err: Char recv but RXFULL still set */
/* Uart bit fiddling helpers: lowest level */
#define RBASE(port, reg) (port->membase + reg)
#define UART_REG_SET(u, r, v) writeb((v), RBASE(u, r))
#define UART_REG_GET(u, r) readb(RBASE(u, r))
#define UART_REG_OR(u, r, v) UART_REG_SET(u, r, UART_REG_GET(u, r) | (v))
#define UART_REG_CLR(u, r, v) UART_REG_SET(u, r, UART_REG_GET(u, r) & ~(v))
/* Uart bit fiddling helpers: API level */
#define UART_SET_DATA(uart, val) UART_REG_SET(uart, R_DATA, val)
#define UART_GET_DATA(uart) UART_REG_GET(uart, R_DATA)
#define UART_SET_BAUDH(uart, val) UART_REG_SET(uart, R_BAUDH, val)
#define UART_SET_BAUDL(uart, val) UART_REG_SET(uart, R_BAUDL, val)
#define UART_CLR_STATUS(uart, val) UART_REG_CLR(uart, R_STS, val)
#define UART_GET_STATUS(uart) UART_REG_GET(uart, R_STS)
#define UART_ALL_IRQ_DISABLE(uart) UART_REG_CLR(uart, R_STS, RXIENB|TXIENB)
#define UART_RX_IRQ_DISABLE(uart) UART_REG_CLR(uart, R_STS, RXIENB)
#define UART_TX_IRQ_DISABLE(uart) UART_REG_CLR(uart, R_STS, TXIENB)
#define UART_ALL_IRQ_ENABLE(uart) UART_REG_OR(uart, R_STS, RXIENB|TXIENB)
#define UART_RX_IRQ_ENABLE(uart) UART_REG_OR(uart, R_STS, RXIENB)
#define UART_TX_IRQ_ENABLE(uart) UART_REG_OR(uart, R_STS, TXIENB)
#define ARC_SERIAL_DEV_NAME "ttyARC"
struct arc_uart_port {
struct uart_port port;
unsigned long baud;
};
#define to_arc_port(uport) container_of(uport, struct arc_uart_port, port)
static struct arc_uart_port arc_uart_ports[CONFIG_SERIAL_ARC_NR_PORTS];
#ifdef CONFIG_SERIAL_ARC_CONSOLE
static struct console arc_console;
#endif
#define DRIVER_NAME "arc-uart"
static struct uart_driver arc_uart_driver = {
.owner = THIS_MODULE,
.driver_name = DRIVER_NAME,
.dev_name = ARC_SERIAL_DEV_NAME,
.major = 0,
.minor = 0,
.nr = CONFIG_SERIAL_ARC_NR_PORTS,
#ifdef CONFIG_SERIAL_ARC_CONSOLE
.cons = &arc_console,
#endif
};
static void arc_serial_stop_rx(struct uart_port *port)
{
UART_RX_IRQ_DISABLE(port);
}
static void arc_serial_stop_tx(struct uart_port *port)
{
while (!(UART_GET_STATUS(port) & TXEMPTY))
cpu_relax();
UART_TX_IRQ_DISABLE(port);
}
/*
* Return TIOCSER_TEMT when transmitter is not busy.
*/
static unsigned int arc_serial_tx_empty(struct uart_port *port)
{
unsigned int stat;
stat = UART_GET_STATUS(port);
if (stat & TXEMPTY)
return TIOCSER_TEMT;
return 0;
}
/*
* Driver internal routine, used by both tty(serial core) as well as tx-isr
* -Called under spinlock in either cases
* -also tty->flow.stopped has already been checked
* = by uart_start( ) before calling us
* = tx_ist checks that too before calling
*/
static void arc_serial_tx_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
int sent = 0;
unsigned char ch;
if (unlikely(port->x_char)) {
UART_SET_DATA(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
sent = 1;
} else if (!uart_circ_empty(xmit)) {
ch = xmit->buf[xmit->tail];
uart_xmit_advance(port, 1);
while (!(UART_GET_STATUS(port) & TXEMPTY))
cpu_relax();
UART_SET_DATA(port, ch);
sent = 1;
}
	/*
	 * If the chars left in the xmit buffer drop below the low-water
	 * mark, ask the tty layer for more; the wakeup raised from hard
	 * ISR context schedules the refill in the softirq half.
	 */
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (sent)
UART_TX_IRQ_ENABLE(port);
}
/*
* port is locked and interrupts are disabled
* uart_start( ) calls us under the port spinlock irqsave
*/
static void arc_serial_start_tx(struct uart_port *port)
{
arc_serial_tx_chars(port);
}
static void arc_serial_rx_chars(struct uart_port *port, unsigned int status)
{
	/*
	 * UART has a 4-deep RX FIFO. The driver's recognition of this
	 * fact is quite subtle. Here's how ...
	 * Upon getting an RX interrupt with RX-EMPTY=0, meaning data is
	 * available, the driver reads the DATA reg and keeps doing that
	 * in a loop until RX-EMPTY=1. Multiple chars being available
	 * within a single interrupt, before RX-EMPTY is set again,
	 * implies some sort of buffering going on in the controller,
	 * which is indeed the RX FIFO.
	 */
do {
u8 ch, flg = TTY_NORMAL;
/*
* This could be an Rx Intr for err (no data),
* so check err and clear that Intr first
*/
if (status & RXOERR) {
port->icount.overrun++;
flg = TTY_OVERRUN;
UART_CLR_STATUS(port, RXOERR);
}
if (status & RXFERR) {
port->icount.frame++;
flg = TTY_FRAME;
UART_CLR_STATUS(port, RXFERR);
}
if (status & RXEMPTY)
continue;
ch = UART_GET_DATA(port);
port->icount.rx++;
if (!(uart_handle_sysrq_char(port, ch)))
uart_insert_char(port, status, RXOERR, ch, flg);
tty_flip_buffer_push(&port->state->port);
} while (!((status = UART_GET_STATUS(port)) & RXEMPTY));
}
/*
* A note on the Interrupt handling state machine of this driver
*
* kernel printk writes funnel thru the console driver framework and in order
* to keep things simple as well as efficient, it writes to UART in polled
* mode, in one shot, and exits.
*
* OTOH, Userland output (via tty layer), uses interrupt based writes as there
* can be undeterministic delay between char writes.
*
* Thus Rx-interrupts are always enabled, while tx-interrupts are by default
* disabled.
*
* When tty has some data to send out, serial core calls driver's start_tx
* which
* -checks-if-tty-buffer-has-char-to-send
* -writes-data-to-uart
* -enable-tx-intr
*
* Once data bits are pushed out, controller raises the Tx-room-avail-Interrupt.
* The first thing Tx ISR does is disable further Tx interrupts (as this could
* be the last char to send, before settling down into the quiet polled mode).
* It then calls the exact routine used by tty layer write to send out any
* more char in tty buffer. In case of sending, it re-enables Tx-intr. In case
* of no data, it remains disabled.
* This is how the transmit state machine is dynamically switched on/off
*/
static irqreturn_t arc_serial_isr(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
unsigned int status;
status = UART_GET_STATUS(port);
/*
* Single IRQ for both Rx (data available) Tx (room available) Interrupt
* notifications from the UART Controller.
* To demultiplex between the two, we check the relevant bits
*/
if (status & RXIENB) {
		/* already in ISR context, no need for the _irqsave variant */
spin_lock(&port->lock);
arc_serial_rx_chars(port, status);
spin_unlock(&port->lock);
}
if ((status & TXIENB) && (status & TXEMPTY)) {
/* Unconditionally disable further Tx-Interrupts.
* will be enabled by tx_chars() if needed.
*/
UART_TX_IRQ_DISABLE(port);
spin_lock(&port->lock);
if (!uart_tx_stopped(port))
arc_serial_tx_chars(port);
spin_unlock(&port->lock);
}
return IRQ_HANDLED;
}
static unsigned int arc_serial_get_mctrl(struct uart_port *port)
{
/*
* Pretend we have a Modem status reg and following bits are
	 * always set, to satisfy the serial core state machine
* (DSR) Data Set Ready
* (CTS) Clear To Send
* (CAR) Carrier Detect
*/
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}
static void arc_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
/* MCR not present */
}
static void arc_serial_break_ctl(struct uart_port *port, int break_state)
{
/* ARC UART doesn't support sending Break signal */
}
static int arc_serial_startup(struct uart_port *port)
{
/* Before we hook up the ISR, Disable all UART Interrupts */
UART_ALL_IRQ_DISABLE(port);
if (request_irq(port->irq, arc_serial_isr, 0, "arc uart rx-tx", port)) {
dev_warn(port->dev, "Unable to attach ARC UART intr\n");
return -EBUSY;
}
UART_RX_IRQ_ENABLE(port); /* Only Rx IRQ enabled to begin with */
return 0;
}
/* This is not really needed */
static void arc_serial_shutdown(struct uart_port *port)
{
free_irq(port->irq, port);
}
static void
arc_serial_set_termios(struct uart_port *port, struct ktermios *new,
const struct ktermios *old)
{
struct arc_uart_port *uart = to_arc_port(port);
unsigned int baud, uartl, uarth, hw_val;
unsigned long flags;
/*
* Use the generic handler so that any specially encoded baud rates
* such as SPD_xx flags or "%B0" can be handled
	 * Max baud will presumably not be more than the current 115K * 4
* Formula for ARC UART is: hw-val = ((CLK/(BAUD*4)) -1)
* spread over two 8-bit registers
*/
baud = uart_get_baud_rate(port, new, old, 0, 460800);
hw_val = port->uartclk / (uart->baud * 4) - 1;
uartl = hw_val & 0xFF;
uarth = (hw_val >> 8) & 0xFF;
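	/*
	 * Worked example, assuming a hypothetical 50 MHz uartclk at
	 * 115200 baud: hw_val = 50000000 / (115200 * 4) - 1 = 107, so
	 * uartl = 0x6B and uarth = 0x00.
	 */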
spin_lock_irqsave(&port->lock, flags);
UART_ALL_IRQ_DISABLE(port);
UART_SET_BAUDL(port, uartl);
UART_SET_BAUDH(port, uarth);
UART_RX_IRQ_ENABLE(port);
/*
* UART doesn't support Parity/Hardware Flow Control;
* Only supports 8N1 character size
*/
new->c_cflag &= ~(CMSPAR|CRTSCTS|CSIZE);
new->c_cflag |= CS8;
if (old)
tty_termios_copy_hw(new, old);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(new))
tty_termios_encode_baud_rate(new, baud, baud);
uart_update_timeout(port, new->c_cflag, baud);
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *arc_serial_type(struct uart_port *port)
{
return port->type == PORT_ARC ? DRIVER_NAME : NULL;
}
static void arc_serial_release_port(struct uart_port *port)
{
}
static int arc_serial_request_port(struct uart_port *port)
{
return 0;
}
/*
* Verify the new serial_struct (for TIOCSSERIAL).
*/
static int
arc_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
{
if (port->type != PORT_UNKNOWN && ser->type != PORT_ARC)
return -EINVAL;
return 0;
}
/*
* Configure/autoconfigure the port.
*/
static void arc_serial_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_ARC;
}
#ifdef CONFIG_CONSOLE_POLL
static void arc_serial_poll_putchar(struct uart_port *port, unsigned char chr)
{
while (!(UART_GET_STATUS(port) & TXEMPTY))
cpu_relax();
UART_SET_DATA(port, chr);
}
static int arc_serial_poll_getchar(struct uart_port *port)
{
unsigned char chr;
	/* wait until the RX FIFO has at least one char (RXEMPTY clears) */
	while (UART_GET_STATUS(port) & RXEMPTY)
cpu_relax();
chr = UART_GET_DATA(port);
return chr;
}
#endif
static const struct uart_ops arc_serial_pops = {
.tx_empty = arc_serial_tx_empty,
.set_mctrl = arc_serial_set_mctrl,
.get_mctrl = arc_serial_get_mctrl,
.stop_tx = arc_serial_stop_tx,
.start_tx = arc_serial_start_tx,
.stop_rx = arc_serial_stop_rx,
.break_ctl = arc_serial_break_ctl,
.startup = arc_serial_startup,
.shutdown = arc_serial_shutdown,
.set_termios = arc_serial_set_termios,
.type = arc_serial_type,
.release_port = arc_serial_release_port,
.request_port = arc_serial_request_port,
.config_port = arc_serial_config_port,
.verify_port = arc_serial_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_put_char = arc_serial_poll_putchar,
.poll_get_char = arc_serial_poll_getchar,
#endif
};
#ifdef CONFIG_SERIAL_ARC_CONSOLE
static int arc_serial_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= CONFIG_SERIAL_ARC_NR_PORTS)
return -ENODEV;
/*
	 * The uart port backing the console (e.g. ttyARC1) might not have
	 * been initialized yet. If so, defer the console setup until the
	 * port is added.
*/
port = &arc_uart_ports[co->index].port;
if (!port->membase)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
/*
* Serial core will call port->ops->set_termios( )
* which will set the baud reg
*/
return uart_set_options(port, co, baud, parity, bits, flow);
}
static void arc_serial_console_putchar(struct uart_port *port, unsigned char ch)
{
while (!(UART_GET_STATUS(port) & TXEMPTY))
cpu_relax();
UART_SET_DATA(port, (unsigned char)ch);
}
/*
* Interrupts are disabled on entering
*/
static void arc_serial_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port = &arc_uart_ports[co->index].port;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
uart_console_write(port, s, count, arc_serial_console_putchar);
spin_unlock_irqrestore(&port->lock, flags);
}
static struct console arc_console = {
.name = ARC_SERIAL_DEV_NAME,
.write = arc_serial_console_write,
.device = uart_console_device,
.setup = arc_serial_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &arc_uart_driver
};
static void arc_early_serial_write(struct console *con, const char *s,
unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, arc_serial_console_putchar);
}
static int __init arc_early_console_setup(struct earlycon_device *dev,
const char *opt)
{
struct uart_port *port = &dev->port;
unsigned int l, h, hw_val;
if (!dev->port.membase)
return -ENODEV;
hw_val = port->uartclk / (dev->baud * 4) - 1;
l = hw_val & 0xFF;
h = (hw_val >> 8) & 0xFF;
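	/*
	 * Same formula as arc_serial_set_termios(); e.g. an assumed
	 * 33.33 MHz uartclk at 115200 baud gives
	 * hw_val = 33330000 / (115200 * 4) - 1 = 71, i.e. l = 0x47, h = 0x00.
	 */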
UART_SET_BAUDL(port, l);
UART_SET_BAUDH(port, h);
dev->con->write = arc_early_serial_write;
return 0;
}
OF_EARLYCON_DECLARE(arc_uart, "snps,arc-uart", arc_early_console_setup);
#endif /* CONFIG_SERIAL_ARC_CONSOLE */
static int arc_serial_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct arc_uart_port *uart;
struct uart_port *port;
int dev_id;
u32 val;
/* no device tree device */
if (!np)
return -ENODEV;
dev_id = of_alias_get_id(np, "serial");
if (dev_id < 0)
dev_id = 0;
if (dev_id >= ARRAY_SIZE(arc_uart_ports)) {
dev_err(&pdev->dev, "serial%d out of range\n", dev_id);
return -EINVAL;
}
uart = &arc_uart_ports[dev_id];
port = &uart->port;
if (of_property_read_u32(np, "clock-frequency", &val)) {
dev_err(&pdev->dev, "clock-frequency property NOTset\n");
return -EINVAL;
}
port->uartclk = val;
if (of_property_read_u32(np, "current-speed", &val)) {
dev_err(&pdev->dev, "current-speed property NOT set\n");
return -EINVAL;
}
uart->baud = val;
port->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(port->membase)) {
		/* No point in dev_err since the UART itself is hosed here */
return PTR_ERR(port->membase);
}
port->irq = irq_of_parse_and_map(np, 0);
port->dev = &pdev->dev;
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
port->line = dev_id;
port->ops = &arc_serial_pops;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ARC_CONSOLE);
port->fifosize = ARC_UART_TX_FIFO_SIZE;
	/*
	 * uart_insert_char() uses it when deciding whether to ignore a
	 * char or not. Explicitly setting it here removes the subtlety.
	 */
port->ignore_status_mask = 0;
return uart_add_one_port(&arc_uart_driver, &arc_uart_ports[dev_id].port);
}
static const struct of_device_id arc_uart_dt_ids[] = {
{ .compatible = "snps,arc-uart" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, arc_uart_dt_ids);
static struct platform_driver arc_platform_driver = {
.probe = arc_serial_probe,
.driver = {
.name = DRIVER_NAME,
.of_match_table = arc_uart_dt_ids,
},
};
static int __init arc_serial_init(void)
{
int ret;
ret = uart_register_driver(&arc_uart_driver);
if (ret)
return ret;
ret = platform_driver_register(&arc_platform_driver);
if (ret)
uart_unregister_driver(&arc_uart_driver);
return ret;
}
static void __exit arc_serial_exit(void)
{
platform_driver_unregister(&arc_platform_driver);
uart_unregister_driver(&arc_uart_driver);
}
module_init(arc_serial_init);
module_exit(arc_serial_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Vineet Gupta");
MODULE_DESCRIPTION("ARC(Synopsys) On-Chip(fpga) serial driver");
| linux-master | drivers/tty/serial/arc_uart.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Derived from many drivers using generic_serial interface,
* especially serial_tx3912.c by Steven J. Hill and r39xx_serial.c
* (was in Linux/VR tree) by Jim Pick.
*
* Copyright (C) 1999 Harald Koerfgen
* Copyright (C) 2000 Jim Pick <[email protected]>
* Copyright (C) 2001 Steven J. Hill ([email protected])
* Copyright (C) 2000-2002 Toshiba Corporation
*
* Serial driver for TX3927/TX4927/TX4925/TX4938 internal SIO controller
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/io.h>
#define PASS_LIMIT 256
#if !defined(CONFIG_SERIAL_TXX9_STDSERIAL)
/* "ttyS" is used for standard serial driver */
#define TXX9_TTY_NAME "ttyTX"
#define TXX9_TTY_MINOR_START 196
#define TXX9_TTY_MAJOR 204
#else
/* acts like standard serial driver */
#define TXX9_TTY_NAME "ttyS"
#define TXX9_TTY_MINOR_START 64
#define TXX9_TTY_MAJOR TTY_MAJOR
#endif
/* flag aliases */
#define UPF_TXX9_HAVE_CTS_LINE UPF_BUGGY_UART
#define UPF_TXX9_USE_SCLK UPF_MAGIC_MULTIPLIER
#ifdef CONFIG_PCI
/* support for Toshiba TC86C001 SIO */
#define ENABLE_SERIAL_TXX9_PCI
#endif
/*
* Number of serial ports
*/
#define UART_NR CONFIG_SERIAL_TXX9_NR_UARTS
#define TXX9_REGION_SIZE 0x24
/* TXX9 Serial Registers */
#define TXX9_SILCR 0x00
#define TXX9_SIDICR 0x04
#define TXX9_SIDISR 0x08
#define TXX9_SICISR 0x0c
#define TXX9_SIFCR 0x10
#define TXX9_SIFLCR 0x14
#define TXX9_SIBGR 0x18
#define TXX9_SITFIFO 0x1c
#define TXX9_SIRFIFO 0x20
/* SILCR : Line Control */
#define TXX9_SILCR_SCS_MASK 0x00000060
#define TXX9_SILCR_SCS_IMCLK 0x00000000
#define TXX9_SILCR_SCS_IMCLK_BG 0x00000020
#define TXX9_SILCR_SCS_SCLK 0x00000040
#define TXX9_SILCR_SCS_SCLK_BG 0x00000060
#define TXX9_SILCR_UEPS 0x00000010
#define TXX9_SILCR_UPEN 0x00000008
#define TXX9_SILCR_USBL_MASK 0x00000004
#define TXX9_SILCR_USBL_1BIT 0x00000000
#define TXX9_SILCR_USBL_2BIT 0x00000004
#define TXX9_SILCR_UMODE_MASK 0x00000003
#define TXX9_SILCR_UMODE_8BIT 0x00000000
#define TXX9_SILCR_UMODE_7BIT 0x00000001
/* SIDICR : DMA/Int. Control */
#define TXX9_SIDICR_TDE 0x00008000
#define TXX9_SIDICR_RDE 0x00004000
#define TXX9_SIDICR_TIE 0x00002000
#define TXX9_SIDICR_RIE 0x00001000
#define TXX9_SIDICR_SPIE 0x00000800
#define TXX9_SIDICR_CTSAC 0x00000600
#define TXX9_SIDICR_STIE_MASK 0x0000003f
#define TXX9_SIDICR_STIE_OERS 0x00000020
#define TXX9_SIDICR_STIE_CTSS 0x00000010
#define TXX9_SIDICR_STIE_RBRKD 0x00000008
#define TXX9_SIDICR_STIE_TRDY 0x00000004
#define TXX9_SIDICR_STIE_TXALS 0x00000002
#define TXX9_SIDICR_STIE_UBRKD 0x00000001
/* SIDISR : DMA/Int. Status */
#define TXX9_SIDISR_UBRK 0x00008000
#define TXX9_SIDISR_UVALID 0x00004000
#define TXX9_SIDISR_UFER 0x00002000
#define TXX9_SIDISR_UPER 0x00001000
#define TXX9_SIDISR_UOER 0x00000800
#define TXX9_SIDISR_ERI 0x00000400
#define TXX9_SIDISR_TOUT 0x00000200
#define TXX9_SIDISR_TDIS 0x00000100
#define TXX9_SIDISR_RDIS 0x00000080
#define TXX9_SIDISR_STIS 0x00000040
#define TXX9_SIDISR_RFDN_MASK 0x0000001f
/* SICISR : Change Int. Status */
#define TXX9_SICISR_OERS 0x00000020
#define TXX9_SICISR_CTSS 0x00000010
#define TXX9_SICISR_RBRKD 0x00000008
#define TXX9_SICISR_TRDY 0x00000004
#define TXX9_SICISR_TXALS 0x00000002
#define TXX9_SICISR_UBRKD 0x00000001
/* SIFCR : FIFO Control */
#define TXX9_SIFCR_SWRST 0x00008000
#define TXX9_SIFCR_RDIL_MASK 0x00000180
#define TXX9_SIFCR_RDIL_1 0x00000000
#define TXX9_SIFCR_RDIL_4 0x00000080
#define TXX9_SIFCR_RDIL_8 0x00000100
#define TXX9_SIFCR_RDIL_12 0x00000180
#define TXX9_SIFCR_RDIL_MAX 0x00000180
#define TXX9_SIFCR_TDIL_MASK 0x00000018
#define TXX9_SIFCR_TDIL_1 0x00000000
#define TXX9_SIFCR_TDIL_4	0x00000008
#define TXX9_SIFCR_TDIL_8 0x00000010
#define TXX9_SIFCR_TDIL_MAX 0x00000010
#define TXX9_SIFCR_TFRST 0x00000004
#define TXX9_SIFCR_RFRST 0x00000002
#define TXX9_SIFCR_FRSTE 0x00000001
#define TXX9_SIO_TX_FIFO 8
#define TXX9_SIO_RX_FIFO 16
/* SIFLCR : Flow Control */
#define TXX9_SIFLCR_RCS 0x00001000
#define TXX9_SIFLCR_TES 0x00000800
#define TXX9_SIFLCR_RTSSC 0x00000200
#define TXX9_SIFLCR_RSDE 0x00000100
#define TXX9_SIFLCR_TSDE 0x00000080
#define TXX9_SIFLCR_RTSTL_MASK 0x0000001e
#define TXX9_SIFLCR_RTSTL_MAX 0x0000001e
#define TXX9_SIFLCR_TBRK 0x00000001
/* SIBGR : Baudrate Control */
#define TXX9_SIBGR_BCLK_MASK 0x00000300
#define TXX9_SIBGR_BCLK_T0 0x00000000
#define TXX9_SIBGR_BCLK_T2 0x00000100
#define TXX9_SIBGR_BCLK_T4 0x00000200
#define TXX9_SIBGR_BCLK_T6 0x00000300
#define TXX9_SIBGR_BRD_MASK 0x000000ff
static inline unsigned int sio_in(struct uart_port *up, int offset)
{
switch (up->iotype) {
default:
return __raw_readl(up->membase + offset);
case UPIO_PORT:
return inl(up->iobase + offset);
}
}
static inline void
sio_out(struct uart_port *up, int offset, int value)
{
switch (up->iotype) {
default:
__raw_writel(value, up->membase + offset);
break;
case UPIO_PORT:
outl(value, up->iobase + offset);
break;
}
}
static inline void
sio_mask(struct uart_port *up, int offset, unsigned int value)
{
sio_out(up, offset, sio_in(up, offset) & ~value);
}
static inline void
sio_set(struct uart_port *up, int offset, unsigned int value)
{
sio_out(up, offset, sio_in(up, offset) | value);
}
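/*
 * sio_quot_set() below programs SIBGR from a 16x divisor: quot is halved
 * and then shifted into one of the T0/T2/T4/T6 prescaler bands (each band
 * divides the baud clock by a further factor of 4) so that the result fits
 * the 8-bit BRD field. As an illustration, with the 66670000 Hz uartclk the
 * PCI probe below uses: at 9600 baud, uart_get_divisor() yields
 * 66670000 / (16 * 9600) = 434, and 434 >> 1 = 217 < 256, so
 * SIBGR = 217 | TXX9_SIBGR_BCLK_T0.
 */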
static inline void
sio_quot_set(struct uart_port *up, int quot)
{
quot >>= 1;
if (quot < 256)
sio_out(up, TXX9_SIBGR, quot | TXX9_SIBGR_BCLK_T0);
else if (quot < (256 << 2))
sio_out(up, TXX9_SIBGR, (quot >> 2) | TXX9_SIBGR_BCLK_T2);
else if (quot < (256 << 4))
sio_out(up, TXX9_SIBGR, (quot >> 4) | TXX9_SIBGR_BCLK_T4);
else if (quot < (256 << 6))
sio_out(up, TXX9_SIBGR, (quot >> 6) | TXX9_SIBGR_BCLK_T6);
else
sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6);
}
static void serial_txx9_stop_tx(struct uart_port *up)
{
sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
}
static void serial_txx9_start_tx(struct uart_port *up)
{
sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
}
static void serial_txx9_stop_rx(struct uart_port *up)
{
up->read_status_mask &= ~TXX9_SIDISR_RDIS;
}
static void serial_txx9_initialize(struct uart_port *up)
{
unsigned int tmout = 10000;
sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
/* TX4925 BUG WORKAROUND. Accessing SIOC register
* immediately after soft reset causes bus error. */
udelay(1);
while ((sio_in(up, TXX9_SIFCR) & TXX9_SIFCR_SWRST) && --tmout)
udelay(1);
/* TX Int by FIFO Empty, RX Int by Receiving 1 char. */
sio_set(up, TXX9_SIFCR,
TXX9_SIFCR_TDIL_MAX | TXX9_SIFCR_RDIL_1);
/* initial settings */
sio_out(up, TXX9_SILCR,
TXX9_SILCR_UMODE_8BIT | TXX9_SILCR_USBL_1BIT |
((up->flags & UPF_TXX9_USE_SCLK) ?
TXX9_SILCR_SCS_SCLK_BG : TXX9_SILCR_SCS_IMCLK_BG));
sio_quot_set(up, uart_get_divisor(up, 9600));
sio_out(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSTL_MAX /* 15 */);
sio_out(up, TXX9_SIDICR, 0);
}
static inline void
receive_chars(struct uart_port *up, unsigned int *status)
{
unsigned int disr = *status;
int max_count = 256;
unsigned int next_ignore_status_mask;
u8 ch, flag;
do {
ch = sio_in(up, TXX9_SIRFIFO);
flag = TTY_NORMAL;
up->icount.rx++;
/* mask out RFDN_MASK bit added by previous overrun */
next_ignore_status_mask =
up->ignore_status_mask & ~TXX9_SIDISR_RFDN_MASK;
if (unlikely(disr & (TXX9_SIDISR_UBRK | TXX9_SIDISR_UPER |
TXX9_SIDISR_UFER | TXX9_SIDISR_UOER))) {
/*
* For statistics only
*/
if (disr & TXX9_SIDISR_UBRK) {
disr &= ~(TXX9_SIDISR_UFER | TXX9_SIDISR_UPER);
up->icount.brk++;
/*
* We do the SysRQ and SAK checking
* here because otherwise the break
* may get masked by ignore_status_mask
* or read_status_mask.
*/
if (uart_handle_break(up))
goto ignore_char;
} else if (disr & TXX9_SIDISR_UPER)
up->icount.parity++;
else if (disr & TXX9_SIDISR_UFER)
up->icount.frame++;
if (disr & TXX9_SIDISR_UOER) {
up->icount.overrun++;
				/*
				 * The receiver read buffer still holds
				 * the char which caused the overrun.
				 * Ignore the next char by temporarily
				 * adding RFDN_MASK to ignore_status_mask.
				 */
next_ignore_status_mask |=
TXX9_SIDISR_RFDN_MASK;
}
			/*
			 * Mask off conditions which should be ignored.
			 */
disr &= up->read_status_mask;
if (disr & TXX9_SIDISR_UBRK) {
flag = TTY_BREAK;
} else if (disr & TXX9_SIDISR_UPER)
flag = TTY_PARITY;
else if (disr & TXX9_SIDISR_UFER)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(up, ch))
goto ignore_char;
uart_insert_char(up, disr, TXX9_SIDISR_UOER, ch, flag);
ignore_char:
up->ignore_status_mask = next_ignore_status_mask;
disr = sio_in(up, TXX9_SIDISR);
} while (!(disr & TXX9_SIDISR_UVALID) && (max_count-- > 0));
tty_flip_buffer_push(&up->state->port);
*status = disr;
}
static inline void transmit_chars(struct uart_port *up)
{
u8 ch;
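	/*
	 * Push up to one TX FIFO's worth (TXX9_SIO_TX_FIFO = 8 chars) from
	 * the xmit buffer; the "true" argument means the FIFO is assumed
	 * ready for a full burst, and the empty ({}) is a no-op "done" hook.
	 */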
uart_port_tx_limited(up, ch, TXX9_SIO_TX_FIFO,
true,
sio_out(up, TXX9_SITFIFO, ch),
({}));
}
static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
{
int pass_counter = 0;
struct uart_port *up = dev_id;
unsigned int status;
while (1) {
spin_lock(&up->lock);
status = sio_in(up, TXX9_SIDISR);
if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE))
status &= ~TXX9_SIDISR_TDIS;
if (!(status & (TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
TXX9_SIDISR_TOUT))) {
spin_unlock(&up->lock);
break;
}
if (status & TXX9_SIDISR_RDIS)
receive_chars(up, &status);
if (status & TXX9_SIDISR_TDIS)
transmit_chars(up);
/* Clear TX/RX Int. Status */
sio_mask(up, TXX9_SIDISR,
TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
TXX9_SIDISR_TOUT);
spin_unlock(&up->lock);
if (pass_counter++ > PASS_LIMIT)
break;
}
return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}
static unsigned int serial_txx9_tx_empty(struct uart_port *up)
{
unsigned long flags;
unsigned int ret;
spin_lock_irqsave(&up->lock, flags);
ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->lock, flags);
return ret;
}
static unsigned int serial_txx9_get_mctrl(struct uart_port *up)
{
unsigned int ret;
/* no modem control lines */
ret = TIOCM_CAR | TIOCM_DSR;
ret |= (sio_in(up, TXX9_SIFLCR) & TXX9_SIFLCR_RTSSC) ? 0 : TIOCM_RTS;
ret |= (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS) ? 0 : TIOCM_CTS;
return ret;
}
static void serial_txx9_set_mctrl(struct uart_port *up, unsigned int mctrl)
{
if (mctrl & TIOCM_RTS)
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
else
sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
}
static void serial_txx9_break_ctl(struct uart_port *up, int break_state)
{
unsigned long flags;
spin_lock_irqsave(&up->lock, flags);
if (break_state == -1)
sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
else
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
spin_unlock_irqrestore(&up->lock, flags);
}
#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/*
* Wait for transmitter & holding register to empty
*/
static void wait_for_xmitr(struct uart_port *up)
{
unsigned int tmout = 10000;
/* Wait up to 10ms for the character(s) to be sent. */
while (--tmout &&
!(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
udelay(1);
/* Wait up to 1s for flow control if necessary */
if (up->flags & UPF_CONS_FLOW) {
tmout = 1000000;
while (--tmout &&
(sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
udelay(1);
}
}
#endif
#ifdef CONFIG_CONSOLE_POLL
/*
* Console polling routines for writing and reading from the uart while
* in an interrupt or debug context.
*/
static int serial_txx9_get_poll_char(struct uart_port *up)
{
unsigned int ier;
unsigned char c;
/*
* First save the IER then disable the interrupts
*/
ier = sio_in(up, TXX9_SIDICR);
sio_out(up, TXX9_SIDICR, 0);
while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID)
;
c = sio_in(up, TXX9_SIRFIFO);
/*
* Finally, clear RX interrupt status
* and restore the IER
*/
sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS);
sio_out(up, TXX9_SIDICR, ier);
return c;
}
static void serial_txx9_put_poll_char(struct uart_port *up, unsigned char c)
{
unsigned int ier;
/*
* First save the IER then disable the interrupts
*/
ier = sio_in(up, TXX9_SIDICR);
sio_out(up, TXX9_SIDICR, 0);
wait_for_xmitr(up);
/*
* Send the character out.
*/
sio_out(up, TXX9_SITFIFO, c);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
sio_out(up, TXX9_SIDICR, ier);
}
#endif /* CONFIG_CONSOLE_POLL */
static int serial_txx9_startup(struct uart_port *up)
{
unsigned long flags;
int retval;
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
*/
sio_set(up, TXX9_SIFCR,
TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
/* clear reset */
sio_mask(up, TXX9_SIFCR,
TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
sio_out(up, TXX9_SIDICR, 0);
/*
* Clear the interrupt registers.
*/
sio_out(up, TXX9_SIDISR, 0);
retval = request_irq(up->irq, serial_txx9_interrupt,
IRQF_SHARED, "serial_txx9", up);
if (retval)
return retval;
/*
* Now, initialize the UART
*/
spin_lock_irqsave(&up->lock, flags);
serial_txx9_set_mctrl(up, up->mctrl);
spin_unlock_irqrestore(&up->lock, flags);
/* Enable RX/TX */
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
/*
* Finally, enable interrupts.
*/
sio_set(up, TXX9_SIDICR, TXX9_SIDICR_RIE);
return 0;
}
static void serial_txx9_shutdown(struct uart_port *up)
{
unsigned long flags;
/*
* Disable interrupts from this port
*/
sio_out(up, TXX9_SIDICR, 0); /* disable all intrs */
spin_lock_irqsave(&up->lock, flags);
serial_txx9_set_mctrl(up, up->mctrl);
spin_unlock_irqrestore(&up->lock, flags);
/*
* Disable break condition
*/
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
if (up->cons && up->line == up->cons->index) {
free_irq(up->irq, up);
return;
}
#endif
/* reset FIFOs */
sio_set(up, TXX9_SIFCR,
TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
/* clear reset */
sio_mask(up, TXX9_SIFCR,
TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
/* Disable RX/TX */
sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
free_irq(up->irq, up);
}
static void
serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios,
const struct ktermios *old)
{
unsigned int cval, fcr = 0;
unsigned long flags;
unsigned int baud, quot;
/*
* We don't support modem control lines.
*/
termios->c_cflag &= ~(HUPCL | CMSPAR);
termios->c_cflag |= CLOCAL;
cval = sio_in(up, TXX9_SILCR);
/* byte size and parity */
cval &= ~TXX9_SILCR_UMODE_MASK;
switch (termios->c_cflag & CSIZE) {
case CS7:
cval |= TXX9_SILCR_UMODE_7BIT;
break;
default:
case CS5: /* not supported */
case CS6: /* not supported */
case CS8:
cval |= TXX9_SILCR_UMODE_8BIT;
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= CS8;
break;
}
cval &= ~TXX9_SILCR_USBL_MASK;
if (termios->c_cflag & CSTOPB)
cval |= TXX9_SILCR_USBL_2BIT;
else
cval |= TXX9_SILCR_USBL_1BIT;
cval &= ~(TXX9_SILCR_UPEN | TXX9_SILCR_UEPS);
if (termios->c_cflag & PARENB)
cval |= TXX9_SILCR_UPEN;
if (!(termios->c_cflag & PARODD))
cval |= TXX9_SILCR_UEPS;
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(up, termios, old, 0, up->uartclk/16/2);
quot = uart_get_divisor(up, baud);
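	/*
	 * The uartclk/16/2 cap above reflects the smallest usable divisor:
	 * quot must be at least 2 so that sio_quot_set()'s quot >> 1 stays
	 * non-zero. E.g. with the 66670000 Hz PCI uartclk the ceiling is
	 * 66670000 / 32 = 2083437 baud.
	 */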
/* Set up FIFOs */
/* TX Int by FIFO Empty, RX Int by Receiving 1 char. */
fcr = TXX9_SIFCR_TDIL_MAX | TXX9_SIFCR_RDIL_1;
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
spin_lock_irqsave(&up->lock, flags);
/*
* Update the per-port timeout.
*/
uart_update_timeout(up, termios->c_cflag, baud);
up->read_status_mask = TXX9_SIDISR_UOER |
TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS;
if (termios->c_iflag & INPCK)
up->read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
up->read_status_mask |= TXX9_SIDISR_UBRK;
	/*
	 * Characters to ignore
	 */
up->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
up->ignore_status_mask |= TXX9_SIDISR_UPER | TXX9_SIDISR_UFER;
if (termios->c_iflag & IGNBRK) {
up->ignore_status_mask |= TXX9_SIDISR_UBRK;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
up->ignore_status_mask |= TXX9_SIDISR_UOER;
}
/*
* ignore all characters if CREAD is not set
*/
if ((termios->c_cflag & CREAD) == 0)
up->ignore_status_mask |= TXX9_SIDISR_RDIS;
/* CTS flow control flag */
if ((termios->c_cflag & CRTSCTS) &&
(up->flags & UPF_TXX9_HAVE_CTS_LINE)) {
sio_set(up, TXX9_SIFLCR,
TXX9_SIFLCR_RCS | TXX9_SIFLCR_TES);
} else {
sio_mask(up, TXX9_SIFLCR,
TXX9_SIFLCR_RCS | TXX9_SIFLCR_TES);
}
sio_out(up, TXX9_SILCR, cval);
sio_quot_set(up, quot);
sio_out(up, TXX9_SIFCR, fcr);
serial_txx9_set_mctrl(up, up->mctrl);
spin_unlock_irqrestore(&up->lock, flags);
}
static void
serial_txx9_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
/*
* If oldstate was -1 this is called from
* uart_configure_port(). In this case do not initialize the
* port now, because the port was already initialized (for
* non-console port) or should not be initialized here (for
* console port). If we initialized the port here we lose
* serial console settings.
*/
if (state == 0 && oldstate != -1)
serial_txx9_initialize(port);
}
static int serial_txx9_request_resource(struct uart_port *up)
{
unsigned int size = TXX9_REGION_SIZE;
int ret = 0;
switch (up->iotype) {
default:
if (!up->mapbase)
break;
if (!request_mem_region(up->mapbase, size, "serial_txx9")) {
ret = -EBUSY;
break;
}
if (up->flags & UPF_IOREMAP) {
up->membase = ioremap(up->mapbase, size);
if (!up->membase) {
release_mem_region(up->mapbase, size);
ret = -ENOMEM;
}
}
break;
case UPIO_PORT:
if (!request_region(up->iobase, size, "serial_txx9"))
ret = -EBUSY;
break;
}
return ret;
}
static void serial_txx9_release_resource(struct uart_port *up)
{
unsigned int size = TXX9_REGION_SIZE;
switch (up->iotype) {
default:
if (!up->mapbase)
break;
if (up->flags & UPF_IOREMAP) {
iounmap(up->membase);
up->membase = NULL;
}
release_mem_region(up->mapbase, size);
break;
case UPIO_PORT:
release_region(up->iobase, size);
break;
}
}
static void serial_txx9_release_port(struct uart_port *up)
{
serial_txx9_release_resource(up);
}
static int serial_txx9_request_port(struct uart_port *up)
{
return serial_txx9_request_resource(up);
}
static void serial_txx9_config_port(struct uart_port *up, int uflags)
{
int ret;
/*
* Find the region that we can probe for. This in turn
* tells us whether we can probe for the type of port.
*/
ret = serial_txx9_request_resource(up);
if (ret < 0)
return;
up->type = PORT_TXX9;
up->fifosize = TXX9_SIO_TX_FIFO;
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
if (up->line == up->cons->index)
return;
#endif
serial_txx9_initialize(up);
}
static const char *
serial_txx9_type(struct uart_port *port)
{
return "txx9";
}
static const struct uart_ops serial_txx9_pops = {
.tx_empty = serial_txx9_tx_empty,
.set_mctrl = serial_txx9_set_mctrl,
.get_mctrl = serial_txx9_get_mctrl,
.stop_tx = serial_txx9_stop_tx,
.start_tx = serial_txx9_start_tx,
.stop_rx = serial_txx9_stop_rx,
.break_ctl = serial_txx9_break_ctl,
.startup = serial_txx9_startup,
.shutdown = serial_txx9_shutdown,
.set_termios = serial_txx9_set_termios,
.pm = serial_txx9_pm,
.type = serial_txx9_type,
.release_port = serial_txx9_release_port,
.request_port = serial_txx9_request_port,
.config_port = serial_txx9_config_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = serial_txx9_get_poll_char,
.poll_put_char = serial_txx9_put_poll_char,
#endif
};
static struct uart_port serial_txx9_ports[UART_NR];
static void __init serial_txx9_register_ports(struct uart_driver *drv,
struct device *dev)
{
int i;
for (i = 0; i < UART_NR; i++) {
struct uart_port *up = &serial_txx9_ports[i];
up->line = i;
up->ops = &serial_txx9_pops;
up->dev = dev;
if (up->iobase || up->mapbase)
uart_add_one_port(drv, up);
}
}
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
static void serial_txx9_console_putchar(struct uart_port *up, unsigned char ch)
{
wait_for_xmitr(up);
sio_out(up, TXX9_SITFIFO, ch);
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
* The console_lock must be held when we get here.
*/
static void
serial_txx9_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_port *up = &serial_txx9_ports[co->index];
unsigned int ier, flcr;
	/*
	 * First save the IER then disable the interrupts
	 */
ier = sio_in(up, TXX9_SIDICR);
sio_out(up, TXX9_SIDICR, 0);
/*
* Disable flow-control if enabled (and unnecessary)
*/
flcr = sio_in(up, TXX9_SIFLCR);
if (!(up->flags & UPF_CONS_FLOW) && (flcr & TXX9_SIFLCR_TES))
sio_out(up, TXX9_SIFLCR, flcr & ~TXX9_SIFLCR_TES);
uart_console_write(up, s, count, serial_txx9_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
sio_out(up, TXX9_SIFLCR, flcr);
sio_out(up, TXX9_SIDICR, ier);
}
static int __init serial_txx9_console_setup(struct console *co, char *options)
{
struct uart_port *up;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
* console support.
*/
if (co->index >= UART_NR)
co->index = 0;
up = &serial_txx9_ports[co->index];
if (!up->ops)
return -ENODEV;
serial_txx9_initialize(up);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(up, co, baud, parity, bits, flow);
}
static struct uart_driver serial_txx9_reg;
static struct console serial_txx9_console = {
.name = TXX9_TTY_NAME,
.write = serial_txx9_console_write,
.device = uart_console_device,
.setup = serial_txx9_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &serial_txx9_reg,
};
static int __init serial_txx9_console_init(void)
{
register_console(&serial_txx9_console);
return 0;
}
console_initcall(serial_txx9_console_init);
#define SERIAL_TXX9_CONSOLE &serial_txx9_console
#else
#define SERIAL_TXX9_CONSOLE NULL
#endif
static struct uart_driver serial_txx9_reg = {
.owner = THIS_MODULE,
.driver_name = "serial_txx9",
.dev_name = TXX9_TTY_NAME,
.major = TXX9_TTY_MAJOR,
.minor = TXX9_TTY_MINOR_START,
.nr = UART_NR,
.cons = SERIAL_TXX9_CONSOLE,
};
int __init early_serial_txx9_setup(struct uart_port *port)
{
if (port->line >= ARRAY_SIZE(serial_txx9_ports))
return -ENODEV;
serial_txx9_ports[port->line] = *port;
serial_txx9_ports[port->line].ops = &serial_txx9_pops;
serial_txx9_ports[port->line].flags |=
UPF_BOOT_AUTOCONF | UPF_FIXED_PORT;
return 0;
}
static DEFINE_MUTEX(serial_txx9_mutex);
/**
* serial_txx9_register_port - register a serial port
* @port: serial port template
*
* Configure the serial port specified by the request.
*
* The port is then probed and if necessary the IRQ is autodetected
* If this fails an error is returned.
*
* On success the port is ready to use and the line number is returned.
*/
static int serial_txx9_register_port(struct uart_port *port)
{
int i;
struct uart_port *uart;
int ret = -ENOSPC;
mutex_lock(&serial_txx9_mutex);
for (i = 0; i < UART_NR; i++) {
uart = &serial_txx9_ports[i];
if (uart_match_port(uart, port)) {
uart_remove_one_port(&serial_txx9_reg, uart);
break;
}
}
if (i == UART_NR) {
/* Find unused port */
for (i = 0; i < UART_NR; i++) {
uart = &serial_txx9_ports[i];
if (!(uart->iobase || uart->mapbase))
break;
}
}
if (i < UART_NR) {
uart->iobase = port->iobase;
uart->membase = port->membase;
uart->irq = port->irq;
uart->uartclk = port->uartclk;
uart->iotype = port->iotype;
uart->flags = port->flags
| UPF_BOOT_AUTOCONF | UPF_FIXED_PORT;
uart->mapbase = port->mapbase;
if (port->dev)
uart->dev = port->dev;
ret = uart_add_one_port(&serial_txx9_reg, uart);
if (ret == 0)
ret = uart->line;
}
mutex_unlock(&serial_txx9_mutex);
return ret;
}
/**
* serial_txx9_unregister_port - remove a txx9 serial port at runtime
* @line: serial line number
*
* Remove one serial port. This may not be called from interrupt
 * context. We hand the port back to our control.
*/
static void serial_txx9_unregister_port(int line)
{
struct uart_port *uart = &serial_txx9_ports[line];
mutex_lock(&serial_txx9_mutex);
uart_remove_one_port(&serial_txx9_reg, uart);
uart->flags = 0;
uart->type = PORT_UNKNOWN;
uart->iobase = 0;
uart->mapbase = 0;
uart->membase = NULL;
uart->dev = NULL;
mutex_unlock(&serial_txx9_mutex);
}
/*
* Register a set of serial devices attached to a platform device.
*/
static int serial_txx9_probe(struct platform_device *dev)
{
struct uart_port *p = dev_get_platdata(&dev->dev);
struct uart_port port;
int ret, i;
memset(&port, 0, sizeof(struct uart_port));
for (i = 0; p && p->uartclk != 0; p++, i++) {
port.iobase = p->iobase;
port.membase = p->membase;
port.irq = p->irq;
port.uartclk = p->uartclk;
port.iotype = p->iotype;
port.flags = p->flags;
port.mapbase = p->mapbase;
port.dev = &dev->dev;
port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_TXX9_CONSOLE);
ret = serial_txx9_register_port(&port);
if (ret < 0) {
dev_err(&dev->dev, "unable to register port at index %d "
"(IO%lx MEM%llx IRQ%d): %d\n", i,
p->iobase, (unsigned long long)p->mapbase,
p->irq, ret);
}
}
return 0;
}
/*
* Remove serial ports registered against a platform device.
*/
static int serial_txx9_remove(struct platform_device *dev)
{
int i;
for (i = 0; i < UART_NR; i++) {
struct uart_port *up = &serial_txx9_ports[i];
if (up->dev == &dev->dev)
serial_txx9_unregister_port(i);
}
return 0;
}
#ifdef CONFIG_PM
static int serial_txx9_suspend(struct platform_device *dev, pm_message_t state)
{
int i;
for (i = 0; i < UART_NR; i++) {
struct uart_port *up = &serial_txx9_ports[i];
if (up->type != PORT_UNKNOWN && up->dev == &dev->dev)
uart_suspend_port(&serial_txx9_reg, up);
}
return 0;
}
static int serial_txx9_resume(struct platform_device *dev)
{
int i;
for (i = 0; i < UART_NR; i++) {
struct uart_port *up = &serial_txx9_ports[i];
if (up->type != PORT_UNKNOWN && up->dev == &dev->dev)
uart_resume_port(&serial_txx9_reg, up);
}
return 0;
}
#endif
static struct platform_driver serial_txx9_plat_driver = {
.probe = serial_txx9_probe,
.remove = serial_txx9_remove,
#ifdef CONFIG_PM
.suspend = serial_txx9_suspend,
.resume = serial_txx9_resume,
#endif
.driver = {
.name = "serial_txx9",
},
};
#ifdef ENABLE_SERIAL_TXX9_PCI
/*
* Probe one serial board. Unfortunately, there is no rhyme nor reason
* to the arrangement of serial ports on a PCI card.
*/
static int
pciserial_txx9_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct uart_port port;
int line;
int rc;
rc = pci_enable_device(dev);
if (rc)
return rc;
memset(&port, 0, sizeof(port));
port.ops = &serial_txx9_pops;
port.flags |= UPF_TXX9_HAVE_CTS_LINE;
port.uartclk = 66670000;
port.irq = dev->irq;
port.iotype = UPIO_PORT;
port.iobase = pci_resource_start(dev, 1);
port.dev = &dev->dev;
line = serial_txx9_register_port(&port);
if (line < 0) {
printk(KERN_WARNING "Couldn't register serial port %s: %d\n", pci_name(dev), line);
pci_disable_device(dev);
return line;
}
pci_set_drvdata(dev, &serial_txx9_ports[line]);
return 0;
}
static void pciserial_txx9_remove_one(struct pci_dev *dev)
{
struct uart_port *up = pci_get_drvdata(dev);
if (up) {
serial_txx9_unregister_port(up->line);
pci_disable_device(dev);
}
}
#ifdef CONFIG_PM
static int pciserial_txx9_suspend_one(struct pci_dev *dev, pm_message_t state)
{
struct uart_port *up = pci_get_drvdata(dev);
if (up)
uart_suspend_port(&serial_txx9_reg, up);
pci_save_state(dev);
pci_set_power_state(dev, pci_choose_state(dev, state));
return 0;
}
static int pciserial_txx9_resume_one(struct pci_dev *dev)
{
struct uart_port *up = pci_get_drvdata(dev);
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
if (up)
uart_resume_port(&serial_txx9_reg, up);
return 0;
}
#endif
static const struct pci_device_id serial_txx9_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC) },
{ 0, }
};
static struct pci_driver serial_txx9_pci_driver = {
.name = "serial_txx9",
.probe = pciserial_txx9_init_one,
.remove = pciserial_txx9_remove_one,
#ifdef CONFIG_PM
.suspend = pciserial_txx9_suspend_one,
.resume = pciserial_txx9_resume_one,
#endif
.id_table = serial_txx9_pci_tbl,
};
MODULE_DEVICE_TABLE(pci, serial_txx9_pci_tbl);
#endif /* ENABLE_SERIAL_TXX9_PCI */
static struct platform_device *serial_txx9_plat_devs;
static int __init serial_txx9_init(void)
{
int ret;
ret = uart_register_driver(&serial_txx9_reg);
if (ret)
goto out;
serial_txx9_plat_devs = platform_device_alloc("serial_txx9", -1);
if (!serial_txx9_plat_devs) {
ret = -ENOMEM;
goto unreg_uart_drv;
}
ret = platform_device_add(serial_txx9_plat_devs);
if (ret)
goto put_dev;
serial_txx9_register_ports(&serial_txx9_reg,
&serial_txx9_plat_devs->dev);
ret = platform_driver_register(&serial_txx9_plat_driver);
if (ret)
goto del_dev;
#ifdef ENABLE_SERIAL_TXX9_PCI
ret = pci_register_driver(&serial_txx9_pci_driver);
if (ret) {
platform_driver_unregister(&serial_txx9_plat_driver);
}
#endif
if (ret == 0)
goto out;
del_dev:
platform_device_del(serial_txx9_plat_devs);
put_dev:
platform_device_put(serial_txx9_plat_devs);
unreg_uart_drv:
uart_unregister_driver(&serial_txx9_reg);
out:
return ret;
}
static void __exit serial_txx9_exit(void)
{
int i;
#ifdef ENABLE_SERIAL_TXX9_PCI
pci_unregister_driver(&serial_txx9_pci_driver);
#endif
platform_driver_unregister(&serial_txx9_plat_driver);
platform_device_unregister(serial_txx9_plat_devs);
for (i = 0; i < UART_NR; i++) {
struct uart_port *up = &serial_txx9_ports[i];
if (up->iobase || up->mapbase)
uart_remove_one_port(&serial_txx9_reg, up);
}
uart_unregister_driver(&serial_txx9_reg);
}
module_init(serial_txx9_init);
module_exit(serial_txx9_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TX39/49 serial driver");
MODULE_ALIAS_CHARDEV_MAJOR(TXX9_TTY_MAJOR);
| linux-master | drivers/tty/serial/serial_txx9.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for OMAP-UART controller.
* Based on drivers/serial/8250.c
*
* Copyright (C) 2010 Texas Instruments.
*
* Authors:
* Govindraj R <[email protected]>
* Thara Gopinath <[email protected]>
*
 * Note: This driver is kept separate from the 8250 driver because we
 * cannot overload the 8250 driver with omap platform-specific
 * configuration for features like DMA; a separate driver makes it
 * easier to implement DMA and hardware/software flow control
 * configuration as required for the omap platform.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/serial_core.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_data/serial-omap.h>
#define OMAP_MAX_HSUART_PORTS 10
#define UART_BUILD_REVISION(x, y) (((x) << 8) | (y))
#define OMAP_UART_REV_42 0x0402
#define OMAP_UART_REV_46 0x0406
#define OMAP_UART_REV_52 0x0502
#define OMAP_UART_REV_63 0x0603
#define OMAP_UART_TX_WAKEUP_EN BIT(7)
/* Feature flags */
#define OMAP_UART_WER_HAS_TX_WAKEUP BIT(0)
#define UART_ERRATA_i202_MDR1_ACCESS BIT(0)
#define UART_ERRATA_i291_DMA_FORCEIDLE BIT(1)
#define DEFAULT_CLK_SPEED 48000000 /* 48 MHz */
/* SCR register bitmasks */
#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK (1 << 7)
#define OMAP_UART_SCR_TX_TRIG_GRANU1_MASK (1 << 6)
#define OMAP_UART_SCR_TX_EMPTY (1 << 3)
/* FCR register bitmasks */
#define OMAP_UART_FCR_RX_FIFO_TRIG_MASK (0x3 << 6)
#define OMAP_UART_FCR_TX_FIFO_TRIG_MASK (0x3 << 4)
/* MVR register bitmasks */
#define OMAP_UART_MVR_SCHEME_SHIFT 30
#define OMAP_UART_LEGACY_MVR_MAJ_MASK 0xf0
#define OMAP_UART_LEGACY_MVR_MAJ_SHIFT 4
#define OMAP_UART_LEGACY_MVR_MIN_MASK 0x0f
#define OMAP_UART_MVR_MAJ_MASK 0x700
#define OMAP_UART_MVR_MAJ_SHIFT 8
#define OMAP_UART_MVR_MIN_MASK 0x3f
#define OMAP_UART_DMA_CH_FREE -1
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
#define OMAP_MODE13X_SPEED 230400
/* WER = 0x7F
* Enable module level wakeup in WER reg
*/
#define OMAP_UART_WER_MOD_WKUP 0x7F
/* Enable XON/XOFF flow control on output */
#define OMAP_UART_SW_TX 0x08
/* Enable XON/XOFF flow control on input */
#define OMAP_UART_SW_RX 0x02
#define OMAP_UART_SW_CLR 0xF0
#define OMAP_UART_TCR_TRIG 0x0F
struct uart_omap_dma {
u8 uart_dma_tx;
u8 uart_dma_rx;
int rx_dma_channel;
int tx_dma_channel;
dma_addr_t rx_buf_dma_phys;
dma_addr_t tx_buf_dma_phys;
unsigned int uart_base;
/*
* Buffer for rx dma. It is not required for tx because the buffer
* comes from port structure.
*/
unsigned char *rx_buf;
unsigned int prev_rx_dma_pos;
int tx_buf_size;
int tx_dma_used;
int rx_dma_used;
spinlock_t tx_lock;
spinlock_t rx_lock;
/* timer to poll activity on rx dma */
struct timer_list rx_timer;
unsigned int rx_buf_size;
unsigned int rx_poll_rate;
unsigned int rx_timeout;
};
struct uart_omap_port {
struct uart_port port;
struct uart_omap_dma uart_dma;
struct device *dev;
int wakeirq;
unsigned char ier;
unsigned char lcr;
unsigned char mcr;
unsigned char fcr;
unsigned char efr;
unsigned char dll;
unsigned char dlh;
unsigned char mdr1;
unsigned char scr;
unsigned char wer;
int use_dma;
/*
* Some bits in registers are cleared on a read, so they must
* be saved whenever the register is read, but the bits will not
* be immediately processed.
*/
unsigned int lsr_break_flag;
unsigned char msr_saved_flags;
char name[20];
unsigned long port_activity;
int context_loss_cnt;
u32 errata;
u32 features;
struct gpio_desc *rts_gpiod;
struct pm_qos_request pm_qos_request;
u32 latency;
u32 calc_latency;
struct work_struct qos_work;
bool is_suspending;
unsigned int rs485_tx_filter_count;
};
#define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
/* Forward declaration of functions */
static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1);
static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
{
offset <<= up->port.regshift;
return readw(up->port.membase + offset);
}
static inline void serial_out(struct uart_omap_port *up, int offset, int value)
{
offset <<= up->port.regshift;
writew(value, up->port.membase + offset);
}
static inline void serial_omap_clear_fifos(struct uart_omap_port *up)
{
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(up, UART_FCR, 0);
}
#ifdef CONFIG_PM
static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
{
struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
if (!pdata || !pdata->get_context_loss_count)
return -EINVAL;
return pdata->get_context_loss_count(up->dev);
}
/* REVISIT: Remove this when omap3 boots in device tree only mode */
static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
{
struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
if (!pdata || !pdata->enable_wakeup)
return;
pdata->enable_wakeup(up->dev, enable);
}
#endif /* CONFIG_PM */
/*
* Calculate the absolute difference between the desired and actual baud
* rate for the given mode.
*/
static inline int calculate_baud_abs_diff(struct uart_port *port,
unsigned int baud, unsigned int mode)
{
unsigned int n = port->uartclk / (mode * baud);
if (n == 0)
n = 1;
return abs_diff(baud, port->uartclk / (mode * n));
}
/*
* serial_omap_baud_is_mode16 - check if baud rate is MODE16X
* @port: uart port info
* @baud: baudrate for which mode needs to be determined
*
* Returns true if baud rate is MODE16X and false if MODE13X
* Original table in OMAP TRM named "UART Mode Baud Rates, Divisor Values,
* and Error Rates" determines modes not for all common baud rates.
* E.g. for 1000000 baud rate mode must be 16x, but according to that
* table it's determined as 13x.
*/
static bool
serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
{
int abs_diff_13 = calculate_baud_abs_diff(port, baud, 13);
int abs_diff_16 = calculate_baud_abs_diff(port, baud, 16);
return (abs_diff_13 >= abs_diff_16);
}
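/*
 * Worked example, assuming uartclk = 48 MHz (DEFAULT_CLK_SPEED): for
 * baud = 3686400, 16x mode gives n = 48000000 / (16 * 3686400) = 0,
 * clamped to 1, i.e. an actual rate of 3000000 (error 686400); 13x mode
 * gives n = 1 and an actual rate of 48000000 / 13 = 3692307 (error 5907),
 * so 13x mode wins.
 */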
/*
* serial_omap_get_divisor - calculate divisor value
* @port: uart port info
* @baud: baudrate for which divisor needs to be calculated.
*/
static unsigned int
serial_omap_get_divisor(struct uart_port *port, unsigned int baud)
{
unsigned int mode;
if (!serial_omap_baud_is_mode16(port, baud))
mode = 13;
else
mode = 16;
return port->uartclk/(mode * baud);
}
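/*
 * E.g. with a 48 MHz uartclk at 115200 baud (a 16x-mode rate), the
 * divisor is 48000000 / (16 * 115200) = 26, later split into
 * dll = 26, dlh = 0 by set_termios().
 */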
static void serial_omap_enable_ms(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->port.line);
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
}
static void serial_omap_stop_tx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
int res;
/* Handle RS-485 */
if (port->rs485.flags & SER_RS485_ENABLED) {
if (up->scr & OMAP_UART_SCR_TX_EMPTY) {
/* THR interrupt is fired when both TX FIFO and TX
* shift register are empty. This means there's nothing
* left to transmit now, so make sure the THR interrupt
* is fired when TX FIFO is below the trigger level,
* disable THR interrupts and toggle the RS-485 GPIO
* data direction pin if needed.
*/
up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
1 : 0;
if (gpiod_get_value(up->rts_gpiod) != res) {
if (port->rs485.delay_rts_after_send > 0)
					mdelay(port->rs485.delay_rts_after_send);
gpiod_set_value(up->rts_gpiod, res);
}
} else {
/* We're asked to stop, but there's still stuff in the
* UART FIFO, so make sure the THR interrupt is fired
* when both TX FIFO and TX shift register are empty.
* The next THR interrupt (if no transmission is started
* in the meantime) will indicate the end of a
* transmission. Therefore we _don't_ disable THR
* interrupts in this situation.
*/
up->scr |= OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
return;
}
}
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
}
static void serial_omap_stop_rx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
}
static void serial_omap_put_char(struct uart_omap_port *up, unsigned char ch)
{
serial_out(up, UART_TX, ch);
if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
!(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
up->rs485_tx_filter_count++;
}
static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
{
u8 ch;
uart_port_tx_limited(&up->port, ch, up->port.fifosize / 4,
true,
serial_omap_put_char(up, ch),
({}));
}
static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up)
{
if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
}
static void serial_omap_start_tx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
int res;
/* Handle RS-485 */
if (port->rs485.flags & SER_RS485_ENABLED) {
/* Fire THR interrupts when FIFO is below trigger level */
up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
/* if rts not already enabled */
res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
if (gpiod_get_value(up->rts_gpiod) != res) {
gpiod_set_value(up->rts_gpiod, res);
if (port->rs485.delay_rts_before_send > 0)
mdelay(port->rs485.delay_rts_before_send);
}
}
if ((port->rs485.flags & SER_RS485_ENABLED) &&
!(port->rs485.flags & SER_RS485_RX_DURING_TX))
up->rs485_tx_filter_count = 0;
serial_omap_enable_ier_thri(up);
}
static void serial_omap_throttle(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
serial_out(up, UART_IER, up->ier);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static void serial_omap_unthrottle(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static unsigned int check_modem_status(struct uart_omap_port *up)
{
unsigned int status;
status = serial_in(up, UART_MSR);
status |= up->msr_saved_flags;
up->msr_saved_flags = 0;
if ((status & UART_MSR_ANY_DELTA) == 0)
return status;
if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
up->port.state != NULL) {
if (status & UART_MSR_TERI)
up->port.icount.rng++;
if (status & UART_MSR_DDSR)
up->port.icount.dsr++;
		if (status & UART_MSR_DDCD)
			uart_handle_dcd_change(&up->port,
					       status & UART_MSR_DCD);
		if (status & UART_MSR_DCTS)
			uart_handle_cts_change(&up->port,
					       status & UART_MSR_CTS);
wake_up_interruptible(&up->port.state->port.delta_msr_wait);
}
return status;
}
static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
{
u8 flag;
/*
* Read one data character out to avoid stalling the receiver according
* to the table 23-246 of the omap4 TRM.
*/
if (likely(lsr & UART_LSR_DR)) {
serial_in(up, UART_RX);
if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
!(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
up->rs485_tx_filter_count)
up->rs485_tx_filter_count--;
}
up->port.icount.rx++;
flag = TTY_NORMAL;
if (lsr & UART_LSR_BI) {
flag = TTY_BREAK;
lsr &= ~(UART_LSR_FE | UART_LSR_PE);
up->port.icount.brk++;
/*
* We do the SysRQ and SAK checking
* here because otherwise the break
* may get masked by ignore_status_mask
* or read_status_mask.
*/
if (uart_handle_break(&up->port))
return;
}
if (lsr & UART_LSR_PE) {
flag = TTY_PARITY;
up->port.icount.parity++;
}
if (lsr & UART_LSR_FE) {
flag = TTY_FRAME;
up->port.icount.frame++;
}
if (lsr & UART_LSR_OE)
up->port.icount.overrun++;
#ifdef CONFIG_SERIAL_OMAP_CONSOLE
if (up->port.line == up->port.cons->index) {
/* Recover the break flag from console xmit */
lsr |= up->lsr_break_flag;
}
#endif
uart_insert_char(&up->port, lsr, UART_LSR_OE, 0, flag);
}
static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
{
u8 ch;
if (!(lsr & UART_LSR_DR))
return;
ch = serial_in(up, UART_RX);
if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
!(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
up->rs485_tx_filter_count) {
up->rs485_tx_filter_count--;
return;
}
up->port.icount.rx++;
if (uart_handle_sysrq_char(&up->port, ch))
return;
uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, TTY_NORMAL);
}
/**
* serial_omap_irq() - This handles the interrupt from one port
* @irq: uart port irq number
* @dev_id: uart port info
*/
static irqreturn_t serial_omap_irq(int irq, void *dev_id)
{
struct uart_omap_port *up = dev_id;
unsigned int iir, lsr;
unsigned int type;
irqreturn_t ret = IRQ_NONE;
int max_count = 256;
spin_lock(&up->port.lock);
do {
iir = serial_in(up, UART_IIR);
if (iir & UART_IIR_NO_INT)
break;
ret = IRQ_HANDLED;
lsr = serial_in(up, UART_LSR);
/* extract IRQ type from IIR register */
type = iir & 0x3e;
switch (type) {
case UART_IIR_MSI:
check_modem_status(up);
break;
case UART_IIR_THRI:
transmit_chars(up, lsr);
break;
case UART_IIR_RX_TIMEOUT:
case UART_IIR_RDI:
serial_omap_rdi(up, lsr);
break;
case UART_IIR_RLSI:
serial_omap_rlsi(up, lsr);
break;
case UART_IIR_CTS_RTS_DSR:
/* simply try again */
break;
case UART_IIR_XOFF:
default:
break;
}
} while (max_count--);
spin_unlock(&up->port.lock);
tty_flip_buffer_push(&up->port.state->port);
up->port_activity = jiffies;
return ret;
}
static unsigned int serial_omap_tx_empty(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
unsigned int ret = 0;
dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
spin_lock_irqsave(&up->port.lock, flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->port.lock, flags);
return ret;
}
static unsigned int serial_omap_get_mctrl(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int status;
unsigned int ret = 0;
status = check_modem_status(up);
dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->port.line);
if (status & UART_MSR_DCD)
ret |= TIOCM_CAR;
if (status & UART_MSR_RI)
ret |= TIOCM_RNG;
if (status & UART_MSR_DSR)
ret |= TIOCM_DSR;
if (status & UART_MSR_CTS)
ret |= TIOCM_CTS;
return ret;
}
static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char mcr = 0, old_mcr, lcr;
dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->port.line);
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (mctrl & TIOCM_DTR)
mcr |= UART_MCR_DTR;
if (mctrl & TIOCM_OUT1)
mcr |= UART_MCR_OUT1;
if (mctrl & TIOCM_OUT2)
mcr |= UART_MCR_OUT2;
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
old_mcr = serial_in(up, UART_MCR);
old_mcr &= ~(UART_MCR_LOOP | UART_MCR_OUT2 | UART_MCR_OUT1 |
UART_MCR_DTR | UART_MCR_RTS);
up->mcr = old_mcr | mcr;
serial_out(up, UART_MCR, up->mcr);
/* Turn off autoRTS if RTS is lowered; restore autoRTS if RTS raised */
lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
up->efr |= UART_EFR_RTS;
else
up->efr &= ~UART_EFR_RTS;
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, lcr);
}
static void serial_omap_break_ctl(struct uart_port *port, int break_state)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static int serial_omap_startup(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
int retval;
/*
* Allocate the IRQ
*/
retval = request_irq(up->port.irq, serial_omap_irq, up->port.irqflags,
up->name, up);
if (retval)
return retval;
/* Optional wake-up IRQ */
if (up->wakeirq) {
retval = dev_pm_set_dedicated_wake_irq(up->dev, up->wakeirq);
if (retval) {
free_irq(up->port.irq, up);
return retval;
}
}
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
*/
serial_omap_clear_fifos(up);
/*
* Clear the interrupt registers.
*/
(void) serial_in(up, UART_LSR);
if (serial_in(up, UART_LSR) & UART_LSR_DR)
(void) serial_in(up, UART_RX);
(void) serial_in(up, UART_IIR);
(void) serial_in(up, UART_MSR);
/*
* Now, initialize the UART
*/
serial_out(up, UART_LCR, UART_LCR_WLEN8);
spin_lock_irqsave(&up->port.lock, flags);
/*
* Most PC uarts need OUT2 raised to enable interrupts.
*/
up->port.mctrl |= TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
up->msr_saved_flags = 0;
/*
* Finally, enable interrupts. Note: Modem status interrupts
* are set via set_termios(), which will be occurring imminently
* anyway, so we don't enable them here.
*/
up->ier = UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
/* Enable module level wake up */
up->wer = OMAP_UART_WER_MOD_WKUP;
if (up->features & OMAP_UART_WER_HAS_TX_WAKEUP)
up->wer |= OMAP_UART_TX_WAKEUP_EN;
serial_out(up, UART_OMAP_WER, up->wer);
up->port_activity = jiffies;
return 0;
}
static void serial_omap_shutdown(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->port.line);
/*
* Disable interrupts from this port
*/
up->ier = 0;
serial_out(up, UART_IER, 0);
spin_lock_irqsave(&up->port.lock, flags);
up->port.mctrl &= ~TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
/*
* Disable break condition and FIFOs
*/
serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
serial_omap_clear_fifos(up);
/*
* Read data port to reset things, and then free the irq
*/
if (serial_in(up, UART_LSR) & UART_LSR_DR)
(void) serial_in(up, UART_RX);
pm_runtime_put_sync(up->dev);
free_irq(up->port.irq, up);
dev_pm_clear_wake_irq(up->dev);
}
static void serial_omap_uart_qos_work(struct work_struct *work)
{
struct uart_omap_port *up = container_of(work, struct uart_omap_port,
qos_work);
cpu_latency_qos_update_request(&up->pm_qos_request, up->latency);
}
static void
serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char cval = 0;
unsigned long flags;
unsigned int baud, quot;
cval = UART_LCR_WLEN(tty_get_char_size(termios->c_cflag));
if (termios->c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
if (termios->c_cflag & PARENB)
cval |= UART_LCR_PARITY;
if (!(termios->c_cflag & PARODD))
cval |= UART_LCR_EPAR;
if (termios->c_cflag & CMSPAR)
cval |= UART_LCR_SPAR;
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
quot = serial_omap_get_divisor(port, baud);
/* calculate wakeup latency constraint */
up->calc_latency = (USEC_PER_SEC * up->port.fifosize) / (baud / 8);
up->latency = up->calc_latency;
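	/*
	 * Illustrative figures: a 64-byte FIFO at 115200 baud moves
	 * 115200 / 8 = 14400 chars/s, so draining the FIFO takes about
	 * 64 * 1000000 / 14400 = 4444 us, which becomes the wakeup
	 * latency constraint.
	 */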
schedule_work(&up->qos_work);
up->dll = quot & 0xff;
up->dlh = quot >> 8;
up->mdr1 = UART_OMAP_MDR1_DISABLE;
up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
UART_FCR_ENABLE_FIFO;
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
spin_lock_irqsave(&up->port.lock, flags);
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (BRKINT | PARMRK))
up->port.read_status_mask |= UART_LSR_BI;
/*
* Characters to ignore
*/
up->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
if (termios->c_iflag & IGNBRK) {
up->port.ignore_status_mask |= UART_LSR_BI;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_OE;
}
/*
* ignore all characters if CREAD is not set
*/
if ((termios->c_cflag & CREAD) == 0)
up->port.ignore_status_mask |= UART_LSR_DR;
/*
* Modem status interrupts
*/
up->ier &= ~UART_IER_MSI;
if (UART_ENABLE_MS(&up->port, termios->c_cflag))
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval); /* reset DLAB */
up->lcr = cval;
up->scr = 0;
/* FIFOs and DMA Settings */
/* FCR can be changed only when the
* baud clock is not running
* DLL_REG and DLH_REG set to 0.
*/
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_DLL, 0);
serial_out(up, UART_DLM, 0);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR) & ~UART_EFR_ECB;
up->efr &= ~UART_EFR_SCD;
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
up->mcr = serial_in(up, UART_MCR) & ~UART_MCR_TCRTLR;
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
/* FIFO ENABLE, DMA MODE */
up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
	/*
	 * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK enables a
	 * granularity of 1 for the RX TRIGGER level. Together with the
	 * RX FIFO trigger level of 16 characters set below and TLR[3:0]
	 * set to zero, this results in an RX FIFO threshold of 1
	 * character instead of the 16 noted in the comment below.
	 */
/* Set receive FIFO threshold to 16 characters and
* transmit FIFO threshold to 32 spaces
*/
up->fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK;
up->fcr &= ~OMAP_UART_FCR_TX_FIFO_TRIG_MASK;
up->fcr |= UART_FCR6_R_TRIGGER_16 | UART_FCR6_T_TRIGGER_24 |
UART_FCR_ENABLE_FIFO;
serial_out(up, UART_FCR, up->fcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_OMAP_SCR, up->scr);
/* Reset UART_MCR_TCRTLR: this must be done with the EFR_ECB bit set */
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
/* Protocol, Baud Rate, and Interrupt Settings */
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_DLL, up->dll); /* LS of divisor */
serial_out(up, UART_DLM, up->dlh); /* MS of divisor */
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, cval);
if (!serial_omap_baud_is_mode16(port, baud))
up->mdr1 = UART_OMAP_MDR1_13X_MODE;
else
up->mdr1 = UART_OMAP_MDR1_16X_MODE;
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);
/* Configure flow control */
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
/* XON1/XOFF1 are accessible in mode B when TCRTLR=0, ECB=0 */
serial_out(up, UART_XON1, termios->c_cc[VSTART]);
serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
/* Enable access to TCR/TLR */
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
/* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
up->efr |= UART_EFR_CTS;
} else {
/* Disable AUTORTS and AUTOCTS */
up->efr &= ~(UART_EFR_CTS | UART_EFR_RTS);
}
if (up->port.flags & UPF_SOFT_FLOW) {
/* clear SW control mode bits */
up->efr &= OMAP_UART_SW_CLR;
/*
* IXON Flag:
* Enable XON/XOFF flow control on input.
* Receiver compares XON1, XOFF1.
*/
if (termios->c_iflag & IXON)
up->efr |= OMAP_UART_SW_RX;
/*
* IXOFF Flag:
* Enable XON/XOFF flow control on output.
* Transmit XON1, XOFF1
*/
if (termios->c_iflag & IXOFF) {
up->port.status |= UPSTAT_AUTOXOFF;
up->efr |= OMAP_UART_SW_TX;
}
/*
* IXANY Flag:
* Enable any character to restart output.
* Operation resumes after receiving any
* character after recognition of the XOFF character
*/
if (termios->c_iflag & IXANY)
up->mcr |= UART_MCR_XONANY;
else
up->mcr &= ~UART_MCR_XONANY;
}
serial_out(up, UART_MCR, up->mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, up->lcr);
serial_omap_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}
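/*
 * Explanatory aside (added comment, not part of the original driver):
 * per include/uapi/linux/serial_reg.h, UART_LCR_CONF_MODE_A is
 * UART_LCR_DLAB (0x80) and UART_LCR_CONF_MODE_B is 0xBF. Writing these
 * magic values to LCR switches the OMAP UART between register banks,
 * e.g.:
 *
 *	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); // EFR visible
 *	serial_out(up, UART_EFR, up->efr | UART_EFR_ECB); // unlock enhanced fns
 *	serial_out(up, UART_LCR, cval); // back to operational mode
 *
 * which is why set_termios above keeps rewriting LCR between accesses.
 */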
static void
serial_omap_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char efr;
dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, efr | UART_EFR_ECB);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
}
static void serial_omap_release_port(struct uart_port *port)
{
dev_dbg(port->dev, "serial_omap_release_port+\n");
}
static int serial_omap_request_port(struct uart_port *port)
{
dev_dbg(port->dev, "serial_omap_request_port+\n");
return 0;
}
static void serial_omap_config_port(struct uart_port *port, int flags)
{
struct uart_omap_port *up = to_uart_omap_port(port);
dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
up->port.line);
up->port.type = PORT_OMAP;
up->port.flags |= UPF_SOFT_FLOW | UPF_HARD_FLOW;
}
static int
serial_omap_verify_port(struct uart_port *port, struct serial_struct *ser)
{
/* we don't want the core code to modify any port params */
dev_dbg(port->dev, "serial_omap_verify_port+\n");
return -EINVAL;
}
static const char *
serial_omap_type(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->port.line);
return up->name;
}
static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
{
unsigned int status, tmout = 10000;
/* Wait up to 10ms for the character(s) to be sent. */
do {
status = serial_in(up, UART_LSR);
if (status & UART_LSR_BI)
up->lsr_break_flag = UART_LSR_BI;
if (--tmout == 0)
break;
udelay(1);
} while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
for (tmout = 1000000; tmout; tmout--) {
unsigned int msr = serial_in(up, UART_MSR);
up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
if (msr & UART_MSR_CTS)
break;
udelay(1);
}
}
}
#ifdef CONFIG_CONSOLE_POLL
static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
{
struct uart_omap_port *up = to_uart_omap_port(port);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
}
static int serial_omap_poll_get_char(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int status;
status = serial_in(up, UART_LSR);
if (!(status & UART_LSR_DR)) {
status = NO_POLL_CHAR;
goto out;
}
status = serial_in(up, UART_RX);
out:
return status;
}
#endif /* CONFIG_CONSOLE_POLL */
#ifdef CONFIG_SERIAL_OMAP_CONSOLE
#ifdef CONFIG_SERIAL_EARLYCON
static unsigned int omap_serial_early_in(struct uart_port *port, int offset)
{
offset <<= port->regshift;
return readw(port->membase + offset);
}
static void omap_serial_early_out(struct uart_port *port, int offset,
int value)
{
offset <<= port->regshift;
writew(value, port->membase + offset);
}
static void omap_serial_early_putc(struct uart_port *port, unsigned char c)
{
unsigned int status;
for (;;) {
status = omap_serial_early_in(port, UART_LSR);
if (uart_lsr_tx_empty(status))
break;
cpu_relax();
}
omap_serial_early_out(port, UART_TX, c);
}
static void early_omap_serial_write(struct console *console, const char *s,
unsigned int count)
{
struct earlycon_device *device = console->data;
struct uart_port *port = &device->port;
uart_console_write(port, s, count, omap_serial_early_putc);
}
static int __init early_omap_serial_setup(struct earlycon_device *device,
const char *options)
{
struct uart_port *port = &device->port;
if (!(device->port.membase || device->port.iobase))
return -ENODEV;
port->regshift = 2;
device->con->write = early_omap_serial_write;
return 0;
}
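/*
 * Worked example (added for clarity): with port->regshift = 2, the
 * byte offset of each 8250-style register is (offset << 2), matching
 * the 32-bit-spaced OMAP register file:
 *
 *	omap_serial_early_in(port, UART_LSR); // reads membase + (5 << 2) = 0x14
 */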
OF_EARLYCON_DECLARE(omapserial, "ti,omap2-uart", early_omap_serial_setup);
OF_EARLYCON_DECLARE(omapserial, "ti,omap3-uart", early_omap_serial_setup);
OF_EARLYCON_DECLARE(omapserial, "ti,omap4-uart", early_omap_serial_setup);
#endif /* CONFIG_SERIAL_EARLYCON */
static struct uart_omap_port *serial_omap_console_ports[OMAP_MAX_HSUART_PORTS];
static struct uart_driver serial_omap_reg;
static void serial_omap_console_putchar(struct uart_port *port, unsigned char ch)
{
struct uart_omap_port *up = to_uart_omap_port(port);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
}
static void
serial_omap_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_omap_port *up = serial_omap_console_ports[co->index];
unsigned long flags;
unsigned int ier;
int locked = 1;
local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock(&up->port.lock);
else
spin_lock(&up->port.lock);
/*
* First save the IER then disable the interrupts
*/
ier = serial_in(up, UART_IER);
serial_out(up, UART_IER, 0);
uart_console_write(&up->port, s, count, serial_omap_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
serial_out(up, UART_IER, ier);
/*
 * The receive handling will happen properly because the
 * receive-ready bit will still be set; it is not cleared
 * on read. Modem status changes, however, are not
 * re-delivered, so we must check them ourselves if we
 * saved anything in msr_saved_flags while running with
 * interrupts off.
 */
if (up->msr_saved_flags)
check_modem_status(up);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
}
static int __init
serial_omap_console_setup(struct console *co, char *options)
{
struct uart_omap_port *up;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (serial_omap_console_ports[co->index] == NULL)
return -ENODEV;
up = serial_omap_console_ports[co->index];
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
static struct console serial_omap_console = {
.name = OMAP_SERIAL_NAME,
.write = serial_omap_console_write,
.device = uart_console_device,
.setup = serial_omap_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &serial_omap_reg,
};
static void serial_omap_add_console_port(struct uart_omap_port *up)
{
serial_omap_console_ports[up->port.line] = up;
}
#define OMAP_CONSOLE (&serial_omap_console)
#else
#define OMAP_CONSOLE NULL
static inline void serial_omap_add_console_port(struct uart_omap_port *up)
{}
#endif
/* Enable or disable the rs485 support */
static int
serial_omap_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int mode;
int val;
/* Disable interrupts from this port */
mode = up->ier;
up->ier = 0;
serial_out(up, UART_IER, 0);
/* enable / disable rts */
val = (rs485->flags & SER_RS485_ENABLED) ?
SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
val = (rs485->flags & val) ? 1 : 0;
gpiod_set_value(up->rts_gpiod, val);
/* Enable interrupts */
up->ier = mode;
serial_out(up, UART_IER, up->ier);
/* If RS-485 is disabled, make sure the THR interrupt is fired when
* TX FIFO is below the trigger level.
*/
if (!(rs485->flags & SER_RS485_ENABLED) &&
(up->scr & OMAP_UART_SCR_TX_EMPTY)) {
up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
}
return 0;
}
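/*
 * Illustrative walk-through (added comment, not from the datasheet):
 * with the default device tree (no "rs485-rts-active-high"), probe
 * sets SER_RS485_RTS_AFTER_SEND. Enabling RS-485 here then evaluates
 *
 *	val = rs485->flags & SER_RS485_RTS_AFTER_SEND; // non-zero -> 1
 *
 * so the RTS GPIO idles at logical 1, while disabling RS-485 tests
 * SER_RS485_RTS_ON_SEND instead and drives the GPIO to 0.
 */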
static const struct uart_ops serial_omap_pops = {
.tx_empty = serial_omap_tx_empty,
.set_mctrl = serial_omap_set_mctrl,
.get_mctrl = serial_omap_get_mctrl,
.stop_tx = serial_omap_stop_tx,
.start_tx = serial_omap_start_tx,
.throttle = serial_omap_throttle,
.unthrottle = serial_omap_unthrottle,
.stop_rx = serial_omap_stop_rx,
.enable_ms = serial_omap_enable_ms,
.break_ctl = serial_omap_break_ctl,
.startup = serial_omap_startup,
.shutdown = serial_omap_shutdown,
.set_termios = serial_omap_set_termios,
.pm = serial_omap_pm,
.type = serial_omap_type,
.release_port = serial_omap_release_port,
.request_port = serial_omap_request_port,
.config_port = serial_omap_config_port,
.verify_port = serial_omap_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_put_char = serial_omap_poll_put_char,
.poll_get_char = serial_omap_poll_get_char,
#endif
};
static struct uart_driver serial_omap_reg = {
.owner = THIS_MODULE,
.driver_name = "OMAP-SERIAL",
.dev_name = OMAP_SERIAL_NAME,
.nr = OMAP_MAX_HSUART_PORTS,
.cons = OMAP_CONSOLE,
};
#ifdef CONFIG_PM_SLEEP
static int serial_omap_prepare(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
up->is_suspending = true;
return 0;
}
static void serial_omap_complete(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
up->is_suspending = false;
}
static int serial_omap_suspend(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
uart_suspend_port(&serial_omap_reg, &up->port);
flush_work(&up->qos_work);
if (device_may_wakeup(dev))
serial_omap_enable_wakeup(up, true);
else
serial_omap_enable_wakeup(up, false);
return 0;
}
static int serial_omap_resume(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
serial_omap_enable_wakeup(up, false);
uart_resume_port(&serial_omap_reg, &up->port);
return 0;
}
#else
#define serial_omap_prepare NULL
#define serial_omap_complete NULL
#endif /* CONFIG_PM_SLEEP */
static void omap_serial_fill_features_erratas(struct uart_omap_port *up)
{
u32 mvr, scheme;
u16 revision, major, minor;
mvr = readl(up->port.membase + (UART_OMAP_MVER << up->port.regshift));
/* Check revision register scheme */
scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT;
switch (scheme) {
case 0: /* Legacy Scheme: OMAP2/3 */
/* MINOR_REV[0:4], MAJOR_REV[4:7] */
major = (mvr & OMAP_UART_LEGACY_MVR_MAJ_MASK) >>
OMAP_UART_LEGACY_MVR_MAJ_SHIFT;
minor = (mvr & OMAP_UART_LEGACY_MVR_MIN_MASK);
break;
case 1:
/* New Scheme: OMAP4+ */
/* MINOR_REV[0:5], MAJOR_REV[8:10] */
major = (mvr & OMAP_UART_MVR_MAJ_MASK) >>
OMAP_UART_MVR_MAJ_SHIFT;
minor = (mvr & OMAP_UART_MVR_MIN_MASK);
break;
default:
dev_warn(up->dev,
"Unknown %s revision, defaulting to highest\n",
up->name);
/* highest possible revision */
major = 0xff;
minor = 0xff;
}
/* normalize revision for the driver */
revision = UART_BUILD_REVISION(major, minor);
switch (revision) {
case OMAP_UART_REV_46:
up->errata |= (UART_ERRATA_i202_MDR1_ACCESS |
UART_ERRATA_i291_DMA_FORCEIDLE);
break;
case OMAP_UART_REV_52:
up->errata |= (UART_ERRATA_i202_MDR1_ACCESS |
UART_ERRATA_i291_DMA_FORCEIDLE);
up->features |= OMAP_UART_WER_HAS_TX_WAKEUP;
break;
case OMAP_UART_REV_63:
up->errata |= UART_ERRATA_i202_MDR1_ACCESS;
up->features |= OMAP_UART_WER_HAS_TX_WAKEUP;
break;
default:
break;
}
}
static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
{
struct omap_uart_port_info *omap_up_info;
omap_up_info = devm_kzalloc(dev, sizeof(*omap_up_info), GFP_KERNEL);
if (!omap_up_info)
return NULL; /* out of memory */
of_property_read_u32(dev->of_node, "clock-frequency",
&omap_up_info->uartclk);
omap_up_info->flags = UPF_BOOT_AUTOCONF;
return omap_up_info;
}
static int serial_omap_probe_rs485(struct uart_omap_port *up,
struct device *dev)
{
struct serial_rs485 *rs485conf = &up->port.rs485;
struct device_node *np = dev->of_node;
enum gpiod_flags gflags;
int ret;
rs485conf->flags = 0;
up->rts_gpiod = NULL;
if (!np)
return 0;
ret = uart_get_rs485_mode(&up->port);
if (ret)
return ret;
if (of_property_read_bool(np, "rs485-rts-active-high")) {
rs485conf->flags |= SER_RS485_RTS_ON_SEND;
rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
} else {
rs485conf->flags &= ~SER_RS485_RTS_ON_SEND;
rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
}
/* check for tx enable gpio */
gflags = rs485conf->flags & SER_RS485_RTS_AFTER_SEND ?
GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
up->rts_gpiod = devm_gpiod_get_optional(dev, "rts", gflags);
if (IS_ERR(up->rts_gpiod)) {
ret = PTR_ERR(up->rts_gpiod);
if (ret == -EPROBE_DEFER)
return ret;
up->rts_gpiod = NULL;
up->port.rs485_supported = (const struct serial_rs485) { };
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_err(dev, "disabling RS-485 (rts-gpio missing in device tree)\n");
memset(rs485conf, 0, sizeof(*rs485conf));
}
} else {
gpiod_set_consumer_name(up->rts_gpiod, "omap-serial");
}
return 0;
}
static const struct serial_rs485 serial_omap_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
SER_RS485_RX_DURING_TX,
.delay_rts_before_send = 1,
.delay_rts_after_send = 1,
};
static int serial_omap_probe(struct platform_device *pdev)
{
struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
struct uart_omap_port *up;
struct resource *mem;
void __iomem *base;
int uartirq = 0;
int wakeirq = 0;
int ret;
/* The optional wakeirq may be specified in the board dts file */
if (pdev->dev.of_node) {
uartirq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (!uartirq)
return -EPROBE_DEFER;
wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
omap_up_info = of_get_uart_port_info(&pdev->dev);
pdev->dev.platform_data = omap_up_info;
} else {
uartirq = platform_get_irq(pdev, 0);
if (uartirq < 0)
return -EPROBE_DEFER;
}
up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
if (!up)
return -ENOMEM;
base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(base))
return PTR_ERR(base);
up->dev = &pdev->dev;
up->port.dev = &pdev->dev;
up->port.type = PORT_OMAP;
up->port.iotype = UPIO_MEM;
up->port.irq = uartirq;
up->port.regshift = 2;
up->port.fifosize = 64;
up->port.ops = &serial_omap_pops;
up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_OMAP_CONSOLE);
if (pdev->dev.of_node)
ret = of_alias_get_id(pdev->dev.of_node, "serial");
else
ret = pdev->id;
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
ret);
goto err_port_line;
}
up->port.line = ret;
if (up->port.line >= OMAP_MAX_HSUART_PORTS) {
dev_err(&pdev->dev, "uart ID %d > MAX %d.\n", up->port.line,
OMAP_MAX_HSUART_PORTS);
ret = -ENXIO;
goto err_port_line;
}
up->wakeirq = wakeirq;
if (!up->wakeirq)
dev_info(up->port.dev, "no wakeirq for uart%d\n",
up->port.line);
ret = serial_omap_probe_rs485(up, &pdev->dev);
if (ret < 0)
goto err_rs485;
sprintf(up->name, "OMAP UART%d", up->port.line);
up->port.mapbase = mem->start;
up->port.membase = base;
up->port.flags = omap_up_info->flags;
up->port.uartclk = omap_up_info->uartclk;
up->port.rs485_config = serial_omap_config_rs485;
up->port.rs485_supported = serial_omap_rs485_supported;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
dev_warn(&pdev->dev,
"No clock speed specified: using default: %d\n",
DEFAULT_CLK_SPEED);
}
up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
up->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
cpu_latency_qos_add_request(&up->pm_qos_request, up->latency);
INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);
platform_set_drvdata(pdev, up);
if (omap_up_info->autosuspend_timeout == 0)
omap_up_info->autosuspend_timeout = -1;
device_init_wakeup(up->dev, true);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
omap_serial_fill_features_erratas(up);
ui[up->port.line] = up;
serial_omap_add_console_port(up);
ret = uart_add_one_port(&serial_omap_reg, &up->port);
if (ret != 0)
goto err_add_port;
return 0;
err_add_port:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
cpu_latency_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(up->dev, false);
err_rs485:
err_port_line:
return ret;
}
static int serial_omap_remove(struct platform_device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);
pm_runtime_get_sync(up->dev);
uart_remove_one_port(&serial_omap_reg, &up->port);
pm_runtime_put_sync(up->dev);
pm_runtime_disable(up->dev);
cpu_latency_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(&dev->dev, false);
return 0;
}
/*
 * Work around for errata i202 (2430, 3430, 3630, 4430 and 4460):
 * accessing a UART register right after an MDR1 access can cause
 * the UART to corrupt data.
 *
 * The required delay is 5 L4 clock cycles + 5 UART functional
 * clock cycles (~0.2 us at a 48 MHz functional clock); we wait
 * ten times that, hence the udelay(2) below.
 */
*/
static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1)
{
u8 timeout = 255;
serial_out(up, UART_OMAP_MDR1, mdr1);
udelay(2);
serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
UART_FCR_CLEAR_RCVR);
/*
* Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
* TX_FIFO_E bit is 1.
*/
while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
(UART_LSR_THRE | UART_LSR_DR))) {
timeout--;
if (!timeout) {
/* Should *never* happen; warn and carry on */
dev_crit(up->dev, "Errata i202: timed out %x\n",
serial_in(up, UART_LSR));
break;
}
udelay(1);
}
}
#ifdef CONFIG_PM
static void serial_omap_restore_context(struct uart_omap_port *up)
{
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, UART_OMAP_MDR1_DISABLE);
else
serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
serial_out(up, UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, 0x0); /* Operational mode */
serial_out(up, UART_IER, 0x0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
serial_out(up, UART_DLL, up->dll);
serial_out(up, UART_DLM, up->dlh);
serial_out(up, UART_LCR, 0x0); /* Operational mode */
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_FCR, up->fcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
serial_out(up, UART_OMAP_SCR, up->scr);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, up->lcr);
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);
serial_out(up, UART_OMAP_WER, up->wer);
}
static int serial_omap_runtime_suspend(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
if (!up)
return -EINVAL;
/*
 * When using 'no_console_suspend', the console UART must not be
 * suspended. Since driver suspend is managed by runtime suspend,
 * preventing runtime suspend (by returning an error) keeps the
 * device active during system suspend.
 */
if (up->is_suspending && !console_suspend_enabled &&
uart_console(&up->port))
return -EBUSY;
up->context_loss_cnt = serial_omap_get_context_loss_count(up);
serial_omap_enable_wakeup(up, true);
up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
schedule_work(&up->qos_work);
return 0;
}
static int serial_omap_runtime_resume(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
int loss_cnt = serial_omap_get_context_loss_count(up);
serial_omap_enable_wakeup(up, false);
if (loss_cnt < 0) {
dev_dbg(dev, "serial_omap_get_context_loss_count failed : %d\n",
loss_cnt);
serial_omap_restore_context(up);
} else if (up->context_loss_cnt != loss_cnt) {
serial_omap_restore_context(up);
}
up->latency = up->calc_latency;
schedule_work(&up->qos_work);
return 0;
}
#endif
static const struct dev_pm_ops serial_omap_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume)
SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend,
serial_omap_runtime_resume, NULL)
.prepare = serial_omap_prepare,
.complete = serial_omap_complete,
};
#if defined(CONFIG_OF)
static const struct of_device_id omap_serial_of_match[] = {
{ .compatible = "ti,omap2-uart" },
{ .compatible = "ti,omap3-uart" },
{ .compatible = "ti,omap4-uart" },
{},
};
MODULE_DEVICE_TABLE(of, omap_serial_of_match);
#endif
static struct platform_driver serial_omap_driver = {
.probe = serial_omap_probe,
.remove = serial_omap_remove,
.driver = {
.name = OMAP_SERIAL_DRIVER_NAME,
.pm = &serial_omap_dev_pm_ops,
.of_match_table = of_match_ptr(omap_serial_of_match),
},
};
static int __init serial_omap_init(void)
{
int ret;
ret = uart_register_driver(&serial_omap_reg);
if (ret != 0)
return ret;
ret = platform_driver_register(&serial_omap_driver);
if (ret != 0)
uart_unregister_driver(&serial_omap_reg);
return ret;
}
static void __exit serial_omap_exit(void)
{
platform_driver_unregister(&serial_omap_driver);
uart_unregister_driver(&serial_omap_reg);
}
module_init(serial_omap_init);
module_exit(serial_omap_exit);
MODULE_DESCRIPTION("OMAP High Speed UART driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");
| linux-master | drivers/tty/serial/omap-serial.c |
// SPDX-License-Identifier: GPL-2.0
/*
* LiteUART serial controller (LiteX) Driver
*
* Copyright (C) 2019-2020 Antmicro <www.antmicro.com>
*/
#include <linux/bits.h>
#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/litex.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/tty_flip.h>
#include <linux/xarray.h>
/*
* CSRs definitions (base address offsets + width)
*
* The definitions below are true for LiteX SoC configured for 8-bit CSR Bus,
* 32-bit aligned.
*
* Supporting other configurations might require new definitions or a more
* generic way of indexing the LiteX CSRs.
*
* For more details on how CSRs are defined and handled in LiteX, see comments
* in the LiteX SoC Driver: drivers/soc/litex/litex_soc_ctrl.c
*/
#define OFF_RXTX 0x00
#define OFF_TXFULL 0x04
#define OFF_RXEMPTY 0x08
#define OFF_EV_STATUS 0x0c
#define OFF_EV_PENDING 0x10
#define OFF_EV_ENABLE 0x14
/* events */
#define EV_TX BIT(0)
#define EV_RX BIT(1)
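/*
 * Worked example (added for clarity): with an 8-bit CSR bus aligned to
 * 32 bits, consecutive CSRs land 4 bytes apart, which is why the
 * offsets above step by 0x04. A register access is then simply:
 *
 *	u8 empty = litex_read8(port->membase + OFF_RXEMPTY); // base + 0x08
 */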
struct liteuart_port {
struct uart_port port;
struct timer_list timer;
u8 irq_reg;
};
#define to_liteuart_port(port) container_of(port, struct liteuart_port, port)
static DEFINE_XARRAY_FLAGS(liteuart_array, XA_FLAGS_ALLOC);
#ifdef CONFIG_SERIAL_LITEUART_CONSOLE
static struct console liteuart_console;
#endif
static struct uart_driver liteuart_driver = {
.owner = THIS_MODULE,
.driver_name = KBUILD_MODNAME,
.dev_name = "ttyLXU",
.major = 0,
.minor = 0,
.nr = CONFIG_SERIAL_LITEUART_MAX_PORTS,
#ifdef CONFIG_SERIAL_LITEUART_CONSOLE
.cons = &liteuart_console,
#endif
};
static void liteuart_update_irq_reg(struct uart_port *port, bool set, u8 mask)
{
struct liteuart_port *uart = to_liteuart_port(port);
if (set)
uart->irq_reg |= mask;
else
uart->irq_reg &= ~mask;
if (port->irq)
litex_write8(port->membase + OFF_EV_ENABLE, uart->irq_reg);
}
static void liteuart_stop_tx(struct uart_port *port)
{
liteuart_update_irq_reg(port, false, EV_TX);
}
static void liteuart_start_tx(struct uart_port *port)
{
liteuart_update_irq_reg(port, true, EV_TX);
}
static void liteuart_stop_rx(struct uart_port *port)
{
struct liteuart_port *uart = to_liteuart_port(port);
/* just delete timer */
del_timer(&uart->timer);
}
static void liteuart_rx_chars(struct uart_port *port)
{
unsigned char __iomem *membase = port->membase;
u8 ch;
while (!litex_read8(membase + OFF_RXEMPTY)) {
ch = litex_read8(membase + OFF_RXTX);
port->icount.rx++;
/* necessary for RXEMPTY to refresh its value */
litex_write8(membase + OFF_EV_PENDING, EV_RX);
/* no overflow bits in status */
if (!(uart_handle_sysrq_char(port, ch)))
uart_insert_char(port, 1, 0, ch, TTY_NORMAL);
}
tty_flip_buffer_push(&port->state->port);
}
static void liteuart_tx_chars(struct uart_port *port)
{
u8 ch;
uart_port_tx(port, ch,
!litex_read8(port->membase + OFF_TXFULL),
litex_write8(port->membase + OFF_RXTX, ch));
}
static irqreturn_t liteuart_interrupt(int irq, void *data)
{
struct liteuart_port *uart = data;
struct uart_port *port = &uart->port;
unsigned long flags;
u8 isr;
/*
* if polling, the context would be "in_serving_softirq", so use
* irq[save|restore] spin_lock variants to cover all possibilities
*/
spin_lock_irqsave(&port->lock, flags);
isr = litex_read8(port->membase + OFF_EV_PENDING) & uart->irq_reg;
if (isr & EV_RX)
liteuart_rx_chars(port);
if (isr & EV_TX)
liteuart_tx_chars(port);
spin_unlock_irqrestore(&port->lock, flags);
return IRQ_RETVAL(isr);
}
static void liteuart_timer(struct timer_list *t)
{
struct liteuart_port *uart = from_timer(uart, t, timer);
struct uart_port *port = &uart->port;
liteuart_interrupt(0, port);
mod_timer(&uart->timer, jiffies + uart_poll_timeout(port));
}
static unsigned int liteuart_tx_empty(struct uart_port *port)
{
/* not really tx empty, just checking if tx is not full */
if (!litex_read8(port->membase + OFF_TXFULL))
return TIOCSER_TEMT;
return 0;
}
static void liteuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
/* modem control register is not present in LiteUART */
}
static unsigned int liteuart_get_mctrl(struct uart_port *port)
{
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}
static int liteuart_startup(struct uart_port *port)
{
struct liteuart_port *uart = to_liteuart_port(port);
unsigned long flags;
int ret;
if (port->irq) {
ret = request_irq(port->irq, liteuart_interrupt, 0,
KBUILD_MODNAME, uart);
if (ret) {
dev_warn(port->dev,
"line %d irq %d failed: switch to polling\n",
port->line, port->irq);
port->irq = 0;
}
}
spin_lock_irqsave(&port->lock, flags);
/* only enabling rx irqs during startup */
liteuart_update_irq_reg(port, true, EV_RX);
spin_unlock_irqrestore(&port->lock, flags);
if (!port->irq) {
timer_setup(&uart->timer, liteuart_timer, 0);
mod_timer(&uart->timer, jiffies + uart_poll_timeout(port));
}
return 0;
}
static void liteuart_shutdown(struct uart_port *port)
{
struct liteuart_port *uart = to_liteuart_port(port);
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
liteuart_update_irq_reg(port, false, EV_RX | EV_TX);
spin_unlock_irqrestore(&port->lock, flags);
if (port->irq)
free_irq(port->irq, port);
else
del_timer_sync(&uart->timer);
}
static void liteuart_set_termios(struct uart_port *port, struct ktermios *new,
const struct ktermios *old)
{
unsigned int baud;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* update baudrate */
baud = uart_get_baud_rate(port, new, old, 0, 460800);
uart_update_timeout(port, new->c_cflag, baud);
spin_unlock_irqrestore(&port->lock, flags);
}
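/*
 * Note (added for clarity): LiteUART exposes no baud rate divisor CSR
 * in this register map; the baud rate is fixed when the gateware is
 * built. set_termios therefore only updates the serial core's timeout
 * bookkeeping via uart_update_timeout() and writes nothing to the
 * hardware.
 */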
static const char *liteuart_type(struct uart_port *port)
{
return "liteuart";
}
static void liteuart_config_port(struct uart_port *port, int flags)
{
/*
 * The serial core forces a non-zero value for the port type.
 * Write an arbitrary value here to accommodate it; the type ID
 * exposed through the UAPI is redundant for this driver.
 */
port->type = 1;
}
static int liteuart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
if (port->type != PORT_UNKNOWN && ser->type != 1)
return -EINVAL;
return 0;
}
static const struct uart_ops liteuart_ops = {
.tx_empty = liteuart_tx_empty,
.set_mctrl = liteuart_set_mctrl,
.get_mctrl = liteuart_get_mctrl,
.stop_tx = liteuart_stop_tx,
.start_tx = liteuart_start_tx,
.stop_rx = liteuart_stop_rx,
.startup = liteuart_startup,
.shutdown = liteuart_shutdown,
.set_termios = liteuart_set_termios,
.type = liteuart_type,
.config_port = liteuart_config_port,
.verify_port = liteuart_verify_port,
};
static int liteuart_probe(struct platform_device *pdev)
{
struct liteuart_port *uart;
struct uart_port *port;
struct xa_limit limit;
int dev_id, ret;
uart = devm_kzalloc(&pdev->dev, sizeof(struct liteuart_port), GFP_KERNEL);
if (!uart)
return -ENOMEM;
port = &uart->port;
/* get membase */
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
ret = platform_get_irq_optional(pdev, 0);
if (ret < 0 && ret != -ENXIO)
return ret;
if (ret > 0)
port->irq = ret;
/* look for an alias; auto-enumerate a free index if none is found */
dev_id = of_alias_get_id(pdev->dev.of_node, "serial");
if (dev_id < 0)
limit = XA_LIMIT(0, CONFIG_SERIAL_LITEUART_MAX_PORTS);
else
limit = XA_LIMIT(dev_id, dev_id);
ret = xa_alloc(&liteuart_array, &dev_id, uart, limit, GFP_KERNEL);
if (ret)
return ret;
/* values not from device tree */
port->dev = &pdev->dev;
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &liteuart_ops;
port->fifosize = 16;
port->type = PORT_UNKNOWN;
port->line = dev_id;
spin_lock_init(&port->lock);
platform_set_drvdata(pdev, port);
ret = uart_add_one_port(&liteuart_driver, &uart->port);
if (ret)
goto err_erase_id;
return 0;
err_erase_id:
xa_erase(&liteuart_array, dev_id);
return ret;
}
static int liteuart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
unsigned int line = port->line;
uart_remove_one_port(&liteuart_driver, port);
xa_erase(&liteuart_array, line);
return 0;
}
static const struct of_device_id liteuart_of_match[] = {
{ .compatible = "litex,liteuart" },
{}
};
MODULE_DEVICE_TABLE(of, liteuart_of_match);
static struct platform_driver liteuart_platform_driver = {
.probe = liteuart_probe,
.remove = liteuart_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = liteuart_of_match,
},
};
#ifdef CONFIG_SERIAL_LITEUART_CONSOLE
static void liteuart_putchar(struct uart_port *port, unsigned char ch)
{
while (litex_read8(port->membase + OFF_TXFULL))
cpu_relax();
litex_write8(port->membase + OFF_RXTX, ch);
}
static void liteuart_console_write(struct console *co, const char *s,
unsigned int count)
{
struct liteuart_port *uart;
struct uart_port *port;
unsigned long flags;
uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
port = &uart->port;
spin_lock_irqsave(&port->lock, flags);
uart_console_write(port, s, count, liteuart_putchar);
spin_unlock_irqrestore(&port->lock, flags);
}
static int liteuart_console_setup(struct console *co, char *options)
{
struct liteuart_port *uart;
struct uart_port *port;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
if (!uart)
return -ENODEV;
port = &uart->port;
if (!port->membase)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct console liteuart_console = {
.name = KBUILD_MODNAME,
.write = liteuart_console_write,
.device = uart_console_device,
.setup = liteuart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &liteuart_driver,
};
static int __init liteuart_console_init(void)
{
register_console(&liteuart_console);
return 0;
}
console_initcall(liteuart_console_init);
static void early_liteuart_write(struct console *console, const char *s,
unsigned int count)
{
struct earlycon_device *device = console->data;
struct uart_port *port = &device->port;
uart_console_write(port, s, count, liteuart_putchar);
}
static int __init early_liteuart_setup(struct earlycon_device *device,
const char *options)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = early_liteuart_write;
return 0;
}
OF_EARLYCON_DECLARE(liteuart, "litex,liteuart", early_liteuart_setup);
#endif /* CONFIG_SERIAL_LITEUART_CONSOLE */
static int __init liteuart_init(void)
{
int res;
res = uart_register_driver(&liteuart_driver);
if (res)
return res;
res = platform_driver_register(&liteuart_platform_driver);
if (res)
uart_unregister_driver(&liteuart_driver);
return res;
}
static void __exit liteuart_exit(void)
{
platform_driver_unregister(&liteuart_platform_driver);
uart_unregister_driver(&liteuart_driver);
}
module_init(liteuart_init);
module_exit(liteuart_exit);
MODULE_AUTHOR("Antmicro <www.antmicro.com>");
MODULE_DESCRIPTION("LiteUART serial driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:liteuart");
| linux-master | drivers/tty/serial/liteuart.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver
*
* Copyright (C) 2012-2016 Alexander Shiyan <[email protected]>
*
* Based on max3100.c, by Christian Pellegrin <[email protected]>
* Based on max3110.c, by Feng Tang <[email protected]>
* Based on max3107.c, by Aavamobile
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
#define MAX310X_NAME "max310x"
#define MAX310X_MAJOR 204
#define MAX310X_MINOR 209
#define MAX310X_UART_NRMAX 16
/* MAX310X register definitions */
#define MAX310X_RHR_REG (0x00) /* RX FIFO */
#define MAX310X_THR_REG (0x00) /* TX FIFO */
#define MAX310X_IRQEN_REG (0x01) /* IRQ enable */
#define MAX310X_IRQSTS_REG (0x02) /* IRQ status */
#define MAX310X_LSR_IRQEN_REG (0x03) /* LSR IRQ enable */
#define MAX310X_LSR_IRQSTS_REG (0x04) /* LSR IRQ status */
#define MAX310X_REG_05 (0x05)
#define MAX310X_SPCHR_IRQEN_REG MAX310X_REG_05 /* Special char IRQ en */
#define MAX310X_SPCHR_IRQSTS_REG (0x06) /* Special char IRQ status */
#define MAX310X_STS_IRQEN_REG (0x07) /* Status IRQ enable */
#define MAX310X_STS_IRQSTS_REG (0x08) /* Status IRQ status */
#define MAX310X_MODE1_REG (0x09) /* MODE1 */
#define MAX310X_MODE2_REG (0x0a) /* MODE2 */
#define MAX310X_LCR_REG (0x0b) /* LCR */
#define MAX310X_RXTO_REG (0x0c) /* RX timeout */
#define MAX310X_HDPIXDELAY_REG (0x0d) /* Auto transceiver delays */
#define MAX310X_IRDA_REG (0x0e) /* IRDA settings */
#define MAX310X_FLOWLVL_REG (0x0f) /* Flow control levels */
#define MAX310X_FIFOTRIGLVL_REG (0x10) /* FIFO IRQ trigger levels */
#define MAX310X_TXFIFOLVL_REG (0x11) /* TX FIFO level */
#define MAX310X_RXFIFOLVL_REG (0x12) /* RX FIFO level */
#define MAX310X_FLOWCTRL_REG (0x13) /* Flow control */
#define MAX310X_XON1_REG (0x14) /* XON1 character */
#define MAX310X_XON2_REG (0x15) /* XON2 character */
#define MAX310X_XOFF1_REG (0x16) /* XOFF1 character */
#define MAX310X_XOFF2_REG (0x17) /* XOFF2 character */
#define MAX310X_GPIOCFG_REG (0x18) /* GPIO config */
#define MAX310X_GPIODATA_REG (0x19) /* GPIO data */
#define MAX310X_PLLCFG_REG (0x1a) /* PLL config */
#define MAX310X_BRGCFG_REG (0x1b) /* Baud rate generator conf */
#define MAX310X_BRGDIVLSB_REG (0x1c) /* Baud rate divisor LSB */
#define MAX310X_BRGDIVMSB_REG (0x1d) /* Baud rate divisor MSB */
#define MAX310X_CLKSRC_REG (0x1e) /* Clock source */
#define MAX310X_REG_1F (0x1f)
#define MAX310X_REVID_REG MAX310X_REG_1F /* Revision ID */
#define MAX310X_GLOBALIRQ_REG MAX310X_REG_1F /* Global IRQ (RO) */
#define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */
/* Extended registers */
#define MAX310X_SPI_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
#define MAX310X_I2C_REVID_EXTREG (0x25) /* Revision ID */
/* IRQ register bits */
#define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
#define MAX310X_IRQ_SPCHR_BIT (1 << 1) /* Special char interrupt */
#define MAX310X_IRQ_STS_BIT (1 << 2) /* Status interrupt */
#define MAX310X_IRQ_RXFIFO_BIT (1 << 3) /* RX FIFO interrupt */
#define MAX310X_IRQ_TXFIFO_BIT (1 << 4) /* TX FIFO interrupt */
#define MAX310X_IRQ_TXEMPTY_BIT (1 << 5) /* TX FIFO empty interrupt */
#define MAX310X_IRQ_RXEMPTY_BIT (1 << 6) /* RX FIFO empty interrupt */
#define MAX310X_IRQ_CTS_BIT (1 << 7) /* CTS interrupt */
/* LSR register bits */
#define MAX310X_LSR_RXTO_BIT (1 << 0) /* RX timeout */
#define MAX310X_LSR_RXOVR_BIT (1 << 1) /* RX overrun */
#define MAX310X_LSR_RXPAR_BIT (1 << 2) /* RX parity error */
#define MAX310X_LSR_FRERR_BIT (1 << 3) /* Frame error */
#define MAX310X_LSR_RXBRK_BIT (1 << 4) /* RX break */
#define MAX310X_LSR_RXNOISE_BIT (1 << 5) /* RX noise */
#define MAX310X_LSR_CTS_BIT (1 << 7) /* CTS pin state */
/* Special character register bits */
#define MAX310X_SPCHR_XON1_BIT (1 << 0) /* XON1 character */
#define MAX310X_SPCHR_XON2_BIT (1 << 1) /* XON2 character */
#define MAX310X_SPCHR_XOFF1_BIT (1 << 2) /* XOFF1 character */
#define MAX310X_SPCHR_XOFF2_BIT (1 << 3) /* XOFF2 character */
#define MAX310X_SPCHR_BREAK_BIT (1 << 4) /* RX break */
#define MAX310X_SPCHR_MULTIDROP_BIT (1 << 5) /* 9-bit multidrop addr char */
/* Status register bits */
#define MAX310X_STS_GPIO0_BIT (1 << 0) /* GPIO 0 interrupt */
#define MAX310X_STS_GPIO1_BIT (1 << 1) /* GPIO 1 interrupt */
#define MAX310X_STS_GPIO2_BIT (1 << 2) /* GPIO 2 interrupt */
#define MAX310X_STS_GPIO3_BIT (1 << 3) /* GPIO 3 interrupt */
#define MAX310X_STS_CLKREADY_BIT (1 << 5) /* Clock ready */
#define MAX310X_STS_SLEEP_BIT (1 << 6) /* Sleep interrupt */
/* MODE1 register bits */
#define MAX310X_MODE1_RXDIS_BIT (1 << 0) /* RX disable */
#define MAX310X_MODE1_TXDIS_BIT (1 << 1) /* TX disable */
#define MAX310X_MODE1_TXHIZ_BIT (1 << 2) /* TX pin three-state */
#define MAX310X_MODE1_RTSHIZ_BIT (1 << 3) /* RTS pin three-state */
#define MAX310X_MODE1_TRNSCVCTRL_BIT (1 << 4) /* Transceiver ctrl enable */
#define MAX310X_MODE1_FORCESLEEP_BIT (1 << 5) /* Force sleep mode */
#define MAX310X_MODE1_AUTOSLEEP_BIT (1 << 6) /* Auto sleep enable */
#define MAX310X_MODE1_IRQSEL_BIT (1 << 7) /* IRQ pin enable */
/* MODE2 register bits */
#define MAX310X_MODE2_RST_BIT (1 << 0) /* Chip reset */
#define MAX310X_MODE2_FIFORST_BIT (1 << 1) /* FIFO reset */
#define MAX310X_MODE2_RXTRIGINV_BIT (1 << 2) /* RX FIFO INT invert */
#define MAX310X_MODE2_RXEMPTINV_BIT (1 << 3) /* RX FIFO empty INT invert */
#define MAX310X_MODE2_SPCHR_BIT (1 << 4) /* Special chr detect enable */
#define MAX310X_MODE2_LOOPBACK_BIT (1 << 5) /* Internal loopback enable */
#define MAX310X_MODE2_MULTIDROP_BIT (1 << 6) /* 9-bit multidrop enable */
#define MAX310X_MODE2_ECHOSUPR_BIT (1 << 7) /* ECHO suppression enable */
/* LCR register bits */
#define MAX310X_LCR_LENGTH0_BIT (1 << 0) /* Word length bit 0 */
#define MAX310X_LCR_LENGTH1_BIT (1 << 1) /* Word length bit 1
*
* Word length bits table:
* 00 -> 5 bit words
* 01 -> 6 bit words
* 10 -> 7 bit words
* 11 -> 8 bit words
*/
#define MAX310X_LCR_STOPLEN_BIT (1 << 2) /* STOP length bit
*
* STOP length bit table:
* 0 -> 1 stop bit
* 1 -> 1-1.5 stop bits if
* word length is 5,
* 2 stop bits otherwise
*/
#define MAX310X_LCR_PARITY_BIT (1 << 3) /* Parity bit enable */
#define MAX310X_LCR_EVENPARITY_BIT (1 << 4) /* Even parity bit enable */
#define MAX310X_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */
#define MAX310X_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */
#define MAX310X_LCR_RTS_BIT (1 << 7) /* RTS pin control */
/* IRDA register bits */
#define MAX310X_IRDA_IRDAEN_BIT (1 << 0) /* IRDA mode enable */
#define MAX310X_IRDA_SIR_BIT (1 << 1) /* SIR mode enable */
/* Flow control trigger level register masks */
#define MAX310X_FLOWLVL_HALT_MASK (0x000f) /* Flow control halt level */
#define MAX310X_FLOWLVL_RES_MASK (0x00f0) /* Flow control resume level */
#define MAX310X_FLOWLVL_HALT(words) ((words / 8) & 0x0f)
#define MAX310X_FLOWLVL_RES(words) (((words / 8) & 0x0f) << 4)
/* FIFO interrupt trigger level register masks */
#define MAX310X_FIFOTRIGLVL_TX_MASK (0x0f) /* TX FIFO trigger level */
#define MAX310X_FIFOTRIGLVL_RX_MASK (0xf0) /* RX FIFO trigger level */
#define MAX310X_FIFOTRIGLVL_TX(words) ((words / 8) & 0x0f)
#define MAX310X_FIFOTRIGLVL_RX(words) (((words / 8) & 0x0f) << 4)
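/* Worked examples (added for clarity): both register encodings count
 * FIFO words in units of 8, so e.g.
 *
 *	MAX310X_FLOWLVL_HALT(48) == 0x06 (48 / 8)
 *	MAX310X_FLOWLVL_RES(96) == 0xc0 ((96 / 8) << 4)
 *	MAX310X_FIFOTRIGLVL_RX(8) == 0x10 ((8 / 8) << 4)
 */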
/* Flow control register bits */
#define MAX310X_FLOWCTRL_AUTORTS_BIT (1 << 0) /* Auto RTS flow ctrl enable */
#define MAX310X_FLOWCTRL_AUTOCTS_BIT (1 << 1) /* Auto CTS flow ctrl enable */
#define MAX310X_FLOWCTRL_GPIADDR_BIT (1 << 2) /* GPIO inputs are used
 * in conjunction with
 * XOFF2 to define the
 * special character */
#define MAX310X_FLOWCTRL_SWFLOWEN_BIT (1 << 3) /* Auto SW flow ctrl enable */
#define MAX310X_FLOWCTRL_SWFLOW0_BIT (1 << 4) /* SWFLOW bit 0 */
#define MAX310X_FLOWCTRL_SWFLOW1_BIT (1 << 5) /* SWFLOW bit 1
*
* SWFLOW bits 1 & 0 table:
* 00 -> no transmitter flow
* control
* 01 -> receiver compares
* XON2 and XOFF2
* and controls
* transmitter
* 10 -> receiver compares
* XON1 and XOFF1
* and controls
* transmitter
* 11 -> receiver compares
* XON1, XON2, XOFF1 and
* XOFF2 and controls
* transmitter
*/
#define MAX310X_FLOWCTRL_SWFLOW2_BIT (1 << 6) /* SWFLOW bit 2 */
#define MAX310X_FLOWCTRL_SWFLOW3_BIT (1 << 7) /* SWFLOW bit 3
*
* SWFLOW bits 3 & 2 table:
* 00 -> no received flow
* control
* 01 -> transmitter generates
* XON2 and XOFF2
* 10 -> transmitter generates
* XON1 and XOFF1
* 11 -> transmitter generates
* XON1, XON2, XOFF1 and
* XOFF2
*/
/* PLL configuration register masks */
#define MAX310X_PLLCFG_PREDIV_MASK (0x3f) /* PLL predivision value */
#define MAX310X_PLLCFG_PLLFACTOR_MASK (0xc0) /* PLL multiplication factor */
/* Baud rate generator configuration register bits */
#define MAX310X_BRGCFG_2XMODE_BIT (1 << 4) /* Double baud rate */
#define MAX310X_BRGCFG_4XMODE_BIT (1 << 5) /* Quadruple baud rate */
/* Clock source register bits */
#define MAX310X_CLKSRC_CRYST_BIT (1 << 1) /* Crystal osc enable */
#define MAX310X_CLKSRC_PLL_BIT (1 << 2) /* PLL enable */
#define MAX310X_CLKSRC_PLLBYP_BIT (1 << 3) /* PLL bypass */
#define MAX310X_CLKSRC_EXTCLK_BIT (1 << 4) /* External clock enable */
#define MAX310X_CLKSRC_CLK2RTS_BIT (1 << 7) /* Baud clk to RTS pin */
/* Global commands */
#define MAX310X_EXTREG_ENBL (0xce)
#define MAX310X_EXTREG_DSBL (0xcd)
/* Misc definitions */
#define MAX310X_FIFO_SIZE (128)
#define MAX310x_REV_MASK (0xf8)
#define MAX310X_WRITE_BIT 0x80
/* MAX3107 specific */
#define MAX3107_REV_ID (0xa0)
/* MAX3109 specific */
#define MAX3109_REV_ID (0xc0)
/* MAX14830 specific */
#define MAX14830_BRGCFG_CLKDIS_BIT (1 << 6) /* Clock Disable */
#define MAX14830_REV_ID (0xb0)
struct max310x_if_cfg {
int (*extended_reg_enable)(struct device *dev, bool enable);
unsigned int rev_id_reg;
};
struct max310x_devtype {
struct {
unsigned short min;
unsigned short max;
} slave_addr;
char name[9];
int nr;
u8 mode1;
int (*detect)(struct device *);
void (*power)(struct uart_port *, int);
};
struct max310x_one {
struct uart_port port;
struct work_struct tx_work;
struct work_struct md_work;
struct work_struct rs_work;
struct regmap *regmap;
u8 rx_buf[MAX310X_FIFO_SIZE];
};
#define to_max310x_port(_port) \
container_of(_port, struct max310x_one, port)
struct max310x_port {
const struct max310x_devtype *devtype;
const struct max310x_if_cfg *if_cfg;
struct regmap *regmap;
struct clk *clk;
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio;
#endif
struct max310x_one p[];
};
static struct uart_driver max310x_uart = {
.owner = THIS_MODULE,
.driver_name = MAX310X_NAME,
.dev_name = "ttyMAX",
.major = MAX310X_MAJOR,
.minor = MAX310X_MINOR,
.nr = MAX310X_UART_NRMAX,
};
static DECLARE_BITMAP(max310x_lines, MAX310X_UART_NRMAX);
static u8 max310x_port_read(struct uart_port *port, u8 reg)
{
struct max310x_one *one = to_max310x_port(port);
unsigned int val = 0;
regmap_read(one->regmap, reg, &val);
return val;
}
static void max310x_port_write(struct uart_port *port, u8 reg, u8 val)
{
struct max310x_one *one = to_max310x_port(port);
regmap_write(one->regmap, reg, val);
}
static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val)
{
struct max310x_one *one = to_max310x_port(port);
regmap_update_bits(one->regmap, reg, mask, val);
}
static int max3107_detect(struct device *dev)
{
struct max310x_port *s = dev_get_drvdata(dev);
unsigned int val = 0;
int ret;
ret = regmap_read(s->regmap, MAX310X_REVID_REG, &val);
if (ret)
return ret;
if (((val & MAX310x_REV_MASK) != MAX3107_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
return -ENODEV;
}
return 0;
}
static int max3108_detect(struct device *dev)
{
struct max310x_port *s = dev_get_drvdata(dev);
unsigned int val = 0;
int ret;
/* The MAX3108 has no REV ID register; we just check the default
 * value of the clock source register to make sure everything
 * works.
 */
ret = regmap_read(s->regmap, MAX310X_CLKSRC_REG, &val);
if (ret)
return ret;
if (val != (MAX310X_CLKSRC_EXTCLK_BIT | MAX310X_CLKSRC_PLLBYP_BIT)) {
dev_err(dev, "%s not present\n", s->devtype->name);
return -ENODEV;
}
return 0;
}
static int max3109_detect(struct device *dev)
{
struct max310x_port *s = dev_get_drvdata(dev);
unsigned int val = 0;
int ret;
ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
return -ENODEV;
}
return 0;
}
static void max310x_power(struct uart_port *port, int on)
{
max310x_port_update(port, MAX310X_MODE1_REG,
MAX310X_MODE1_FORCESLEEP_BIT,
on ? 0 : MAX310X_MODE1_FORCESLEEP_BIT);
if (on)
msleep(50);
}
static int max14830_detect(struct device *dev)
{
struct max310x_port *s = dev_get_drvdata(dev);
unsigned int val = 0;
int ret;
ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
return -ENODEV;
}
return 0;
}
static void max14830_power(struct uart_port *port, int on)
{
max310x_port_update(port, MAX310X_BRGCFG_REG,
MAX14830_BRGCFG_CLKDIS_BIT,
on ? 0 : MAX14830_BRGCFG_CLKDIS_BIT);
if (on)
msleep(50);
}
static const struct max310x_devtype max3107_devtype = {
.name = "MAX3107",
.nr = 1,
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT | MAX310X_MODE1_IRQSEL_BIT,
.detect = max3107_detect,
.power = max310x_power,
.slave_addr = {
.min = 0x2c,
.max = 0x2f,
},
};
static const struct max310x_devtype max3108_devtype = {
.name = "MAX3108",
.nr = 1,
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3108_detect,
.power = max310x_power,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
},
};
static const struct max310x_devtype max3109_devtype = {
.name = "MAX3109",
.nr = 2,
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3109_detect,
.power = max310x_power,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
},
};
static const struct max310x_devtype max14830_devtype = {
.name = "MAX14830",
.nr = 4,
.mode1 = MAX310X_MODE1_IRQSEL_BIT,
.detect = max14830_detect,
.power = max14830_power,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
},
};
static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
{
switch (reg) {
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
case MAX310X_STS_IRQSTS_REG:
case MAX310X_TXFIFOLVL_REG:
case MAX310X_RXFIFOLVL_REG:
return false;
default:
break;
}
return true;
}
static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
case MAX310X_STS_IRQSTS_REG:
case MAX310X_TXFIFOLVL_REG:
case MAX310X_RXFIFOLVL_REG:
case MAX310X_GPIODATA_REG:
case MAX310X_BRGDIVLSB_REG:
case MAX310X_REG_05:
case MAX310X_REG_1F:
return true;
default:
break;
}
return false;
}
static bool max310x_reg_precious(struct device *dev, unsigned int reg)
{
switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
case MAX310X_STS_IRQSTS_REG:
return true;
default:
break;
}
return false;
}
static bool max310x_reg_noinc(struct device *dev, unsigned int reg)
{
return reg == MAX310X_RHR_REG;
}
static int max310x_set_baud(struct uart_port *port, int baud)
{
unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
/*
 * Calculate the integer divisor first. Select a proper mode
 * in case the requested baud rate is too high for the
 * pre-defined clock frequency.
 */
div = port->uartclk / baud;
if (div < 8) {
/* Mode x4 */
c = 4;
mode = MAX310X_BRGCFG_4XMODE_BIT;
} else if (div < 16) {
/* Mode x2 */
c = 8;
mode = MAX310X_BRGCFG_2XMODE_BIT;
} else {
c = 16;
}
/* Calculate the divisor in accordance with the fraction coefficient */
div /= c;
F = c*baud;
/* Calculate the baud rate fraction */
if (div > 0)
frac = (16*(port->uartclk % F)) / F;
else
div = 1;
max310x_port_write(port, MAX310X_BRGDIVMSB_REG, div >> 8);
max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div);
max310x_port_write(port, MAX310X_BRGCFG_REG, frac | mode);
/* Return the actual baud rate we just programmed */
return (16*port->uartclk) / (c*(16*div + frac));
}
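/*
 * Worked example (added for clarity, not from the datasheet): with
 * port->uartclk = 3686400 and baud = 115200, div = 32 selects the
 * plain x16 mode (c = 16, mode = 0), giving div = 32 / 16 = 2,
 * F = 16 * 115200 = 1843200 and frac = 16 * (3686400 % 1843200) /
 * 1843200 = 0. The function then returns
 *
 *	(16 * 3686400) / (16 * (16 * 2 + 0)) = 115200
 *
 * i.e. the requested rate exactly.
 */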
static int max310x_update_best_err(unsigned long f, long *besterr)
{
/* Use a baud rate of 460800 (x16 oversampled) to calculate the error */
long err = f % (460800 * 16);
if ((*besterr < 0) || (*besterr > err)) {
*besterr = err;
return 0;
}
return 1;
}
static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
unsigned long freq, bool xtal)
{
unsigned int div, clksrc, pllcfg = 0;
long besterr = -1;
unsigned long fdiv, fmul, bestfreq = freq;
/* First, update error without PLL */
max310x_update_best_err(freq, &besterr);
/* Try all possible PLL dividers */
for (div = 1; (div <= 63) && besterr; div++) {
fdiv = DIV_ROUND_CLOSEST(freq, div);
/* Try multiplier 6 */
fmul = fdiv * 6;
if ((fdiv >= 500000) && (fdiv <= 800000))
if (!max310x_update_best_err(fmul, &besterr)) {
pllcfg = (0 << 6) | div;
bestfreq = fmul;
}
/* Try multiplier 48 */
fmul = fdiv * 48;
if ((fdiv >= 850000) && (fdiv <= 1200000))
if (!max310x_update_best_err(fmul, &besterr)) {
pllcfg = (1 << 6) | div;
bestfreq = fmul;
}
/* Try multiplier 96 */
fmul = fdiv * 96;
if ((fdiv >= 425000) && (fdiv <= 1000000))
if (!max310x_update_best_err(fmul, &besterr)) {
pllcfg = (2 << 6) | div;
bestfreq = fmul;
}
/* Try multiplier 144 */
fmul = fdiv * 144;
if ((fdiv >= 390000) && (fdiv <= 667000))
if (!max310x_update_best_err(fmul, &besterr)) {
pllcfg = (3 << 6) | div;
bestfreq = fmul;
}
}
/* Configure clock source */
clksrc = MAX310X_CLKSRC_EXTCLK_BIT | (xtal ? MAX310X_CLKSRC_CRYST_BIT : 0);
/* Configure PLL */
if (pllcfg) {
clksrc |= MAX310X_CLKSRC_PLL_BIT;
regmap_write(s->regmap, MAX310X_PLLCFG_REG, pllcfg);
} else
clksrc |= MAX310X_CLKSRC_PLLBYP_BIT;
regmap_write(s->regmap, MAX310X_CLKSRC_REG, clksrc);
/* Wait for crystal */
if (xtal) {
unsigned int val;
msleep(10);
regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
if (!(val & MAX310X_STS_CLKREADY_BIT)) {
dev_warn(dev, "clock is not stable yet\n");
}
}
return bestfreq;
}
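/*
 * Worked example (added for clarity): with a 1.8432 MHz crystal, the
 * error without the PLL is 1843200 % (460800 * 16) = 1843200. The
 * divider scan then finds div = 2 (fdiv = 921600, inside the
 * 850-1200 kHz window for the x48 multiplier):
 *
 *	fmul = 921600 * 48; // 44236800, and 44236800 % 7372800 == 0
 *
 * so pllcfg becomes (1 << 6) | 2 = 0x42 and the function returns a
 * reference clock of 44.2368 MHz.
 */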
static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
regmap_noinc_write(one->regmap, MAX310X_THR_REG, txbuf, len);
}
static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
regmap_noinc_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
}
static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
{
struct max310x_one *one = to_max310x_port(port);
unsigned int sts, i;
u8 ch, flag;
if (port->read_status_mask == MAX310X_LSR_RXOVR_BIT) {
/* We are just reading, happily ignoring any error conditions.
 * Break conditions, parity checking, framing errors -- they
 * are all ignored. That means that we can do a batch-read.
 *
 * There is a small window for a race if the RX FIFO
 * overruns while we're reading the buffer; the datasheet says
 * that the LSR register applies to the "current" character.
 * That's also the reason why we cannot do batched reads when
 * asked to check the individual statuses.
 */
sts = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
max310x_batch_read(port, one->rx_buf, rxlen);
port->icount.rx += rxlen;
flag = TTY_NORMAL;
sts &= port->read_status_mask;
if (sts & MAX310X_LSR_RXOVR_BIT) {
dev_warn_ratelimited(port->dev, "Hardware RX FIFO overrun\n");
port->icount.overrun++;
}
for (i = 0; i < (rxlen - 1); ++i)
uart_insert_char(port, sts, 0, one->rx_buf[i], flag);
/*
* Handle the overrun case for the last character only, since
* the RxFIFO overflow happens after it is pushed to the FIFO
* tail.
*/
uart_insert_char(port, sts, MAX310X_LSR_RXOVR_BIT,
one->rx_buf[rxlen-1], flag);
} else {
if (unlikely(rxlen >= port->fifosize)) {
dev_warn_ratelimited(port->dev, "Possible RX FIFO overrun\n");
port->icount.buf_overrun++;
/* Ensure sanity of RX level */
rxlen = port->fifosize;
}
while (rxlen--) {
ch = max310x_port_read(port, MAX310X_RHR_REG);
sts = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
sts &= MAX310X_LSR_RXPAR_BIT | MAX310X_LSR_FRERR_BIT |
MAX310X_LSR_RXOVR_BIT | MAX310X_LSR_RXBRK_BIT;
port->icount.rx++;
flag = TTY_NORMAL;
if (unlikely(sts)) {
if (sts & MAX310X_LSR_RXBRK_BIT) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
} else if (sts & MAX310X_LSR_RXPAR_BIT)
port->icount.parity++;
else if (sts & MAX310X_LSR_FRERR_BIT)
port->icount.frame++;
else if (sts & MAX310X_LSR_RXOVR_BIT)
port->icount.overrun++;
sts &= port->read_status_mask;
if (sts & MAX310X_LSR_RXBRK_BIT)
flag = TTY_BREAK;
else if (sts & MAX310X_LSR_RXPAR_BIT)
flag = TTY_PARITY;
else if (sts & MAX310X_LSR_FRERR_BIT)
flag = TTY_FRAME;
else if (sts & MAX310X_LSR_RXOVR_BIT)
flag = TTY_OVERRUN;
}
if (uart_handle_sysrq_char(port, ch))
continue;
if (sts & port->ignore_status_mask)
continue;
uart_insert_char(port, sts, MAX310X_LSR_RXOVR_BIT, ch, flag);
}
}
tty_flip_buffer_push(&port->state->port);
}
static void max310x_handle_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
unsigned int txlen, to_send, until_end;
if (unlikely(port->x_char)) {
max310x_port_write(port, MAX310X_THR_REG, port->x_char);
port->icount.tx++;
port->x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
/* Get length of data pending in circular buffer */
to_send = uart_circ_chars_pending(xmit);
until_end = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
if (likely(to_send)) {
/* Limit to size of TX FIFO */
txlen = max310x_port_read(port, MAX310X_TXFIFOLVL_REG);
txlen = port->fifosize - txlen;
to_send = (to_send > txlen) ? txlen : to_send;
if (until_end < to_send) {
/* It's a circular buffer -- handle the wrap-around.
 * This could be done in one SPI transaction, but two
 * simple writes keep the code straightforward. */
max310x_batch_write(port, xmit->buf + xmit->tail, until_end);
max310x_batch_write(port, xmit->buf, to_send - until_end);
} else {
max310x_batch_write(port, xmit->buf + xmit->tail, to_send);
}
uart_xmit_advance(port, to_send);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
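/*
 * Worked example of the wrap-around above (added for clarity,
 * assuming the usual UART_XMIT_SIZE of 4096): with xmit->tail = 4090
 * and xmit->head = 10, uart_circ_chars_pending() yields 16 while
 * CIRC_CNT_TO_END() yields 6, so the data goes out as two writes:
 *
 *	max310x_batch_write(port, xmit->buf + 4090, 6);
 *	max310x_batch_write(port, xmit->buf, 10);
 */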
static void max310x_start_tx(struct uart_port *port)
{
struct max310x_one *one = to_max310x_port(port);
schedule_work(&one->tx_work);
}
static irqreturn_t max310x_port_irq(struct max310x_port *s, int portno)
{
struct uart_port *port = &s->p[portno].port;
irqreturn_t res = IRQ_NONE;
do {
unsigned int ists, lsr, rxlen;
/* Read IRQ status & RX FIFO level */
ists = max310x_port_read(port, MAX310X_IRQSTS_REG);
rxlen = max310x_port_read(port, MAX310X_RXFIFOLVL_REG);
if (!ists && !rxlen)
break;
res = IRQ_HANDLED;
if (ists & MAX310X_IRQ_CTS_BIT) {
lsr = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
uart_handle_cts_change(port, lsr & MAX310X_LSR_CTS_BIT);
}
if (rxlen)
max310x_handle_rx(port, rxlen);
if (ists & MAX310X_IRQ_TXEMPTY_BIT)
max310x_start_tx(port);
} while (1);
return res;
}
static irqreturn_t max310x_ist(int irq, void *dev_id)
{
struct max310x_port *s = (struct max310x_port *)dev_id;
bool handled = false;
if (s->devtype->nr > 1) {
do {
unsigned int val = ~0;
WARN_ON_ONCE(regmap_read(s->regmap,
MAX310X_GLOBALIRQ_REG, &val));
val = ((1 << s->devtype->nr) - 1) & ~val;
if (!val)
break;
if (max310x_port_irq(s, fls(val) - 1) == IRQ_HANDLED)
handled = true;
} while (1);
} else {
if (max310x_port_irq(s, 0) == IRQ_HANDLED)
handled = true;
}
return IRQ_RETVAL(handled);
}
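/*
 * Worked example (added for clarity): the global IRQ register carries
 * one active-low bit per port, so for a 4-port MAX14830 with ports 1
 * and 3 pending the register reads 0b0101 and
 *
 *	val = ((1 << 4) - 1) & ~0b0101; // 0b1010
 *
 * after which fls(val) - 1 = 3 services port 3 first; the loop then
 * re-reads the register until no ports remain pending.
 */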
static void max310x_tx_proc(struct work_struct *ws)
{
struct max310x_one *one = container_of(ws, struct max310x_one, tx_work);
max310x_handle_tx(&one->port);
}
static unsigned int max310x_tx_empty(struct uart_port *port)
{
u8 lvl = max310x_port_read(port, MAX310X_TXFIFOLVL_REG);
return lvl ? 0 : TIOCSER_TEMT;
}
static unsigned int max310x_get_mctrl(struct uart_port *port)
{
/* DCD and DSR are not wired and CTS/RTS is handled automatically,
* so just indicate DSR and CAR asserted.
*/
return TIOCM_DSR | TIOCM_CAR;
}
static void max310x_md_proc(struct work_struct *ws)
{
struct max310x_one *one = container_of(ws, struct max310x_one, md_work);
max310x_port_update(&one->port, MAX310X_MODE2_REG,
MAX310X_MODE2_LOOPBACK_BIT,
(one->port.mctrl & TIOCM_LOOP) ?
MAX310X_MODE2_LOOPBACK_BIT : 0);
}
static void max310x_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct max310x_one *one = to_max310x_port(port);
schedule_work(&one->md_work);
}
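/*
 * Note: set_mctrl() is called with the port lock held and may run in
 * atomic context, while register access on this device goes over
 * SPI/I2C and can sleep; hence the modem-control update is deferred
 * to the md_work workqueue item instead of touching the bus directly.
 */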
static void max310x_break_ctl(struct uart_port *port, int break_state)
{
max310x_port_update(port, MAX310X_LCR_REG,
MAX310X_LCR_TXBREAK_BIT,
break_state ? MAX310X_LCR_TXBREAK_BIT : 0);
}
static void max310x_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
unsigned int lcr = 0, flow = 0;
int baud;
/* Mask termios capabilities we don't support */
termios->c_cflag &= ~CMSPAR;
/* Word size */
switch (termios->c_cflag & CSIZE) {
case CS5:
break;
case CS6:
lcr = MAX310X_LCR_LENGTH0_BIT;
break;
case CS7:
lcr = MAX310X_LCR_LENGTH1_BIT;
break;
case CS8:
default:
lcr = MAX310X_LCR_LENGTH1_BIT | MAX310X_LCR_LENGTH0_BIT;
break;
}
/* Parity */
if (termios->c_cflag & PARENB) {
lcr |= MAX310X_LCR_PARITY_BIT;
if (!(termios->c_cflag & PARODD))
lcr |= MAX310X_LCR_EVENPARITY_BIT;
}
/* Stop bits */
if (termios->c_cflag & CSTOPB)
lcr |= MAX310X_LCR_STOPLEN_BIT; /* 2 stops */
/* Update LCR register */
max310x_port_write(port, MAX310X_LCR_REG, lcr);
/* Set read status mask */
port->read_status_mask = MAX310X_LSR_RXOVR_BIT;
if (termios->c_iflag & INPCK)
port->read_status_mask |= MAX310X_LSR_RXPAR_BIT |
MAX310X_LSR_FRERR_BIT;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= MAX310X_LSR_RXBRK_BIT;
/* Set status ignore mask */
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNBRK)
port->ignore_status_mask |= MAX310X_LSR_RXBRK_BIT;
if (!(termios->c_cflag & CREAD))
port->ignore_status_mask |= MAX310X_LSR_RXPAR_BIT |
MAX310X_LSR_RXOVR_BIT |
MAX310X_LSR_FRERR_BIT |
MAX310X_LSR_RXBRK_BIT;
/* Configure flow control */
max310x_port_write(port, MAX310X_XON1_REG, termios->c_cc[VSTART]);
max310x_port_write(port, MAX310X_XOFF1_REG, termios->c_cc[VSTOP]);
/* Disable transmitter before enabling AutoCTS or auto transmitter
* flow control
*/
if (termios->c_cflag & CRTSCTS || termios->c_iflag & IXOFF) {
max310x_port_update(port, MAX310X_MODE1_REG,
MAX310X_MODE1_TXDIS_BIT,
MAX310X_MODE1_TXDIS_BIT);
}
port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
if (termios->c_cflag & CRTSCTS) {
/* Enable AUTORTS and AUTOCTS */
port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
flow |= MAX310X_FLOWCTRL_AUTOCTS_BIT |
MAX310X_FLOWCTRL_AUTORTS_BIT;
}
if (termios->c_iflag & IXON)
flow |= MAX310X_FLOWCTRL_SWFLOW3_BIT |
MAX310X_FLOWCTRL_SWFLOWEN_BIT;
if (termios->c_iflag & IXOFF) {
port->status |= UPSTAT_AUTOXOFF;
flow |= MAX310X_FLOWCTRL_SWFLOW1_BIT |
MAX310X_FLOWCTRL_SWFLOWEN_BIT;
}
max310x_port_write(port, MAX310X_FLOWCTRL_REG, flow);
/* Enable transmitter after disabling AutoCTS and auto transmitter
* flow control
*/
if (!(termios->c_cflag & CRTSCTS) && !(termios->c_iflag & IXOFF)) {
max310x_port_update(port, MAX310X_MODE1_REG,
MAX310X_MODE1_TXDIS_BIT,
0);
}
/* Get baud rate generator configuration */
baud = uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / 0xffff,
port->uartclk / 4);
/* Setup baudrate generator */
baud = max310x_set_baud(port, baud);
/* Update timeout according to new baud rate */
uart_update_timeout(port, termios->c_cflag, baud);
}
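/*
 * Illustrative LCR values produced by the word-size/parity/stop logic
 * above (a sketch, not exhaustive):
 *   8N1: MAX310X_LCR_LENGTH1_BIT | MAX310X_LCR_LENGTH0_BIT
 *   7E1: MAX310X_LCR_LENGTH1_BIT | MAX310X_LCR_PARITY_BIT |
 *        MAX310X_LCR_EVENPARITY_BIT
 *   8O2: MAX310X_LCR_LENGTH1_BIT | MAX310X_LCR_LENGTH0_BIT |
 *        MAX310X_LCR_PARITY_BIT | MAX310X_LCR_STOPLEN_BIT
 */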
static void max310x_rs_proc(struct work_struct *ws)
{
struct max310x_one *one = container_of(ws, struct max310x_one, rs_work);
unsigned int delay, mode1 = 0, mode2 = 0;
delay = (one->port.rs485.delay_rts_before_send << 4) |
one->port.rs485.delay_rts_after_send;
max310x_port_write(&one->port, MAX310X_HDPIXDELAY_REG, delay);
if (one->port.rs485.flags & SER_RS485_ENABLED) {
mode1 = MAX310X_MODE1_TRNSCVCTRL_BIT;
if (!(one->port.rs485.flags & SER_RS485_RX_DURING_TX))
mode2 = MAX310X_MODE2_ECHOSUPR_BIT;
}
max310x_port_update(&one->port, MAX310X_MODE1_REG,
MAX310X_MODE1_TRNSCVCTRL_BIT, mode1);
max310x_port_update(&one->port, MAX310X_MODE2_REG,
MAX310X_MODE2_ECHOSUPR_BIT, mode2);
}
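/*
 * The two RS-485 RTS delays are packed into one register as 4-bit
 * nibbles, which is why max310x_rs485_config() below rejects values
 * above 0x0f. Hypothetical example: delay_rts_before_send = 2 and
 * delay_rts_after_send = 5 yield (2 << 4) | 5 = 0x25 in
 * MAX310X_HDPIXDELAY_REG.
 */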
static int max310x_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct max310x_one *one = to_max310x_port(port);
if ((rs485->delay_rts_before_send > 0x0f) ||
(rs485->delay_rts_after_send > 0x0f))
return -ERANGE;
port->rs485 = *rs485;
schedule_work(&one->rs_work);
return 0;
}
static int max310x_startup(struct uart_port *port)
{
struct max310x_port *s = dev_get_drvdata(port->dev);
unsigned int val;
s->devtype->power(port, 1);
/* Configure MODE1 register */
max310x_port_update(port, MAX310X_MODE1_REG,
MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
/* Configure MODE2 register & reset FIFOs */
val = MAX310X_MODE2_RXEMPTINV_BIT | MAX310X_MODE2_FIFORST_BIT;
max310x_port_write(port, MAX310X_MODE2_REG, val);
max310x_port_update(port, MAX310X_MODE2_REG,
MAX310X_MODE2_FIFORST_BIT, 0);
/* Configure mode1/mode2 to have rs485/rs232 enabled at startup */
val = (clamp(port->rs485.delay_rts_before_send, 0U, 15U) << 4) |
clamp(port->rs485.delay_rts_after_send, 0U, 15U);
max310x_port_write(port, MAX310X_HDPIXDELAY_REG, val);
if (port->rs485.flags & SER_RS485_ENABLED) {
max310x_port_update(port, MAX310X_MODE1_REG,
MAX310X_MODE1_TRNSCVCTRL_BIT,
MAX310X_MODE1_TRNSCVCTRL_BIT);
if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
max310x_port_update(port, MAX310X_MODE2_REG,
MAX310X_MODE2_ECHOSUPR_BIT,
MAX310X_MODE2_ECHOSUPR_BIT);
}
/* Configure flow control levels */
/* Flow control halt level 96, resume level 48 */
max310x_port_write(port, MAX310X_FLOWLVL_REG,
MAX310X_FLOWLVL_RES(48) | MAX310X_FLOWLVL_HALT(96));
/* Clear IRQ status register */
max310x_port_read(port, MAX310X_IRQSTS_REG);
/* Enable RX, TX, CTS change interrupts */
val = MAX310X_IRQ_RXEMPTY_BIT | MAX310X_IRQ_TXEMPTY_BIT;
max310x_port_write(port, MAX310X_IRQEN_REG, val | MAX310X_IRQ_CTS_BIT);
return 0;
}
static void max310x_shutdown(struct uart_port *port)
{
struct max310x_port *s = dev_get_drvdata(port->dev);
/* Disable all interrupts */
max310x_port_write(port, MAX310X_IRQEN_REG, 0);
s->devtype->power(port, 0);
}
static const char *max310x_type(struct uart_port *port)
{
struct max310x_port *s = dev_get_drvdata(port->dev);
return (port->type == PORT_MAX310X) ? s->devtype->name : NULL;
}
static int max310x_request_port(struct uart_port *port)
{
/* Do nothing */
return 0;
}
static void max310x_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_MAX310X;
}
static int max310x_verify_port(struct uart_port *port, struct serial_struct *s)
{
if ((s->type != PORT_UNKNOWN) && (s->type != PORT_MAX310X))
return -EINVAL;
if (s->irq != port->irq)
return -EINVAL;
return 0;
}
static void max310x_null_void(struct uart_port *port)
{
/* Do nothing */
}
static const struct uart_ops max310x_ops = {
.tx_empty = max310x_tx_empty,
.set_mctrl = max310x_set_mctrl,
.get_mctrl = max310x_get_mctrl,
.stop_tx = max310x_null_void,
.start_tx = max310x_start_tx,
.stop_rx = max310x_null_void,
.break_ctl = max310x_break_ctl,
.startup = max310x_startup,
.shutdown = max310x_shutdown,
.set_termios = max310x_set_termios,
.type = max310x_type,
.request_port = max310x_request_port,
.release_port = max310x_null_void,
.config_port = max310x_config_port,
.verify_port = max310x_verify_port,
};
static int __maybe_unused max310x_suspend(struct device *dev)
{
struct max310x_port *s = dev_get_drvdata(dev);
int i;
for (i = 0; i < s->devtype->nr; i++) {
uart_suspend_port(&max310x_uart, &s->p[i].port);
s->devtype->power(&s->p[i].port, 0);
}
return 0;
}
static int __maybe_unused max310x_resume(struct device *dev)
{
struct max310x_port *s = dev_get_drvdata(dev);
int i;
for (i = 0; i < s->devtype->nr; i++) {
s->devtype->power(&s->p[i].port, 1);
uart_resume_port(&max310x_uart, &s->p[i].port);
}
return 0;
}
static SIMPLE_DEV_PM_OPS(max310x_pm_ops, max310x_suspend, max310x_resume);
#ifdef CONFIG_GPIOLIB
static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
unsigned int val;
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
val = max310x_port_read(port, MAX310X_GPIODATA_REG);
return !!((val >> 4) & (1 << (offset % 4)));
}
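/*
 * GPIO numbering sketch: each UART exposes four GPIOs, so a chip-wide
 * offset maps to UART "offset / 4", pin "offset % 4". In
 * MAX310X_GPIODATA_REG the low nibble holds the output latches and the
 * high nibble the sampled input levels, hence the ">> 4" above. E.g.
 * offset 6 reads bit 6 (input level of pin 2) on the second UART.
 */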
static void max310x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
max310x_port_update(port, MAX310X_GPIODATA_REG, 1 << (offset % 4),
value ? 1 << (offset % 4) : 0);
}
static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
max310x_port_update(port, MAX310X_GPIOCFG_REG, 1 << (offset % 4), 0);
return 0;
}
static int max310x_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
max310x_port_update(port, MAX310X_GPIODATA_REG, 1 << (offset % 4),
value ? 1 << (offset % 4) : 0);
max310x_port_update(port, MAX310X_GPIOCFG_REG, 1 << (offset % 4),
1 << (offset % 4));
return 0;
}
static int max310x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
unsigned long config)
{
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
max310x_port_update(port, MAX310X_GPIOCFG_REG,
1 << ((offset % 4) + 4),
1 << ((offset % 4) + 4));
return 0;
case PIN_CONFIG_DRIVE_PUSH_PULL:
max310x_port_update(port, MAX310X_GPIOCFG_REG,
1 << ((offset % 4) + 4), 0);
return 0;
default:
return -ENOTSUPP;
}
}
#endif
static const struct serial_rs485 max310x_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX,
.delay_rts_before_send = 1,
.delay_rts_after_send = 1,
};
static int max310x_probe(struct device *dev, const struct max310x_devtype *devtype,
const struct max310x_if_cfg *if_cfg,
struct regmap *regmaps[], int irq)
{
int i, ret, fmin, fmax, freq;
struct max310x_port *s;
u32 uartclk = 0;
bool xtal;
for (i = 0; i < devtype->nr; i++)
if (IS_ERR(regmaps[i]))
return PTR_ERR(regmaps[i]);
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
if (!s) {
dev_err(dev, "Error allocating port structure\n");
return -ENOMEM;
}
/* Always ask for fixed clock rate from a property. */
device_property_read_u32(dev, "clock-frequency", &uartclk);
xtal = device_property_match_string(dev, "clock-names", "osc") < 0;
if (xtal)
s->clk = devm_clk_get_optional(dev, "xtal");
else
s->clk = devm_clk_get_optional(dev, "osc");
if (IS_ERR(s->clk))
return PTR_ERR(s->clk);
ret = clk_prepare_enable(s->clk);
if (ret)
return ret;
freq = clk_get_rate(s->clk);
if (freq == 0)
freq = uartclk;
if (freq == 0) {
dev_err(dev, "Cannot get clock rate\n");
ret = -EINVAL;
goto out_clk;
}
if (xtal) {
fmin = 1000000;
fmax = 4000000;
} else {
fmin = 500000;
fmax = 35000000;
}
/* Check frequency limits */
if (freq < fmin || freq > fmax) {
ret = -ERANGE;
goto out_clk;
}
s->regmap = regmaps[0];
s->devtype = devtype;
s->if_cfg = if_cfg;
dev_set_drvdata(dev, s);
/* Check device to ensure we are talking to what we expect */
ret = devtype->detect(dev);
if (ret)
goto out_clk;
for (i = 0; i < devtype->nr; i++) {
/* Reset port */
regmap_write(regmaps[i], MAX310X_MODE2_REG,
MAX310X_MODE2_RST_BIT);
/* Clear port reset */
regmap_write(regmaps[i], MAX310X_MODE2_REG, 0);
/* Wait for port startup */
do {
regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret);
} while (ret != 0x01);
regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
}
uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
for (i = 0; i < devtype->nr; i++) {
unsigned int line;
line = find_first_zero_bit(max310x_lines, MAX310X_UART_NRMAX);
if (line == MAX310X_UART_NRMAX) {
ret = -ERANGE;
goto out_uart;
}
/* Initialize port data */
s->p[i].port.line = line;
s->p[i].port.dev = dev;
s->p[i].port.irq = irq;
s->p[i].port.type = PORT_MAX310X;
s->p[i].port.fifosize = MAX310X_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.iobase = i;
/*
* Use all ones as membase to make sure uart_configure_port() in
* serial_core.c does not abort for SPI/I2C devices where the
* membase address is not applicable.
*/
s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.uartclk = uartclk;
s->p[i].port.rs485_config = max310x_rs485_config;
s->p[i].port.rs485_supported = max310x_rs485_supported;
s->p[i].port.ops = &max310x_ops;
s->p[i].regmap = regmaps[i];
/* Disable all interrupts */
max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0);
/* Clear IRQ status register */
max310x_port_read(&s->p[i].port, MAX310X_IRQSTS_REG);
/* Initialize queue for start TX */
INIT_WORK(&s->p[i].tx_work, max310x_tx_proc);
/* Initialize queue for changing LOOPBACK mode */
INIT_WORK(&s->p[i].md_work, max310x_md_proc);
/* Initialize queue for changing RS485 mode */
INIT_WORK(&s->p[i].rs_work, max310x_rs_proc);
/* Register port */
ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
if (ret) {
s->p[i].port.dev = NULL;
goto out_uart;
}
set_bit(line, max310x_lines);
/* Go to suspend mode */
devtype->power(&s->p[i].port, 0);
}
#ifdef CONFIG_GPIOLIB
/* Setup GPIO controller */
s->gpio.owner = THIS_MODULE;
s->gpio.parent = dev;
s->gpio.label = devtype->name;
s->gpio.direction_input = max310x_gpio_direction_input;
s->gpio.get = max310x_gpio_get;
s->gpio.direction_output = max310x_gpio_direction_output;
s->gpio.set = max310x_gpio_set;
s->gpio.set_config = max310x_gpio_set_config;
s->gpio.base = -1;
s->gpio.ngpio = devtype->nr * 4;
s->gpio.can_sleep = 1;
ret = devm_gpiochip_add_data(dev, &s->gpio, s);
if (ret)
goto out_uart;
#endif
/* Setup interrupt */
ret = devm_request_threaded_irq(dev, irq, NULL, max310x_ist,
IRQF_ONESHOT | IRQF_SHARED, dev_name(dev), s);
if (!ret)
return 0;
dev_err(dev, "Unable to request IRQ %i\n", irq);
out_uart:
for (i = 0; i < devtype->nr; i++) {
if (s->p[i].port.dev) {
uart_remove_one_port(&max310x_uart, &s->p[i].port);
clear_bit(s->p[i].port.line, max310x_lines);
}
}
out_clk:
clk_disable_unprepare(s->clk);
return ret;
}
static void max310x_remove(struct device *dev)
{
struct max310x_port *s = dev_get_drvdata(dev);
int i;
for (i = 0; i < s->devtype->nr; i++) {
cancel_work_sync(&s->p[i].tx_work);
cancel_work_sync(&s->p[i].md_work);
cancel_work_sync(&s->p[i].rs_work);
uart_remove_one_port(&max310x_uart, &s->p[i].port);
clear_bit(s->p[i].port.line, max310x_lines);
s->devtype->power(&s->p[i].port, 0);
}
clk_disable_unprepare(s->clk);
}
static const struct of_device_id __maybe_unused max310x_dt_ids[] = {
{ .compatible = "maxim,max3107", .data = &max3107_devtype, },
{ .compatible = "maxim,max3108", .data = &max3108_devtype, },
{ .compatible = "maxim,max3109", .data = &max3109_devtype, },
{ .compatible = "maxim,max14830", .data = &max14830_devtype },
{ }
};
MODULE_DEVICE_TABLE(of, max310x_dt_ids);
static struct regmap_config regcfg = {
.reg_bits = 8,
.val_bits = 8,
.write_flag_mask = MAX310X_WRITE_BIT,
.cache_type = REGCACHE_RBTREE,
.max_register = MAX310X_REG_1F,
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
.writeable_noinc_reg = max310x_reg_noinc,
.readable_noinc_reg = max310x_reg_noinc,
.max_raw_read = MAX310X_FIFO_SIZE,
.max_raw_write = MAX310X_FIFO_SIZE,
};
#ifdef CONFIG_SPI_MASTER
static int max310x_spi_extended_reg_enable(struct device *dev, bool enable)
{
struct max310x_port *s = dev_get_drvdata(dev);
return regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
enable ? MAX310X_EXTREG_ENBL : MAX310X_EXTREG_DSBL);
}
static const struct max310x_if_cfg __maybe_unused max310x_spi_if_cfg = {
.extended_reg_enable = max310x_spi_extended_reg_enable,
.rev_id_reg = MAX310X_SPI_REVID_EXTREG,
};
static int max310x_spi_probe(struct spi_device *spi)
{
const struct max310x_devtype *devtype;
struct regmap *regmaps[4];
unsigned int i;
int ret;
/* Setup SPI bus */
spi->bits_per_word = 8;
spi->mode = spi->mode ? : SPI_MODE_0;
spi->max_speed_hz = spi->max_speed_hz ? : 26000000;
ret = spi_setup(spi);
if (ret)
return ret;
devtype = device_get_match_data(&spi->dev);
if (!devtype)
devtype = (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
for (i = 0; i < devtype->nr; i++) {
u8 port_mask = i * 0x20;
regcfg.read_flag_mask = port_mask;
regcfg.write_flag_mask = port_mask | MAX310X_WRITE_BIT;
regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
}
return max310x_probe(&spi->dev, devtype, &max310x_spi_if_cfg, regmaps, spi->irq);
}
static void max310x_spi_remove(struct spi_device *spi)
{
max310x_remove(&spi->dev);
}
static const struct spi_device_id max310x_id_table[] = {
{ "max3107", (kernel_ulong_t)&max3107_devtype, },
{ "max3108", (kernel_ulong_t)&max3108_devtype, },
{ "max3109", (kernel_ulong_t)&max3109_devtype, },
{ "max14830", (kernel_ulong_t)&max14830_devtype, },
{ }
};
MODULE_DEVICE_TABLE(spi, max310x_id_table);
static struct spi_driver max310x_spi_driver = {
.driver = {
.name = MAX310X_NAME,
.of_match_table = max310x_dt_ids,
.pm = &max310x_pm_ops,
},
.probe = max310x_spi_probe,
.remove = max310x_spi_remove,
.id_table = max310x_id_table,
};
#endif
#ifdef CONFIG_I2C
static int max310x_i2c_extended_reg_enable(struct device *dev, bool enable)
{
return 0;
}
static struct regmap_config regcfg_i2c = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
.max_register = MAX310X_I2C_REVID_EXTREG,
.writeable_noinc_reg = max310x_reg_noinc,
.readable_noinc_reg = max310x_reg_noinc,
.max_raw_read = MAX310X_FIFO_SIZE,
.max_raw_write = MAX310X_FIFO_SIZE,
};
static const struct max310x_if_cfg max310x_i2c_if_cfg = {
.extended_reg_enable = max310x_i2c_extended_reg_enable,
.rev_id_reg = MAX310X_I2C_REVID_EXTREG,
};
static unsigned short max310x_i2c_slave_addr(unsigned short addr,
unsigned int nr)
{
/*
* For MAX14830 and MAX3109, the slave address depends on what the
* A0 and A1 pins are tied to.
* See the I2C Address Map table in the datasheet.
* Based on that table, the following formulas were determined.
* UART1 - UART0 = 0x10
* UART2 - UART1 = 0x20 + 0x10
* UART3 - UART2 = 0x10
*/
addr -= nr * 0x10;
if (nr >= 2)
addr -= 0x20;
return addr;
}
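/*
 * Worked example with a hypothetical UART0 address of 0x6c, applying
 * the formulas above:
 *   nr = 1: 0x6c - 0x10 = 0x5c
 *   nr = 2: 0x6c - 0x20 - 0x20 = 0x2c
 *   nr = 3: 0x6c - 0x30 - 0x20 = 0x1c
 * which reproduces the 0x10 / 0x30 / 0x10 spacing from the datasheet
 * table.
 */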
static int max310x_i2c_probe(struct i2c_client *client)
{
const struct max310x_devtype *devtype =
device_get_match_data(&client->dev);
struct i2c_client *port_client;
struct regmap *regmaps[4];
unsigned int i;
u8 port_addr;
if (client->addr < devtype->slave_addr.min ||
client->addr > devtype->slave_addr.max)
return dev_err_probe(&client->dev, -EINVAL,
"Slave addr 0x%x outside of range [0x%x, 0x%x]\n",
client->addr, devtype->slave_addr.min,
devtype->slave_addr.max);
regmaps[0] = devm_regmap_init_i2c(client, &regcfg_i2c);
for (i = 1; i < devtype->nr; i++) {
port_addr = max310x_i2c_slave_addr(client->addr, i);
port_client = devm_i2c_new_dummy_device(&client->dev,
client->adapter,
port_addr);
regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);
}
return max310x_probe(&client->dev, devtype, &max310x_i2c_if_cfg,
regmaps, client->irq);
}
static void max310x_i2c_remove(struct i2c_client *client)
{
max310x_remove(&client->dev);
}
static struct i2c_driver max310x_i2c_driver = {
.driver = {
.name = MAX310X_NAME,
.of_match_table = max310x_dt_ids,
.pm = &max310x_pm_ops,
},
.probe = max310x_i2c_probe,
.remove = max310x_i2c_remove,
};
#endif
static int __init max310x_uart_init(void)
{
int ret;
bitmap_zero(max310x_lines, MAX310X_UART_NRMAX);
ret = uart_register_driver(&max310x_uart);
if (ret)
return ret;
#ifdef CONFIG_SPI_MASTER
ret = spi_register_driver(&max310x_spi_driver);
if (ret)
goto err_spi_register;
#endif
#ifdef CONFIG_I2C
ret = i2c_add_driver(&max310x_i2c_driver);
if (ret)
goto err_i2c_register;
#endif
return 0;
#ifdef CONFIG_I2C
err_i2c_register:
spi_unregister_driver(&max310x_spi_driver);
#endif
err_spi_register:
uart_unregister_driver(&max310x_uart);
return ret;
}
module_init(max310x_uart_init);
static void __exit max310x_uart_exit(void)
{
#ifdef CONFIG_I2C
i2c_del_driver(&max310x_i2c_driver);
#endif
#ifdef CONFIG_SPI_MASTER
spi_unregister_driver(&max310x_spi_driver);
#endif
uart_unregister_driver(&max310x_uart);
}
module_exit(max310x_uart_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <[email protected]>");
MODULE_DESCRIPTION("MAX310X serial driver");
| linux-master | drivers/tty/serial/max310x.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale LINFlexD UART serial port driver
*
* Copyright 2012-2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
*/
#include <linux/console.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
#include <linux/delay.h>
/* All registers are 32-bit width */
#define LINCR1 0x0000 /* LIN control register */
#define LINIER 0x0004 /* LIN interrupt enable register */
#define LINSR 0x0008 /* LIN status register */
#define LINESR 0x000C /* LIN error status register */
#define UARTCR 0x0010 /* UART mode control register */
#define UARTSR 0x0014 /* UART mode status register */
#define LINTCSR 0x0018 /* LIN timeout control status register */
#define LINOCR 0x001C /* LIN output compare register */
#define LINTOCR 0x0020 /* LIN timeout control register */
#define LINFBRR 0x0024 /* LIN fractional baud rate register */
#define LINIBRR 0x0028 /* LIN integer baud rate register */
#define LINCFR 0x002C /* LIN checksum field register */
#define LINCR2 0x0030 /* LIN control register 2 */
#define BIDR 0x0034 /* Buffer identifier register */
#define BDRL 0x0038 /* Buffer data register least significant */
#define BDRM 0x003C /* Buffer data register most significant */
#define IFER 0x0040 /* Identifier filter enable register */
#define IFMI 0x0044 /* Identifier filter match index */
#define IFMR 0x0048 /* Identifier filter mode register */
#define GCR 0x004C /* Global control register */
#define UARTPTO 0x0050 /* UART preset timeout register */
#define UARTCTO 0x0054 /* UART current timeout register */
/*
* Register field definitions
*/
#define LINFLEXD_LINCR1_INIT BIT(0)
#define LINFLEXD_LINCR1_MME BIT(4)
#define LINFLEXD_LINCR1_BF BIT(7)
#define LINFLEXD_LINSR_LINS_INITMODE BIT(12)
#define LINFLEXD_LINSR_LINS_MASK (0xF << 12)
#define LINFLEXD_LINIER_SZIE BIT(15)
#define LINFLEXD_LINIER_OCIE BIT(14)
#define LINFLEXD_LINIER_BEIE BIT(13)
#define LINFLEXD_LINIER_CEIE BIT(12)
#define LINFLEXD_LINIER_HEIE BIT(11)
#define LINFLEXD_LINIER_FEIE BIT(8)
#define LINFLEXD_LINIER_BOIE BIT(7)
#define LINFLEXD_LINIER_LSIE BIT(6)
#define LINFLEXD_LINIER_WUIE BIT(5)
#define LINFLEXD_LINIER_DBFIE BIT(4)
#define LINFLEXD_LINIER_DBEIETOIE BIT(3)
#define LINFLEXD_LINIER_DRIE BIT(2)
#define LINFLEXD_LINIER_DTIE BIT(1)
#define LINFLEXD_LINIER_HRIE BIT(0)
#define LINFLEXD_UARTCR_OSR_MASK (0xF << 24)
#define LINFLEXD_UARTCR_OSR(uartcr) (((uartcr) \
& LINFLEXD_UARTCR_OSR_MASK) >> 24)
#define LINFLEXD_UARTCR_ROSE BIT(23)
#define LINFLEXD_UARTCR_RFBM BIT(9)
#define LINFLEXD_UARTCR_TFBM BIT(8)
#define LINFLEXD_UARTCR_WL1 BIT(7)
#define LINFLEXD_UARTCR_PC1 BIT(6)
#define LINFLEXD_UARTCR_RXEN BIT(5)
#define LINFLEXD_UARTCR_TXEN BIT(4)
#define LINFLEXD_UARTCR_PC0 BIT(3)
#define LINFLEXD_UARTCR_PCE BIT(2)
#define LINFLEXD_UARTCR_WL0 BIT(1)
#define LINFLEXD_UARTCR_UART BIT(0)
#define LINFLEXD_UARTSR_SZF BIT(15)
#define LINFLEXD_UARTSR_OCF BIT(14)
#define LINFLEXD_UARTSR_PE3 BIT(13)
#define LINFLEXD_UARTSR_PE2 BIT(12)
#define LINFLEXD_UARTSR_PE1 BIT(11)
#define LINFLEXD_UARTSR_PE0 BIT(10)
#define LINFLEXD_UARTSR_RMB BIT(9)
#define LINFLEXD_UARTSR_FEF BIT(8)
#define LINFLEXD_UARTSR_BOF BIT(7)
#define LINFLEXD_UARTSR_RPS BIT(6)
#define LINFLEXD_UARTSR_WUF BIT(5)
#define LINFLEXD_UARTSR_4 BIT(4)
#define LINFLEXD_UARTSR_TO BIT(3)
#define LINFLEXD_UARTSR_DRFRFE BIT(2)
#define LINFLEXD_UARTSR_DTFTFF BIT(1)
#define LINFLEXD_UARTSR_NF BIT(0)
#define LINFLEXD_UARTSR_PE (LINFLEXD_UARTSR_PE0 |\
LINFLEXD_UARTSR_PE1 |\
LINFLEXD_UARTSR_PE2 |\
LINFLEXD_UARTSR_PE3)
#define LINFLEX_LDIV_MULTIPLIER (16)
#define DRIVER_NAME "fsl-linflexuart"
#define DEV_NAME "ttyLF"
#define UART_NR 4
#define EARLYCON_BUFFER_INITIAL_CAP 8
#define PREINIT_DELAY 2000 /* us */
static const struct of_device_id linflex_dt_ids[] = {
{
.compatible = "fsl,s32v234-linflexuart",
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, linflex_dt_ids);
#ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE
static struct uart_port *earlycon_port;
static bool linflex_earlycon_same_instance;
static DEFINE_SPINLOCK(init_lock);
static bool during_init;
static struct {
char *content;
unsigned int len, cap;
} earlycon_buf;
#endif
static void linflex_stop_tx(struct uart_port *port)
{
unsigned long ier;
ier = readl(port->membase + LINIER);
ier &= ~(LINFLEXD_LINIER_DTIE);
writel(ier, port->membase + LINIER);
}
static void linflex_stop_rx(struct uart_port *port)
{
unsigned long ier;
ier = readl(port->membase + LINIER);
writel(ier & ~LINFLEXD_LINIER_DRIE, port->membase + LINIER);
}
static void linflex_put_char(struct uart_port *sport, unsigned char c)
{
unsigned long status;
writeb(c, sport->membase + BDRL);
/* Wait until the data transmission is completed. */
while (((status = readl(sport->membase + UARTSR)) &
LINFLEXD_UARTSR_DTFTFF) !=
LINFLEXD_UARTSR_DTFTFF)
;
writel(status | LINFLEXD_UARTSR_DTFTFF, sport->membase + UARTSR);
}
static inline void linflex_transmit_buffer(struct uart_port *sport)
{
struct circ_buf *xmit = &sport->state->xmit;
while (!uart_circ_empty(xmit)) {
linflex_put_char(sport, xmit->buf[xmit->tail]);
uart_xmit_advance(sport, 1);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(sport);
if (uart_circ_empty(xmit))
linflex_stop_tx(sport);
}
static void linflex_start_tx(struct uart_port *port)
{
unsigned long ier;
linflex_transmit_buffer(port);
ier = readl(port->membase + LINIER);
writel(ier | LINFLEXD_LINIER_DTIE, port->membase + LINIER);
}
static irqreturn_t linflex_txint(int irq, void *dev_id)
{
struct uart_port *sport = dev_id;
struct circ_buf *xmit = &sport->state->xmit;
unsigned long flags;
spin_lock_irqsave(&sport->lock, flags);
if (sport->x_char) {
linflex_put_char(sport, sport->x_char);
goto out;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(sport)) {
linflex_stop_tx(sport);
goto out;
}
linflex_transmit_buffer(sport);
out:
spin_unlock_irqrestore(&sport->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t linflex_rxint(int irq, void *dev_id)
{
struct uart_port *sport = dev_id;
unsigned int flg;
struct tty_port *port = &sport->state->port;
unsigned long flags, status;
unsigned char rx;
bool brk;
spin_lock_irqsave(&sport->lock, flags);
status = readl(sport->membase + UARTSR);
while (status & LINFLEXD_UARTSR_RMB) {
rx = readb(sport->membase + BDRM);
brk = false;
flg = TTY_NORMAL;
sport->icount.rx++;
if (status & (LINFLEXD_UARTSR_BOF | LINFLEXD_UARTSR_FEF |
LINFLEXD_UARTSR_PE)) {
if (status & LINFLEXD_UARTSR_BOF)
sport->icount.overrun++;
if (status & LINFLEXD_UARTSR_FEF) {
if (!rx) {
brk = true;
sport->icount.brk++;
} else
sport->icount.frame++;
}
if (status & LINFLEXD_UARTSR_PE)
sport->icount.parity++;
}
writel(status, sport->membase + UARTSR);
status = readl(sport->membase + UARTSR);
if (brk) {
uart_handle_break(sport);
} else {
if (uart_handle_sysrq_char(sport, (unsigned char)rx))
continue;
tty_insert_flip_char(port, rx, flg);
}
}
spin_unlock_irqrestore(&sport->lock, flags);
tty_flip_buffer_push(port);
return IRQ_HANDLED;
}
static irqreturn_t linflex_int(int irq, void *dev_id)
{
struct uart_port *sport = dev_id;
unsigned long status;
status = readl(sport->membase + UARTSR);
if (status & LINFLEXD_UARTSR_DRFRFE)
linflex_rxint(irq, dev_id);
if (status & LINFLEXD_UARTSR_DTFTFF)
linflex_txint(irq, dev_id);
return IRQ_HANDLED;
}
/* return TIOCSER_TEMT when transmitter is not busy */
static unsigned int linflex_tx_empty(struct uart_port *port)
{
unsigned long status;
status = readl(port->membase + UARTSR) & LINFLEXD_UARTSR_DTFTFF;
return status ? TIOCSER_TEMT : 0;
}
static unsigned int linflex_get_mctrl(struct uart_port *port)
{
return 0;
}
static void linflex_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
static void linflex_break_ctl(struct uart_port *port, int break_state)
{
}
static void linflex_setup_watermark(struct uart_port *sport)
{
unsigned long cr, ier, cr1;
/* Disable transmission/reception */
ier = readl(sport->membase + LINIER);
ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE);
writel(ier, sport->membase + LINIER);
cr = readl(sport->membase + UARTCR);
cr &= ~(LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN);
writel(cr, sport->membase + UARTCR);
/* Enter initialization mode by setting INIT bit */
/* set the Linflex in master mode and activate by-pass filter */
cr1 = LINFLEXD_LINCR1_BF | LINFLEXD_LINCR1_MME
| LINFLEXD_LINCR1_INIT;
writel(cr1, sport->membase + LINCR1);
/* wait for init mode entry */
while ((readl(sport->membase + LINSR)
& LINFLEXD_LINSR_LINS_MASK)
!= LINFLEXD_LINSR_LINS_INITMODE)
;
/*
* UART = 0x1; - Linflex working in UART mode
* TXEN = 0x1; - Enable transmission of data now
* RXEN = 0x1; - Receiver enabled
* WL0 = 0x1; - 8 bit data
* PCE = 0x0; - No parity
*/
/* set UART bit to allow writing other bits */
writel(LINFLEXD_UARTCR_UART, sport->membase + UARTCR);
cr = (LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN |
LINFLEXD_UARTCR_WL0 | LINFLEXD_UARTCR_UART);
writel(cr, sport->membase + UARTCR);
cr1 &= ~(LINFLEXD_LINCR1_INIT);
writel(cr1, sport->membase + LINCR1);
ier = readl(sport->membase + LINIER);
ier |= LINFLEXD_LINIER_DRIE;
ier |= LINFLEXD_LINIER_DTIE;
writel(ier, sport->membase + LINIER);
}
static int linflex_startup(struct uart_port *port)
{
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
linflex_setup_watermark(port);
spin_unlock_irqrestore(&port->lock, flags);
ret = devm_request_irq(port->dev, port->irq, linflex_int, 0,
DRIVER_NAME, port);
return ret;
}
static void linflex_shutdown(struct uart_port *port)
{
unsigned long ier;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* disable interrupts */
ier = readl(port->membase + LINIER);
ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE);
writel(ier, port->membase + LINIER);
spin_unlock_irqrestore(&port->lock, flags);
devm_free_irq(port->dev, port->irq, port);
}
static void
linflex_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
unsigned long flags;
unsigned long cr, old_cr, cr1;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
cr = readl(port->membase + UARTCR);
old_cr = cr;
/* Enter initialization mode by setting INIT bit */
cr1 = readl(port->membase + LINCR1);
cr1 |= LINFLEXD_LINCR1_INIT;
writel(cr1, port->membase + LINCR1);
/* wait for init mode entry */
while ((readl(port->membase + LINSR)
& LINFLEXD_LINSR_LINS_MASK)
!= LINFLEXD_LINSR_LINS_INITMODE)
;
/*
* Only CS8 and CS7 are supported; CS7 requires parity (PE) enabled.
* Supported modes:
* - (7,e/o,1)
* - (8,n,1)
* - (8,e/o,1)
*/
/* force the word length to a supported value (CS7 or CS8) */
while ((termios->c_cflag & CSIZE) != CS8 &&
(termios->c_cflag & CSIZE) != CS7) {
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= old_csize;
old_csize = CS8;
}
if ((termios->c_cflag & CSIZE) == CS7) {
/* Word length: WL1WL0:00 */
cr = old_cr & ~LINFLEXD_UARTCR_WL1 & ~LINFLEXD_UARTCR_WL0;
}
if ((termios->c_cflag & CSIZE) == CS8) {
/* Word length: WL1WL0:01 */
cr = (old_cr | LINFLEXD_UARTCR_WL0) & ~LINFLEXD_UARTCR_WL1;
}
if (termios->c_cflag & CMSPAR) {
if ((termios->c_cflag & CSIZE) != CS8) {
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= CS8;
}
/* has a space/sticky bit */
cr |= LINFLEXD_UARTCR_WL0;
}
if (termios->c_cflag & CSTOPB)
termios->c_cflag &= ~CSTOPB;
/* parity must be enabled when CS7 to match 8-bits format */
if ((termios->c_cflag & CSIZE) == CS7)
termios->c_cflag |= PARENB;
if ((termios->c_cflag & PARENB)) {
cr |= LINFLEXD_UARTCR_PCE;
if (termios->c_cflag & PARODD)
cr = (cr | LINFLEXD_UARTCR_PC0) &
(~LINFLEXD_UARTCR_PC1);
else
cr = cr & (~LINFLEXD_UARTCR_PC1 &
~LINFLEXD_UARTCR_PC0);
} else {
cr &= ~LINFLEXD_UARTCR_PCE;
}
spin_lock_irqsave(&port->lock, flags);
port->read_status_mask = 0;
if (termios->c_iflag & INPCK)
port->read_status_mask |= (LINFLEXD_UARTSR_FEF |
LINFLEXD_UARTSR_PE0 |
LINFLEXD_UARTSR_PE1 |
LINFLEXD_UARTSR_PE2 |
LINFLEXD_UARTSR_PE3);
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= LINFLEXD_UARTSR_FEF;
/* characters to ignore */
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= LINFLEXD_UARTSR_PE;
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= LINFLEXD_UARTSR_PE;
/*
* if we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= LINFLEXD_UARTSR_BOF;
}
writel(cr, port->membase + UARTCR);
cr1 &= ~(LINFLEXD_LINCR1_INIT);
writel(cr1, port->membase + LINCR1);
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *linflex_type(struct uart_port *port)
{
return "FSL_LINFLEX";
}
static void linflex_release_port(struct uart_port *port)
{
/* nothing to do */
}
static int linflex_request_port(struct uart_port *port)
{
return 0;
}
/* configure/auto-configure the port */
static void linflex_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_LINFLEXUART;
}
static const struct uart_ops linflex_pops = {
.tx_empty = linflex_tx_empty,
.set_mctrl = linflex_set_mctrl,
.get_mctrl = linflex_get_mctrl,
.stop_tx = linflex_stop_tx,
.start_tx = linflex_start_tx,
.stop_rx = linflex_stop_rx,
.break_ctl = linflex_break_ctl,
.startup = linflex_startup,
.shutdown = linflex_shutdown,
.set_termios = linflex_set_termios,
.type = linflex_type,
.request_port = linflex_request_port,
.release_port = linflex_release_port,
.config_port = linflex_config_port,
};
static struct uart_port *linflex_ports[UART_NR];
#ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE
static void linflex_console_putchar(struct uart_port *port, unsigned char ch)
{
unsigned long cr;
cr = readl(port->membase + UARTCR);
writeb(ch, port->membase + BDRL);
if (!(cr & LINFLEXD_UARTCR_TFBM))
while ((readl(port->membase + UARTSR) &
LINFLEXD_UARTSR_DTFTFF)
!= LINFLEXD_UARTSR_DTFTFF)
;
else
while (readl(port->membase + UARTSR) &
LINFLEXD_UARTSR_DTFTFF)
;
if (!(cr & LINFLEXD_UARTCR_TFBM)) {
writel((readl(port->membase + UARTSR) |
LINFLEXD_UARTSR_DTFTFF),
port->membase + UARTSR);
}
}
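/*
 * The polling above is inverted between the two modes because the
 * DTFTFF status bit changes meaning: in buffer mode (TFBM clear) it
 * acts as a "data transmission completed" flag, polled until set and
 * then cleared by writing it back, while in FIFO mode (TFBM set) it
 * indicates "TX FIFO full" and is polled until it drops.
 */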
static void linflex_earlycon_putchar(struct uart_port *port, unsigned char ch)
{
unsigned long flags;
char *ret;
if (!linflex_earlycon_same_instance) {
linflex_console_putchar(port, ch);
return;
}
spin_lock_irqsave(&init_lock, flags);
if (!during_init)
goto outside_init;
if (earlycon_buf.len >= 1 << CONFIG_LOG_BUF_SHIFT)
goto init_release;
if (!earlycon_buf.cap) {
earlycon_buf.content = kmalloc(EARLYCON_BUFFER_INITIAL_CAP,
GFP_ATOMIC);
earlycon_buf.cap = earlycon_buf.content ?
EARLYCON_BUFFER_INITIAL_CAP : 0;
} else if (earlycon_buf.len == earlycon_buf.cap) {
ret = krealloc(earlycon_buf.content, earlycon_buf.cap << 1,
GFP_ATOMIC);
if (ret) {
earlycon_buf.content = ret;
earlycon_buf.cap <<= 1;
}
}
if (earlycon_buf.len < earlycon_buf.cap)
earlycon_buf.content[earlycon_buf.len++] = ch;
goto init_release;
outside_init:
linflex_console_putchar(port, ch);
init_release:
spin_unlock_irqrestore(&init_lock, flags);
}
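/*
 * Buffer growth sketch: the earlycon buffer starts at
 * EARLYCON_BUFFER_INITIAL_CAP (8) bytes and doubles on each krealloc()
 * (8 -> 16 -> 32 -> ...), giving amortized O(1) appends. Growth stops
 * once len reaches 1 << CONFIG_LOG_BUF_SHIFT; characters arriving
 * after that point during init are dropped.
 */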
static void linflex_string_write(struct uart_port *sport, const char *s,
unsigned int count)
{
unsigned long cr, ier = 0;
ier = readl(sport->membase + LINIER);
linflex_stop_tx(sport);
cr = readl(sport->membase + UARTCR);
cr |= (LINFLEXD_UARTCR_TXEN);
writel(cr, sport->membase + UARTCR);
uart_console_write(sport, s, count, linflex_console_putchar);
writel(ier, sport->membase + LINIER);
}
static void
linflex_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_port *sport = linflex_ports[co->index];
unsigned long flags;
int locked = 1;
if (sport->sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock_irqsave(&sport->lock, flags);
else
spin_lock_irqsave(&sport->lock, flags);
linflex_string_write(sport, s, count);
if (locked)
spin_unlock_irqrestore(&sport->lock, flags);
}
/*
* if the port was already initialised (e.g., by a boot loader),
* try to determine the current setup.
*/
static void __init
linflex_console_get_options(struct uart_port *sport, int *parity, int *bits)
{
unsigned long cr;
cr = readl(sport->membase + UARTCR);
cr &= LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN;
if (!cr)
return;
/* ok, the port was enabled */
*parity = 'n';
if (cr & LINFLEXD_UARTCR_PCE) {
if (cr & LINFLEXD_UARTCR_PC0)
*parity = 'o';
else
*parity = 'e';
}
if ((cr & LINFLEXD_UARTCR_WL0) && ((cr & LINFLEXD_UARTCR_WL1) == 0)) {
if (cr & LINFLEXD_UARTCR_PCE)
*bits = 9;
else
*bits = 8;
}
}
static int __init linflex_console_setup(struct console *co, char *options)
{
struct uart_port *sport;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
int ret;
int i;
unsigned long flags;
/*
* check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
* console support.
*/
if (co->index == -1 || co->index >= ARRAY_SIZE(linflex_ports))
co->index = 0;
sport = linflex_ports[co->index];
if (!sport)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
linflex_console_get_options(sport, &parity, &bits);
if (earlycon_port && sport->mapbase == earlycon_port->mapbase) {
linflex_earlycon_same_instance = true;
spin_lock_irqsave(&init_lock, flags);
during_init = true;
spin_unlock_irqrestore(&init_lock, flags);
/* Workaround for character loss or output of many invalid
* characters, when INIT mode is entered shortly after a
* character has just been printed.
*/
udelay(PREINIT_DELAY);
}
linflex_setup_watermark(sport);
ret = uart_set_options(sport, co, baud, parity, bits, flow);
if (!linflex_earlycon_same_instance)
goto done;
spin_lock_irqsave(&init_lock, flags);
/* Emptying buffer */
if (earlycon_buf.len) {
for (i = 0; i < earlycon_buf.len; i++)
linflex_console_putchar(earlycon_port,
earlycon_buf.content[i]);
kfree(earlycon_buf.content);
earlycon_buf.len = 0;
}
during_init = false;
spin_unlock_irqrestore(&init_lock, flags);
done:
return ret;
}
static struct uart_driver linflex_reg;
static struct console linflex_console = {
.name = DEV_NAME,
.write = linflex_console_write,
.device = uart_console_device,
.setup = linflex_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &linflex_reg,
};
static void linflex_earlycon_write(struct console *con, const char *s,
unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, linflex_earlycon_putchar);
}
static int __init linflex_early_console_setup(struct earlycon_device *device,
const char *options)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = linflex_earlycon_write;
earlycon_port = &device->port;
return 0;
}
OF_EARLYCON_DECLARE(linflex, "fsl,s32v234-linflexuart",
linflex_early_console_setup);
#define LINFLEX_CONSOLE (&linflex_console)
#else
#define LINFLEX_CONSOLE NULL
#endif
static struct uart_driver linflex_reg = {
.owner = THIS_MODULE,
.driver_name = DRIVER_NAME,
.dev_name = DEV_NAME,
.nr = ARRAY_SIZE(linflex_ports),
.cons = LINFLEX_CONSOLE,
};
static int linflex_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct uart_port *sport;
struct resource *res;
int ret;
sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
if (!sport)
return -ENOMEM;
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
return ret;
}
if (ret >= UART_NR) {
dev_err(&pdev->dev, "driver limited to %d serial ports\n",
UART_NR);
return -ENOMEM;
}
sport->line = ret;
sport->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(sport->membase))
return PTR_ERR(sport->membase);
sport->mapbase = res->start;
sport->dev = &pdev->dev;
sport->type = PORT_LINFLEXUART;
sport->iotype = UPIO_MEM;
sport->irq = platform_get_irq(pdev, 0);
sport->ops = &linflex_pops;
sport->flags = UPF_BOOT_AUTOCONF;
sport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE);
linflex_ports[sport->line] = sport;
platform_set_drvdata(pdev, sport);
return uart_add_one_port(&linflex_reg, sport);
}
static int linflex_remove(struct platform_device *pdev)
{
struct uart_port *sport = platform_get_drvdata(pdev);
uart_remove_one_port(&linflex_reg, sport);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int linflex_suspend(struct device *dev)
{
struct uart_port *sport = dev_get_drvdata(dev);
uart_suspend_port(&linflex_reg, sport);
return 0;
}
static int linflex_resume(struct device *dev)
{
struct uart_port *sport = dev_get_drvdata(dev);
uart_resume_port(&linflex_reg, sport);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(linflex_pm_ops, linflex_suspend, linflex_resume);
static struct platform_driver linflex_driver = {
.probe = linflex_probe,
.remove = linflex_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = linflex_dt_ids,
.pm = &linflex_pm_ops,
},
};
static int __init linflex_serial_init(void)
{
int ret;
ret = uart_register_driver(&linflex_reg);
if (ret)
return ret;
ret = platform_driver_register(&linflex_driver);
if (ret)
uart_unregister_driver(&linflex_reg);
return ret;
}
static void __exit linflex_serial_exit(void)
{
platform_driver_unregister(&linflex_driver);
uart_unregister_driver(&linflex_reg);
}
module_init(linflex_serial_init);
module_exit(linflex_serial_exit);
MODULE_DESCRIPTION("Freescale LINFlexD serial port driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/tty/serial/fsl_linflexuart.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012-2015 Spreadtrum Communications Inc.
*/
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
/* device name */
#define UART_NR_MAX 8
#define SPRD_TTY_NAME "ttyS"
#define SPRD_FIFO_SIZE 128
#define SPRD_DEF_RATE 26000000
#define SPRD_BAUD_IO_LIMIT 3000000
#define SPRD_TIMEOUT 256000
/* the offset of serial registers and BITs for them */
/* data registers */
#define SPRD_TXD 0x0000
#define SPRD_RXD 0x0004
/* line status register and its BITs */
#define SPRD_LSR 0x0008
#define SPRD_LSR_OE BIT(4)
#define SPRD_LSR_FE BIT(3)
#define SPRD_LSR_PE BIT(2)
#define SPRD_LSR_BI BIT(7)
#define SPRD_LSR_TX_OVER BIT(15)
/* data number in TX and RX fifo */
#define SPRD_STS1 0x000C
#define SPRD_RX_FIFO_CNT_MASK GENMASK(7, 0)
#define SPRD_TX_FIFO_CNT_MASK GENMASK(15, 8)
/* interrupt enable register and its BITs */
#define SPRD_IEN 0x0010
#define SPRD_IEN_RX_FULL BIT(0)
#define SPRD_IEN_TX_EMPTY BIT(1)
#define SPRD_IEN_BREAK_DETECT BIT(7)
#define SPRD_IEN_TIMEOUT BIT(13)
/* interrupt clear register */
#define SPRD_ICLR 0x0014
#define SPRD_ICLR_TIMEOUT BIT(13)
/* line control register */
#define SPRD_LCR 0x0018
#define SPRD_LCR_STOP_1BIT 0x10
#define SPRD_LCR_STOP_2BIT 0x30
#define SPRD_LCR_DATA_LEN (BIT(2) | BIT(3))
#define SPRD_LCR_DATA_LEN5 0x0
#define SPRD_LCR_DATA_LEN6 0x4
#define SPRD_LCR_DATA_LEN7 0x8
#define SPRD_LCR_DATA_LEN8 0xc
#define SPRD_LCR_PARITY (BIT(0) | BIT(1))
#define SPRD_LCR_PARITY_EN 0x2
#define SPRD_LCR_EVEN_PAR 0x0
#define SPRD_LCR_ODD_PAR 0x1
/* control register 1 */
#define SPRD_CTL1 0x001C
#define SPRD_DMA_EN BIT(15)
#define SPRD_LOOPBACK_EN BIT(14)
#define RX_HW_FLOW_CTL_THLD BIT(6)
#define RX_HW_FLOW_CTL_EN BIT(7)
#define TX_HW_FLOW_CTL_EN BIT(8)
#define RX_TOUT_THLD_DEF 0x3E00
#define RX_HFC_THLD_DEF 0x40
/* fifo threshold register */
#define SPRD_CTL2 0x0020
#define THLD_TX_EMPTY 0x40
#define THLD_TX_EMPTY_SHIFT 8
#define THLD_RX_FULL 0x40
#define THLD_RX_FULL_MASK GENMASK(6, 0)
/* config baud rate register */
#define SPRD_CLKD0 0x0024
#define SPRD_CLKD0_MASK GENMASK(15, 0)
#define SPRD_CLKD1 0x0028
#define SPRD_CLKD1_MASK GENMASK(20, 16)
#define SPRD_CLKD1_SHIFT 16
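/*
 * Baud divisor sketch (hypothetical values): sprd_set_termios()
 * computes quot = uartclk / baud, and the masks above split it across
 * the two registers, CLKD0 holding quot[15:0] and CLKD1 quot[20:16].
 * With the 26 MHz default clock and 115200 baud, quot = 225 = 0xe1,
 * so CLKD0 = 0x00e1 and CLKD1 = 0.
 */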
/* interrupt mask status register */
#define SPRD_IMSR 0x002C
#define SPRD_IMSR_RX_FIFO_FULL BIT(0)
#define SPRD_IMSR_TX_FIFO_EMPTY BIT(1)
#define SPRD_IMSR_BREAK_DETECT BIT(7)
#define SPRD_IMSR_TIMEOUT BIT(13)
#define SPRD_DEFAULT_SOURCE_CLK 26000000
#define SPRD_RX_DMA_STEP 1
#define SPRD_RX_FIFO_FULL 1
#define SPRD_TX_FIFO_FULL 0x20
#define SPRD_UART_RX_SIZE (UART_XMIT_SIZE / 4)
struct sprd_uart_dma {
struct dma_chan *chn;
unsigned char *virt;
dma_addr_t phys_addr;
dma_cookie_t cookie;
u32 trans_len;
bool enable;
};
struct sprd_uart_port {
struct uart_port port;
char name[16];
struct clk *clk;
struct sprd_uart_dma tx_dma;
struct sprd_uart_dma rx_dma;
dma_addr_t pos;
unsigned char *rx_buf_tail;
};
static struct sprd_uart_port *sprd_port[UART_NR_MAX];
static int sprd_ports_num;
static int sprd_start_dma_rx(struct uart_port *port);
static int sprd_tx_dma_config(struct uart_port *port);
static inline unsigned int serial_in(struct uart_port *port,
unsigned int offset)
{
return readl_relaxed(port->membase + offset);
}
static inline void serial_out(struct uart_port *port, unsigned int offset,
int value)
{
writel_relaxed(value, port->membase + offset);
}
static unsigned int sprd_tx_empty(struct uart_port *port)
{
if (serial_in(port, SPRD_STS1) & SPRD_TX_FIFO_CNT_MASK)
return 0;
else
return TIOCSER_TEMT;
}
static unsigned int sprd_get_mctrl(struct uart_port *port)
{
return TIOCM_DSR | TIOCM_CTS;
}
static void sprd_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
u32 val = serial_in(port, SPRD_CTL1);
if (mctrl & TIOCM_LOOP)
val |= SPRD_LOOPBACK_EN;
else
val &= ~SPRD_LOOPBACK_EN;
serial_out(port, SPRD_CTL1, val);
}
static void sprd_stop_rx(struct uart_port *port)
{
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
unsigned int ien, iclr;
if (sp->rx_dma.enable)
dmaengine_terminate_all(sp->rx_dma.chn);
iclr = serial_in(port, SPRD_ICLR);
ien = serial_in(port, SPRD_IEN);
ien &= ~(SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT);
iclr |= SPRD_IEN_RX_FULL | SPRD_IEN_BREAK_DETECT;
serial_out(port, SPRD_IEN, ien);
serial_out(port, SPRD_ICLR, iclr);
}
static void sprd_uart_dma_enable(struct uart_port *port, bool enable)
{
u32 val = serial_in(port, SPRD_CTL1);
if (enable)
val |= SPRD_DMA_EN;
else
val &= ~SPRD_DMA_EN;
serial_out(port, SPRD_CTL1, val);
}
static void sprd_stop_tx_dma(struct uart_port *port)
{
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
struct dma_tx_state state;
u32 trans_len;
dmaengine_pause(sp->tx_dma.chn);
dmaengine_tx_status(sp->tx_dma.chn, sp->tx_dma.cookie, &state);
if (state.residue) {
trans_len = state.residue - sp->tx_dma.phys_addr;
uart_xmit_advance(port, trans_len);
dma_unmap_single(port->dev, sp->tx_dma.phys_addr,
sp->tx_dma.trans_len, DMA_TO_DEVICE);
}
dmaengine_terminate_all(sp->tx_dma.chn);
sp->tx_dma.trans_len = 0;
}
static int sprd_tx_buf_remap(struct uart_port *port)
{
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
struct circ_buf *xmit = &port->state->xmit;
sp->tx_dma.trans_len =
CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
sp->tx_dma.phys_addr = dma_map_single(port->dev,
(void *)&(xmit->buf[xmit->tail]),
sp->tx_dma.trans_len,
DMA_TO_DEVICE);
return dma_mapping_error(port->dev, sp->tx_dma.phys_addr);
}
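/*
 * Only the contiguous run up to the end of the circular buffer is
 * mapped here; a wrapped remainder is picked up by a second
 * remap/submit from sprd_complete_tx_dma(). Illustrative numbers:
 * with UART_XMIT_SIZE = 4096, tail = 4080 and head = 16,
 * CIRC_CNT_TO_END() = 16, so this pass maps buf[4080..4095] and the
 * remaining 16 bytes at buf[0..15] go out on the next completion.
 */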
static void sprd_complete_tx_dma(void *data)
{
struct uart_port *port = (struct uart_port *)data;
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
struct circ_buf *xmit = &port->state->xmit;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
dma_unmap_single(port->dev, sp->tx_dma.phys_addr,
sp->tx_dma.trans_len, DMA_TO_DEVICE);
uart_xmit_advance(port, sp->tx_dma.trans_len);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (uart_circ_empty(xmit) || sprd_tx_buf_remap(port) ||
sprd_tx_dma_config(port))
sp->tx_dma.trans_len = 0;
spin_unlock_irqrestore(&port->lock, flags);
}
static int sprd_uart_dma_submit(struct uart_port *port,
struct sprd_uart_dma *ud, u32 trans_len,
enum dma_transfer_direction direction,
dma_async_tx_callback callback)
{
struct dma_async_tx_descriptor *dma_des;
unsigned long flags;
flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE,
SPRD_DMA_NO_TRG,
SPRD_DMA_FRAG_REQ,
SPRD_DMA_TRANS_INT);
dma_des = dmaengine_prep_slave_single(ud->chn, ud->phys_addr, trans_len,
direction, flags);
if (!dma_des)
return -ENODEV;
dma_des->callback = callback;
dma_des->callback_param = port;
ud->cookie = dmaengine_submit(dma_des);
if (dma_submit_error(ud->cookie))
return dma_submit_error(ud->cookie);
dma_async_issue_pending(ud->chn);
return 0;
}
static int sprd_tx_dma_config(struct uart_port *port)
{
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
u32 burst = sp->tx_dma.trans_len > SPRD_TX_FIFO_FULL ?
SPRD_TX_FIFO_FULL : sp->tx_dma.trans_len;
int ret;
struct dma_slave_config cfg = {
.dst_addr = port->mapbase + SPRD_TXD,
.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.src_maxburst = burst,
};
ret = dmaengine_slave_config(sp->tx_dma.chn, &cfg);
if (ret < 0)
return ret;
return sprd_uart_dma_submit(port, &sp->tx_dma, sp->tx_dma.trans_len,
DMA_MEM_TO_DEV, sprd_complete_tx_dma);
}
static void sprd_start_tx_dma(struct uart_port *port)
{
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
struct circ_buf *xmit = &port->state->xmit;
if (port->x_char) {
serial_out(port, SPRD_TXD, port->x_char);
port->icount.tx++;
port->x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
sprd_stop_tx_dma(port);
return;
}
if (sp->tx_dma.trans_len)
return;
if (sprd_tx_buf_remap(port) || sprd_tx_dma_config(port))
sp->tx_dma.trans_len = 0;
}
static void sprd_rx_full_thld(struct uart_port *port, u32 thld)
{
u32 val = serial_in(port, SPRD_CTL2);
val &= ~THLD_RX_FULL_MASK;
val |= thld & THLD_RX_FULL_MASK;
serial_out(port, SPRD_CTL2, val);
}
static int sprd_rx_alloc_buf(struct sprd_uart_port *sp)
{
sp->rx_dma.virt = dma_alloc_coherent(sp->port.dev, SPRD_UART_RX_SIZE,
&sp->rx_dma.phys_addr, GFP_KERNEL);
if (!sp->rx_dma.virt)
return -ENOMEM;
return 0;
}
static void sprd_rx_free_buf(struct sprd_uart_port *sp)
{
if (sp->rx_dma.virt)
dma_free_coherent(sp->port.dev, SPRD_UART_RX_SIZE,
sp->rx_dma.virt, sp->rx_dma.phys_addr);
sp->rx_dma.virt = NULL;
}
static int sprd_rx_dma_config(struct uart_port *port, u32 burst)
{
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
struct dma_slave_config cfg = {
.src_addr = port->mapbase + SPRD_RXD,
.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.src_maxburst = burst,
};
return dmaengine_slave_config(sp->rx_dma.chn, &cfg);
}
static void sprd_uart_dma_rx(struct uart_port *port)
{
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
struct tty_port *tty = &port->state->port;
port->icount.rx += sp->rx_dma.trans_len;
tty_insert_flip_string(tty, sp->rx_buf_tail, sp->rx_dma.trans_len);
tty_flip_buffer_push(tty);
}
static void sprd_uart_dma_irq(struct uart_port *port)
{
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
struct dma_tx_state state;
enum dma_status status;
status = dmaengine_tx_status(sp->rx_dma.chn,
sp->rx_dma.cookie, &state);
if (status == DMA_ERROR)
sprd_stop_rx(port);
if (!state.residue && sp->pos == sp->rx_dma.phys_addr)
return;
if (!state.residue) {
sp->rx_dma.trans_len = SPRD_UART_RX_SIZE +
sp->rx_dma.phys_addr - sp->pos;
sp->pos = sp->rx_dma.phys_addr;
} else {
sp->rx_dma.trans_len = state.residue - sp->pos;
sp->pos = state.residue;
}
sprd_uart_dma_rx(port);
sp->rx_buf_tail += sp->rx_dma.trans_len;
}
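/*
 * Position tracking sketch: sp->pos caches the bus address up to which
 * RX data has been consumed, and state.residue reports the DMA write
 * cursor as a bus address in this driver's convention. Hypothetical
 * example with phys_addr = 0x80001000: residue = 0x80001040 and
 * pos = 0x80001000 yield 0x40 fresh bytes; residue == 0 means the
 * transfer completed, so everything from pos to the end of the
 * SPRD_UART_RX_SIZE buffer is drained and pos wraps back to phys_addr.
 */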
static void sprd_complete_rx_dma(void *data)
{
struct uart_port *port = (struct uart_port *)data;
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
struct dma_tx_state state;
enum dma_status status;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
status = dmaengine_tx_status(sp->rx_dma.chn,
sp->rx_dma.cookie, &state);
if (status != DMA_COMPLETE) {
sprd_stop_rx(port);
spin_unlock_irqrestore(&port->lock, flags);
return;
}
if (sp->pos != sp->rx_dma.phys_addr) {
sp->rx_dma.trans_len = SPRD_UART_RX_SIZE +
sp->rx_dma.phys_addr - sp->pos;
sprd_uart_dma_rx(port);
sp->rx_buf_tail += sp->rx_dma.trans_len;
}
if (sprd_start_dma_rx(port))
sprd_stop_rx(port);
spin_unlock_irqrestore(&port->lock, flags);
}
static int sprd_start_dma_rx(struct uart_port *port)
{
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
int ret;
if (!sp->rx_dma.enable)
return 0;
sp->pos = sp->rx_dma.phys_addr;
sp->rx_buf_tail = sp->rx_dma.virt;
sprd_rx_full_thld(port, SPRD_RX_FIFO_FULL);
ret = sprd_rx_dma_config(port, SPRD_RX_DMA_STEP);
if (ret)
return ret;
return sprd_uart_dma_submit(port, &sp->rx_dma, SPRD_UART_RX_SIZE,
DMA_DEV_TO_MEM, sprd_complete_rx_dma);
}
static void sprd_release_dma(struct uart_port *port)
{
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
sprd_uart_dma_enable(port, false);
if (sp->rx_dma.enable)
dma_release_channel(sp->rx_dma.chn);
if (sp->tx_dma.enable)
dma_release_channel(sp->tx_dma.chn);
sp->tx_dma.enable = false;
sp->rx_dma.enable = false;
}
static void sprd_request_dma(struct uart_port *port)
{
struct sprd_uart_port *sp =
container_of(port, struct sprd_uart_port, port);
sp->tx_dma.enable = true;
sp->rx_dma.enable = true;
sp->tx_dma.chn = dma_request_chan(port->dev, "tx");
if (IS_ERR(sp->tx_dma.chn)) {
dev_err(port->dev, "request TX DMA channel failed, ret = %ld\n",
PTR_ERR(sp->tx_dma.chn));
sp->tx_dma.enable = false;
}
sp->rx_dma.chn = dma_request_chan(port->dev, "rx");
if (IS_ERR(sp->rx_dma.chn)) {
dev_err(port->dev, "request RX DMA channel failed, ret = %ld\n",
PTR_ERR(sp->rx_dma.chn));
sp->rx_dma.enable = false;
}
}
static void sprd_stop_tx(struct uart_port *port)
{
struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port,
port);
unsigned int ien, iclr;
if (sp->tx_dma.enable) {
sprd_stop_tx_dma(port);
return;
}
iclr = serial_in(port, SPRD_ICLR);
ien = serial_in(port, SPRD_IEN);
iclr |= SPRD_IEN_TX_EMPTY;
ien &= ~SPRD_IEN_TX_EMPTY;
serial_out(port, SPRD_IEN, ien);
serial_out(port, SPRD_ICLR, iclr);
}
static void sprd_start_tx(struct uart_port *port)
{
struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port,
port);
unsigned int ien;
if (sp->tx_dma.enable) {
sprd_start_tx_dma(port);
return;
}
ien = serial_in(port, SPRD_IEN);
if (!(ien & SPRD_IEN_TX_EMPTY)) {
ien |= SPRD_IEN_TX_EMPTY;
serial_out(port, SPRD_IEN, ien);
}
}
/* The Sprd serial controller does not support break control. */
static void sprd_break_ctl(struct uart_port *port, int break_state)
{
/* nothing to do */
}
static int handle_lsr_errors(struct uart_port *port,
u8 *flag,
unsigned int *lsr)
{
int ret = 0;
/* statistics */
if (*lsr & SPRD_LSR_BI) {
*lsr &= ~(SPRD_LSR_FE | SPRD_LSR_PE);
port->icount.brk++;
ret = uart_handle_break(port);
if (ret)
return ret;
} else if (*lsr & SPRD_LSR_PE)
port->icount.parity++;
else if (*lsr & SPRD_LSR_FE)
port->icount.frame++;
if (*lsr & SPRD_LSR_OE)
port->icount.overrun++;
/* mask off conditions which should be ignored */
*lsr &= port->read_status_mask;
if (*lsr & SPRD_LSR_BI)
*flag = TTY_BREAK;
else if (*lsr & SPRD_LSR_PE)
*flag = TTY_PARITY;
else if (*lsr & SPRD_LSR_FE)
*flag = TTY_FRAME;
return ret;
}
static inline void sprd_rx(struct uart_port *port)
{
struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port,
port);
struct tty_port *tty = &port->state->port;
unsigned int lsr, max_count = SPRD_TIMEOUT;
u8 ch, flag;
if (sp->rx_dma.enable) {
sprd_uart_dma_irq(port);
return;
}
while ((serial_in(port, SPRD_STS1) & SPRD_RX_FIFO_CNT_MASK) &&
max_count--) {
lsr = serial_in(port, SPRD_LSR);
ch = serial_in(port, SPRD_RXD);
flag = TTY_NORMAL;
port->icount.rx++;
if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
SPRD_LSR_FE | SPRD_LSR_OE))
if (handle_lsr_errors(port, &flag, &lsr))
continue;
if (uart_handle_sysrq_char(port, ch))
continue;
uart_insert_char(port, lsr, SPRD_LSR_OE, ch, flag);
}
tty_flip_buffer_push(tty);
}
static inline void sprd_tx(struct uart_port *port)
{
u8 ch;
uart_port_tx_limited(port, ch, THLD_TX_EMPTY,
true,
serial_out(port, SPRD_TXD, ch),
({}));
}
/* this handles the interrupt from one port */
static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
unsigned int ims;
spin_lock(&port->lock);
ims = serial_in(port, SPRD_IMSR);
if (!ims) {
spin_unlock(&port->lock);
return IRQ_NONE;
}
if (ims & SPRD_IMSR_TIMEOUT)
serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT);
if (ims & SPRD_IMSR_BREAK_DETECT)
serial_out(port, SPRD_ICLR, SPRD_IMSR_BREAK_DETECT);
if (ims & (SPRD_IMSR_RX_FIFO_FULL | SPRD_IMSR_BREAK_DETECT |
SPRD_IMSR_TIMEOUT))
sprd_rx(port);
if (ims & SPRD_IMSR_TX_FIFO_EMPTY)
sprd_tx(port);
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
static void sprd_uart_dma_startup(struct uart_port *port,
struct sprd_uart_port *sp)
{
int ret;
sprd_request_dma(port);
if (!(sp->rx_dma.enable || sp->tx_dma.enable))
return;
ret = sprd_start_dma_rx(port);
if (ret) {
sp->rx_dma.enable = false;
dma_release_channel(sp->rx_dma.chn);
dev_warn(port->dev, "fail to start RX dma mode\n");
}
sprd_uart_dma_enable(port, true);
}
static int sprd_startup(struct uart_port *port)
{
int ret = 0;
unsigned int ien, fc;
unsigned int timeout;
struct sprd_uart_port *sp;
unsigned long flags;
serial_out(port, SPRD_CTL2,
THLD_TX_EMPTY << THLD_TX_EMPTY_SHIFT | THLD_RX_FULL);
/* clear rx fifo */
timeout = SPRD_TIMEOUT;
while (timeout-- && serial_in(port, SPRD_STS1) & SPRD_RX_FIFO_CNT_MASK)
serial_in(port, SPRD_RXD);
/* clear tx fifo */
timeout = SPRD_TIMEOUT;
while (timeout-- && serial_in(port, SPRD_STS1) & SPRD_TX_FIFO_CNT_MASK)
cpu_relax();
/* clear interrupt */
serial_out(port, SPRD_IEN, 0);
serial_out(port, SPRD_ICLR, ~0);
/* allocate irq */
sp = container_of(port, struct sprd_uart_port, port);
snprintf(sp->name, sizeof(sp->name), "sprd_serial%d", port->line);
sprd_uart_dma_startup(port, sp);
ret = devm_request_irq(port->dev, port->irq, sprd_handle_irq,
IRQF_SHARED, sp->name, port);
if (ret) {
dev_err(port->dev, "fail to request serial irq %d, ret=%d\n",
port->irq, ret);
return ret;
}
fc = serial_in(port, SPRD_CTL1);
fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF;
serial_out(port, SPRD_CTL1, fc);
/* enable interrupt */
spin_lock_irqsave(&port->lock, flags);
ien = serial_in(port, SPRD_IEN);
ien |= SPRD_IEN_BREAK_DETECT | SPRD_IEN_TIMEOUT;
if (!sp->rx_dma.enable)
ien |= SPRD_IEN_RX_FULL;
serial_out(port, SPRD_IEN, ien);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
static void sprd_shutdown(struct uart_port *port)
{
sprd_release_dma(port);
serial_out(port, SPRD_IEN, 0);
serial_out(port, SPRD_ICLR, ~0);
devm_free_irq(port->dev, port->irq, port);
}
static void sprd_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
unsigned int baud, quot;
unsigned int lcr = 0, fc;
unsigned long flags;
/* ask the core to calculate the divisor for us */
baud = uart_get_baud_rate(port, termios, old, 0, SPRD_BAUD_IO_LIMIT);
quot = port->uartclk / baud;
/* set data length */
switch (termios->c_cflag & CSIZE) {
case CS5:
lcr |= SPRD_LCR_DATA_LEN5;
break;
case CS6:
lcr |= SPRD_LCR_DATA_LEN6;
break;
case CS7:
lcr |= SPRD_LCR_DATA_LEN7;
break;
case CS8:
default:
lcr |= SPRD_LCR_DATA_LEN8;
break;
}
/* calculate stop bits */
lcr &= ~(SPRD_LCR_STOP_1BIT | SPRD_LCR_STOP_2BIT);
if (termios->c_cflag & CSTOPB)
lcr |= SPRD_LCR_STOP_2BIT;
else
lcr |= SPRD_LCR_STOP_1BIT;
/* calculate parity */
lcr &= ~SPRD_LCR_PARITY;
termios->c_cflag &= ~CMSPAR; /* mark/space parity is not supported */
if (termios->c_cflag & PARENB) {
lcr |= SPRD_LCR_PARITY_EN;
if (termios->c_cflag & PARODD)
lcr |= SPRD_LCR_ODD_PAR;
else
lcr |= SPRD_LCR_EVEN_PAR;
}
spin_lock_irqsave(&port->lock, flags);
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
port->read_status_mask = SPRD_LSR_OE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= SPRD_LSR_FE | SPRD_LSR_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= SPRD_LSR_BI;
/* characters to ignore */
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= SPRD_LSR_PE | SPRD_LSR_FE;
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= SPRD_LSR_BI;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= SPRD_LSR_OE;
}
/* flow control */
fc = serial_in(port, SPRD_CTL1);
fc &= ~(RX_HW_FLOW_CTL_THLD | RX_HW_FLOW_CTL_EN | TX_HW_FLOW_CTL_EN);
if (termios->c_cflag & CRTSCTS) {
fc |= RX_HW_FLOW_CTL_THLD;
fc |= RX_HW_FLOW_CTL_EN;
fc |= TX_HW_FLOW_CTL_EN;
}
/* clock divider bit0~bit15 */
serial_out(port, SPRD_CLKD0, quot & SPRD_CLKD0_MASK);
/* clock divider bit16~bit20 */
serial_out(port, SPRD_CLKD1,
(quot & SPRD_CLKD1_MASK) >> SPRD_CLKD1_SHIFT);
serial_out(port, SPRD_LCR, lcr);
fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF;
serial_out(port, SPRD_CTL1, fc);
spin_unlock_irqrestore(&port->lock, flags);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
}
static const char *sprd_type(struct uart_port *port)
{
return "SPX";
}
static void sprd_release_port(struct uart_port *port)
{
/* nothing to do */
}
static int sprd_request_port(struct uart_port *port)
{
return 0;
}
static void sprd_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_SPRD;
}
static int sprd_verify_port(struct uart_port *port, struct serial_struct *ser)
{
if (ser->type != PORT_SPRD)
return -EINVAL;
if (port->irq != ser->irq)
return -EINVAL;
if (port->iotype != ser->io_type)
return -EINVAL;
return 0;
}
static void sprd_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct sprd_uart_port *sup =
container_of(port, struct sprd_uart_port, port);
switch (state) {
case UART_PM_STATE_ON:
clk_prepare_enable(sup->clk);
break;
case UART_PM_STATE_OFF:
clk_disable_unprepare(sup->clk);
break;
}
}
#ifdef CONFIG_CONSOLE_POLL
static int sprd_poll_init(struct uart_port *port)
{
if (port->state->pm_state != UART_PM_STATE_ON) {
sprd_pm(port, UART_PM_STATE_ON, 0);
port->state->pm_state = UART_PM_STATE_ON;
}
return 0;
}
static int sprd_poll_get_char(struct uart_port *port)
{
while (!(serial_in(port, SPRD_STS1) & SPRD_RX_FIFO_CNT_MASK))
cpu_relax();
return serial_in(port, SPRD_RXD);
}
static void sprd_poll_put_char(struct uart_port *port, unsigned char ch)
{
while (serial_in(port, SPRD_STS1) & SPRD_TX_FIFO_CNT_MASK)
cpu_relax();
serial_out(port, SPRD_TXD, ch);
}
#endif
static const struct uart_ops serial_sprd_ops = {
.tx_empty = sprd_tx_empty,
.get_mctrl = sprd_get_mctrl,
.set_mctrl = sprd_set_mctrl,
.stop_tx = sprd_stop_tx,
.start_tx = sprd_start_tx,
.stop_rx = sprd_stop_rx,
.break_ctl = sprd_break_ctl,
.startup = sprd_startup,
.shutdown = sprd_shutdown,
.set_termios = sprd_set_termios,
.type = sprd_type,
.release_port = sprd_release_port,
.request_port = sprd_request_port,
.config_port = sprd_config_port,
.verify_port = sprd_verify_port,
.pm = sprd_pm,
#ifdef CONFIG_CONSOLE_POLL
.poll_init = sprd_poll_init,
.poll_get_char = sprd_poll_get_char,
.poll_put_char = sprd_poll_put_char,
#endif
};
#ifdef CONFIG_SERIAL_SPRD_CONSOLE
static void wait_for_xmitr(struct uart_port *port)
{
unsigned int status, tmout = 10000;
/* wait up to 10ms for the character(s) to be sent */
do {
status = serial_in(port, SPRD_STS1);
if (--tmout == 0)
break;
udelay(1);
} while (status & SPRD_TX_FIFO_CNT_MASK);
}
static void sprd_console_putchar(struct uart_port *port, unsigned char ch)
{
wait_for_xmitr(port);
serial_out(port, SPRD_TXD, ch);
}
static void sprd_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port = &sprd_port[co->index]->port;
int locked = 1;
unsigned long flags;
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
uart_console_write(port, s, count, sprd_console_putchar);
/* wait for transmitter to become empty */
wait_for_xmitr(port);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
}
static int sprd_console_setup(struct console *co, char *options)
{
struct sprd_uart_port *sprd_uart_port;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index >= UART_NR_MAX || co->index < 0)
co->index = 0;
sprd_uart_port = sprd_port[co->index];
if (!sprd_uart_port || !sprd_uart_port->port.membase) {
pr_info("serial port %d not yet initialized\n", co->index);
return -ENODEV;
}
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&sprd_uart_port->port, co, baud,
parity, bits, flow);
}
static struct uart_driver sprd_uart_driver;
static struct console sprd_console = {
.name = SPRD_TTY_NAME,
.write = sprd_console_write,
.device = uart_console_device,
.setup = sprd_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &sprd_uart_driver,
};
static int __init sprd_serial_console_init(void)
{
register_console(&sprd_console);
return 0;
}
console_initcall(sprd_serial_console_init);
#define SPRD_CONSOLE (&sprd_console)
/* Support for earlycon */
static void sprd_putc(struct uart_port *port, unsigned char c)
{
unsigned int timeout = SPRD_TIMEOUT;
while (timeout-- &&
!(readl(port->membase + SPRD_LSR) & SPRD_LSR_TX_OVER))
cpu_relax();
writeb(c, port->membase + SPRD_TXD);
}
static void sprd_early_write(struct console *con, const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, sprd_putc);
}
static int __init sprd_early_console_setup(struct earlycon_device *device,
const char *opt)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = sprd_early_write;
return 0;
}
OF_EARLYCON_DECLARE(sprd_serial, "sprd,sc9836-uart",
sprd_early_console_setup);
#else /* !CONFIG_SERIAL_SPRD_CONSOLE */
#define SPRD_CONSOLE NULL
#endif
static struct uart_driver sprd_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "sprd_serial",
.dev_name = SPRD_TTY_NAME,
.major = 0,
.minor = 0,
.nr = UART_NR_MAX,
.cons = SPRD_CONSOLE,
};
static int sprd_remove(struct platform_device *dev)
{
struct sprd_uart_port *sup = platform_get_drvdata(dev);
if (sup) {
uart_remove_one_port(&sprd_uart_driver, &sup->port);
sprd_port[sup->port.line] = NULL;
sprd_rx_free_buf(sup);
sprd_ports_num--;
}
if (!sprd_ports_num)
uart_unregister_driver(&sprd_uart_driver);
return 0;
}
static bool sprd_uart_is_console(struct uart_port *uport)
{
struct console *cons = sprd_uart_driver.cons;
if ((cons && cons->index >= 0 && cons->index == uport->line) ||
of_console_check(uport->dev->of_node, SPRD_TTY_NAME, uport->line))
return true;
return false;
}
static int sprd_clk_init(struct uart_port *uport)
{
struct clk *clk_uart, *clk_parent;
struct sprd_uart_port *u = container_of(uport, struct sprd_uart_port, port);
clk_uart = devm_clk_get(uport->dev, "uart");
if (IS_ERR(clk_uart)) {
dev_warn(uport->dev, "uart%d can't get uart clock\n",
uport->line);
clk_uart = NULL;
}
clk_parent = devm_clk_get(uport->dev, "source");
if (IS_ERR(clk_parent)) {
dev_warn(uport->dev, "uart%d can't get source clock\n",
uport->line);
clk_parent = NULL;
}
if (!clk_uart || clk_set_parent(clk_uart, clk_parent))
uport->uartclk = SPRD_DEFAULT_SOURCE_CLK;
else
uport->uartclk = clk_get_rate(clk_uart);
u->clk = devm_clk_get(uport->dev, "enable");
if (IS_ERR(u->clk)) {
if (PTR_ERR(u->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_warn(uport->dev, "uart%d can't get enable clock\n",
uport->line);
/* Keep the console alive even if its enable clock is missing */
if (!sprd_uart_is_console(uport))
return PTR_ERR(u->clk);
u->clk = NULL;
}
return 0;
}
static int sprd_probe(struct platform_device *pdev)
{
struct resource *res;
struct uart_port *up;
struct sprd_uart_port *sport;
int irq;
int index;
int ret;
index = of_alias_get_id(pdev->dev.of_node, "serial");
if (index < 0 || index >= UART_NR_MAX) {
dev_err(&pdev->dev, "got a wrong serial alias id %d\n", index);
return -EINVAL;
}
sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
if (!sport)
return -ENOMEM;
up = &sport->port;
up->dev = &pdev->dev;
up->line = index;
up->type = PORT_SPRD;
up->iotype = UPIO_MEM;
up->uartclk = SPRD_DEF_RATE;
up->fifosize = SPRD_FIFO_SIZE;
up->ops = &serial_sprd_ops;
up->flags = UPF_BOOT_AUTOCONF;
up->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SPRD_CONSOLE);
ret = sprd_clk_init(up);
if (ret)
return ret;
up->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(up->membase))
return PTR_ERR(up->membase);
up->mapbase = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
up->irq = irq;
/*
* Allocate one DMA buffer up front for receive transfers, so that a
* memory allocation failure cannot occur at runtime.
*/
ret = sprd_rx_alloc_buf(sport);
if (ret)
return ret;
if (!sprd_ports_num) {
ret = uart_register_driver(&sprd_uart_driver);
if (ret < 0) {
pr_err("Failed to register SPRD-UART driver\n");
goto free_rx_buf;
}
}
sprd_ports_num++;
sprd_port[index] = sport;
ret = uart_add_one_port(&sprd_uart_driver, up);
if (ret)
goto clean_port;
platform_set_drvdata(pdev, up);
return 0;
clean_port:
sprd_port[index] = NULL;
if (--sprd_ports_num == 0)
uart_unregister_driver(&sprd_uart_driver);
free_rx_buf:
sprd_rx_free_buf(sport);
return ret;
}
#ifdef CONFIG_PM_SLEEP
static int sprd_suspend(struct device *dev)
{
struct sprd_uart_port *sup = dev_get_drvdata(dev);
uart_suspend_port(&sprd_uart_driver, &sup->port);
return 0;
}
static int sprd_resume(struct device *dev)
{
struct sprd_uart_port *sup = dev_get_drvdata(dev);
uart_resume_port(&sprd_uart_driver, &sup->port);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(sprd_pm_ops, sprd_suspend, sprd_resume);
static const struct of_device_id serial_ids[] = {
{.compatible = "sprd,sc9836-uart",},
{}
};
MODULE_DEVICE_TABLE(of, serial_ids);
static struct platform_driver sprd_platform_driver = {
.probe = sprd_probe,
.remove = sprd_remove,
.driver = {
.name = "sprd_serial",
.of_match_table = serial_ids,
.pm = &sprd_pm_ops,
},
};
module_platform_driver(sprd_platform_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Spreadtrum SoC serial driver series");
| linux-master | drivers/tty/serial/sprd_serial.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Serial core port device driver
*
* Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
* Author: Tony Lindgren <[email protected]>
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>
#include "serial_base.h"
#define SERIAL_PORT_AUTOSUSPEND_DELAY_MS 500
/* Only considers pending TX for now. Caller must take care of locking */
static int __serial_port_busy(struct uart_port *port)
{
return !uart_tx_stopped(port) &&
uart_circ_chars_pending(&port->state->xmit);
}
static int serial_port_runtime_resume(struct device *dev)
{
struct serial_port_device *port_dev = to_serial_base_port_device(dev);
struct uart_port *port;
unsigned long flags;
port = port_dev->port;
if (port->flags & UPF_DEAD)
goto out;
/* Flush any pending TX for the port */
spin_lock_irqsave(&port->lock, flags);
if (__serial_port_busy(port))
port->ops->start_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
out:
pm_runtime_mark_last_busy(dev);
return 0;
}
static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
NULL, serial_port_runtime_resume, NULL);
static int serial_port_probe(struct device *dev)
{
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, SERIAL_PORT_AUTOSUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
return 0;
}
static int serial_port_remove(struct device *dev)
{
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
return 0;
}
/*
* Serial core port device init functions. Note that the physical serial
* port device driver may not have completed probe at this point.
*/
int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
{
return serial_ctrl_register_port(drv, port);
}
EXPORT_SYMBOL(uart_add_one_port);
void uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
{
serial_ctrl_unregister_port(drv, port);
}
EXPORT_SYMBOL(uart_remove_one_port);
static struct device_driver serial_port_driver = {
.name = "port",
.suppress_bind_attrs = true,
.probe = serial_port_probe,
.remove = serial_port_remove,
.pm = pm_ptr(&serial_port_pm),
};
int serial_base_port_init(void)
{
return serial_base_driver_register(&serial_port_driver);
}
void serial_base_port_exit(void)
{
serial_base_driver_unregister(&serial_port_driver);
}
MODULE_AUTHOR("Tony Lindgren <[email protected]>");
MODULE_DESCRIPTION("Serial controller port driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/serial_port.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Based on drivers/serial/8250.c by Russell King.
*
* Author: Nicolas Pitre
* Created: Feb 20, 2003
* Copyright: (C) 2003 Monta Vista Software, Inc.
*
* Note 1: This driver is made separate from the already too overloaded
* 8250.c because it needs some quirks of its own and that'll make it
* easier to add DMA support.
*
* Note 2: I'm too sick of device allocation policies for serial ports.
* If someone else wants to request an "official" allocation of major/minor
* for this driver please be my guest. And don't forget that new hardware
* to come from Intel might have more than 3 or 4 of those UARTs. Let's
* hope for a better port registration and dynamic device allocation scheme
* with the serial core maintainer satisfaction to appear soon.
*/
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#define PXA_NAME_LEN 8
struct uart_pxa_port {
struct uart_port port;
unsigned char ier;
unsigned char lcr;
unsigned char mcr;
unsigned int lsr_break_flag;
struct clk *clk;
char name[PXA_NAME_LEN];
};
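/* PXA UART registers are spaced 32 bits apart; scale the 8250-style offset by 4. */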
static inline unsigned int serial_in(struct uart_pxa_port *up, int offset)
{
offset <<= 2;
return readl(up->port.membase + offset);
}
static inline void serial_out(struct uart_pxa_port *up, int offset, int value)
{
offset <<= 2;
writel(value, up->port.membase + offset);
}
static void serial_pxa_enable_ms(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
}
static void serial_pxa_stop_tx(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
}
static void serial_pxa_stop_rx(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
up->ier &= ~UART_IER_RLSI;
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
}
static inline void receive_chars(struct uart_pxa_port *up, int *status)
{
u8 ch, flag;
int max_count = 256;
do {
/* work around Errata #20 according to
* Intel(R) PXA27x Processor Family
* Specification Update (May 2005)
*
* Step 2
* Disable the Receiver Time Out Interrupt via IER[RTOIE]
*/
up->ier &= ~UART_IER_RTOIE;
serial_out(up, UART_IER, up->ier);
ch = serial_in(up, UART_RX);
flag = TTY_NORMAL;
up->port.icount.rx++;
if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
UART_LSR_FE | UART_LSR_OE))) {
/*
* For statistics only
*/
if (*status & UART_LSR_BI) {
*status &= ~(UART_LSR_FE | UART_LSR_PE);
up->port.icount.brk++;
/*
* We do the SysRQ and SAK checking
* here because otherwise the break
* may get masked by ignore_status_mask
* or read_status_mask.
*/
if (uart_handle_break(&up->port))
goto ignore_char;
} else if (*status & UART_LSR_PE)
up->port.icount.parity++;
else if (*status & UART_LSR_FE)
up->port.icount.frame++;
if (*status & UART_LSR_OE)
up->port.icount.overrun++;
/*
* Mask off conditions which should be ignored.
*/
*status &= up->port.read_status_mask;
#ifdef CONFIG_SERIAL_PXA_CONSOLE
if (up->port.line == up->port.cons->index) {
/* Recover the break flag from console xmit */
*status |= up->lsr_break_flag;
up->lsr_break_flag = 0;
}
#endif
if (*status & UART_LSR_BI) {
flag = TTY_BREAK;
} else if (*status & UART_LSR_PE)
flag = TTY_PARITY;
else if (*status & UART_LSR_FE)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&up->port, ch))
goto ignore_char;
uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
ignore_char:
*status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
tty_flip_buffer_push(&up->port.state->port);
/* work around Errata #20 according to
* Intel(R) PXA27x Processor Family
* Specification Update (May 2005)
*
* Step 6:
* No more data in FIFO: Re-enable RTO interrupt via IER[RTOIE]
*/
up->ier |= UART_IER_RTOIE;
serial_out(up, UART_IER, up->ier);
}
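/* Refill the TX FIFO with at most half its size per THRE interrupt. */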
static void transmit_chars(struct uart_pxa_port *up)
{
u8 ch;
uart_port_tx_limited(&up->port, ch, up->port.fifosize / 2,
true,
serial_out(up, UART_TX, ch),
({}));
}
static void serial_pxa_start_tx(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
}
/* caller must hold up->port.lock */
static inline void check_modem_status(struct uart_pxa_port *up)
{
int status;
status = serial_in(up, UART_MSR);
if ((status & UART_MSR_ANY_DELTA) == 0)
return;
if (status & UART_MSR_TERI)
up->port.icount.rng++;
if (status & UART_MSR_DDSR)
up->port.icount.dsr++;
if (status & UART_MSR_DDCD)
uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
if (status & UART_MSR_DCTS)
uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
wake_up_interruptible(&up->port.state->port.delta_msr_wait);
}
/*
* This handles the interrupt from one port.
*/
static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id)
{
struct uart_pxa_port *up = dev_id;
unsigned int iir, lsr;
iir = serial_in(up, UART_IIR);
if (iir & UART_IIR_NO_INT)
return IRQ_NONE;
spin_lock(&up->port.lock);
lsr = serial_in(up, UART_LSR);
if (lsr & UART_LSR_DR)
receive_chars(up, &lsr);
check_modem_status(up);
if (lsr & UART_LSR_THRE)
transmit_chars(up);
spin_unlock(&up->port.lock);
return IRQ_HANDLED;
}
static unsigned int serial_pxa_tx_empty(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned long flags;
unsigned int ret;
spin_lock_irqsave(&up->port.lock, flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->port.lock, flags);
return ret;
}
static unsigned int serial_pxa_get_mctrl(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned char status;
unsigned int ret;
status = serial_in(up, UART_MSR);
ret = 0;
if (status & UART_MSR_DCD)
ret |= TIOCM_CAR;
if (status & UART_MSR_RI)
ret |= TIOCM_RNG;
if (status & UART_MSR_DSR)
ret |= TIOCM_DSR;
if (status & UART_MSR_CTS)
ret |= TIOCM_CTS;
return ret;
}
static void serial_pxa_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned char mcr = 0;
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (mctrl & TIOCM_DTR)
mcr |= UART_MCR_DTR;
if (mctrl & TIOCM_OUT1)
mcr |= UART_MCR_OUT1;
if (mctrl & TIOCM_OUT2)
mcr |= UART_MCR_OUT2;
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
mcr |= up->mcr;
serial_out(up, UART_MCR, mcr);
}
static void serial_pxa_break_ctl(struct uart_port *port, int break_state)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static int serial_pxa_startup(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned long flags;
int retval;
if (port->line == 3) /* HWUART */
up->mcr |= UART_MCR_AFE;
else
up->mcr = 0;
up->port.uartclk = clk_get_rate(up->clk);
/*
* Allocate the IRQ
*/
retval = request_irq(up->port.irq, serial_pxa_irq, 0, up->name, up);
if (retval)
return retval;
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
*/
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(up, UART_FCR, 0);
/*
* Clear the interrupt registers.
*/
(void) serial_in(up, UART_LSR);
(void) serial_in(up, UART_RX);
(void) serial_in(up, UART_IIR);
(void) serial_in(up, UART_MSR);
/*
* Now, initialize the UART
*/
serial_out(up, UART_LCR, UART_LCR_WLEN8);
spin_lock_irqsave(&up->port.lock, flags);
up->port.mctrl |= TIOCM_OUT2;
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
/*
* Finally, enable interrupts. Note: Modem status interrupts
* are set via set_termios(), which will be occurring imminently
* anyway, so we don't enable them here.
*/
up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE;
serial_out(up, UART_IER, up->ier);
/*
* And clear the interrupt registers again for luck.
*/
(void) serial_in(up, UART_LSR);
(void) serial_in(up, UART_RX);
(void) serial_in(up, UART_IIR);
(void) serial_in(up, UART_MSR);
return 0;
}
static void serial_pxa_shutdown(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned long flags;
free_irq(up->port.irq, up);
/*
* Disable interrupts from this port
*/
up->ier = 0;
serial_out(up, UART_IER, 0);
spin_lock_irqsave(&up->port.lock, flags);
up->port.mctrl &= ~TIOCM_OUT2;
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
/*
* Disable break condition and FIFOs
*/
serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR |
UART_FCR_CLEAR_XMIT);
serial_out(up, UART_FCR, 0);
}
static void
serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned char cval, fcr = 0;
unsigned long flags;
unsigned int baud, quot;
unsigned int dll;
cval = UART_LCR_WLEN(tty_get_char_size(termios->c_cflag));
if (termios->c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
if (termios->c_cflag & PARENB)
cval |= UART_LCR_PARITY;
if (!(termios->c_cflag & PARODD))
cval |= UART_LCR_EPAR;
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
if ((up->port.uartclk / quot) < (2400 * 16))
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR1;
else if ((up->port.uartclk / quot) < (230400 * 16))
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR8;
else
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR32;
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
spin_lock_irqsave(&up->port.lock, flags);
/*
* Ensure the port will be enabled.
* This is required especially for serial console.
*/
up->ier |= UART_IER_UUE;
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
up->port.read_status_mask |= UART_LSR_BI;
/*
* Characters to ignore
*/
up->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
if (termios->c_iflag & IGNBRK) {
up->port.ignore_status_mask |= UART_LSR_BI;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_OE;
}
/*
* ignore all characters if CREAD is not set
*/
if ((termios->c_cflag & CREAD) == 0)
up->port.ignore_status_mask |= UART_LSR_DR;
/*
* CTS flow control flag and modem status interrupts
*/
up->ier &= ~UART_IER_MSI;
if (UART_ENABLE_MS(&up->port, termios->c_cflag))
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
if (termios->c_cflag & CRTSCTS)
up->mcr |= UART_MCR_AFE;
else
up->mcr &= ~UART_MCR_AFE;
serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
/*
* work around Errata #75 according to Intel(R) PXA27x Processor Family
* Specification Update (Nov 2005)
*/
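/* Read the divisor back and warn if the DLL write did not take effect. */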
dll = serial_in(up, UART_DLL);
WARN_ON(dll != (quot & 0xff));
serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
serial_out(up, UART_LCR, cval); /* reset DLAB */
up->lcr = cval; /* Save LCR */
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
serial_out(up, UART_FCR, fcr);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static void
serial_pxa_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
if (!state)
clk_prepare_enable(up->clk);
else
clk_disable_unprepare(up->clk);
}
static void serial_pxa_release_port(struct uart_port *port)
{
}
static int serial_pxa_request_port(struct uart_port *port)
{
return 0;
}
static void serial_pxa_config_port(struct uart_port *port, int flags)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
up->port.type = PORT_PXA;
}
static int
serial_pxa_verify_port(struct uart_port *port, struct serial_struct *ser)
{
/* we don't want the core code to modify any port params */
return -EINVAL;
}
static const char *
serial_pxa_type(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
return up->name;
}
static struct uart_pxa_port *serial_pxa_ports[4];
static struct uart_driver serial_pxa_reg;
#ifdef CONFIG_SERIAL_PXA_CONSOLE
/*
* Wait for transmitter & holding register to empty
*/
static void wait_for_xmitr(struct uart_pxa_port *up)
{
unsigned int status, tmout = 10000;
/* Wait up to 10ms for the character(s) to be sent. */
do {
status = serial_in(up, UART_LSR);
if (status & UART_LSR_BI)
up->lsr_break_flag = UART_LSR_BI;
if (--tmout == 0)
break;
udelay(1);
} while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
tmout = 1000000;
while (--tmout &&
((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
udelay(1);
}
}
static void serial_pxa_console_putchar(struct uart_port *port, unsigned char ch)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
* The console_lock must be held when we get here.
*/
static void
serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_pxa_port *up = serial_pxa_ports[co->index];
unsigned int ier;
unsigned long flags;
int locked = 1;
clk_enable(up->clk);
local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock(&up->port.lock);
else
spin_lock(&up->port.lock);
/*
* First save the IER then disable the interrupts
*/
ier = serial_in(up, UART_IER);
serial_out(up, UART_IER, UART_IER_UUE);
uart_console_write(&up->port, s, count, serial_pxa_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
serial_out(up, UART_IER, ier);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
clk_disable(up->clk);
}
#ifdef CONFIG_CONSOLE_POLL
/*
* Console polling routines for writing and reading from the uart while
* in an interrupt or debug context.
*/
static int serial_pxa_get_poll_char(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned char lsr = serial_in(up, UART_LSR);
while (!(lsr & UART_LSR_DR))
lsr = serial_in(up, UART_LSR);
return serial_in(up, UART_RX);
}
static void serial_pxa_put_poll_char(struct uart_port *port,
unsigned char c)
{
unsigned int ier;
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
/*
* First save the IER then disable the interrupts
*/
ier = serial_in(up, UART_IER);
serial_out(up, UART_IER, UART_IER_UUE);
wait_for_xmitr(up);
/*
* Send the character out.
*/
serial_out(up, UART_TX, c);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
serial_out(up, UART_IER, ier);
}
#endif /* CONFIG_CONSOLE_POLL */
static int __init
serial_pxa_console_setup(struct console *co, char *options)
{
struct uart_pxa_port *up;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index == -1 || co->index >= serial_pxa_reg.nr)
co->index = 0;
up = serial_pxa_ports[co->index];
if (!up)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
static struct console serial_pxa_console = {
.name = "ttyS",
.write = serial_pxa_console_write,
.device = uart_console_device,
.setup = serial_pxa_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &serial_pxa_reg,
};
#define PXA_CONSOLE &serial_pxa_console
#else
#define PXA_CONSOLE NULL
#endif
static const struct uart_ops serial_pxa_pops = {
.tx_empty = serial_pxa_tx_empty,
.set_mctrl = serial_pxa_set_mctrl,
.get_mctrl = serial_pxa_get_mctrl,
.stop_tx = serial_pxa_stop_tx,
.start_tx = serial_pxa_start_tx,
.stop_rx = serial_pxa_stop_rx,
.enable_ms = serial_pxa_enable_ms,
.break_ctl = serial_pxa_break_ctl,
.startup = serial_pxa_startup,
.shutdown = serial_pxa_shutdown,
.set_termios = serial_pxa_set_termios,
.pm = serial_pxa_pm,
.type = serial_pxa_type,
.release_port = serial_pxa_release_port,
.request_port = serial_pxa_request_port,
.config_port = serial_pxa_config_port,
.verify_port = serial_pxa_verify_port,
#if defined(CONFIG_CONSOLE_POLL) && defined(CONFIG_SERIAL_PXA_CONSOLE)
.poll_get_char = serial_pxa_get_poll_char,
.poll_put_char = serial_pxa_put_poll_char,
#endif
};
static struct uart_driver serial_pxa_reg = {
.owner = THIS_MODULE,
.driver_name = "PXA serial",
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
.nr = 4,
.cons = PXA_CONSOLE,
};
#ifdef CONFIG_PM
static int serial_pxa_suspend(struct device *dev)
{
struct uart_pxa_port *sport = dev_get_drvdata(dev);
if (sport)
uart_suspend_port(&serial_pxa_reg, &sport->port);
return 0;
}
static int serial_pxa_resume(struct device *dev)
{
struct uart_pxa_port *sport = dev_get_drvdata(dev);
if (sport)
uart_resume_port(&serial_pxa_reg, &sport->port);
return 0;
}
static const struct dev_pm_ops serial_pxa_pm_ops = {
.suspend = serial_pxa_suspend,
.resume = serial_pxa_resume,
};
#endif
static const struct of_device_id serial_pxa_dt_ids[] = {
{ .compatible = "mrvl,pxa-uart", },
{ .compatible = "mrvl,mmp-uart", },
{}
};
static int serial_pxa_probe_dt(struct platform_device *pdev,
struct uart_pxa_port *sport)
{
struct device_node *np = pdev->dev.of_node;
int ret;
if (!np)
return 1;
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
return ret;
}
sport->port.line = ret;
return 0;
}
static int serial_pxa_probe(struct platform_device *dev)
{
struct uart_pxa_port *sport;
struct resource *mmres;
int ret;
int irq;
mmres = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!mmres)
return -ENODEV;
irq = platform_get_irq(dev, 0);
if (irq < 0)
return irq;
sport = kzalloc(sizeof(struct uart_pxa_port), GFP_KERNEL);
if (!sport)
return -ENOMEM;
sport->clk = clk_get(&dev->dev, NULL);
if (IS_ERR(sport->clk)) {
ret = PTR_ERR(sport->clk);
goto err_free;
}
ret = clk_prepare(sport->clk);
if (ret) {
clk_put(sport->clk);
goto err_free;
}
sport->port.type = PORT_PXA;
sport->port.iotype = UPIO_MEM;
sport->port.mapbase = mmres->start;
sport->port.irq = irq;
sport->port.fifosize = 64;
sport->port.ops = &serial_pxa_pops;
sport->port.dev = &dev->dev;
sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
sport->port.uartclk = clk_get_rate(sport->clk);
sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PXA_CONSOLE);
ret = serial_pxa_probe_dt(dev, sport);
if (ret > 0)
sport->port.line = dev->id;
else if (ret < 0)
goto err_clk;
if (sport->port.line >= ARRAY_SIZE(serial_pxa_ports)) {
dev_err(&dev->dev, "serial%d out of range\n", sport->port.line);
ret = -EINVAL;
goto err_clk;
}
snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1);
sport->port.membase = ioremap(mmres->start, resource_size(mmres));
if (!sport->port.membase) {
ret = -ENOMEM;
goto err_clk;
}
serial_pxa_ports[sport->port.line] = sport;
uart_add_one_port(&serial_pxa_reg, &sport->port);
platform_set_drvdata(dev, sport);
return 0;
err_clk:
clk_unprepare(sport->clk);
clk_put(sport->clk);
err_free:
kfree(sport);
return ret;
}
static struct platform_driver serial_pxa_driver = {
.probe = serial_pxa_probe,
.driver = {
.name = "pxa2xx-uart",
#ifdef CONFIG_PM
.pm = &serial_pxa_pm_ops,
#endif
.suppress_bind_attrs = true,
.of_match_table = serial_pxa_dt_ids,
},
};
/* 8250 driver for PXA serial ports should be used */
static int __init serial_pxa_init(void)
{
int ret;
ret = uart_register_driver(&serial_pxa_reg);
if (ret != 0)
return ret;
ret = platform_driver_register(&serial_pxa_driver);
if (ret != 0)
uart_unregister_driver(&serial_pxa_reg);
return ret;
}
device_initcall(serial_pxa_init);
| linux-master | drivers/tty/serial/pxa.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Actions Semi Owl family serial console
*
* Copyright 2013 Actions Semi Inc.
* Author: Actions Semi, Inc.
*
* Copyright (c) 2016-2017 Andreas Färber
*/
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#define OWL_UART_PORT_NUM 7
#define OWL_UART_DEV_NAME "ttyOWL"
#define OWL_UART_CTL 0x000
#define OWL_UART_RXDAT 0x004
#define OWL_UART_TXDAT 0x008
#define OWL_UART_STAT 0x00c
#define OWL_UART_CTL_DWLS_MASK GENMASK(1, 0)
#define OWL_UART_CTL_DWLS_5BITS (0x0 << 0)
#define OWL_UART_CTL_DWLS_6BITS (0x1 << 0)
#define OWL_UART_CTL_DWLS_7BITS (0x2 << 0)
#define OWL_UART_CTL_DWLS_8BITS (0x3 << 0)
#define OWL_UART_CTL_STPS_2BITS BIT(2)
#define OWL_UART_CTL_PRS_MASK GENMASK(6, 4)
#define OWL_UART_CTL_PRS_NONE (0x0 << 4)
#define OWL_UART_CTL_PRS_ODD (0x4 << 4)
#define OWL_UART_CTL_PRS_MARK (0x5 << 4)
#define OWL_UART_CTL_PRS_EVEN (0x6 << 4)
#define OWL_UART_CTL_PRS_SPACE (0x7 << 4)
#define OWL_UART_CTL_AFE BIT(12)
#define OWL_UART_CTL_TRFS_TX BIT(14)
#define OWL_UART_CTL_EN BIT(15)
#define OWL_UART_CTL_RXDE BIT(16)
#define OWL_UART_CTL_TXDE BIT(17)
#define OWL_UART_CTL_RXIE BIT(18)
#define OWL_UART_CTL_TXIE BIT(19)
#define OWL_UART_CTL_LBEN BIT(20)
#define OWL_UART_STAT_RIP BIT(0)
#define OWL_UART_STAT_TIP BIT(1)
#define OWL_UART_STAT_RXER BIT(2)
#define OWL_UART_STAT_TFER BIT(3)
#define OWL_UART_STAT_RXST BIT(4)
#define OWL_UART_STAT_RFEM BIT(5)
#define OWL_UART_STAT_TFFU BIT(6)
#define OWL_UART_STAT_CTSS BIT(7)
#define OWL_UART_STAT_RTSS BIT(8)
#define OWL_UART_STAT_TFES BIT(10)
#define OWL_UART_STAT_TRFL_MASK GENMASK(16, 11)
#define OWL_UART_STAT_UTBB BIT(17)
#define OWL_UART_POLL_USEC 5
#define OWL_UART_TIMEOUT_USEC 10000
static struct uart_driver owl_uart_driver;
struct owl_uart_info {
unsigned int tx_fifosize;
};
struct owl_uart_port {
struct uart_port port;
struct clk *clk;
};
#define to_owl_uart_port(prt) container_of(prt, struct owl_uart_port, prt)
static struct owl_uart_port *owl_uart_ports[OWL_UART_PORT_NUM];
static inline void owl_uart_write(struct uart_port *port, u32 val, unsigned int off)
{
writel(val, port->membase + off);
}
static inline u32 owl_uart_read(struct uart_port *port, unsigned int off)
{
return readl(port->membase + off);
}
static void owl_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
u32 ctl;
ctl = owl_uart_read(port, OWL_UART_CTL);
if (mctrl & TIOCM_LOOP)
ctl |= OWL_UART_CTL_LBEN;
else
ctl &= ~OWL_UART_CTL_LBEN;
owl_uart_write(port, ctl, OWL_UART_CTL);
}
static unsigned int owl_uart_get_mctrl(struct uart_port *port)
{
unsigned int mctrl = TIOCM_CAR | TIOCM_DSR;
u32 stat, ctl;
ctl = owl_uart_read(port, OWL_UART_CTL);
stat = owl_uart_read(port, OWL_UART_STAT);
if (stat & OWL_UART_STAT_RTSS)
mctrl |= TIOCM_RTS;
if ((stat & OWL_UART_STAT_CTSS) || !(ctl & OWL_UART_CTL_AFE))
mctrl |= TIOCM_CTS;
return mctrl;
}
static unsigned int owl_uart_tx_empty(struct uart_port *port)
{
unsigned long flags;
u32 val;
unsigned int ret;
spin_lock_irqsave(&port->lock, flags);
val = owl_uart_read(port, OWL_UART_STAT);
ret = (val & OWL_UART_STAT_TFES) ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&port->lock, flags);
return ret;
}
static void owl_uart_stop_rx(struct uart_port *port)
{
u32 val;
val = owl_uart_read(port, OWL_UART_CTL);
val &= ~(OWL_UART_CTL_RXIE | OWL_UART_CTL_RXDE);
owl_uart_write(port, val, OWL_UART_CTL);
val = owl_uart_read(port, OWL_UART_STAT);
val |= OWL_UART_STAT_RIP;
owl_uart_write(port, val, OWL_UART_STAT);
}
static void owl_uart_stop_tx(struct uart_port *port)
{
u32 val;
val = owl_uart_read(port, OWL_UART_CTL);
val &= ~(OWL_UART_CTL_TXIE | OWL_UART_CTL_TXDE);
owl_uart_write(port, val, OWL_UART_CTL);
val = owl_uart_read(port, OWL_UART_STAT);
val |= OWL_UART_STAT_TIP;
owl_uart_write(port, val, OWL_UART_STAT);
}
static void owl_uart_start_tx(struct uart_port *port)
{
u32 val;
if (uart_tx_stopped(port)) {
owl_uart_stop_tx(port);
return;
}
val = owl_uart_read(port, OWL_UART_STAT);
val |= OWL_UART_STAT_TIP;
owl_uart_write(port, val, OWL_UART_STAT);
val = owl_uart_read(port, OWL_UART_CTL);
val |= OWL_UART_CTL_TXIE;
owl_uart_write(port, val, OWL_UART_CTL);
}
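/* Push characters from the xmit buffer until the TX FIFO reports full. */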
static void owl_uart_send_chars(struct uart_port *port)
{
u8 ch;
uart_port_tx(port, ch,
!(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU),
owl_uart_write(port, ch, OWL_UART_TXDAT));
}
static void owl_uart_receive_chars(struct uart_port *port)
{
u32 stat, val;
val = owl_uart_read(port, OWL_UART_CTL);
val &= ~OWL_UART_CTL_TRFS_TX;
owl_uart_write(port, val, OWL_UART_CTL);
stat = owl_uart_read(port, OWL_UART_STAT);
while (!(stat & OWL_UART_STAT_RFEM)) {
char flag = TTY_NORMAL;
if (stat & OWL_UART_STAT_RXER)
port->icount.overrun++;
if (stat & OWL_UART_STAT_RXST) {
/* We are not able to distinguish the error type. */
port->icount.brk++;
port->icount.frame++;
stat &= port->read_status_mask;
if (stat & OWL_UART_STAT_RXST)
flag = TTY_PARITY;
} else
port->icount.rx++;
val = owl_uart_read(port, OWL_UART_RXDAT);
val &= 0xff;
if ((stat & port->ignore_status_mask) == 0)
tty_insert_flip_char(&port->state->port, val, flag);
stat = owl_uart_read(port, OWL_UART_STAT);
}
tty_flip_buffer_push(&port->state->port);
}
static irqreturn_t owl_uart_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
unsigned long flags;
u32 stat;
spin_lock_irqsave(&port->lock, flags);
stat = owl_uart_read(port, OWL_UART_STAT);
if (stat & OWL_UART_STAT_RIP)
owl_uart_receive_chars(port);
if (stat & OWL_UART_STAT_TIP)
owl_uart_send_chars(port);
stat = owl_uart_read(port, OWL_UART_STAT);
stat |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP;
owl_uart_write(port, stat, OWL_UART_STAT);
spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
}
static void owl_uart_shutdown(struct uart_port *port)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
val = owl_uart_read(port, OWL_UART_CTL);
val &= ~(OWL_UART_CTL_TXIE | OWL_UART_CTL_RXIE
| OWL_UART_CTL_TXDE | OWL_UART_CTL_RXDE | OWL_UART_CTL_EN);
owl_uart_write(port, val, OWL_UART_CTL);
spin_unlock_irqrestore(&port->lock, flags);
free_irq(port->irq, port);
}
static int owl_uart_startup(struct uart_port *port)
{
u32 val;
unsigned long flags;
int ret;
ret = request_irq(port->irq, owl_uart_irq, IRQF_TRIGGER_HIGH,
"owl-uart", port);
if (ret)
return ret;
spin_lock_irqsave(&port->lock, flags);
val = owl_uart_read(port, OWL_UART_STAT);
val |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP
| OWL_UART_STAT_RXER | OWL_UART_STAT_TFER | OWL_UART_STAT_RXST;
owl_uart_write(port, val, OWL_UART_STAT);
val = owl_uart_read(port, OWL_UART_CTL);
val |= OWL_UART_CTL_RXIE | OWL_UART_CTL_TXIE;
val |= OWL_UART_CTL_EN;
owl_uart_write(port, val, OWL_UART_CTL);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
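/* The UART clock is set to 8x the baud rate, presumably for 8x oversampling. */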
static void owl_uart_change_baudrate(struct owl_uart_port *owl_port,
unsigned long baud)
{
clk_set_rate(owl_port->clk, baud * 8);
}
static void owl_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
struct owl_uart_port *owl_port = to_owl_uart_port(port);
unsigned int baud;
u32 ctl;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
ctl = owl_uart_read(port, OWL_UART_CTL);
ctl &= ~OWL_UART_CTL_DWLS_MASK;
switch (termios->c_cflag & CSIZE) {
case CS5:
ctl |= OWL_UART_CTL_DWLS_5BITS;
break;
case CS6:
ctl |= OWL_UART_CTL_DWLS_6BITS;
break;
case CS7:
ctl |= OWL_UART_CTL_DWLS_7BITS;
break;
case CS8:
default:
ctl |= OWL_UART_CTL_DWLS_8BITS;
break;
}
if (termios->c_cflag & CSTOPB)
ctl |= OWL_UART_CTL_STPS_2BITS;
else
ctl &= ~OWL_UART_CTL_STPS_2BITS;
ctl &= ~OWL_UART_CTL_PRS_MASK;
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & CMSPAR) {
if (termios->c_cflag & PARODD)
ctl |= OWL_UART_CTL_PRS_MARK;
else
ctl |= OWL_UART_CTL_PRS_SPACE;
} else if (termios->c_cflag & PARODD)
ctl |= OWL_UART_CTL_PRS_ODD;
else
ctl |= OWL_UART_CTL_PRS_EVEN;
} else
ctl |= OWL_UART_CTL_PRS_NONE;
if (termios->c_cflag & CRTSCTS)
ctl |= OWL_UART_CTL_AFE;
else
ctl &= ~OWL_UART_CTL_AFE;
owl_uart_write(port, ctl, OWL_UART_CTL);
baud = uart_get_baud_rate(port, termios, old, 9600, 3200000);
owl_uart_change_baudrate(owl_port, baud);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
port->read_status_mask |= OWL_UART_STAT_RXER;
if (termios->c_iflag & INPCK)
port->read_status_mask |= OWL_UART_STAT_RXST;
uart_update_timeout(port, termios->c_cflag, baud);
spin_unlock_irqrestore(&port->lock, flags);
}
static void owl_uart_release_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return;
if (port->flags & UPF_IOREMAP) {
devm_release_mem_region(port->dev, port->mapbase,
resource_size(res));
devm_iounmap(port->dev, port->membase);
port->membase = NULL;
}
}
static int owl_uart_request_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
if (!devm_request_mem_region(port->dev, port->mapbase,
resource_size(res), dev_name(port->dev)))
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
port->membase = devm_ioremap(port->dev, port->mapbase,
resource_size(res));
if (!port->membase)
return -EBUSY;
}
return 0;
}
static const char *owl_uart_type(struct uart_port *port)
{
return (port->type == PORT_OWL) ? "owl-uart" : NULL;
}
static int owl_uart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
if (port->type != PORT_OWL)
return -EINVAL;
if (port->irq != ser->irq)
return -EINVAL;
return 0;
}
static void owl_uart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_OWL;
owl_uart_request_port(port);
}
}
#ifdef CONFIG_CONSOLE_POLL
static int owl_uart_poll_get_char(struct uart_port *port)
{
if (owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_RFEM)
return NO_POLL_CHAR;
return owl_uart_read(port, OWL_UART_RXDAT);
}
static void owl_uart_poll_put_char(struct uart_port *port, unsigned char ch)
{
u32 reg;
int ret;
/* Wait until the TX FIFO is no longer full, or time out */
ret = readl_poll_timeout_atomic(port->membase + OWL_UART_STAT, reg,
!(reg & OWL_UART_STAT_TFFU),
OWL_UART_POLL_USEC,
OWL_UART_TIMEOUT_USEC);
if (ret == -ETIMEDOUT) {
dev_err(port->dev, "Timeout waiting while UART TX FULL\n");
return;
}
owl_uart_write(port, ch, OWL_UART_TXDAT);
}
#endif /* CONFIG_CONSOLE_POLL */
static const struct uart_ops owl_uart_ops = {
.set_mctrl = owl_uart_set_mctrl,
.get_mctrl = owl_uart_get_mctrl,
.tx_empty = owl_uart_tx_empty,
.start_tx = owl_uart_start_tx,
.stop_rx = owl_uart_stop_rx,
.stop_tx = owl_uart_stop_tx,
.startup = owl_uart_startup,
.shutdown = owl_uart_shutdown,
.set_termios = owl_uart_set_termios,
.type = owl_uart_type,
.config_port = owl_uart_config_port,
.request_port = owl_uart_request_port,
.release_port = owl_uart_release_port,
.verify_port = owl_uart_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = owl_uart_poll_get_char,
.poll_put_char = owl_uart_poll_put_char,
#endif
};
#ifdef CONFIG_SERIAL_OWL_CONSOLE
static void owl_console_putchar(struct uart_port *port, unsigned char ch)
{
if (!port->membase)
return;
while (owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU)
cpu_relax();
owl_uart_write(port, ch, OWL_UART_TXDAT);
}
static void owl_uart_port_write(struct uart_port *port, const char *s,
u_int count)
{
u32 old_ctl, val;
unsigned long flags;
int locked;
local_irq_save(flags);
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock(&port->lock);
else {
spin_lock(&port->lock);
locked = 1;
}
old_ctl = owl_uart_read(port, OWL_UART_CTL);
val = old_ctl | OWL_UART_CTL_TRFS_TX;
/* disable IRQ */
val &= ~(OWL_UART_CTL_RXIE | OWL_UART_CTL_TXIE);
owl_uart_write(port, val, OWL_UART_CTL);
uart_console_write(port, s, count, owl_console_putchar);
/* wait until all contents have been sent out */
while (owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TRFL_MASK)
cpu_relax();
/* clear IRQ pending */
val = owl_uart_read(port, OWL_UART_STAT);
val |= OWL_UART_STAT_TIP | OWL_UART_STAT_RIP;
owl_uart_write(port, val, OWL_UART_STAT);
owl_uart_write(port, old_ctl, OWL_UART_CTL);
if (locked)
spin_unlock(&port->lock);
local_irq_restore(flags);
}
static void owl_uart_console_write(struct console *co, const char *s,
u_int count)
{
struct owl_uart_port *owl_port;
owl_port = owl_uart_ports[co->index];
if (!owl_port)
return;
owl_uart_port_write(&owl_port->port, s, count);
}
static int owl_uart_console_setup(struct console *co, char *options)
{
struct owl_uart_port *owl_port;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= OWL_UART_PORT_NUM)
return -EINVAL;
owl_port = owl_uart_ports[co->index];
if (!owl_port || !owl_port->port.membase)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&owl_port->port, co, baud, parity, bits, flow);
}
static struct console owl_uart_console = {
.name = OWL_UART_DEV_NAME,
.write = owl_uart_console_write,
.device = uart_console_device,
.setup = owl_uart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &owl_uart_driver,
};
static int __init owl_uart_console_init(void)
{
register_console(&owl_uart_console);
return 0;
}
console_initcall(owl_uart_console_init);
static void owl_uart_early_console_write(struct console *co,
const char *s,
u_int count)
{
struct earlycon_device *dev = co->data;
owl_uart_port_write(&dev->port, s, count);
}
static int __init
owl_uart_early_console_setup(struct earlycon_device *device, const char *opt)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = owl_uart_early_console_write;
return 0;
}
OF_EARLYCON_DECLARE(owl, "actions,owl-uart",
owl_uart_early_console_setup);
#define OWL_UART_CONSOLE (&owl_uart_console)
#else
#define OWL_UART_CONSOLE NULL
#endif
static struct uart_driver owl_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "owl-uart",
.dev_name = OWL_UART_DEV_NAME,
.nr = OWL_UART_PORT_NUM,
.cons = OWL_UART_CONSOLE,
};
static const struct owl_uart_info owl_s500_info = {
.tx_fifosize = 16,
};
static const struct owl_uart_info owl_s900_info = {
.tx_fifosize = 32,
};
static const struct of_device_id owl_uart_dt_matches[] = {
{ .compatible = "actions,s500-uart", .data = &owl_s500_info },
{ .compatible = "actions,s900-uart", .data = &owl_s900_info },
{ }
};
MODULE_DEVICE_TABLE(of, owl_uart_dt_matches);
static int owl_uart_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct owl_uart_info *info = NULL;
struct resource *res_mem;
struct owl_uart_port *owl_port;
int ret, irq;
if (pdev->dev.of_node) {
pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
match = of_match_node(owl_uart_dt_matches, pdev->dev.of_node);
if (match)
info = match->data;
}
if (pdev->id < 0 || pdev->id >= OWL_UART_PORT_NUM) {
dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
return -EINVAL;
}
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_mem) {
dev_err(&pdev->dev, "could not get mem\n");
return -ENODEV;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
if (owl_uart_ports[pdev->id]) {
dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
return -EBUSY;
}
owl_port = devm_kzalloc(&pdev->dev, sizeof(*owl_port), GFP_KERNEL);
if (!owl_port)
return -ENOMEM;
owl_port->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(owl_port->clk)) {
dev_err(&pdev->dev, "could not get clk\n");
return PTR_ERR(owl_port->clk);
}
ret = clk_prepare_enable(owl_port->clk);
if (ret) {
dev_err(&pdev->dev, "could not enable clk\n");
return ret;
}
owl_port->port.dev = &pdev->dev;
owl_port->port.line = pdev->id;
owl_port->port.type = PORT_OWL;
owl_port->port.iotype = UPIO_MEM;
owl_port->port.mapbase = res_mem->start;
owl_port->port.irq = irq;
owl_port->port.uartclk = clk_get_rate(owl_port->clk);
if (owl_port->port.uartclk == 0) {
dev_err(&pdev->dev, "clock rate is zero\n");
clk_disable_unprepare(owl_port->clk);
return -EINVAL;
}
owl_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_LOW_LATENCY;
owl_port->port.x_char = 0;
owl_port->port.fifosize = (info) ? info->tx_fifosize : 16;
owl_port->port.ops = &owl_uart_ops;
owl_uart_ports[pdev->id] = owl_port;
platform_set_drvdata(pdev, owl_port);
ret = uart_add_one_port(&owl_uart_driver, &owl_port->port);
if (ret)
owl_uart_ports[pdev->id] = NULL;
return ret;
}
static int owl_uart_remove(struct platform_device *pdev)
{
struct owl_uart_port *owl_port = platform_get_drvdata(pdev);
uart_remove_one_port(&owl_uart_driver, &owl_port->port);
owl_uart_ports[pdev->id] = NULL;
clk_disable_unprepare(owl_port->clk);
return 0;
}
static struct platform_driver owl_uart_platform_driver = {
.probe = owl_uart_probe,
.remove = owl_uart_remove,
.driver = {
.name = "owl-uart",
.of_match_table = owl_uart_dt_matches,
},
};
static int __init owl_uart_init(void)
{
int ret;
ret = uart_register_driver(&owl_uart_driver);
if (ret)
return ret;
ret = platform_driver_register(&owl_uart_platform_driver);
if (ret)
uart_unregister_driver(&owl_uart_driver);
return ret;
}
static void __exit owl_uart_exit(void)
{
platform_driver_unregister(&owl_uart_platform_driver);
uart_unregister_driver(&owl_uart_driver);
}
module_init(owl_uart_init);
module_exit(owl_uart_exit);
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/owl-uart.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver core for serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright 1999 ARM Limited
* Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
*/
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/serial.h> /* for serial_state and serial_icounter_struct */
#include <linux/serial_core.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/math64.h>
#include <linux/security.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
#include "serial_base.h"
/*
* This is used to lock changes in serial line configuration.
*/
static DEFINE_MUTEX(port_mutex);
/*
* lockdep: port->lock is initialized in two places, but we
* want only one lock-class:
*/
static struct lock_class_key port_lock_key;
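/*
* Shift used to split an unsigned long I/O address across the two int
* fields (port and port_high) of the legacy serial_struct ABI.
*/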
#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
/*
* Max time with active RTS before/after data is sent.
*/
#define RS485_MAX_RTS_DELAY 100 /* msecs */
static void uart_change_pm(struct uart_state *state,
enum uart_pm_state pm_state);
static void uart_port_shutdown(struct tty_port *port);
static int uart_dcd_enabled(struct uart_port *uport)
{
return !!(uport->status & UPSTAT_DCD_ENABLE);
}
static inline struct uart_port *uart_port_ref(struct uart_state *state)
{
if (atomic_add_unless(&state->refcount, 1, 0))
return state->uart_port;
return NULL;
}
static inline void uart_port_deref(struct uart_port *uport)
{
if (atomic_dec_and_test(&uport->state->refcount))
wake_up(&uport->state->remove_wait);
}
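/*
* Lock helpers that also take a reference on the port so it cannot go
* away while the lock is held; uart_port_unlock() drops both again.
*/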
#define uart_port_lock(state, flags) \
({ \
struct uart_port *__uport = uart_port_ref(state); \
if (__uport) \
spin_lock_irqsave(&__uport->lock, flags); \
__uport; \
})
#define uart_port_unlock(uport, flags) \
({ \
struct uart_port *__uport = uport; \
if (__uport) { \
spin_unlock_irqrestore(&__uport->lock, flags); \
uart_port_deref(__uport); \
} \
})
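/*
 * Usage sketch for the two macros above (this mirrors uart_stop() below):
 * take a reference and the port lock together, and tolerate a port that
 * has already been removed:
 *
 *	port = uart_port_lock(state, flags);
 *	if (port)
 *		port->ops->stop_tx(port);
 *	uart_port_unlock(port, flags);
 */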
static inline struct uart_port *uart_port_check(struct uart_state *state)
{
lockdep_assert_held(&state->port.mutex);
return state->uart_port;
}
/**
* uart_write_wakeup - schedule write processing
* @port: port to be processed
*
* This routine is used by the interrupt handler to schedule processing in the
* software interrupt portion of the driver. A driver is expected to call this
 * function when the number of characters in the transmit buffer has dropped
* below a threshold.
*
* Locking: @port->lock should be held
*/
void uart_write_wakeup(struct uart_port *port)
{
struct uart_state *state = port->state;
/*
* This means you called this function _after_ the port was
* closed. No cookie for you.
*/
BUG_ON(!state);
tty_port_tty_wakeup(&state->port);
}
EXPORT_SYMBOL(uart_write_wakeup);
static void uart_stop(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
unsigned long flags;
port = uart_port_lock(state, flags);
if (port)
port->ops->stop_tx(port);
uart_port_unlock(port, flags);
}
static void __uart_start(struct uart_state *state)
{
struct uart_port *port = state->uart_port;
struct serial_port_device *port_dev;
int err;
if (!port || port->flags & UPF_DEAD || uart_tx_stopped(port))
return;
port_dev = port->port_dev;
/* Increment the runtime PM usage count for the active check below */
err = pm_runtime_get(&port_dev->dev);
if (err < 0) {
pm_runtime_put_noidle(&port_dev->dev);
return;
}
/*
* Start TX if enabled, and kick runtime PM. If the device is not
* enabled, serial_port_runtime_resume() calls start_tx() again
* after enabling the device.
*/
if (pm_runtime_active(&port_dev->dev))
port->ops->start_tx(port);
pm_runtime_mark_last_busy(&port_dev->dev);
pm_runtime_put_autosuspend(&port_dev->dev);
}
static void uart_start(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
unsigned long flags;
port = uart_port_lock(state, flags);
__uart_start(state);
uart_port_unlock(port, flags);
}
static void
uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
{
unsigned long flags;
unsigned int old;
spin_lock_irqsave(&port->lock, flags);
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
spin_unlock_irqrestore(&port->lock, flags);
}
#define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0)
#define uart_clear_mctrl(port, clear) uart_update_mctrl(port, 0, clear)
static void uart_port_dtr_rts(struct uart_port *uport, bool active)
{
if (active)
uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
else
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
}
/* Caller holds port mutex */
static void uart_change_line_settings(struct tty_struct *tty, struct uart_state *state,
const struct ktermios *old_termios)
{
struct uart_port *uport = uart_port_check(state);
struct ktermios *termios;
bool old_hw_stopped;
/*
* If we have no tty, termios, or the port does not exist,
* then we can't set the parameters for this port.
*/
if (!tty || uport->type == PORT_UNKNOWN)
return;
termios = &tty->termios;
uport->ops->set_termios(uport, termios, old_termios);
/*
* Set modem status enables based on termios cflag
*/
spin_lock_irq(&uport->lock);
if (termios->c_cflag & CRTSCTS)
uport->status |= UPSTAT_CTS_ENABLE;
else
uport->status &= ~UPSTAT_CTS_ENABLE;
if (termios->c_cflag & CLOCAL)
uport->status &= ~UPSTAT_DCD_ENABLE;
else
uport->status |= UPSTAT_DCD_ENABLE;
/* reset sw-assisted CTS flow control based on (possibly) new mode */
old_hw_stopped = uport->hw_stopped;
uport->hw_stopped = uart_softcts_mode(uport) &&
!(uport->ops->get_mctrl(uport) & TIOCM_CTS);
if (uport->hw_stopped != old_hw_stopped) {
if (!old_hw_stopped)
uport->ops->stop_tx(uport);
else
__uart_start(state);
}
spin_unlock_irq(&uport->lock);
}
/*
 * Start up the port. This will be called once per open. All calls
* will be serialised by the per-port mutex.
*/
static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
bool init_hw)
{
struct uart_port *uport = uart_port_check(state);
unsigned long flags;
unsigned long page;
int retval = 0;
if (uport->type == PORT_UNKNOWN)
return 1;
/*
* Make sure the device is in D0 state.
*/
uart_change_pm(state, UART_PM_STATE_ON);
/*
* Initialise and allocate the transmit and temporary
* buffer.
*/
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
uart_port_lock(state, flags);
if (!state->xmit.buf) {
state->xmit.buf = (unsigned char *) page;
uart_circ_clear(&state->xmit);
uart_port_unlock(uport, flags);
} else {
uart_port_unlock(uport, flags);
/*
* Do not free() the page under the port lock, see
* uart_shutdown().
*/
free_page(page);
}
retval = uport->ops->startup(uport);
if (retval == 0) {
if (uart_console(uport) && uport->cons->cflag) {
tty->termios.c_cflag = uport->cons->cflag;
tty->termios.c_ispeed = uport->cons->ispeed;
tty->termios.c_ospeed = uport->cons->ospeed;
uport->cons->cflag = 0;
uport->cons->ispeed = 0;
uport->cons->ospeed = 0;
}
/*
* Initialise the hardware port settings.
*/
uart_change_line_settings(tty, state, NULL);
/*
* Setup the RTS and DTR signals once the
* port is open and ready to respond.
*/
if (init_hw && C_BAUD(tty))
uart_port_dtr_rts(uport, true);
}
/*
 * This is to allow setserial on this port. People may want to set
 * port/irq/type and then reconfigure the port properly if startup
 * failed here.
*/
if (retval && capable(CAP_SYS_ADMIN))
return 1;
return retval;
}
static int uart_startup(struct tty_struct *tty, struct uart_state *state,
bool init_hw)
{
struct tty_port *port = &state->port;
int retval;
if (tty_port_initialized(port))
return 0;
retval = uart_port_startup(tty, state, init_hw);
if (retval)
set_bit(TTY_IO_ERROR, &tty->flags);
return retval;
}
/*
 * This routine will shut down a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on. Calls to
* uart_shutdown are serialised by the per-port semaphore.
*
* uport == NULL if uart_port has already been removed
*/
static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
{
struct uart_port *uport = uart_port_check(state);
struct tty_port *port = &state->port;
unsigned long flags;
char *xmit_buf = NULL;
/*
* Set the TTY IO error marker
*/
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
if (tty_port_initialized(port)) {
tty_port_set_initialized(port, false);
/*
* Turn off DTR and RTS early.
*/
if (uport && uart_console(uport) && tty) {
uport->cons->cflag = tty->termios.c_cflag;
uport->cons->ispeed = tty->termios.c_ispeed;
uport->cons->ospeed = tty->termios.c_ospeed;
}
if (!tty || C_HUPCL(tty))
uart_port_dtr_rts(uport, false);
uart_port_shutdown(port);
}
/*
* It's possible for shutdown to be called after suspend if we get
* a DCD drop (hangup) at just the right time. Clear suspended bit so
* we don't try to resume a port that has been shutdown.
*/
tty_port_set_suspended(port, false);
/*
* Do not free() the transmit buffer page under the port lock since
* this can create various circular locking scenarios. For instance,
* console driver may need to allocate/free a debug object, which
 * can end up in printk() recursion.
*/
uart_port_lock(state, flags);
xmit_buf = state->xmit.buf;
state->xmit.buf = NULL;
uart_port_unlock(uport, flags);
free_page((unsigned long)xmit_buf);
}
/**
* uart_update_timeout - update per-port frame timing information
* @port: uart_port structure describing the port
* @cflag: termios cflag value
* @baud: speed of the port
*
* Set the @port frame timing information from which the FIFO timeout value is
 * derived. The @cflag value should reflect the actual hardware settings, since
 * the number of data bits, parity, stop bits and the baud rate are all taken
 * into account here.
*
* Locking: caller is expected to take @port->lock
*/
void
uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud)
{
unsigned int size = tty_get_frame_size(cflag);
u64 frame_time;
frame_time = (u64)size * NSEC_PER_SEC;
port->frame_time = DIV64_U64_ROUND_UP(frame_time, baud);
}
EXPORT_SYMBOL(uart_update_timeout);
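/*
 * Worked example (illustrative figures): for an 8N1 frame,
 * tty_get_frame_size() returns 10 bits (1 start + 8 data + 1 stop), so at
 * 115200 baud frame_time = DIV64_U64_ROUND_UP(10 * NSEC_PER_SEC, 115200),
 * i.e. 86806 ns per character.
 */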
/**
* uart_get_baud_rate - return baud rate for a particular port
* @port: uart_port structure describing the port in question.
* @termios: desired termios settings
* @old: old termios (or %NULL)
* @min: minimum acceptable baud rate
* @max: maximum acceptable baud rate
*
* Decode the termios structure into a numeric baud rate, taking account of the
* magic 38400 baud rate (with spd_* flags), and mapping the %B0 rate to 9600
* baud.
*
* If the new baud rate is invalid, try the @old termios setting. If it's still
* invalid, we try 9600 baud.
*
* The @termios structure is updated to reflect the baud rate we're actually
* going to be using. Don't do this for the case where B0 is requested ("hang
* up").
*
* Locking: caller dependent
*/
unsigned int
uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old, unsigned int min, unsigned int max)
{
unsigned int try;
unsigned int baud;
unsigned int altbaud;
int hung_up = 0;
upf_t flags = port->flags & UPF_SPD_MASK;
switch (flags) {
case UPF_SPD_HI:
altbaud = 57600;
break;
case UPF_SPD_VHI:
altbaud = 115200;
break;
case UPF_SPD_SHI:
altbaud = 230400;
break;
case UPF_SPD_WARP:
altbaud = 460800;
break;
default:
altbaud = 38400;
break;
}
for (try = 0; try < 2; try++) {
baud = tty_termios_baud_rate(termios);
/*
* The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
* Die! Die! Die!
*/
if (try == 0 && baud == 38400)
baud = altbaud;
/*
* Special case: B0 rate.
*/
if (baud == 0) {
hung_up = 1;
baud = 9600;
}
if (baud >= min && baud <= max)
return baud;
/*
		 * Oops, the requested rate fell outside the min/max range.
		 * Try again with the old baud rate if possible.
*/
termios->c_cflag &= ~CBAUD;
if (old) {
baud = tty_termios_baud_rate(old);
if (!hung_up)
tty_termios_encode_baud_rate(termios,
baud, baud);
old = NULL;
continue;
}
/*
* As a last resort, if the range cannot be met then clip to
* the nearest chip supported rate.
*/
if (!hung_up) {
if (baud <= min)
tty_termios_encode_baud_rate(termios,
min + 1, min + 1);
else
tty_termios_encode_baud_rate(termios,
max - 1, max - 1);
}
}
/* Should never happen */
WARN_ON(1);
return 0;
}
EXPORT_SYMBOL(uart_get_baud_rate);
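/*
 * Example of the legacy spd_* mapping handled above (illustrative): a
 * termios rate of B38400 combined with UPF_SPD_VHI in port->flags is
 * treated as a request for 115200 baud on the first pass of the loop.
 */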
/**
* uart_get_divisor - return uart clock divisor
* @port: uart_port structure describing the port
* @baud: desired baud rate
*
* Calculate the divisor (baud_base / baud) for the specified @baud,
* appropriately rounded.
*
* If 38400 baud and custom divisor is selected, return the custom divisor
* instead.
*
* Locking: caller dependent
*/
unsigned int
uart_get_divisor(struct uart_port *port, unsigned int baud)
{
unsigned int quot;
/*
* Old custom speed handling.
*/
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
quot = port->custom_divisor;
else
quot = DIV_ROUND_CLOSEST(port->uartclk, 16 * baud);
return quot;
}
EXPORT_SYMBOL(uart_get_divisor);
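/*
 * Worked example (illustrative): with the classic 16550 input clock of
 * 1.8432 MHz, uart_get_divisor(port, 115200) yields
 * DIV_ROUND_CLOSEST(1843200, 16 * 115200) == 1, and 9600 baud yields 12.
 */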
static int uart_put_char(struct tty_struct *tty, u8 c)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
struct circ_buf *circ;
unsigned long flags;
int ret = 0;
circ = &state->xmit;
port = uart_port_lock(state, flags);
if (!circ->buf) {
uart_port_unlock(port, flags);
return 0;
}
if (port && uart_circ_chars_free(circ) != 0) {
circ->buf[circ->head] = c;
circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
ret = 1;
}
uart_port_unlock(port, flags);
return ret;
}
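/*
 * Note on the index arithmetic above: UART_XMIT_SIZE (defined as PAGE_SIZE)
 * is a power of two, so "(head + 1) & (UART_XMIT_SIZE - 1)" wraps the
 * circular buffer head without a division or a branch.
 */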
static void uart_flush_chars(struct tty_struct *tty)
{
uart_start(tty);
}
static ssize_t uart_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
struct circ_buf *circ;
unsigned long flags;
int c, ret = 0;
/*
* This means you called this function _after_ the port was
* closed. No cookie for you.
*/
if (WARN_ON(!state))
return -EL3HLT;
port = uart_port_lock(state, flags);
circ = &state->xmit;
if (!circ->buf) {
uart_port_unlock(port, flags);
return 0;
}
while (port) {
c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0)
break;
memcpy(circ->buf + circ->head, buf, c);
circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
buf += c;
count -= c;
ret += c;
}
__uart_start(state);
uart_port_unlock(port, flags);
return ret;
}
static unsigned int uart_write_room(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
unsigned long flags;
unsigned int ret;
port = uart_port_lock(state, flags);
ret = uart_circ_chars_free(&state->xmit);
uart_port_unlock(port, flags);
return ret;
}
static unsigned int uart_chars_in_buffer(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
unsigned long flags;
unsigned int ret;
port = uart_port_lock(state, flags);
ret = uart_circ_chars_pending(&state->xmit);
uart_port_unlock(port, flags);
return ret;
}
static void uart_flush_buffer(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
unsigned long flags;
/*
* This means you called this function _after_ the port was
* closed. No cookie for you.
*/
if (WARN_ON(!state))
return;
pr_debug("uart_flush_buffer(%d) called\n", tty->index);
port = uart_port_lock(state, flags);
if (!port)
return;
uart_circ_clear(&state->xmit);
if (port->ops->flush_buffer)
port->ops->flush_buffer(port);
uart_port_unlock(port, flags);
tty_port_tty_wakeup(&state->port);
}
/*
 * This function performs a low-level write of a high-priority XON/XOFF
 * character and does the accounting for it.
*
* Requires uart_port to implement .serial_out().
*/
void uart_xchar_out(struct uart_port *uport, int offset)
{
serial_port_out(uport, offset, uport->x_char);
uport->icount.tx++;
uport->x_char = 0;
}
EXPORT_SYMBOL_GPL(uart_xchar_out);
/*
* This function is used to send a high-priority XON/XOFF character to
* the device
*/
static void uart_send_xchar(struct tty_struct *tty, char ch)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
unsigned long flags;
port = uart_port_ref(state);
if (!port)
return;
if (port->ops->send_xchar)
port->ops->send_xchar(port, ch);
else {
spin_lock_irqsave(&port->lock, flags);
port->x_char = ch;
if (ch)
port->ops->start_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
}
uart_port_deref(port);
}
static void uart_throttle(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
upstat_t mask = UPSTAT_SYNC_FIFO;
struct uart_port *port;
port = uart_port_ref(state);
if (!port)
return;
if (I_IXOFF(tty))
mask |= UPSTAT_AUTOXOFF;
if (C_CRTSCTS(tty))
mask |= UPSTAT_AUTORTS;
if (port->status & mask) {
port->ops->throttle(port);
mask &= ~port->status;
}
if (mask & UPSTAT_AUTORTS)
uart_clear_mctrl(port, TIOCM_RTS);
if (mask & UPSTAT_AUTOXOFF)
uart_send_xchar(tty, STOP_CHAR(tty));
uart_port_deref(port);
}
static void uart_unthrottle(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
upstat_t mask = UPSTAT_SYNC_FIFO;
struct uart_port *port;
port = uart_port_ref(state);
if (!port)
return;
if (I_IXOFF(tty))
mask |= UPSTAT_AUTOXOFF;
if (C_CRTSCTS(tty))
mask |= UPSTAT_AUTORTS;
if (port->status & mask) {
port->ops->unthrottle(port);
mask &= ~port->status;
}
if (mask & UPSTAT_AUTORTS)
uart_set_mctrl(port, TIOCM_RTS);
if (mask & UPSTAT_AUTOXOFF)
uart_send_xchar(tty, START_CHAR(tty));
uart_port_deref(port);
}
static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
int ret = -ENODEV;
/*
* Ensure the state we copy is consistent and no hardware changes
* occur as we go
*/
mutex_lock(&port->mutex);
uport = uart_port_check(state);
if (!uport)
goto out;
retinfo->type = uport->type;
retinfo->line = uport->line;
retinfo->port = uport->iobase;
if (HIGH_BITS_OFFSET)
retinfo->port_high = (long) uport->iobase >> HIGH_BITS_OFFSET;
retinfo->irq = uport->irq;
retinfo->flags = (__force int)uport->flags;
retinfo->xmit_fifo_size = uport->fifosize;
retinfo->baud_base = uport->uartclk / 16;
retinfo->close_delay = jiffies_to_msecs(port->close_delay) / 10;
retinfo->closing_wait = port->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
jiffies_to_msecs(port->closing_wait) / 10;
retinfo->custom_divisor = uport->custom_divisor;
retinfo->hub6 = uport->hub6;
retinfo->io_type = uport->iotype;
retinfo->iomem_reg_shift = uport->regshift;
retinfo->iomem_base = (void *)(unsigned long)uport->mapbase;
ret = 0;
out:
mutex_unlock(&port->mutex);
return ret;
}
static int uart_get_info_user(struct tty_struct *tty,
struct serial_struct *ss)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
return uart_get_info(port, ss) < 0 ? -EIO : 0;
}
static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
struct uart_state *state,
struct serial_struct *new_info)
{
struct uart_port *uport = uart_port_check(state);
unsigned long new_port;
unsigned int change_irq, change_port, closing_wait;
unsigned int old_custom_divisor, close_delay;
upf_t old_flags, new_flags;
int retval = 0;
if (!uport)
return -EIO;
new_port = new_info->port;
if (HIGH_BITS_OFFSET)
new_port += (unsigned long) new_info->port_high << HIGH_BITS_OFFSET;
new_info->irq = irq_canonicalize(new_info->irq);
close_delay = msecs_to_jiffies(new_info->close_delay * 10);
closing_wait = new_info->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
msecs_to_jiffies(new_info->closing_wait * 10);
change_irq = !(uport->flags & UPF_FIXED_PORT)
&& new_info->irq != uport->irq;
/*
* Since changing the 'type' of the port changes its resource
* allocations, we should treat type changes the same as
* IO port changes.
*/
change_port = !(uport->flags & UPF_FIXED_PORT)
&& (new_port != uport->iobase ||
(unsigned long)new_info->iomem_base != uport->mapbase ||
new_info->hub6 != uport->hub6 ||
new_info->io_type != uport->iotype ||
new_info->iomem_reg_shift != uport->regshift ||
new_info->type != uport->type);
old_flags = uport->flags;
new_flags = (__force upf_t)new_info->flags;
old_custom_divisor = uport->custom_divisor;
if (!capable(CAP_SYS_ADMIN)) {
retval = -EPERM;
if (change_irq || change_port ||
(new_info->baud_base != uport->uartclk / 16) ||
(close_delay != port->close_delay) ||
(closing_wait != port->closing_wait) ||
(new_info->xmit_fifo_size &&
new_info->xmit_fifo_size != uport->fifosize) ||
(((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0))
goto exit;
uport->flags = ((uport->flags & ~UPF_USR_MASK) |
(new_flags & UPF_USR_MASK));
uport->custom_divisor = new_info->custom_divisor;
goto check_and_exit;
}
if (change_irq || change_port) {
retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
if (retval)
goto exit;
}
/*
* Ask the low level driver to verify the settings.
*/
if (uport->ops->verify_port)
retval = uport->ops->verify_port(uport, new_info);
if ((new_info->irq >= nr_irqs) || (new_info->irq < 0) ||
(new_info->baud_base < 9600))
retval = -EINVAL;
if (retval)
goto exit;
if (change_port || change_irq) {
retval = -EBUSY;
/*
* Make sure that we are the sole user of this port.
*/
if (tty_port_users(port) > 1)
goto exit;
/*
* We need to shutdown the serial port at the old
* port/type/irq combination.
*/
uart_shutdown(tty, state);
}
if (change_port) {
unsigned long old_iobase, old_mapbase;
unsigned int old_type, old_iotype, old_hub6, old_shift;
old_iobase = uport->iobase;
old_mapbase = uport->mapbase;
old_type = uport->type;
old_hub6 = uport->hub6;
old_iotype = uport->iotype;
old_shift = uport->regshift;
/*
* Free and release old regions
*/
if (old_type != PORT_UNKNOWN && uport->ops->release_port)
uport->ops->release_port(uport);
uport->iobase = new_port;
uport->type = new_info->type;
uport->hub6 = new_info->hub6;
uport->iotype = new_info->io_type;
uport->regshift = new_info->iomem_reg_shift;
uport->mapbase = (unsigned long)new_info->iomem_base;
/*
* Claim and map the new regions
*/
if (uport->type != PORT_UNKNOWN && uport->ops->request_port) {
retval = uport->ops->request_port(uport);
} else {
/* Always success - Jean II */
retval = 0;
}
/*
* If we fail to request resources for the
* new port, try to restore the old settings.
*/
if (retval) {
uport->iobase = old_iobase;
uport->type = old_type;
uport->hub6 = old_hub6;
uport->iotype = old_iotype;
uport->regshift = old_shift;
uport->mapbase = old_mapbase;
if (old_type != PORT_UNKNOWN) {
retval = uport->ops->request_port(uport);
/*
* If we failed to restore the old settings,
* we fail like this.
*/
if (retval)
uport->type = PORT_UNKNOWN;
/*
* We failed anyway.
*/
retval = -EBUSY;
}
/* Added to return the correct error -Ram Gupta */
goto exit;
}
}
if (change_irq)
uport->irq = new_info->irq;
if (!(uport->flags & UPF_FIXED_PORT))
uport->uartclk = new_info->baud_base * 16;
uport->flags = (uport->flags & ~UPF_CHANGE_MASK) |
(new_flags & UPF_CHANGE_MASK);
uport->custom_divisor = new_info->custom_divisor;
port->close_delay = close_delay;
port->closing_wait = closing_wait;
if (new_info->xmit_fifo_size)
uport->fifosize = new_info->xmit_fifo_size;
check_and_exit:
retval = 0;
if (uport->type == PORT_UNKNOWN)
goto exit;
if (tty_port_initialized(port)) {
if (((old_flags ^ uport->flags) & UPF_SPD_MASK) ||
old_custom_divisor != uport->custom_divisor) {
/*
* If they're setting up a custom divisor or speed,
* instead of clearing it, then bitch about it.
*/
if (uport->flags & UPF_SPD_MASK) {
dev_notice_ratelimited(uport->dev,
"%s sets custom speed on %s. This is deprecated.\n",
current->comm,
tty_name(port->tty));
}
uart_change_line_settings(tty, state, NULL);
}
} else {
retval = uart_startup(tty, state, true);
if (retval == 0)
tty_port_set_initialized(port, true);
if (retval > 0)
retval = 0;
}
exit:
return retval;
}
static int uart_set_info_user(struct tty_struct *tty, struct serial_struct *ss)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
int retval;
down_write(&tty->termios_rwsem);
/*
* This semaphore protects port->count. It is also
* very useful to prevent opens. Also, take the
* port configuration semaphore to make sure that a
* module insertion/removal doesn't change anything
* under us.
*/
mutex_lock(&port->mutex);
retval = uart_set_info(tty, port, state, ss);
mutex_unlock(&port->mutex);
up_write(&tty->termios_rwsem);
return retval;
}
/**
* uart_get_lsr_info - get line status register info
* @tty: tty associated with the UART
* @state: UART being queried
* @value: returned modem value
*/
static int uart_get_lsr_info(struct tty_struct *tty,
struct uart_state *state, unsigned int __user *value)
{
struct uart_port *uport = uart_port_check(state);
unsigned int result;
result = uport->ops->tx_empty(uport);
/*
* If we're about to load something into the transmit
* register, we'll pretend the transmitter isn't empty to
* avoid a race condition (depending on when the transmit
* interrupt happens).
*/
if (uport->x_char ||
((uart_circ_chars_pending(&state->xmit) > 0) &&
!uart_tx_stopped(uport)))
result &= ~TIOCSER_TEMT;
return put_user(result, value);
}
static int uart_tiocmget(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport;
int result = -EIO;
mutex_lock(&port->mutex);
uport = uart_port_check(state);
if (!uport)
goto out;
if (!tty_io_error(tty)) {
result = uport->mctrl;
spin_lock_irq(&uport->lock);
result |= uport->ops->get_mctrl(uport);
spin_unlock_irq(&uport->lock);
}
out:
mutex_unlock(&port->mutex);
return result;
}
static int
uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport;
int ret = -EIO;
mutex_lock(&port->mutex);
uport = uart_port_check(state);
if (!uport)
goto out;
if (!tty_io_error(tty)) {
uart_update_mctrl(uport, set, clear);
ret = 0;
}
out:
mutex_unlock(&port->mutex);
return ret;
}
static int uart_break_ctl(struct tty_struct *tty, int break_state)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport;
int ret = -EIO;
mutex_lock(&port->mutex);
uport = uart_port_check(state);
if (!uport)
goto out;
if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl)
uport->ops->break_ctl(uport, break_state);
ret = 0;
out:
mutex_unlock(&port->mutex);
return ret;
}
static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state)
{
struct tty_port *port = &state->port;
struct uart_port *uport;
int flags, ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/*
* Take the per-port semaphore. This prevents count from
* changing, and hence any extra opens of the port while
* we're auto-configuring.
*/
if (mutex_lock_interruptible(&port->mutex))
return -ERESTARTSYS;
uport = uart_port_check(state);
if (!uport) {
ret = -EIO;
goto out;
}
ret = -EBUSY;
if (tty_port_users(port) == 1) {
uart_shutdown(tty, state);
/*
* If we already have a port type configured,
* we must release its resources.
*/
if (uport->type != PORT_UNKNOWN && uport->ops->release_port)
uport->ops->release_port(uport);
flags = UART_CONFIG_TYPE;
if (uport->flags & UPF_AUTO_IRQ)
flags |= UART_CONFIG_IRQ;
/*
		 * This will claim the port's resources if
* a port is found.
*/
uport->ops->config_port(uport, flags);
ret = uart_startup(tty, state, true);
if (ret == 0)
tty_port_set_initialized(port, true);
if (ret > 0)
ret = 0;
}
out:
mutex_unlock(&port->mutex);
return ret;
}
static void uart_enable_ms(struct uart_port *uport)
{
/*
* Force modem status interrupts on
*/
if (uport->ops->enable_ms)
uport->ops->enable_ms(uport);
}
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
* (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
* Caller should use TIOCGICOUNT to see which one it was
*
 * FIXME: This wants extracting into a common implementation of TIOCMWAIT,
 * shared by all drivers via tty_port.
*/
static int uart_wait_modem_status(struct uart_state *state, unsigned long arg)
{
struct uart_port *uport;
struct tty_port *port = &state->port;
DECLARE_WAITQUEUE(wait, current);
struct uart_icount cprev, cnow;
int ret;
/*
* note the counters on entry
*/
uport = uart_port_ref(state);
if (!uport)
return -EIO;
spin_lock_irq(&uport->lock);
memcpy(&cprev, &uport->icount, sizeof(struct uart_icount));
uart_enable_ms(uport);
spin_unlock_irq(&uport->lock);
add_wait_queue(&port->delta_msr_wait, &wait);
for (;;) {
spin_lock_irq(&uport->lock);
memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
spin_unlock_irq(&uport->lock);
set_current_state(TASK_INTERRUPTIBLE);
if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
ret = 0;
break;
}
schedule();
/* see if a signal did it */
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
cprev = cnow;
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&port->delta_msr_wait, &wait);
uart_port_deref(uport);
return ret;
}
/*
* Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
 * Return: writes the counters to the user-passed counter struct
* NB: both 1->0 and 0->1 transitions are counted except for
* RI where only 0->1 is counted.
*/
static int uart_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct uart_state *state = tty->driver_data;
struct uart_icount cnow;
struct uart_port *uport;
uport = uart_port_ref(state);
if (!uport)
return -EIO;
spin_lock_irq(&uport->lock);
memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
spin_unlock_irq(&uport->lock);
uart_port_deref(uport);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->rx = cnow.rx;
icount->tx = cnow.tx;
icount->frame = cnow.frame;
icount->overrun = cnow.overrun;
icount->parity = cnow.parity;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
#define SER_RS485_LEGACY_FLAGS (SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | \
SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX | \
SER_RS485_TERMINATE_BUS)
static int uart_check_rs485_flags(struct uart_port *port, struct serial_rs485 *rs485)
{
u32 flags = rs485->flags;
/* Don't return -EINVAL for unsupported legacy flags */
flags &= ~SER_RS485_LEGACY_FLAGS;
/*
* For any bit outside of the legacy ones that is not supported by
* the driver, return -EINVAL.
*/
if (flags & ~port->rs485_supported.flags)
return -EINVAL;
/* Asking for address w/o addressing mode? */
if (!(rs485->flags & SER_RS485_ADDRB) &&
(rs485->flags & (SER_RS485_ADDR_RECV|SER_RS485_ADDR_DEST)))
return -EINVAL;
/* Address given but not enabled? */
if (!(rs485->flags & SER_RS485_ADDR_RECV) && rs485->addr_recv)
return -EINVAL;
if (!(rs485->flags & SER_RS485_ADDR_DEST) && rs485->addr_dest)
return -EINVAL;
return 0;
}
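/*
 * Examples of what the checks above reject (illustrative): requesting
 * SER_RS485_ADDR_RECV or SER_RS485_ADDR_DEST without SER_RS485_ADDRB, or
 * supplying a non-zero addr_recv/addr_dest while the corresponding
 * SER_RS485_ADDR_* flag is clear, both yield -EINVAL.
 */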
static void uart_sanitize_serial_rs485_delays(struct uart_port *port,
struct serial_rs485 *rs485)
{
if (!port->rs485_supported.delay_rts_before_send) {
if (rs485->delay_rts_before_send) {
dev_warn_ratelimited(port->dev,
"%s (%d): RTS delay before sending not supported\n",
port->name, port->line);
}
rs485->delay_rts_before_send = 0;
} else if (rs485->delay_rts_before_send > RS485_MAX_RTS_DELAY) {
rs485->delay_rts_before_send = RS485_MAX_RTS_DELAY;
dev_warn_ratelimited(port->dev,
"%s (%d): RTS delay before sending clamped to %u ms\n",
port->name, port->line, rs485->delay_rts_before_send);
}
if (!port->rs485_supported.delay_rts_after_send) {
if (rs485->delay_rts_after_send) {
dev_warn_ratelimited(port->dev,
"%s (%d): RTS delay after sending not supported\n",
port->name, port->line);
}
rs485->delay_rts_after_send = 0;
} else if (rs485->delay_rts_after_send > RS485_MAX_RTS_DELAY) {
rs485->delay_rts_after_send = RS485_MAX_RTS_DELAY;
dev_warn_ratelimited(port->dev,
"%s (%d): RTS delay after sending clamped to %u ms\n",
port->name, port->line, rs485->delay_rts_after_send);
}
}
static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs485 *rs485)
{
u32 supported_flags = port->rs485_supported.flags;
if (!(rs485->flags & SER_RS485_ENABLED)) {
memset(rs485, 0, sizeof(*rs485));
return;
}
/* Pick sane settings if the user hasn't */
if ((supported_flags & (SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND)) &&
!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
!(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
dev_warn_ratelimited(port->dev,
"%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
port->name, port->line);
rs485->flags |= SER_RS485_RTS_ON_SEND;
rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
supported_flags |= SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND;
}
rs485->flags &= supported_flags;
uart_sanitize_serial_rs485_delays(port, rs485);
/* Return clean padding area to userspace */
memset(rs485->padding0, 0, sizeof(rs485->padding0));
memset(rs485->padding1, 0, sizeof(rs485->padding1));
}
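/*
 * Example of the fixup above (illustrative): for a driver that supports the
 * RTS flags, a userspace request with both SER_RS485_RTS_ON_SEND and
 * SER_RS485_RTS_AFTER_SEND set (or both clear) is contradictory, so it is
 * rewritten to RTS_ON_SEND and a ratelimited warning is logged.
 */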
static void uart_set_rs485_termination(struct uart_port *port,
const struct serial_rs485 *rs485)
{
if (!(rs485->flags & SER_RS485_ENABLED))
return;
gpiod_set_value_cansleep(port->rs485_term_gpio,
!!(rs485->flags & SER_RS485_TERMINATE_BUS));
}
static int uart_rs485_config(struct uart_port *port)
{
struct serial_rs485 *rs485 = &port->rs485;
int ret;
uart_sanitize_serial_rs485(port, rs485);
uart_set_rs485_termination(port, rs485);
ret = port->rs485_config(port, NULL, rs485);
if (ret)
memset(rs485, 0, sizeof(*rs485));
return ret;
}
static int uart_get_rs485_config(struct uart_port *port,
struct serial_rs485 __user *rs485)
{
unsigned long flags;
struct serial_rs485 aux;
spin_lock_irqsave(&port->lock, flags);
aux = port->rs485;
spin_unlock_irqrestore(&port->lock, flags);
if (copy_to_user(rs485, &aux, sizeof(aux)))
return -EFAULT;
return 0;
}
static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
struct serial_rs485 __user *rs485_user)
{
struct serial_rs485 rs485;
int ret;
unsigned long flags;
if (!port->rs485_config)
return -ENOTTY;
if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
return -EFAULT;
ret = uart_check_rs485_flags(port, &rs485);
if (ret)
return ret;
uart_sanitize_serial_rs485(port, &rs485);
uart_set_rs485_termination(port, &rs485);
spin_lock_irqsave(&port->lock, flags);
ret = port->rs485_config(port, &tty->termios, &rs485);
if (!ret) {
port->rs485 = rs485;
/* Reset RTS and other mctrl lines when disabling RS485 */
if (!(rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
}
spin_unlock_irqrestore(&port->lock, flags);
if (ret)
return ret;
if (copy_to_user(rs485_user, &port->rs485, sizeof(port->rs485)))
return -EFAULT;
return 0;
}
static int uart_get_iso7816_config(struct uart_port *port,
struct serial_iso7816 __user *iso7816)
{
unsigned long flags;
struct serial_iso7816 aux;
if (!port->iso7816_config)
return -ENOTTY;
spin_lock_irqsave(&port->lock, flags);
aux = port->iso7816;
spin_unlock_irqrestore(&port->lock, flags);
if (copy_to_user(iso7816, &aux, sizeof(aux)))
return -EFAULT;
return 0;
}
static int uart_set_iso7816_config(struct uart_port *port,
struct serial_iso7816 __user *iso7816_user)
{
struct serial_iso7816 iso7816;
int i, ret;
unsigned long flags;
if (!port->iso7816_config)
return -ENOTTY;
if (copy_from_user(&iso7816, iso7816_user, sizeof(*iso7816_user)))
return -EFAULT;
/*
* There are 5 words reserved for future use. Check that userspace
* doesn't put stuff in there to prevent breakages in the future.
*/
for (i = 0; i < ARRAY_SIZE(iso7816.reserved); i++)
if (iso7816.reserved[i])
return -EINVAL;
spin_lock_irqsave(&port->lock, flags);
ret = port->iso7816_config(port, &iso7816);
spin_unlock_irqrestore(&port->lock, flags);
if (ret)
return ret;
if (copy_to_user(iso7816_user, &port->iso7816, sizeof(port->iso7816)))
return -EFAULT;
return 0;
}
/*
* Called via sys_ioctl. We can use spin_lock_irq() here.
*/
static int
uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport;
void __user *uarg = (void __user *)arg;
int ret = -ENOIOCTLCMD;
/*
* These ioctls don't rely on the hardware to be present.
*/
switch (cmd) {
case TIOCSERCONFIG:
down_write(&tty->termios_rwsem);
ret = uart_do_autoconfig(tty, state);
up_write(&tty->termios_rwsem);
break;
}
if (ret != -ENOIOCTLCMD)
goto out;
if (tty_io_error(tty)) {
ret = -EIO;
goto out;
}
/*
* The following should only be used when hardware is present.
*/
switch (cmd) {
case TIOCMIWAIT:
ret = uart_wait_modem_status(state, arg);
break;
}
if (ret != -ENOIOCTLCMD)
goto out;
/* rs485_config requires more locking than others */
if (cmd == TIOCSRS485)
down_write(&tty->termios_rwsem);
mutex_lock(&port->mutex);
uport = uart_port_check(state);
if (!uport || tty_io_error(tty)) {
ret = -EIO;
goto out_up;
}
/*
* All these rely on hardware being present and need to be
* protected against the tty being hung up.
*/
switch (cmd) {
case TIOCSERGETLSR: /* Get line status register */
ret = uart_get_lsr_info(tty, state, uarg);
break;
case TIOCGRS485:
ret = uart_get_rs485_config(uport, uarg);
break;
case TIOCSRS485:
ret = uart_set_rs485_config(tty, uport, uarg);
break;
case TIOCSISO7816:
ret = uart_set_iso7816_config(state->uart_port, uarg);
break;
case TIOCGISO7816:
ret = uart_get_iso7816_config(state->uart_port, uarg);
break;
default:
if (uport->ops->ioctl)
ret = uport->ops->ioctl(uport, cmd, arg);
break;
}
out_up:
mutex_unlock(&port->mutex);
if (cmd == TIOCSRS485)
up_write(&tty->termios_rwsem);
out:
return ret;
}
static void uart_set_ldisc(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *uport;
struct tty_port *port = &state->port;
if (!tty_port_initialized(port))
return;
mutex_lock(&state->port.mutex);
uport = uart_port_check(state);
if (uport && uport->ops->set_ldisc)
uport->ops->set_ldisc(uport, &tty->termios);
mutex_unlock(&state->port.mutex);
}
static void uart_set_termios(struct tty_struct *tty,
const struct ktermios *old_termios)
{
struct uart_state *state = tty->driver_data;
struct uart_port *uport;
unsigned int cflag = tty->termios.c_cflag;
unsigned int iflag_mask = IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK;
bool sw_changed = false;
mutex_lock(&state->port.mutex);
uport = uart_port_check(state);
if (!uport)
goto out;
/*
* Drivers doing software flow control also need to know
* about changes to these input settings.
*/
if (uport->flags & UPF_SOFT_FLOW) {
iflag_mask |= IXANY|IXON|IXOFF;
sw_changed =
tty->termios.c_cc[VSTART] != old_termios->c_cc[VSTART] ||
tty->termios.c_cc[VSTOP] != old_termios->c_cc[VSTOP];
}
/*
* These are the bits that are used to setup various
* flags in the low level driver. We can ignore the Bfoo
* bits in c_cflag; c_[io]speed will always be set
* appropriately by set_termios() in tty_ioctl.c
*/
if ((cflag ^ old_termios->c_cflag) == 0 &&
tty->termios.c_ospeed == old_termios->c_ospeed &&
tty->termios.c_ispeed == old_termios->c_ispeed &&
((tty->termios.c_iflag ^ old_termios->c_iflag) & iflag_mask) == 0 &&
!sw_changed) {
goto out;
}
uart_change_line_settings(tty, state, old_termios);
/* reload cflag from termios; port driver may have overridden flags */
cflag = tty->termios.c_cflag;
/* Handle transition to B0 status */
if (((old_termios->c_cflag & CBAUD) != B0) && ((cflag & CBAUD) == B0))
uart_clear_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
/* Handle transition away from B0 status */
else if (((old_termios->c_cflag & CBAUD) == B0) && ((cflag & CBAUD) != B0)) {
unsigned int mask = TIOCM_DTR;
if (!(cflag & CRTSCTS) || !tty_throttled(tty))
mask |= TIOCM_RTS;
uart_set_mctrl(uport, mask);
}
out:
mutex_unlock(&state->port.mutex);
}
/*
* Calls to uart_close() are serialised via the tty_lock in
* drivers/tty/tty_io.c:tty_release()
* drivers/tty/tty_io.c:do_tty_hangup()
*/
static void uart_close(struct tty_struct *tty, struct file *filp)
{
struct uart_state *state = tty->driver_data;
if (!state) {
struct uart_driver *drv = tty->driver->driver_state;
struct tty_port *port;
state = drv->state + tty->index;
port = &state->port;
spin_lock_irq(&port->lock);
--port->count;
spin_unlock_irq(&port->lock);
return;
}
pr_debug("uart_close(%d) called\n", tty->index);
tty_port_close(tty->port, tty, filp);
}
static void uart_tty_port_shutdown(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = uart_port_check(state);
char *buf;
/*
* At this point, we stop accepting input. To do this, we
* disable the receive line status interrupts.
*/
if (WARN(!uport, "detached port still initialized!\n"))
return;
spin_lock_irq(&uport->lock);
uport->ops->stop_rx(uport);
spin_unlock_irq(&uport->lock);
uart_port_shutdown(port);
/*
* It's possible for shutdown to be called after suspend if we get
* a DCD drop (hangup) at just the right time. Clear suspended bit so
* we don't try to resume a port that has been shutdown.
*/
tty_port_set_suspended(port, false);
/*
* Free the transmit buffer.
*/
spin_lock_irq(&uport->lock);
buf = state->xmit.buf;
state->xmit.buf = NULL;
spin_unlock_irq(&uport->lock);
free_page((unsigned long)buf);
uart_change_pm(state, UART_PM_STATE_OFF);
}
static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
unsigned long char_time, expire, fifo_timeout;
port = uart_port_ref(state);
if (!port)
return;
if (port->type == PORT_UNKNOWN || port->fifosize == 0) {
uart_port_deref(port);
return;
}
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
* interval should also be less than the timeout.
*
* Note: we have to use pretty tight timings here to satisfy
* the NIST-PCTS.
*/
char_time = max(nsecs_to_jiffies(port->frame_time / 5), 1UL);
if (timeout && timeout < char_time)
char_time = timeout;
if (!uart_cts_enabled(port)) {
/*
* If the transmitter hasn't cleared in twice the approximate
* amount of time to send the entire FIFO, it probably won't
* ever clear. This assumes the UART isn't doing flow
* control, which is currently the case. Hence, if it ever
* takes longer than FIFO timeout, this is probably due to a
* UART bug of some kind. So, we clamp the timeout parameter at
* 2 * FIFO timeout.
*/
fifo_timeout = uart_fifo_timeout(port);
if (timeout == 0 || timeout > 2 * fifo_timeout)
timeout = 2 * fifo_timeout;
}
expire = jiffies + timeout;
pr_debug("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n",
port->line, jiffies, expire);
/*
* Check whether the transmitter is empty every 'char_time'.
* 'timeout' / 'expire' give us the maximum amount of time
* we wait.
*/
while (!port->ops->tx_empty(port)) {
msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, expire))
break;
}
uart_port_deref(port);
}
/*
* Calls to uart_hangup() are serialised by the tty_lock in
* drivers/tty/tty_io.c:do_tty_hangup()
* This runs from a workqueue and can sleep for a _short_ time only.
*/
static void uart_hangup(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport;
unsigned long flags;
pr_debug("uart_hangup(%d)\n", tty->index);
mutex_lock(&port->mutex);
uport = uart_port_check(state);
WARN(!uport, "hangup of detached port!\n");
if (tty_port_active(port)) {
uart_flush_buffer(tty);
uart_shutdown(tty, state);
spin_lock_irqsave(&port->lock, flags);
port->count = 0;
spin_unlock_irqrestore(&port->lock, flags);
tty_port_set_active(port, false);
tty_port_tty_set(port, NULL);
if (uport && !uart_console(uport))
uart_change_pm(state, UART_PM_STATE_OFF);
wake_up_interruptible(&port->open_wait);
wake_up_interruptible(&port->delta_msr_wait);
}
mutex_unlock(&port->mutex);
}
/* uport == NULL if uart_port has already been removed */
static void uart_port_shutdown(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = uart_port_check(state);
/*
* clear delta_msr_wait queue to avoid mem leaks: we may free
* the irq here so the queue might never be woken up. Note
* that we won't end up waiting on delta_msr_wait again since
* any outstanding file descriptors should be pointing at
* hung_up_tty_fops now.
*/
wake_up_interruptible(&port->delta_msr_wait);
if (uport) {
/* Free the IRQ and disable the port. */
uport->ops->shutdown(uport);
/* Ensure that the IRQ handler isn't running on another CPU. */
synchronize_irq(uport->irq);
}
}
static bool uart_carrier_raised(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
int mctrl;
uport = uart_port_ref(state);
/*
* Should never observe uport == NULL since checks for hangup should
* abort the tty_port_block_til_ready() loop before checking for carrier
 * raised -- but report carrier raised if it happens anyway, so that the
 * open will continue rather than sleep.
*/
if (WARN_ON(!uport))
return true;
spin_lock_irq(&uport->lock);
uart_enable_ms(uport);
mctrl = uport->ops->get_mctrl(uport);
spin_unlock_irq(&uport->lock);
uart_port_deref(uport);
return mctrl & TIOCM_CAR;
}
static void uart_dtr_rts(struct tty_port *port, bool active)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
uport = uart_port_ref(state);
if (!uport)
return;
uart_port_dtr_rts(uport, active);
uart_port_deref(uport);
}
static int uart_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct uart_driver *drv = driver->driver_state;
struct uart_state *state = drv->state + tty->index;
tty->driver_data = state;
return tty_standard_install(driver, tty);
}
/*
* Calls to uart_open are serialised by the tty_lock in
* drivers/tty/tty_io.c:tty_open()
* Note that if this fails, then uart_close() _will_ be called.
*
* In time, we want to scrap the "opening nonpresent ports"
* behaviour and implement an alternative way for setserial
* to set base addresses/ports/types. This will allow us to
* get rid of a certain amount of extra tests.
*/
static int uart_open(struct tty_struct *tty, struct file *filp)
{
struct uart_state *state = tty->driver_data;
int retval;
retval = tty_port_open(&state->port, tty, filp);
if (retval > 0)
retval = 0;
return retval;
}
static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
int ret;
uport = uart_port_check(state);
if (!uport || uport->flags & UPF_DEAD)
return -ENXIO;
/*
* Start up the serial port.
*/
ret = uart_startup(tty, state, false);
if (ret > 0)
tty_port_set_active(port, true);
return ret;
}
static const char *uart_type(struct uart_port *port)
{
const char *str = NULL;
if (port->ops->type)
str = port->ops->type(port);
if (!str)
str = "unknown";
return str;
}
#ifdef CONFIG_PROC_FS
static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
{
struct uart_state *state = drv->state + i;
struct tty_port *port = &state->port;
enum uart_pm_state pm_state;
struct uart_port *uport;
char stat_buf[32];
unsigned int status;
int mmio;
mutex_lock(&port->mutex);
uport = uart_port_check(state);
if (!uport)
goto out;
mmio = uport->iotype >= UPIO_MEM;
seq_printf(m, "%d: uart:%s %s%08llX irq:%d",
uport->line, uart_type(uport),
mmio ? "mmio:0x" : "port:",
mmio ? (unsigned long long)uport->mapbase
: (unsigned long long)uport->iobase,
uport->irq);
if (uport->type == PORT_UNKNOWN) {
seq_putc(m, '\n');
goto out;
}
if (capable(CAP_SYS_ADMIN)) {
pm_state = state->pm_state;
if (pm_state != UART_PM_STATE_ON)
uart_change_pm(state, UART_PM_STATE_ON);
spin_lock_irq(&uport->lock);
status = uport->ops->get_mctrl(uport);
spin_unlock_irq(&uport->lock);
if (pm_state != UART_PM_STATE_ON)
uart_change_pm(state, pm_state);
seq_printf(m, " tx:%d rx:%d",
uport->icount.tx, uport->icount.rx);
if (uport->icount.frame)
seq_printf(m, " fe:%d", uport->icount.frame);
if (uport->icount.parity)
seq_printf(m, " pe:%d", uport->icount.parity);
if (uport->icount.brk)
seq_printf(m, " brk:%d", uport->icount.brk);
if (uport->icount.overrun)
seq_printf(m, " oe:%d", uport->icount.overrun);
if (uport->icount.buf_overrun)
seq_printf(m, " bo:%d", uport->icount.buf_overrun);
#define INFOBIT(bit, str) \
if (uport->mctrl & (bit)) \
strncat(stat_buf, (str), sizeof(stat_buf) - \
strlen(stat_buf) - 2)
#define STATBIT(bit, str) \
if (status & (bit)) \
strncat(stat_buf, (str), sizeof(stat_buf) - \
strlen(stat_buf) - 2)
stat_buf[0] = '\0';
stat_buf[1] = '\0';
INFOBIT(TIOCM_RTS, "|RTS");
STATBIT(TIOCM_CTS, "|CTS");
INFOBIT(TIOCM_DTR, "|DTR");
STATBIT(TIOCM_DSR, "|DSR");
STATBIT(TIOCM_CAR, "|CD");
STATBIT(TIOCM_RNG, "|RI");
if (stat_buf[0])
stat_buf[0] = ' ';
seq_puts(m, stat_buf);
}
seq_putc(m, '\n');
#undef STATBIT
#undef INFOBIT
out:
mutex_unlock(&port->mutex);
}
static int uart_proc_show(struct seq_file *m, void *v)
{
struct tty_driver *ttydrv = m->private;
struct uart_driver *drv = ttydrv->driver_state;
int i;
seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n", "", "", "");
for (i = 0; i < drv->nr; i++)
uart_line_info(m, drv, i);
return 0;
}
#endif
static void uart_port_spin_lock_init(struct uart_port *port)
{
spin_lock_init(&port->lock);
lockdep_set_class(&port->lock, &port_lock_key);
}
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/**
* uart_console_write - write a console message to a serial port
* @port: the port to write the message
* @s: array of characters
* @count: number of characters in string to write
* @putchar: function to write character to port
*/
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
void (*putchar)(struct uart_port *, unsigned char))
{
unsigned int i;
for (i = 0; i < count; i++, s++) {
if (*s == '\n')
putchar(port, '\r');
putchar(port, *s);
}
}
EXPORT_SYMBOL_GPL(uart_console_write);
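/*
 * Example (illustrative): uart_console_write(port, "ok\n", 3, putchar)
 * emits 'o', 'k', '\r', '\n' -- each LF is expanded to CR+LF for the
 * benefit of dumb terminals.
 */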
/**
* uart_get_console - get uart port for console
* @ports: ports to search in
* @nr: number of @ports
* @co: console to search for
* Returns: uart_port for the console @co
*
* Check whether an invalid uart number has been specified (as @co->index), and
* if so, search for the first available port that does have console support.
*/
struct uart_port * __init
uart_get_console(struct uart_port *ports, int nr, struct console *co)
{
int idx = co->index;
if (idx < 0 || idx >= nr || (ports[idx].iobase == 0 &&
ports[idx].membase == NULL))
for (idx = 0; idx < nr; idx++)
if (ports[idx].iobase != 0 ||
ports[idx].membase != NULL)
break;
co->index = idx;
return ports + idx;
}
/**
* uart_parse_earlycon - Parse earlycon options
* @p: ptr to 2nd field (ie., just beyond '<name>,')
* @iotype: ptr for decoded iotype (out)
* @addr: ptr for decoded mapbase/iobase (out)
* @options: ptr for <options> field; %NULL if not present (out)
*
* Decodes earlycon kernel command line parameters of the form:
* * earlycon=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
* * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
*
* The optional form:
* * earlycon=<name>,0x<addr>,<options>
* * console=<name>,0x<addr>,<options>
*
* is also accepted; the returned @iotype will be %UPIO_MEM.
*
* Returns: 0 on success or -%EINVAL on failure
*/
int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
char **options)
{
if (strncmp(p, "mmio,", 5) == 0) {
*iotype = UPIO_MEM;
p += 5;
} else if (strncmp(p, "mmio16,", 7) == 0) {
*iotype = UPIO_MEM16;
p += 7;
} else if (strncmp(p, "mmio32,", 7) == 0) {
*iotype = UPIO_MEM32;
p += 7;
} else if (strncmp(p, "mmio32be,", 9) == 0) {
*iotype = UPIO_MEM32BE;
p += 9;
} else if (strncmp(p, "mmio32native,", 13) == 0) {
*iotype = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ?
UPIO_MEM32BE : UPIO_MEM32;
p += 13;
} else if (strncmp(p, "io,", 3) == 0) {
*iotype = UPIO_PORT;
p += 3;
} else if (strncmp(p, "0x", 2) == 0) {
*iotype = UPIO_MEM;
} else {
return -EINVAL;
}
/*
	 * Before you replace this with kstrtoull(), remember that kstrtoull()
	 * will not tolerate the options separator (',').
*/
*addr = simple_strtoull(p, NULL, 0);
p = strchr(p, ',');
if (p)
p++;
*options = p;
return 0;
}
EXPORT_SYMBOL_GPL(uart_parse_earlycon);
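/*
 * Example (illustrative command line, the address is made up): for
 * "earlycon=uart8250,mmio32,0xfe215040,115200n8" this function is handed
 * p = "mmio32,0xfe215040,115200n8" and returns *iotype = UPIO_MEM32,
 * *addr = 0xfe215040 and *options = "115200n8".
 */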
/**
* uart_parse_options - Parse serial port baud/parity/bits/flow control.
* @options: pointer to option string
* @baud: pointer to an 'int' variable for the baud rate.
* @parity: pointer to an 'int' variable for the parity.
* @bits: pointer to an 'int' variable for the number of data bits.
* @flow: pointer to an 'int' variable for the flow control character.
*
* uart_parse_options() decodes a string containing the serial console
* options. The format of the string is <baud><parity><bits><flow>,
* eg: 115200n8r
*/
void
uart_parse_options(const char *options, int *baud, int *parity,
int *bits, int *flow)
{
const char *s = options;
*baud = simple_strtoul(s, NULL, 10);
while (*s >= '0' && *s <= '9')
s++;
if (*s)
*parity = *s++;
if (*s)
*bits = *s++ - '0';
if (*s)
*flow = *s;
}
EXPORT_SYMBOL_GPL(uart_parse_options);
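/*
 * Example (using the format described above): uart_parse_options("115200n8r",
 * &baud, &parity, &bits, &flow) sets baud = 115200, parity = 'n', bits = 8
 * and flow = 'r'.
 */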
/**
* uart_set_options - setup the serial console parameters
* @port: pointer to the serial ports uart_port structure
* @co: console pointer
* @baud: baud rate
* @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
* @bits: number of data bits
* @flow: flow control character - 'r' (rts)
*
* Locking: Caller must hold console_list_lock in order to serialize
* early initialization of the serial-console lock.
*/
int
uart_set_options(struct uart_port *port, struct console *co,
int baud, int parity, int bits, int flow)
{
struct ktermios termios;
static struct ktermios dummy;
/*
* Ensure that the serial-console lock is initialised early.
*
* Note that the console-registered check is needed because
* kgdboc can call uart_set_options() for an already registered
* console via tty_find_polling_driver() and uart_poll_init().
*/
if (!uart_console_registered_locked(port) && !port->console_reinit)
uart_port_spin_lock_init(port);
memset(&termios, 0, sizeof(struct ktermios));
termios.c_cflag |= CREAD | HUPCL | CLOCAL;
tty_termios_encode_baud_rate(&termios, baud, baud);
if (bits == 7)
termios.c_cflag |= CS7;
else
termios.c_cflag |= CS8;
switch (parity) {
case 'o': case 'O':
termios.c_cflag |= PARODD;
fallthrough;
case 'e': case 'E':
termios.c_cflag |= PARENB;
break;
}
if (flow == 'r')
termios.c_cflag |= CRTSCTS;
/*
	 * Some UARTs on the other side don't cope with the absence of flow
	 * control, so set DTR in the host UART to keep them happy.
*/
port->mctrl |= TIOCM_DTR;
port->ops->set_termios(port, &termios, &dummy);
/*
* Allow the setting of the UART parameters with a NULL console
* too:
*/
if (co) {
co->cflag = termios.c_cflag;
co->ispeed = termios.c_ispeed;
co->ospeed = termios.c_ospeed;
}
return 0;
}
EXPORT_SYMBOL_GPL(uart_set_options);
#endif /* CONFIG_SERIAL_CORE_CONSOLE */
/**
* uart_change_pm - set power state of the port
*
* @state: port descriptor
* @pm_state: new state
*
* Locking: port->mutex has to be held
*/
static void uart_change_pm(struct uart_state *state,
enum uart_pm_state pm_state)
{
struct uart_port *port = uart_port_check(state);
if (state->pm_state != pm_state) {
if (port && port->ops->pm)
port->ops->pm(port, pm_state, state->pm_state);
state->pm_state = pm_state;
}
}
struct uart_match {
struct uart_port *port;
struct uart_driver *driver;
};
static int serial_match_port(struct device *dev, void *data)
{
struct uart_match *match = data;
struct tty_driver *tty_drv = match->driver->tty_driver;
dev_t devt = MKDEV(tty_drv->major, tty_drv->minor_start) +
match->port->line;
return dev->devt == devt; /* Actually, only one tty per port */
}
int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
struct device *tty_dev;
struct uart_match match = {uport, drv};
mutex_lock(&port->mutex);
tty_dev = device_find_child(uport->dev, &match, serial_match_port);
if (tty_dev && device_may_wakeup(tty_dev)) {
enable_irq_wake(uport->irq);
put_device(tty_dev);
mutex_unlock(&port->mutex);
return 0;
}
put_device(tty_dev);
/*
	 * Nothing to do if the console is not suspending, except for
	 * stop_rx to prevent any asynchronous data over the RX line.
	 * However, ensure that we will be able to start_rx again later.
*/
if (!console_suspend_enabled && uart_console(uport)) {
if (uport->ops->start_rx) {
spin_lock_irq(&uport->lock);
uport->ops->stop_rx(uport);
spin_unlock_irq(&uport->lock);
}
goto unlock;
}
uport->suspended = 1;
if (tty_port_initialized(port)) {
const struct uart_ops *ops = uport->ops;
int tries;
unsigned int mctrl;
tty_port_set_suspended(port, true);
tty_port_set_initialized(port, false);
spin_lock_irq(&uport->lock);
ops->stop_tx(uport);
if (!(uport->rs485.flags & SER_RS485_ENABLED))
ops->set_mctrl(uport, 0);
/* save mctrl so it can be restored on resume */
mctrl = uport->mctrl;
uport->mctrl = 0;
ops->stop_rx(uport);
spin_unlock_irq(&uport->lock);
/*
* Wait for the transmitter to empty.
*/
for (tries = 3; !ops->tx_empty(uport) && tries; tries--)
msleep(10);
if (!tries)
dev_err(uport->dev, "%s: Unable to drain transmitter\n",
uport->name);
ops->shutdown(uport);
uport->mctrl = mctrl;
}
/*
* Disable the console device before suspending.
*/
if (uart_console(uport))
console_stop(uport->cons);
uart_change_pm(state, UART_PM_STATE_OFF);
unlock:
mutex_unlock(&port->mutex);
return 0;
}
EXPORT_SYMBOL(uart_suspend_port);
int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
struct device *tty_dev;
struct uart_match match = {uport, drv};
struct ktermios termios;
mutex_lock(&port->mutex);
tty_dev = device_find_child(uport->dev, &match, serial_match_port);
if (!uport->suspended && device_may_wakeup(tty_dev)) {
if (irqd_is_wakeup_set(irq_get_irq_data((uport->irq))))
disable_irq_wake(uport->irq);
put_device(tty_dev);
mutex_unlock(&port->mutex);
return 0;
}
put_device(tty_dev);
uport->suspended = 0;
/*
	 * Re-enable the console device that was disabled before suspend.
*/
if (uart_console(uport)) {
/*
* First try to use the console cflag setting.
*/
memset(&termios, 0, sizeof(struct ktermios));
termios.c_cflag = uport->cons->cflag;
termios.c_ispeed = uport->cons->ispeed;
termios.c_ospeed = uport->cons->ospeed;
/*
* If that's unset, use the tty termios setting.
*/
if (port->tty && termios.c_cflag == 0)
termios = port->tty->termios;
if (console_suspend_enabled)
uart_change_pm(state, UART_PM_STATE_ON);
uport->ops->set_termios(uport, &termios, NULL);
if (!console_suspend_enabled && uport->ops->start_rx) {
spin_lock_irq(&uport->lock);
uport->ops->start_rx(uport);
spin_unlock_irq(&uport->lock);
}
if (console_suspend_enabled)
console_start(uport->cons);
}
if (tty_port_suspended(port)) {
const struct uart_ops *ops = uport->ops;
int ret;
uart_change_pm(state, UART_PM_STATE_ON);
spin_lock_irq(&uport->lock);
if (!(uport->rs485.flags & SER_RS485_ENABLED))
ops->set_mctrl(uport, 0);
spin_unlock_irq(&uport->lock);
if (console_suspend_enabled || !uart_console(uport)) {
/* Protected by port mutex for now */
struct tty_struct *tty = port->tty;
ret = ops->startup(uport);
if (ret == 0) {
if (tty)
uart_change_line_settings(tty, state, NULL);
spin_lock_irq(&uport->lock);
if (!(uport->rs485.flags & SER_RS485_ENABLED))
ops->set_mctrl(uport, uport->mctrl);
else
uart_rs485_config(uport);
ops->start_tx(uport);
spin_unlock_irq(&uport->lock);
tty_port_set_initialized(port, true);
} else {
/*
* Failed to resume - maybe hardware went away?
* Clear the "initialized" flag so we won't try
* to call the low level drivers shutdown method.
*/
uart_shutdown(tty, state);
}
}
tty_port_set_suspended(port, false);
}
mutex_unlock(&port->mutex);
return 0;
}
EXPORT_SYMBOL(uart_resume_port);
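/*
 * Usage sketch (an illustrative assumption, not part of this file): low
 * level drivers typically forward their dev_pm_ops callbacks to the two
 * helpers above. All "foo" identifiers below are hypothetical.
 *
 *	static int foo_serial_suspend(struct device *dev)
 *	{
 *		struct uart_port *port = dev_get_drvdata(dev);
 *
 *		return uart_suspend_port(&foo_uart_driver, port);
 *	}
 *
 *	static int foo_serial_resume(struct device *dev)
 *	{
 *		struct uart_port *port = dev_get_drvdata(dev);
 *
 *		return uart_resume_port(&foo_uart_driver, port);
 *	}
 */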
static inline void
uart_report_port(struct uart_driver *drv, struct uart_port *port)
{
char address[64];
switch (port->iotype) {
case UPIO_PORT:
snprintf(address, sizeof(address), "I/O 0x%lx", port->iobase);
break;
case UPIO_HUB6:
snprintf(address, sizeof(address),
"I/O 0x%lx offset 0x%x", port->iobase, port->hub6);
break;
case UPIO_MEM:
case UPIO_MEM16:
case UPIO_MEM32:
case UPIO_MEM32BE:
case UPIO_AU:
case UPIO_TSI:
snprintf(address, sizeof(address),
"MMIO 0x%llx", (unsigned long long)port->mapbase);
break;
default:
strscpy(address, "*unknown*", sizeof(address));
break;
}
pr_info("%s%s%s at %s (irq = %d, base_baud = %d) is a %s\n",
port->dev ? dev_name(port->dev) : "",
port->dev ? ": " : "",
port->name,
address, port->irq, port->uartclk / 16, uart_type(port));
/* The magic multiplier feature is a bit obscure, so report it too. */
if (port->flags & UPF_MAGIC_MULTIPLIER)
pr_info("%s%s%s extra baud rates supported: %d, %d",
port->dev ? dev_name(port->dev) : "",
port->dev ? ": " : "",
port->name,
port->uartclk / 8, port->uartclk / 4);
}
static void
uart_configure_port(struct uart_driver *drv, struct uart_state *state,
struct uart_port *port)
{
unsigned int flags;
/*
* If there isn't a port here, don't do anything further.
*/
if (!port->iobase && !port->mapbase && !port->membase)
return;
/*
* Now do the auto configuration stuff. Note that config_port
* is expected to claim the resources and map the port for us.
*/
flags = 0;
if (port->flags & UPF_AUTO_IRQ)
flags |= UART_CONFIG_IRQ;
if (port->flags & UPF_BOOT_AUTOCONF) {
if (!(port->flags & UPF_FIXED_TYPE)) {
port->type = PORT_UNKNOWN;
flags |= UART_CONFIG_TYPE;
}
port->ops->config_port(port, flags);
}
if (port->type != PORT_UNKNOWN) {
unsigned long flags;
uart_report_port(drv, port);
/* Power up port for set_mctrl() */
uart_change_pm(state, UART_PM_STATE_ON);
/*
* Ensure that the modem control lines are de-activated, but
* keep the DTR setting that is set in uart_set_options().
* We probably don't need a spinlock around this, but be safe.
*/
spin_lock_irqsave(&port->lock, flags);
port->mctrl &= TIOCM_DTR;
if (!(port->rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
else
uart_rs485_config(port);
spin_unlock_irqrestore(&port->lock, flags);
/*
* If this driver supports console, and it hasn't been
* successfully registered yet, try to re-register it.
* It may be that the port was not available.
*/
if (port->cons && !console_is_registered(port->cons))
register_console(port->cons);
/*
* Power down all ports by default, except the
* console if we have one.
*/
if (!uart_console(port))
uart_change_pm(state, UART_PM_STATE_OFF);
}
}
#ifdef CONFIG_CONSOLE_POLL
static int uart_poll_init(struct tty_driver *driver, int line, char *options)
{
struct uart_driver *drv = driver->driver_state;
struct uart_state *state = drv->state + line;
enum uart_pm_state pm_state;
struct tty_port *tport;
struct uart_port *port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
int ret = 0;
tport = &state->port;
mutex_lock(&tport->mutex);
port = uart_port_check(state);
if (!port || !(port->ops->poll_get_char && port->ops->poll_put_char)) {
ret = -1;
goto out;
}
pm_state = state->pm_state;
uart_change_pm(state, UART_PM_STATE_ON);
if (port->ops->poll_init) {
/*
* We don't mark the port initialized as we have only initialized
* the hardware; e.g. state->xmit is still uninitialized.
*/
if (!tty_port_initialized(tport))
ret = port->ops->poll_init(port);
}
if (!ret && options) {
uart_parse_options(options, &baud, &parity, &bits, &flow);
console_list_lock();
ret = uart_set_options(port, NULL, baud, parity, bits, flow);
console_list_unlock();
}
out:
if (ret)
uart_change_pm(state, pm_state);
mutex_unlock(&tport->mutex);
return ret;
}
static int uart_poll_get_char(struct tty_driver *driver, int line)
{
struct uart_driver *drv = driver->driver_state;
struct uart_state *state = drv->state + line;
struct uart_port *port;
int ret = -1;
port = uart_port_ref(state);
if (port) {
ret = port->ops->poll_get_char(port);
uart_port_deref(port);
}
return ret;
}
static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
{
struct uart_driver *drv = driver->driver_state;
struct uart_state *state = drv->state + line;
struct uart_port *port;
port = uart_port_ref(state);
if (!port)
return;
if (ch == '\n')
port->ops->poll_put_char(port, '\r');
port->ops->poll_put_char(port, ch);
uart_port_deref(port);
}
#endif
static const struct tty_operations uart_ops = {
.install = uart_install,
.open = uart_open,
.close = uart_close,
.write = uart_write,
.put_char = uart_put_char,
.flush_chars = uart_flush_chars,
.write_room = uart_write_room,
.chars_in_buffer= uart_chars_in_buffer,
.flush_buffer = uart_flush_buffer,
.ioctl = uart_ioctl,
.throttle = uart_throttle,
.unthrottle = uart_unthrottle,
.send_xchar = uart_send_xchar,
.set_termios = uart_set_termios,
.set_ldisc = uart_set_ldisc,
.stop = uart_stop,
.start = uart_start,
.hangup = uart_hangup,
.break_ctl = uart_break_ctl,
.wait_until_sent= uart_wait_until_sent,
#ifdef CONFIG_PROC_FS
.proc_show = uart_proc_show,
#endif
.tiocmget = uart_tiocmget,
.tiocmset = uart_tiocmset,
.set_serial = uart_set_info_user,
.get_serial = uart_get_info_user,
.get_icount = uart_get_icount,
#ifdef CONFIG_CONSOLE_POLL
.poll_init = uart_poll_init,
.poll_get_char = uart_poll_get_char,
.poll_put_char = uart_poll_put_char,
#endif
};
static const struct tty_port_operations uart_port_ops = {
.carrier_raised = uart_carrier_raised,
.dtr_rts = uart_dtr_rts,
.activate = uart_port_activate,
.shutdown = uart_tty_port_shutdown,
};
/**
* uart_register_driver - register a driver with the uart core layer
* @drv: low level driver structure
*
* Register a uart driver with the core driver. We in turn register with the
* tty layer, and initialise the core driver per-port state.
*
* We have a proc file in /proc/tty/driver which is named after the normal
* driver.
*
* @drv->port should be %NULL, and the per-port structures should be registered
* using uart_add_one_port() after this call has succeeded.
*
* Locking: none, Interrupts: enabled
*/
int uart_register_driver(struct uart_driver *drv)
{
struct tty_driver *normal;
int i, retval = -ENOMEM;
BUG_ON(drv->state);
/*
* Maybe we should be using a slab cache for this, especially if
* we have a large number of ports to handle.
*/
drv->state = kcalloc(drv->nr, sizeof(struct uart_state), GFP_KERNEL);
if (!drv->state)
goto out;
normal = tty_alloc_driver(drv->nr, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(normal)) {
retval = PTR_ERR(normal);
goto out_kfree;
}
drv->tty_driver = normal;
normal->driver_name = drv->driver_name;
normal->name = drv->dev_name;
normal->major = drv->major;
normal->minor_start = drv->minor;
normal->type = TTY_DRIVER_TYPE_SERIAL;
normal->subtype = SERIAL_TYPE_NORMAL;
normal->init_termios = tty_std_termios;
normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
normal->init_termios.c_ispeed = normal->init_termios.c_ospeed = 9600;
normal->driver_state = drv;
tty_set_operations(normal, &uart_ops);
/*
* Initialise the UART state(s).
*/
for (i = 0; i < drv->nr; i++) {
struct uart_state *state = drv->state + i;
struct tty_port *port = &state->port;
tty_port_init(port);
port->ops = &uart_port_ops;
}
retval = tty_register_driver(normal);
if (retval >= 0)
return retval;
for (i = 0; i < drv->nr; i++)
tty_port_destroy(&drv->state[i].port);
tty_driver_kref_put(normal);
out_kfree:
kfree(drv->state);
out:
return retval;
}
EXPORT_SYMBOL(uart_register_driver);
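/*
 * Usage sketch (an illustrative assumption, not an in-tree driver): a low
 * level driver declares one uart_driver, registers it once at module init
 * and only then adds its ports. The "foo" names are hypothetical.
 *
 *	static struct uart_driver foo_uart_driver = {
 *		.owner		= THIS_MODULE,
 *		.driver_name	= "foo_uart",
 *		.dev_name	= "ttyFOO",
 *		.nr		= FOO_MAXPORTS,
 *	};
 *
 *	ret = uart_register_driver(&foo_uart_driver);
 *	if (ret)
 *		return ret;
 */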
/**
* uart_unregister_driver - remove a driver from the uart core layer
* @drv: low level driver structure
*
* Remove all references to a driver from the core driver. The low level
* driver must have removed all its ports via the uart_remove_one_port() if it
* registered them with uart_add_one_port(). (I.e. @drv->port is %NULL.)
*
* Locking: none, Interrupts: enabled
*/
void uart_unregister_driver(struct uart_driver *drv)
{
struct tty_driver *p = drv->tty_driver;
unsigned int i;
tty_unregister_driver(p);
tty_driver_kref_put(p);
for (i = 0; i < drv->nr; i++)
tty_port_destroy(&drv->state[i].port);
kfree(drv->state);
drv->state = NULL;
drv->tty_driver = NULL;
}
EXPORT_SYMBOL(uart_unregister_driver);
struct tty_driver *uart_console_device(struct console *co, int *index)
{
struct uart_driver *p = co->data;
*index = co->index;
return p->tty_driver;
}
EXPORT_SYMBOL_GPL(uart_console_device);
static ssize_t uartclk_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "%d\n", tmp.baud_base * 16);
}
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "%d\n", tmp.type);
}
static ssize_t line_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "%d\n", tmp.line);
}
static ssize_t port_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
unsigned long ioaddr;
uart_get_info(port, &tmp);
ioaddr = tmp.port;
if (HIGH_BITS_OFFSET)
ioaddr |= (unsigned long)tmp.port_high << HIGH_BITS_OFFSET;
return sprintf(buf, "0x%lX\n", ioaddr);
}
static ssize_t irq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "%d\n", tmp.irq);
}
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "0x%X\n", tmp.flags);
}
static ssize_t xmit_fifo_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "%d\n", tmp.xmit_fifo_size);
}
static ssize_t close_delay_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "%d\n", tmp.close_delay);
}
static ssize_t closing_wait_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "%d\n", tmp.closing_wait);
}
static ssize_t custom_divisor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "%d\n", tmp.custom_divisor);
}
static ssize_t io_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "%d\n", tmp.io_type);
}
static ssize_t iomem_base_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "0x%lX\n", (unsigned long)tmp.iomem_base);
}
static ssize_t iomem_reg_shift_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
return sprintf(buf, "%d\n", tmp.iomem_reg_shift);
}
static ssize_t console_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tty_port *port = dev_get_drvdata(dev);
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
bool console = false;
mutex_lock(&port->mutex);
uport = uart_port_check(state);
if (uport)
console = uart_console_registered(uport);
mutex_unlock(&port->mutex);
return sprintf(buf, "%c\n", console ? 'Y' : 'N');
}
static ssize_t console_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct tty_port *port = dev_get_drvdata(dev);
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
bool oldconsole, newconsole;
int ret;
ret = kstrtobool(buf, &newconsole);
if (ret)
return ret;
mutex_lock(&port->mutex);
uport = uart_port_check(state);
if (uport) {
oldconsole = uart_console_registered(uport);
if (oldconsole && !newconsole) {
ret = unregister_console(uport->cons);
} else if (!oldconsole && newconsole) {
if (uart_console(uport)) {
uport->console_reinit = 1;
register_console(uport->cons);
} else {
ret = -ENOENT;
}
}
} else {
ret = -ENXIO;
}
mutex_unlock(&port->mutex);
return ret < 0 ? ret : count;
}
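/*
 * Usage sketch: with console_show()/console_store() wired up via the
 * attributes below, the serial console can be toggled from userspace at
 * runtime, e.g. (assuming a serial console on ttyS0):
 *
 *	echo N > /sys/class/tty/ttyS0/console
 *	echo Y > /sys/class/tty/ttyS0/console
 */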
static DEVICE_ATTR_RO(uartclk);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(line);
static DEVICE_ATTR_RO(port);
static DEVICE_ATTR_RO(irq);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(xmit_fifo_size);
static DEVICE_ATTR_RO(close_delay);
static DEVICE_ATTR_RO(closing_wait);
static DEVICE_ATTR_RO(custom_divisor);
static DEVICE_ATTR_RO(io_type);
static DEVICE_ATTR_RO(iomem_base);
static DEVICE_ATTR_RO(iomem_reg_shift);
static DEVICE_ATTR_RW(console);
static struct attribute *tty_dev_attrs[] = {
&dev_attr_uartclk.attr,
&dev_attr_type.attr,
&dev_attr_line.attr,
&dev_attr_port.attr,
&dev_attr_irq.attr,
&dev_attr_flags.attr,
&dev_attr_xmit_fifo_size.attr,
&dev_attr_close_delay.attr,
&dev_attr_closing_wait.attr,
&dev_attr_custom_divisor.attr,
&dev_attr_io_type.attr,
&dev_attr_iomem_base.attr,
&dev_attr_iomem_reg_shift.attr,
&dev_attr_console.attr,
NULL
};
static const struct attribute_group tty_dev_attr_group = {
.attrs = tty_dev_attrs,
};
/**
* serial_core_add_one_port - attach a driver-defined port structure
* @drv: pointer to the uart low level driver structure for this port
* @uport: uart port structure to use for this port.
*
* Context: task context, might sleep
*
* This allows the driver @drv to register its own uart_port structure with the
* core driver. The main purpose is to allow the low level uart drivers to
* expand uart_port, rather than having yet more levels of structures.
* Caller must hold port_mutex.
*/
static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state;
struct tty_port *port;
int ret = 0;
struct device *tty_dev;
int num_groups;
if (uport->line >= drv->nr)
return -EINVAL;
state = drv->state + uport->line;
port = &state->port;
mutex_lock(&port->mutex);
if (state->uart_port) {
ret = -EINVAL;
goto out;
}
/* Link the port to the driver state table and vice versa */
atomic_set(&state->refcount, 1);
init_waitqueue_head(&state->remove_wait);
state->uart_port = uport;
uport->state = state;
state->pm_state = UART_PM_STATE_UNDEFINED;
uport->cons = drv->cons;
uport->minor = drv->tty_driver->minor_start + uport->line;
uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name,
drv->tty_driver->name_base + uport->line);
if (!uport->name) {
ret = -ENOMEM;
goto out;
}
/*
* If this port is in use as a console then the spinlock is already
* initialised.
*/
if (!uart_console_registered(uport))
uart_port_spin_lock_init(uport);
if (uport->cons && uport->dev)
of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
tty_port_link_device(port, drv->tty_driver, uport->line);
uart_configure_port(drv, state, uport);
port->console = uart_console(uport);
num_groups = 2;
if (uport->attr_group)
num_groups++;
uport->tty_groups = kcalloc(num_groups, sizeof(*uport->tty_groups),
GFP_KERNEL);
if (!uport->tty_groups) {
ret = -ENOMEM;
goto out;
}
uport->tty_groups[0] = &tty_dev_attr_group;
if (uport->attr_group)
uport->tty_groups[1] = uport->attr_group;
/*
* Register the port whether it's detected or not. This allows
* setserial to be used to alter this port's parameters.
*/
tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver,
uport->line, uport->dev, port, uport->tty_groups);
if (!IS_ERR(tty_dev)) {
device_set_wakeup_capable(tty_dev, 1);
} else {
dev_err(uport->dev, "Cannot register tty device on line %d\n",
uport->line);
}
out:
mutex_unlock(&port->mutex);
return ret;
}
/**
* serial_core_remove_one_port - detach a driver defined port structure
* @drv: pointer to the uart low level driver structure for this port
* @uport: uart port structure for this port
*
* Context: task context, might sleep
*
* This unhooks (and hangs up) the specified port structure from the core
* driver. No further calls will be made to the low-level code for this port.
* Caller must hold port_mutex.
*/
static void serial_core_remove_one_port(struct uart_driver *drv,
struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
struct uart_port *uart_port;
struct tty_struct *tty;
mutex_lock(&port->mutex);
uart_port = uart_port_check(state);
if (uart_port != uport)
dev_alert(uport->dev, "Removing wrong port: %p != %p\n",
uart_port, uport);
if (!uart_port) {
mutex_unlock(&port->mutex);
return;
}
mutex_unlock(&port->mutex);
/*
* Remove the devices from the tty layer
*/
tty_port_unregister_device(port, drv->tty_driver, uport->line);
tty = tty_port_tty_get(port);
if (tty) {
tty_vhangup(port->tty);
tty_kref_put(tty);
}
/*
* If the port is used as a console, unregister it
*/
if (uart_console(uport))
unregister_console(uport->cons);
/*
* Free the port IO and memory resources, if any.
*/
if (uport->type != PORT_UNKNOWN && uport->ops->release_port)
uport->ops->release_port(uport);
kfree(uport->tty_groups);
kfree(uport->name);
/*
* Indicate that there isn't a port here anymore.
*/
uport->type = PORT_UNKNOWN;
uport->port_dev = NULL;
mutex_lock(&port->mutex);
WARN_ON(atomic_dec_return(&state->refcount) < 0);
wait_event(state->remove_wait, !atomic_read(&state->refcount));
state->uart_port = NULL;
mutex_unlock(&port->mutex);
}
/**
* uart_match_port - are the two ports equivalent?
* @port1: first port
* @port2: second port
*
* This utility function can be used to determine whether two uart_port
* structures describe the same port.
*/
bool uart_match_port(const struct uart_port *port1,
const struct uart_port *port2)
{
if (port1->iotype != port2->iotype)
return false;
switch (port1->iotype) {
case UPIO_PORT:
return port1->iobase == port2->iobase;
case UPIO_HUB6:
return port1->iobase == port2->iobase &&
port1->hub6 == port2->hub6;
case UPIO_MEM:
case UPIO_MEM16:
case UPIO_MEM32:
case UPIO_MEM32BE:
case UPIO_AU:
case UPIO_TSI:
return port1->mapbase == port2->mapbase;
}
return false;
}
EXPORT_SYMBOL(uart_match_port);
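/*
 * Usage sketch (hypothetical names, for illustration only): drivers that
 * keep a static table of ports can use uart_match_port() to find an
 * already-registered entry describing the same hardware.
 *
 *	for (i = 0; i < FOO_MAXPORTS; i++)
 *		if (uart_match_port(&foo_ports[i].port, new_port))
 *			return i;
 */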
static struct serial_ctrl_device *
serial_core_get_ctrl_dev(struct serial_port_device *port_dev)
{
struct device *dev = &port_dev->dev;
return to_serial_base_ctrl_device(dev->parent);
}
/*
* Find a registered serial core controller device if one exists. Returns
* the first device matching the ctrl_id. Caller must hold port_mutex.
*/
static struct serial_ctrl_device *serial_core_ctrl_find(struct uart_driver *drv,
struct device *phys_dev,
int ctrl_id)
{
struct uart_state *state;
int i;
lockdep_assert_held(&port_mutex);
for (i = 0; i < drv->nr; i++) {
state = drv->state + i;
if (!state->uart_port || !state->uart_port->port_dev)
continue;
if (state->uart_port->dev == phys_dev &&
state->uart_port->ctrl_id == ctrl_id)
return serial_core_get_ctrl_dev(state->uart_port->port_dev);
}
return NULL;
}
static struct serial_ctrl_device *serial_core_ctrl_device_add(struct uart_port *port)
{
return serial_base_ctrl_add(port, port->dev);
}
static int serial_core_port_device_add(struct serial_ctrl_device *ctrl_dev,
struct uart_port *port)
{
struct serial_port_device *port_dev;
port_dev = serial_base_port_add(port, ctrl_dev);
if (IS_ERR(port_dev))
return PTR_ERR(port_dev);
port->port_dev = port_dev;
return 0;
}
/*
* Initialize a serial core port device, and a controller device if needed.
*/
int serial_core_register_port(struct uart_driver *drv, struct uart_port *port)
{
struct serial_ctrl_device *ctrl_dev, *new_ctrl_dev = NULL;
int ret;
mutex_lock(&port_mutex);
/*
* Prevent serial_port_runtime_resume() from trying to use the port
* until serial_core_add_one_port() has completed
*/
port->flags |= UPF_DEAD;
/* Initialize a serial core controller device if needed */
ctrl_dev = serial_core_ctrl_find(drv, port->dev, port->ctrl_id);
if (!ctrl_dev) {
new_ctrl_dev = serial_core_ctrl_device_add(port);
if (IS_ERR(new_ctrl_dev)) {
ret = PTR_ERR(new_ctrl_dev);
goto err_unlock;
}
ctrl_dev = new_ctrl_dev;
}
/*
* Initialize a serial core port device. Tag the port dead to prevent
* serial_port_runtime_resume() trying to do anything until port has
* been registered. It gets cleared by serial_core_add_one_port().
*/
ret = serial_core_port_device_add(ctrl_dev, port);
if (ret)
goto err_unregister_ctrl_dev;
ret = serial_core_add_one_port(drv, port);
if (ret)
goto err_unregister_port_dev;
port->flags &= ~UPF_DEAD;
mutex_unlock(&port_mutex);
return 0;
err_unregister_port_dev:
serial_base_port_device_remove(port->port_dev);
err_unregister_ctrl_dev:
serial_base_ctrl_device_remove(new_ctrl_dev);
err_unlock:
mutex_unlock(&port_mutex);
return ret;
}
/*
* Removes a serial core port device, and the related serial core controller
* device if the last instance.
*/
void serial_core_unregister_port(struct uart_driver *drv, struct uart_port *port)
{
struct device *phys_dev = port->dev;
struct serial_port_device *port_dev = port->port_dev;
struct serial_ctrl_device *ctrl_dev = serial_core_get_ctrl_dev(port_dev);
int ctrl_id = port->ctrl_id;
mutex_lock(&port_mutex);
port->flags |= UPF_DEAD;
serial_core_remove_one_port(drv, port);
/* Note that struct uart_port *port is no longer valid at this point */
serial_base_port_device_remove(port_dev);
/* Drop the serial core controller device if no ports are using it */
if (!serial_core_ctrl_find(drv, phys_dev, ctrl_id))
serial_base_ctrl_device_remove(ctrl_dev);
mutex_unlock(&port_mutex);
}
/**
* uart_handle_dcd_change - handle a change of carrier detect state
* @uport: uart_port structure for the open port
* @active: new carrier detect status
*
* Caller must hold uport->lock.
*/
void uart_handle_dcd_change(struct uart_port *uport, bool active)
{
struct tty_port *port = &uport->state->port;
struct tty_struct *tty = port->tty;
struct tty_ldisc *ld;
lockdep_assert_held_once(&uport->lock);
if (tty) {
ld = tty_ldisc_ref(tty);
if (ld) {
if (ld->ops->dcd_change)
ld->ops->dcd_change(tty, active);
tty_ldisc_deref(ld);
}
}
uport->icount.dcd++;
if (uart_dcd_enabled(uport)) {
if (active)
wake_up_interruptible(&port->open_wait);
else if (tty)
tty_hangup(tty);
}
}
EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
/**
* uart_handle_cts_change - handle a change of clear-to-send state
* @uport: uart_port structure for the open port
* @active: new clear-to-send status
*
* Caller must hold uport->lock.
*/
void uart_handle_cts_change(struct uart_port *uport, bool active)
{
lockdep_assert_held_once(&uport->lock);
uport->icount.cts++;
if (uart_softcts_mode(uport)) {
if (uport->hw_stopped) {
if (active) {
uport->hw_stopped = false;
uport->ops->start_tx(uport);
uart_write_wakeup(uport);
}
} else {
if (!active) {
uport->hw_stopped = true;
uport->ops->stop_tx(uport);
}
}
}
}
EXPORT_SYMBOL_GPL(uart_handle_cts_change);
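/*
 * Usage sketch (the FOO_STAT_* bits are hypothetical): drivers call the
 * two handlers above from their interrupt handler while holding
 * port->lock:
 *
 *	if (status & FOO_STAT_DCD_CHANGED)
 *		uart_handle_dcd_change(port, status & FOO_STAT_DCD);
 *	if (status & FOO_STAT_CTS_CHANGED)
 *		uart_handle_cts_change(port, status & FOO_STAT_CTS);
 */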
/**
* uart_insert_char - push a char to the uart layer
*
* The caller is responsible for calling tty_flip_buffer_push() when
* done with the insertion.
*
* @port: corresponding port
* @status: state of the serial port RX buffer (LSR for 8250)
* @overrun: mask of overrun bits in @status
* @ch: character to push
* @flag: flag for the character (see TTY_NORMAL and friends)
*/
void uart_insert_char(struct uart_port *port, unsigned int status,
unsigned int overrun, u8 ch, u8 flag)
{
struct tty_port *tport = &port->state->port;
if ((status & port->ignore_status_mask & ~overrun) == 0)
if (tty_insert_flip_char(tport, ch, flag) == 0)
++port->icount.buf_overrun;
/*
* Overrun is special. Since it's reported immediately,
* it doesn't affect the current character.
*/
if (status & ~port->ignore_status_mask & overrun)
if (tty_insert_flip_char(tport, 0, TTY_OVERRUN) == 0)
++port->icount.buf_overrun;
}
EXPORT_SYMBOL_GPL(uart_insert_char);
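/*
 * Usage sketch (the foo_* helpers and FOO_OVERRUN are hypothetical): a
 * typical RX interrupt path pushes each received character through
 * uart_insert_char() and flushes the flip buffer once at the end:
 *
 *	while (foo_rx_ready(port)) {
 *		u8 ch = foo_read_char(port);
 *
 *		port->icount.rx++;
 *		uart_insert_char(port, status, FOO_OVERRUN, ch, TTY_NORMAL);
 *	}
 *	tty_flip_buffer_push(&port->state->port);
 */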
#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
static const u8 sysrq_toggle_seq[] = CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE;
static void uart_sysrq_on(struct work_struct *w)
{
int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq);
sysrq_toggle_support(1);
pr_info("SysRq is enabled by magic sequence '%*pE' on serial\n",
sysrq_toggle_seq_len, sysrq_toggle_seq);
}
static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on);
/**
* uart_try_toggle_sysrq - Enables SysRq from serial line
* @port: uart_port structure where char(s) after BREAK met
* @ch: new character in the sequence after received BREAK
*
* Enables magic SysRq when the required sequence is met on port
* (see CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE).
*
* Returns: %false if @ch is out of enabling sequence and should be
* handled some other way, %true if @ch was consumed.
*/
bool uart_try_toggle_sysrq(struct uart_port *port, u8 ch)
{
int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq);
if (!sysrq_toggle_seq_len)
return false;
BUILD_BUG_ON(ARRAY_SIZE(sysrq_toggle_seq) >= U8_MAX);
if (sysrq_toggle_seq[port->sysrq_seq] != ch) {
port->sysrq_seq = 0;
return false;
}
if (++port->sysrq_seq < sysrq_toggle_seq_len) {
port->sysrq = jiffies + SYSRQ_TIMEOUT;
return true;
}
schedule_work(&sysrq_enable_work);
port->sysrq = 0;
return true;
}
EXPORT_SYMBOL_GPL(uart_try_toggle_sysrq);
#endif
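/*
 * Example: with CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="xyz" (an illustrative
 * value), sending a BREAK followed by the characters 'x', 'y' and 'z',
 * each within SYSRQ_TIMEOUT of the previous one, enables SysRq support
 * via the work item above.
 */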
/**
* uart_get_rs485_mode() - retrieve rs485 properties for given uart
* @port: uart device's target port
*
* This function implements the device tree binding described in
* Documentation/devicetree/bindings/serial/rs485.txt.
*/
int uart_get_rs485_mode(struct uart_port *port)
{
struct serial_rs485 *rs485conf = &port->rs485;
struct device *dev = port->dev;
u32 rs485_delay[2];
int ret;
int rx_during_tx_gpio_flag;
ret = device_property_read_u32_array(dev, "rs485-rts-delay",
rs485_delay, 2);
if (!ret) {
rs485conf->delay_rts_before_send = rs485_delay[0];
rs485conf->delay_rts_after_send = rs485_delay[1];
} else {
rs485conf->delay_rts_before_send = 0;
rs485conf->delay_rts_after_send = 0;
}
uart_sanitize_serial_rs485_delays(port, rs485conf);
/*
* Clear full-duplex and enabled flags, set RTS polarity to active high
* to get to a defined state with the following properties:
*/
rs485conf->flags &= ~(SER_RS485_RX_DURING_TX | SER_RS485_ENABLED |
SER_RS485_TERMINATE_BUS |
SER_RS485_RTS_AFTER_SEND);
rs485conf->flags |= SER_RS485_RTS_ON_SEND;
if (device_property_read_bool(dev, "rs485-rx-during-tx"))
rs485conf->flags |= SER_RS485_RX_DURING_TX;
if (device_property_read_bool(dev, "linux,rs485-enabled-at-boot-time"))
rs485conf->flags |= SER_RS485_ENABLED;
if (device_property_read_bool(dev, "rs485-rts-active-low")) {
rs485conf->flags &= ~SER_RS485_RTS_ON_SEND;
rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
}
/*
* Disabling termination by default is the safe choice: if many bus
* participants enabled it, no communication would be possible at all.
* Short cables work fine without termination, and users may enable it
* for longer cables.
*/
port->rs485_term_gpio = devm_gpiod_get_optional(dev, "rs485-term",
GPIOD_OUT_LOW);
if (IS_ERR(port->rs485_term_gpio)) {
ret = PTR_ERR(port->rs485_term_gpio);
port->rs485_term_gpio = NULL;
return dev_err_probe(dev, ret, "Cannot get rs485-term-gpios\n");
}
if (port->rs485_term_gpio)
port->rs485_supported.flags |= SER_RS485_TERMINATE_BUS;
rx_during_tx_gpio_flag = (rs485conf->flags & SER_RS485_RX_DURING_TX) ?
GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
port->rs485_rx_during_tx_gpio = devm_gpiod_get_optional(dev,
"rs485-rx-during-tx",
rx_during_tx_gpio_flag);
if (IS_ERR(port->rs485_rx_during_tx_gpio)) {
ret = PTR_ERR(port->rs485_rx_during_tx_gpio);
port->rs485_rx_during_tx_gpio = NULL;
return dev_err_probe(dev, ret, "Cannot get rs485-rx-during-tx-gpios\n");
}
return 0;
}
EXPORT_SYMBOL_GPL(uart_get_rs485_mode);
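/*
 * Example device tree fragment consumed by uart_get_rs485_mode(); the
 * node label "serial0" is illustrative, the properties come from the
 * rs485 binding referenced above:
 *
 *	&serial0 {
 *		linux,rs485-enabled-at-boot-time;
 *		rs485-rts-delay = <0 20>;
 *		rs485-rts-active-low;
 *	};
 */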
/* Compile-time assertions for serial_rs485 layout */
static_assert(offsetof(struct serial_rs485, padding) ==
(offsetof(struct serial_rs485, delay_rts_after_send) + sizeof(__u32)));
static_assert(offsetof(struct serial_rs485, padding1) ==
offsetof(struct serial_rs485, padding[1]));
static_assert((offsetof(struct serial_rs485, padding[4]) + sizeof(__u32)) ==
sizeof(struct serial_rs485));
MODULE_DESCRIPTION("Serial driver core");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/serial_core.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* altera_uart.c -- Altera UART driver
*
* Based on mcf.c -- Freescale ColdFire UART driver
*
* (C) Copyright 2003-2007, Greg Ungerer <[email protected]>
* (C) Copyright 2008, Thomas Chou <[email protected]>
* (C) Copyright 2010, Tobias Klauser <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/altera_uart.h>
#define DRV_NAME "altera_uart"
#define SERIAL_ALTERA_MAJOR 204
#define SERIAL_ALTERA_MINOR 213
/*
* Altera UART register definitions according to the Nios UART datasheet:
* http://www.altera.com/literature/ds/ds_nios_uart.pdf
*/
#define ALTERA_UART_SIZE 32
#define ALTERA_UART_RXDATA_REG 0
#define ALTERA_UART_TXDATA_REG 4
#define ALTERA_UART_STATUS_REG 8
#define ALTERA_UART_CONTROL_REG 12
#define ALTERA_UART_DIVISOR_REG 16
#define ALTERA_UART_EOP_REG 20
#define ALTERA_UART_STATUS_PE_MSK 0x0001 /* parity error */
#define ALTERA_UART_STATUS_FE_MSK 0x0002 /* framing error */
#define ALTERA_UART_STATUS_BRK_MSK 0x0004 /* break */
#define ALTERA_UART_STATUS_ROE_MSK 0x0008 /* RX overrun error */
#define ALTERA_UART_STATUS_TOE_MSK 0x0010 /* TX overrun error */
#define ALTERA_UART_STATUS_TMT_MSK 0x0020 /* TX shift register state */
#define ALTERA_UART_STATUS_TRDY_MSK 0x0040 /* TX ready */
#define ALTERA_UART_STATUS_RRDY_MSK 0x0080 /* RX ready */
#define ALTERA_UART_STATUS_E_MSK 0x0100 /* exception condition */
#define ALTERA_UART_STATUS_DCTS_MSK 0x0400 /* CTS logic-level change */
#define ALTERA_UART_STATUS_CTS_MSK 0x0800 /* CTS logic state */
#define ALTERA_UART_STATUS_EOP_MSK 0x1000 /* EOP written/read */
/* Enable interrupt on... */
#define ALTERA_UART_CONTROL_PE_MSK 0x0001 /* ...parity error */
#define ALTERA_UART_CONTROL_FE_MSK 0x0002 /* ...framing error */
#define ALTERA_UART_CONTROL_BRK_MSK 0x0004 /* ...break */
#define ALTERA_UART_CONTROL_ROE_MSK 0x0008 /* ...RX overrun */
#define ALTERA_UART_CONTROL_TOE_MSK 0x0010 /* ...TX overrun */
#define ALTERA_UART_CONTROL_TMT_MSK 0x0020 /* ...TX shift register empty */
#define ALTERA_UART_CONTROL_TRDY_MSK 0x0040 /* ...TX ready */
#define ALTERA_UART_CONTROL_RRDY_MSK 0x0080 /* ...RX ready */
#define ALTERA_UART_CONTROL_E_MSK 0x0100 /* ...exception*/
#define ALTERA_UART_CONTROL_TRBK_MSK 0x0200 /* TX break */
#define ALTERA_UART_CONTROL_DCTS_MSK 0x0400 /* Interrupt on CTS change */
#define ALTERA_UART_CONTROL_RTS_MSK 0x0800 /* RTS signal */
#define ALTERA_UART_CONTROL_EOP_MSK 0x1000 /* Interrupt on EOP */
/*
* Local per-uart structure.
*/
struct altera_uart {
struct uart_port port;
struct timer_list tmr;
unsigned int sigs; /* Local copy of line sigs */
unsigned short imr; /* Local IMR mirror */
};
static u32 altera_uart_readl(struct uart_port *port, int reg)
{
return readl(port->membase + (reg << port->regshift));
}
static void altera_uart_writel(struct uart_port *port, u32 dat, int reg)
{
writel(dat, port->membase + (reg << port->regshift));
}
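/*
 * Worked example: with port->regshift = 2 (taken from the platform data
 * bus_shift in altera_uart_probe() below), ALTERA_UART_STATUS_REG at
 * nominal offset 8 is accessed at byte offset 8 << 2 = 32.
 */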
static unsigned int altera_uart_tx_empty(struct uart_port *port)
{
return (altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
ALTERA_UART_STATUS_TMT_MSK) ? TIOCSER_TEMT : 0;
}
static unsigned int altera_uart_get_mctrl(struct uart_port *port)
{
struct altera_uart *pp = container_of(port, struct altera_uart, port);
unsigned int sigs;
sigs = (altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
ALTERA_UART_STATUS_CTS_MSK) ? TIOCM_CTS : 0;
sigs |= (pp->sigs & TIOCM_RTS);
return sigs;
}
static void altera_uart_update_ctrl_reg(struct altera_uart *pp)
{
unsigned short imr = pp->imr;
/*
* If the device doesn't have an irq, ensure that the irq bits are
* masked out to keep the irq line inactive.
*/
if (!pp->port.irq)
imr &= ALTERA_UART_CONTROL_TRBK_MSK | ALTERA_UART_CONTROL_RTS_MSK;
altera_uart_writel(&pp->port, imr, ALTERA_UART_CONTROL_REG);
}
static void altera_uart_set_mctrl(struct uart_port *port, unsigned int sigs)
{
struct altera_uart *pp = container_of(port, struct altera_uart, port);
pp->sigs = sigs;
if (sigs & TIOCM_RTS)
pp->imr |= ALTERA_UART_CONTROL_RTS_MSK;
else
pp->imr &= ~ALTERA_UART_CONTROL_RTS_MSK;
altera_uart_update_ctrl_reg(pp);
}
static void altera_uart_start_tx(struct uart_port *port)
{
struct altera_uart *pp = container_of(port, struct altera_uart, port);
pp->imr |= ALTERA_UART_CONTROL_TRDY_MSK;
altera_uart_update_ctrl_reg(pp);
}
static void altera_uart_stop_tx(struct uart_port *port)
{
struct altera_uart *pp = container_of(port, struct altera_uart, port);
pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
altera_uart_update_ctrl_reg(pp);
}
static void altera_uart_stop_rx(struct uart_port *port)
{
struct altera_uart *pp = container_of(port, struct altera_uart, port);
pp->imr &= ~ALTERA_UART_CONTROL_RRDY_MSK;
altera_uart_update_ctrl_reg(pp);
}
static void altera_uart_break_ctl(struct uart_port *port, int break_state)
{
struct altera_uart *pp = container_of(port, struct altera_uart, port);
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (break_state == -1)
pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
else
pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
altera_uart_update_ctrl_reg(pp);
spin_unlock_irqrestore(&port->lock, flags);
}
static void altera_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, baudclk;
baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
baudclk = port->uartclk / baud;
if (old)
tty_termios_copy_hw(termios, old);
tty_termios_encode_baud_rate(termios, baud, baud);
spin_lock_irqsave(&port->lock, flags);
uart_update_timeout(port, termios->c_cflag, baud);
altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
spin_unlock_irqrestore(&port->lock, flags);
/*
* FIXME: port->read_status_mask and port->ignore_status_mask
* need to be initialized based on termios settings for
* INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
*/
}
static void altera_uart_rx_chars(struct uart_port *port)
{
unsigned short status;
u8 ch, flag;
while ((status = altera_uart_readl(port, ALTERA_UART_STATUS_REG)) &
ALTERA_UART_STATUS_RRDY_MSK) {
ch = altera_uart_readl(port, ALTERA_UART_RXDATA_REG);
flag = TTY_NORMAL;
port->icount.rx++;
if (status & ALTERA_UART_STATUS_E_MSK) {
altera_uart_writel(port, status,
ALTERA_UART_STATUS_REG);
if (status & ALTERA_UART_STATUS_BRK_MSK) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
} else if (status & ALTERA_UART_STATUS_PE_MSK) {
port->icount.parity++;
} else if (status & ALTERA_UART_STATUS_ROE_MSK) {
port->icount.overrun++;
} else if (status & ALTERA_UART_STATUS_FE_MSK) {
port->icount.frame++;
}
status &= port->read_status_mask;
if (status & ALTERA_UART_STATUS_BRK_MSK)
flag = TTY_BREAK;
else if (status & ALTERA_UART_STATUS_PE_MSK)
flag = TTY_PARITY;
else if (status & ALTERA_UART_STATUS_FE_MSK)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, ch))
continue;
uart_insert_char(port, status, ALTERA_UART_STATUS_ROE_MSK, ch,
flag);
}
tty_flip_buffer_push(&port->state->port);
}
static void altera_uart_tx_chars(struct uart_port *port)
{
u8 ch;
uart_port_tx(port, ch,
altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
ALTERA_UART_STATUS_TRDY_MSK,
altera_uart_writel(port, ch, ALTERA_UART_TXDATA_REG));
}
static irqreturn_t altera_uart_interrupt(int irq, void *data)
{
struct uart_port *port = data;
struct altera_uart *pp = container_of(port, struct altera_uart, port);
unsigned long flags;
unsigned int isr;
isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
spin_lock_irqsave(&port->lock, flags);
if (isr & ALTERA_UART_STATUS_RRDY_MSK)
altera_uart_rx_chars(port);
if (isr & ALTERA_UART_STATUS_TRDY_MSK)
altera_uart_tx_chars(port);
spin_unlock_irqrestore(&port->lock, flags);
return IRQ_RETVAL(isr);
}
static void altera_uart_timer(struct timer_list *t)
{
struct altera_uart *pp = from_timer(pp, t, tmr);
struct uart_port *port = &pp->port;
altera_uart_interrupt(0, port);
mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port));
}
static void altera_uart_config_port(struct uart_port *port, int flags)
{
port->type = PORT_ALTERA_UART;
/* Clear mask, so no surprise interrupts. */
altera_uart_writel(port, 0, ALTERA_UART_CONTROL_REG);
/* Clear status register */
altera_uart_writel(port, 0, ALTERA_UART_STATUS_REG);
}
static int altera_uart_startup(struct uart_port *port)
{
struct altera_uart *pp = container_of(port, struct altera_uart, port);
unsigned long flags;
if (!port->irq) {
timer_setup(&pp->tmr, altera_uart_timer, 0);
mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port));
} else {
int ret;
ret = request_irq(port->irq, altera_uart_interrupt, 0,
DRV_NAME, port);
if (ret) {
pr_err(DRV_NAME ": unable to attach Altera UART %d "
"interrupt vector=%d\n", port->line, port->irq);
return ret;
}
}
spin_lock_irqsave(&port->lock, flags);
/* Enable RX interrupts now */
pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
altera_uart_update_ctrl_reg(pp);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
static void altera_uart_shutdown(struct uart_port *port)
{
struct altera_uart *pp = container_of(port, struct altera_uart, port);
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* Disable all interrupts now */
pp->imr = 0;
altera_uart_update_ctrl_reg(pp);
spin_unlock_irqrestore(&port->lock, flags);
if (port->irq)
free_irq(port->irq, port);
else
del_timer_sync(&pp->tmr);
}
static const char *altera_uart_type(struct uart_port *port)
{
return (port->type == PORT_ALTERA_UART) ? "Altera UART" : NULL;
}
static int altera_uart_request_port(struct uart_port *port)
{
/* UARTs always present */
return 0;
}
static void altera_uart_release_port(struct uart_port *port)
{
/* Nothing to release... */
}
static int altera_uart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_ALTERA_UART))
return -EINVAL;
return 0;
}
#ifdef CONFIG_CONSOLE_POLL
static int altera_uart_poll_get_char(struct uart_port *port)
{
while (!(altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
ALTERA_UART_STATUS_RRDY_MSK))
cpu_relax();
return altera_uart_readl(port, ALTERA_UART_RXDATA_REG);
}
static void altera_uart_poll_put_char(struct uart_port *port, unsigned char c)
{
while (!(altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
ALTERA_UART_STATUS_TRDY_MSK))
cpu_relax();
altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG);
}
#endif
/*
* Define the basic serial functions we support.
*/
static const struct uart_ops altera_uart_ops = {
.tx_empty = altera_uart_tx_empty,
.get_mctrl = altera_uart_get_mctrl,
.set_mctrl = altera_uart_set_mctrl,
.start_tx = altera_uart_start_tx,
.stop_tx = altera_uart_stop_tx,
.stop_rx = altera_uart_stop_rx,
.break_ctl = altera_uart_break_ctl,
.startup = altera_uart_startup,
.shutdown = altera_uart_shutdown,
.set_termios = altera_uart_set_termios,
.type = altera_uart_type,
.request_port = altera_uart_request_port,
.release_port = altera_uart_release_port,
.config_port = altera_uart_config_port,
.verify_port = altera_uart_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = altera_uart_poll_get_char,
.poll_put_char = altera_uart_poll_put_char,
#endif
};
static struct altera_uart altera_uart_ports[CONFIG_SERIAL_ALTERA_UART_MAXPORTS];
#if defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE)
static void altera_uart_console_putc(struct uart_port *port, unsigned char c)
{
while (!(altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
ALTERA_UART_STATUS_TRDY_MSK))
cpu_relax();
altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG);
}
static void altera_uart_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port = &(altera_uart_ports + co->index)->port;
uart_console_write(port, s, count, altera_uart_console_putc);
}
static int __init altera_uart_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = CONFIG_SERIAL_ALTERA_UART_BAUDRATE;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS)
return -EINVAL;
port = &altera_uart_ports[co->index].port;
if (!port->membase)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver altera_uart_driver;
static struct console altera_uart_console = {
.name = "ttyAL",
.write = altera_uart_console_write,
.device = uart_console_device,
.setup = altera_uart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &altera_uart_driver,
};
static int __init altera_uart_console_init(void)
{
register_console(&altera_uart_console);
return 0;
}
console_initcall(altera_uart_console_init);
#define ALTERA_UART_CONSOLE (&altera_uart_console)
static void altera_uart_earlycon_write(struct console *co, const char *s,
unsigned int count)
{
struct earlycon_device *dev = co->data;
uart_console_write(&dev->port, s, count, altera_uart_console_putc);
}
static int __init altera_uart_earlycon_setup(struct earlycon_device *dev,
const char *options)
{
struct uart_port *port = &dev->port;
if (!port->membase)
return -ENODEV;
/* Enable RX interrupts now */
altera_uart_writel(port, ALTERA_UART_CONTROL_RRDY_MSK,
ALTERA_UART_CONTROL_REG);
if (dev->baud) {
unsigned int baudclk = port->uartclk / dev->baud;
altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
}
dev->con->write = altera_uart_earlycon_write;
return 0;
}
OF_EARLYCON_DECLARE(uart, "altr,uart-1.0", altera_uart_earlycon_setup);
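/*
 * With the declaration above, early console output can be requested by
 * passing "earlycon" on the kernel command line, provided the device
 * tree's chosen/stdout-path points at a node compatible with
 * "altr,uart-1.0".
 */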
#else
#define ALTERA_UART_CONSOLE NULL
#endif /* CONFIG_SERIAL_ALTERA_UART_CONSOLE */
/*
* Define the altera_uart UART driver structure.
*/
static struct uart_driver altera_uart_driver = {
.owner = THIS_MODULE,
.driver_name = DRV_NAME,
.dev_name = "ttyAL",
.major = SERIAL_ALTERA_MAJOR,
.minor = SERIAL_ALTERA_MINOR,
.nr = CONFIG_SERIAL_ALTERA_UART_MAXPORTS,
.cons = ALTERA_UART_CONSOLE,
};
static int altera_uart_probe(struct platform_device *pdev)
{
struct altera_uart_platform_uart *platp = dev_get_platdata(&pdev->dev);
struct uart_port *port;
struct resource *res_mem;
int i = pdev->id;
int ret;
/* if id is -1 scan for a free id and use that one */
if (i == -1) {
for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS; i++)
if (altera_uart_ports[i].port.mapbase == 0)
break;
}
if (i < 0 || i >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS)
return -EINVAL;
port = &altera_uart_ports[i].port;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res_mem)
port->mapbase = res_mem->start;
else if (platp)
port->mapbase = platp->mapbase;
else
return -EINVAL;
ret = platform_get_irq_optional(pdev, 0);
if (ret < 0 && ret != -ENXIO)
return ret;
if (ret > 0)
port->irq = ret;
else if (platp)
port->irq = platp->irq;
/* Check platform data first so we can override device node data */
if (platp)
port->uartclk = platp->uartclk;
else {
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&port->uartclk);
if (ret)
return ret;
}
port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
if (!port->membase)
return -ENOMEM;
if (platp)
port->regshift = platp->bus_shift;
else
port->regshift = 0;
port->line = i;
port->type = PORT_ALTERA_UART;
port->iotype = SERIAL_IO_MEM;
port->ops = &altera_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->dev = &pdev->dev;
platform_set_drvdata(pdev, port);
uart_add_one_port(&altera_uart_driver, port);
return 0;
}
static int altera_uart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
if (port) {
uart_remove_one_port(&altera_uart_driver, port);
port->mapbase = 0;
iounmap(port->membase);
}
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id altera_uart_match[] = {
{ .compatible = "ALTR,uart-1.0", },
{ .compatible = "altr,uart-1.0", },
{},
};
MODULE_DEVICE_TABLE(of, altera_uart_match);
#endif /* CONFIG_OF */
static struct platform_driver altera_uart_platform_driver = {
.probe = altera_uart_probe,
.remove = altera_uart_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(altera_uart_match),
},
};
static int __init altera_uart_init(void)
{
int rc;
rc = uart_register_driver(&altera_uart_driver);
if (rc)
return rc;
rc = platform_driver_register(&altera_uart_platform_driver);
if (rc)
uart_unregister_driver(&altera_uart_driver);
return rc;
}
static void __exit altera_uart_exit(void)
{
platform_driver_unregister(&altera_uart_platform_driver);
uart_unregister_driver(&altera_uart_driver);
}
module_init(altera_uart_init);
module_exit(altera_uart_exit);
MODULE_DESCRIPTION("Altera UART driver");
MODULE_AUTHOR("Thomas Chou <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_ALTERA_MAJOR);
| linux-master | drivers/tty/serial/altera_uart.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Derived from many drivers using generic_serial interface.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*
* Serial driver for BCM63xx integrated UART.
*
* Hardware flow control was _not_ tested since I only have RX/TX on
* my board.
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/clk.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/sysrq.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_bcm63xx.h>
#include <linux/io.h>
#include <linux/of.h>
#define BCM63XX_NR_UARTS 2
static struct uart_port ports[BCM63XX_NR_UARTS];
/*
* rx interrupt mask / stat
*
* mask:
* - rx fifo full
* - rx fifo above threshold
* - rx fifo not empty for too long
*/
#define UART_RX_INT_MASK (UART_IR_MASK(UART_IR_RXOVER) | \
UART_IR_MASK(UART_IR_RXTHRESH) | \
UART_IR_MASK(UART_IR_RXTIMEOUT))
#define UART_RX_INT_STAT (UART_IR_STAT(UART_IR_RXOVER) | \
UART_IR_STAT(UART_IR_RXTHRESH) | \
UART_IR_STAT(UART_IR_RXTIMEOUT))
/*
* tx interrupt mask / stat
*
* mask:
* - tx fifo empty
* - tx fifo below threshold
*/
#define UART_TX_INT_MASK (UART_IR_MASK(UART_IR_TXEMPTY) | \
UART_IR_MASK(UART_IR_TXTRESH))
#define UART_TX_INT_STAT (UART_IR_STAT(UART_IR_TXEMPTY) | \
UART_IR_STAT(UART_IR_TXTRESH))
/*
* external input interrupt
*
* mask: any edge on CTS, DCD
*/
#define UART_EXTINP_INT_MASK (UART_EXTINP_IRMASK(UART_EXTINP_IR_CTS) | \
UART_EXTINP_IRMASK(UART_EXTINP_IR_DCD))
/*
* handy uart register accessor
*/
static inline unsigned int bcm_uart_readl(struct uart_port *port,
unsigned int offset)
{
return __raw_readl(port->membase + offset);
}
static inline void bcm_uart_writel(struct uart_port *port,
unsigned int value, unsigned int offset)
{
__raw_writel(value, port->membase + offset);
}
/*
* serial core request to check if uart tx fifo is empty
*/
static unsigned int bcm_uart_tx_empty(struct uart_port *port)
{
unsigned int val;
val = bcm_uart_readl(port, UART_IR_REG);
return (val & UART_IR_STAT(UART_IR_TXEMPTY)) ? 1 : 0;
}
/*
* serial core request to set RTS and DTR pin state and loopback mode
*/
static void bcm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int val;
val = bcm_uart_readl(port, UART_MCTL_REG);
val &= ~(UART_MCTL_DTR_MASK | UART_MCTL_RTS_MASK);
/* the inverse of the written value is reflected on the pin */
if (!(mctrl & TIOCM_DTR))
val |= UART_MCTL_DTR_MASK;
if (!(mctrl & TIOCM_RTS))
val |= UART_MCTL_RTS_MASK;
bcm_uart_writel(port, val, UART_MCTL_REG);
val = bcm_uart_readl(port, UART_CTL_REG);
if (mctrl & TIOCM_LOOP)
val |= UART_CTL_LOOPBACK_MASK;
else
val &= ~UART_CTL_LOOPBACK_MASK;
bcm_uart_writel(port, val, UART_CTL_REG);
}
/*
* serial core request to return RI, CTS, DCD and DSR pin state
*/
static unsigned int bcm_uart_get_mctrl(struct uart_port *port)
{
unsigned int val, mctrl;
mctrl = 0;
val = bcm_uart_readl(port, UART_EXTINP_REG);
if (val & UART_EXTINP_RI_MASK)
mctrl |= TIOCM_RI;
if (val & UART_EXTINP_CTS_MASK)
mctrl |= TIOCM_CTS;
if (val & UART_EXTINP_DCD_MASK)
mctrl |= TIOCM_CD;
if (val & UART_EXTINP_DSR_MASK)
mctrl |= TIOCM_DSR;
return mctrl;
}
/*
* serial core request to disable tx ASAP (used for flow control)
*/
static void bcm_uart_stop_tx(struct uart_port *port)
{
unsigned int val;
val = bcm_uart_readl(port, UART_CTL_REG);
val &= ~(UART_CTL_TXEN_MASK);
bcm_uart_writel(port, val, UART_CTL_REG);
val = bcm_uart_readl(port, UART_IR_REG);
val &= ~UART_TX_INT_MASK;
bcm_uart_writel(port, val, UART_IR_REG);
}
/*
* serial core request to (re)enable tx
*/
static void bcm_uart_start_tx(struct uart_port *port)
{
unsigned int val;
val = bcm_uart_readl(port, UART_IR_REG);
val |= UART_TX_INT_MASK;
bcm_uart_writel(port, val, UART_IR_REG);
val = bcm_uart_readl(port, UART_CTL_REG);
val |= UART_CTL_TXEN_MASK;
bcm_uart_writel(port, val, UART_CTL_REG);
}
/*
* serial core request to stop rx, called before port shutdown
*/
static void bcm_uart_stop_rx(struct uart_port *port)
{
unsigned int val;
val = bcm_uart_readl(port, UART_IR_REG);
val &= ~UART_RX_INT_MASK;
bcm_uart_writel(port, val, UART_IR_REG);
}
/*
* serial core request to enable modem status interrupt reporting
*/
static void bcm_uart_enable_ms(struct uart_port *port)
{
unsigned int val;
val = bcm_uart_readl(port, UART_IR_REG);
val |= UART_IR_MASK(UART_IR_EXTIP);
bcm_uart_writel(port, val, UART_IR_REG);
}
/*
* serial core request to start/stop emitting break char
*/
static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
{
unsigned long flags;
unsigned int val;
spin_lock_irqsave(&port->lock, flags);
val = bcm_uart_readl(port, UART_CTL_REG);
if (ctl)
val |= UART_CTL_XMITBRK_MASK;
else
val &= ~UART_CTL_XMITBRK_MASK;
bcm_uart_writel(port, val, UART_CTL_REG);
spin_unlock_irqrestore(&port->lock, flags);
}
/*
* return port type in string format
*/
static const char *bcm_uart_type(struct uart_port *port)
{
return (port->type == PORT_BCM63XX) ? "bcm63xx_uart" : NULL;
}
/*
* read all chars in rx fifo and send them to core
*/
static void bcm_uart_do_rx(struct uart_port *port)
{
struct tty_port *tty_port = &port->state->port;
unsigned int max_count;
/* limit the number of chars read per interrupt; it need not be
* higher than the fifo size anyway since we're much faster than
* the serial port */
max_count = 32;
do {
unsigned int iestat, c, cstat;
char flag;
/* get overrun/fifo-empty information from the interrupt register */
iestat = bcm_uart_readl(port, UART_IR_REG);
if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
unsigned int val;
/* fifo reset is required to clear
* interrupt */
val = bcm_uart_readl(port, UART_CTL_REG);
val |= UART_CTL_RSTRXFIFO_MASK;
bcm_uart_writel(port, val, UART_CTL_REG);
port->icount.overrun++;
tty_insert_flip_char(tty_port, 0, TTY_OVERRUN);
}
if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
break;
cstat = c = bcm_uart_readl(port, UART_FIFO_REG);
port->icount.rx++;
flag = TTY_NORMAL;
c &= 0xff;
if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) {
/* do stats first */
if (cstat & UART_FIFO_BRKDET_MASK) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
}
if (cstat & UART_FIFO_PARERR_MASK)
port->icount.parity++;
if (cstat & UART_FIFO_FRAMEERR_MASK)
port->icount.frame++;
/* update flag wrt read_status_mask */
cstat &= port->read_status_mask;
if (cstat & UART_FIFO_BRKDET_MASK)
flag = TTY_BREAK;
if (cstat & UART_FIFO_FRAMEERR_MASK)
flag = TTY_FRAME;
if (cstat & UART_FIFO_PARERR_MASK)
flag = TTY_PARITY;
}
if (uart_handle_sysrq_char(port, c))
continue;
if ((cstat & port->ignore_status_mask) == 0)
tty_insert_flip_char(tty_port, c, flag);
} while (--max_count);
tty_flip_buffer_push(tty_port);
}
/*
* fill tx fifo with chars to send, stop when fifo is about to be full
* or when all chars have been sent.
*/
static void bcm_uart_do_tx(struct uart_port *port)
{
unsigned int val;
bool pending;
u8 ch;
val = bcm_uart_readl(port, UART_MCTL_REG);
val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
pending = uart_port_tx_limited(port, ch, port->fifosize - val,
true,
bcm_uart_writel(port, ch, UART_FIFO_REG),
({}));
if (pending)
return;
/* nothing to send, disable transmit interrupt */
val = bcm_uart_readl(port, UART_IR_REG);
val &= ~UART_TX_INT_MASK;
bcm_uart_writel(port, val, UART_IR_REG);
}
/*
* process uart interrupt
*/
static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
{
struct uart_port *port;
unsigned int irqstat;
port = dev_id;
spin_lock(&port->lock);
irqstat = bcm_uart_readl(port, UART_IR_REG);
if (irqstat & UART_RX_INT_STAT)
bcm_uart_do_rx(port);
if (irqstat & UART_TX_INT_STAT)
bcm_uart_do_tx(port);
if (irqstat & UART_IR_MASK(UART_IR_EXTIP)) {
unsigned int estat;
estat = bcm_uart_readl(port, UART_EXTINP_REG);
if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_CTS))
uart_handle_cts_change(port,
estat & UART_EXTINP_CTS_MASK);
if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_DCD))
uart_handle_dcd_change(port,
estat & UART_EXTINP_DCD_MASK);
}
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
/*
* enable rx & tx operation on uart
*/
static void bcm_uart_enable(struct uart_port *port)
{
unsigned int val;
val = bcm_uart_readl(port, UART_CTL_REG);
val |= (UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK);
bcm_uart_writel(port, val, UART_CTL_REG);
}
/*
* disable rx & tx operation on uart
*/
static void bcm_uart_disable(struct uart_port *port)
{
unsigned int val;
val = bcm_uart_readl(port, UART_CTL_REG);
val &= ~(UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK |
UART_CTL_RXEN_MASK);
bcm_uart_writel(port, val, UART_CTL_REG);
}
/*
* clear all unread data in rx fifo and unsent data in tx fifo
*/
static void bcm_uart_flush(struct uart_port *port)
{
unsigned int val;
/* empty rx and tx fifo */
val = bcm_uart_readl(port, UART_CTL_REG);
val |= UART_CTL_RSTRXFIFO_MASK | UART_CTL_RSTTXFIFO_MASK;
bcm_uart_writel(port, val, UART_CTL_REG);
/* read any pending char to make sure all irq status bits are
* cleared */
(void)bcm_uart_readl(port, UART_FIFO_REG);
}
/*
* serial core request to initialize uart and start rx operation
*/
static int bcm_uart_startup(struct uart_port *port)
{
unsigned int val;
int ret;
/* mask all irq and flush port */
bcm_uart_disable(port);
bcm_uart_writel(port, 0, UART_IR_REG);
bcm_uart_flush(port);
/* clear any pending external input interrupt */
(void)bcm_uart_readl(port, UART_EXTINP_REG);
/* set rx/tx fifo thresh to fifo half size */
val = bcm_uart_readl(port, UART_MCTL_REG);
val &= ~(UART_MCTL_RXFIFOTHRESH_MASK | UART_MCTL_TXFIFOTHRESH_MASK);
val |= (port->fifosize / 2) << UART_MCTL_RXFIFOTHRESH_SHIFT;
val |= (port->fifosize / 2) << UART_MCTL_TXFIFOTHRESH_SHIFT;
bcm_uart_writel(port, val, UART_MCTL_REG);
/* set rx fifo timeout to 1 char time */
val = bcm_uart_readl(port, UART_CTL_REG);
val &= ~UART_CTL_RXTMOUTCNT_MASK;
val |= 1 << UART_CTL_RXTMOUTCNT_SHIFT;
bcm_uart_writel(port, val, UART_CTL_REG);
/* report any edge on dcd and cts */
val = UART_EXTINP_INT_MASK;
val |= UART_EXTINP_DCD_NOSENSE_MASK;
val |= UART_EXTINP_CTS_NOSENSE_MASK;
bcm_uart_writel(port, val, UART_EXTINP_REG);
/* register irq and enable rx interrupts */
ret = request_irq(port->irq, bcm_uart_interrupt, 0,
dev_name(port->dev), port);
if (ret)
return ret;
bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
bcm_uart_enable(port);
return 0;
}
/*
* serial core request to flush & disable uart
*/
static void bcm_uart_shutdown(struct uart_port *port)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
bcm_uart_writel(port, 0, UART_IR_REG);
spin_unlock_irqrestore(&port->lock, flags);
bcm_uart_disable(port);
bcm_uart_flush(port);
free_irq(port->irq, port);
}
/*
* serial core request to change current uart setting
*/
static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new,
const struct ktermios *old)
{
unsigned int ctl, baud, quot, ier;
unsigned long flags;
int tries;
spin_lock_irqsave(&port->lock, flags);
/* drain the TX FIFO fully before the UART is disabled for reconfiguration */
for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
mdelay(10);
/* disable uart while changing speed */
bcm_uart_disable(port);
bcm_uart_flush(port);
/* update Control register */
ctl = bcm_uart_readl(port, UART_CTL_REG);
ctl &= ~UART_CTL_BITSPERSYM_MASK;
switch (new->c_cflag & CSIZE) {
case CS5:
ctl |= (0 << UART_CTL_BITSPERSYM_SHIFT);
break;
case CS6:
ctl |= (1 << UART_CTL_BITSPERSYM_SHIFT);
break;
case CS7:
ctl |= (2 << UART_CTL_BITSPERSYM_SHIFT);
break;
default:
ctl |= (3 << UART_CTL_BITSPERSYM_SHIFT);
break;
}
ctl &= ~UART_CTL_STOPBITS_MASK;
if (new->c_cflag & CSTOPB)
ctl |= UART_CTL_STOPBITS_2;
else
ctl |= UART_CTL_STOPBITS_1;
ctl &= ~(UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
if (new->c_cflag & PARENB)
ctl |= (UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
ctl &= ~(UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
if (new->c_cflag & PARODD)
ctl |= (UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
bcm_uart_writel(port, ctl, UART_CTL_REG);
/* update Baudword register */
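/* the Baudword register takes the 16x-oversampling divisor minus one,
* hence the "- 1" applied to uart_get_divisor() below */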
baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
quot = uart_get_divisor(port, baud) - 1;
bcm_uart_writel(port, quot, UART_BAUD_REG);
/* update Interrupt register */
ier = bcm_uart_readl(port, UART_IR_REG);
ier &= ~UART_IR_MASK(UART_IR_EXTIP);
if (UART_ENABLE_MS(port, new->c_cflag))
ier |= UART_IR_MASK(UART_IR_EXTIP);
bcm_uart_writel(port, ier, UART_IR_REG);
/* update read/ignore mask */
port->read_status_mask = UART_FIFO_VALID_MASK;
if (new->c_iflag & INPCK) {
port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
port->read_status_mask |= UART_FIFO_PARERR_MASK;
}
if (new->c_iflag & (IGNBRK | BRKINT))
port->read_status_mask |= UART_FIFO_BRKDET_MASK;
port->ignore_status_mask = 0;
if (new->c_iflag & IGNPAR)
port->ignore_status_mask |= UART_FIFO_PARERR_MASK;
if (new->c_iflag & IGNBRK)
port->ignore_status_mask |= UART_FIFO_BRKDET_MASK;
if (!(new->c_cflag & CREAD))
port->ignore_status_mask |= UART_FIFO_VALID_MASK;
uart_update_timeout(port, new->c_cflag, baud);
bcm_uart_enable(port);
spin_unlock_irqrestore(&port->lock, flags);
}
/*
* serial core request to claim uart iomem
*/
static int bcm_uart_request_port(struct uart_port *port)
{
/* UARTs always present */
return 0;
}
/*
* serial core request to release uart iomem
*/
static void bcm_uart_release_port(struct uart_port *port)
{
/* Nothing to release ... */
}
/*
* serial core request to do any port required autoconfiguration
*/
static void bcm_uart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
if (bcm_uart_request_port(port))
return;
port->type = PORT_BCM63XX;
}
}
/*
* serial core request to check that the port information in serinfo is
* suitable
*/
static int bcm_uart_verify_port(struct uart_port *port,
struct serial_struct *serinfo)
{
if (port->type != PORT_BCM63XX)
return -EINVAL;
if (port->irq != serinfo->irq)
return -EINVAL;
if (port->iotype != serinfo->io_type)
return -EINVAL;
if (port->mapbase != (unsigned long)serinfo->iomem_base)
return -EINVAL;
return 0;
}
#ifdef CONFIG_CONSOLE_POLL
/*
* return true when outstanding tx equals fifo size
*/
static bool bcm_uart_tx_full(struct uart_port *port)
{
unsigned int val;
val = bcm_uart_readl(port, UART_MCTL_REG);
val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
return val == port->fifosize;
}
static int bcm_uart_poll_get_char(struct uart_port *port)
{
unsigned int iestat;
iestat = bcm_uart_readl(port, UART_IR_REG);
if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
return NO_POLL_CHAR;
return bcm_uart_readl(port, UART_FIFO_REG);
}
static void bcm_uart_poll_put_char(struct uart_port *port, unsigned char c)
{
while (bcm_uart_tx_full(port))
cpu_relax();
bcm_uart_writel(port, c, UART_FIFO_REG);
}
#endif
/* serial core callbacks */
static const struct uart_ops bcm_uart_ops = {
.tx_empty = bcm_uart_tx_empty,
.get_mctrl = bcm_uart_get_mctrl,
.set_mctrl = bcm_uart_set_mctrl,
.start_tx = bcm_uart_start_tx,
.stop_tx = bcm_uart_stop_tx,
.stop_rx = bcm_uart_stop_rx,
.enable_ms = bcm_uart_enable_ms,
.break_ctl = bcm_uart_break_ctl,
.startup = bcm_uart_startup,
.shutdown = bcm_uart_shutdown,
.set_termios = bcm_uart_set_termios,
.type = bcm_uart_type,
.release_port = bcm_uart_release_port,
.request_port = bcm_uart_request_port,
.config_port = bcm_uart_config_port,
.verify_port = bcm_uart_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = bcm_uart_poll_get_char,
.poll_put_char = bcm_uart_poll_put_char,
#endif
};
#ifdef CONFIG_SERIAL_BCM63XX_CONSOLE
static void wait_for_xmitr(struct uart_port *port)
{
unsigned int tmout;
/* Wait up to 10ms for the character(s) to be sent. */
tmout = 10000;
while (--tmout) {
unsigned int val;
val = bcm_uart_readl(port, UART_IR_REG);
if (val & UART_IR_STAT(UART_IR_TXEMPTY))
break;
udelay(1);
}
/* Wait up to 1s for flow control if necessary */
if (port->flags & UPF_CONS_FLOW) {
tmout = 1000000;
while (--tmout) {
unsigned int val;
val = bcm_uart_readl(port, UART_EXTINP_REG);
if (val & UART_EXTINP_CTS_MASK)
break;
udelay(1);
}
}
}
/*
* output given char
*/
static void bcm_console_putchar(struct uart_port *port, unsigned char ch)
{
wait_for_xmitr(port);
bcm_uart_writel(port, ch, UART_FIFO_REG);
}
/*
* console core request to output given string
*/
static void bcm_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port;
unsigned long flags;
int locked;
port = &ports[co->index];
local_irq_save(flags);
if (port->sysrq) {
/* bcm_uart_interrupt() already took the lock */
locked = 0;
} else if (oops_in_progress) {
locked = spin_trylock(&port->lock);
} else {
spin_lock(&port->lock);
locked = 1;
}
/* call helper to deal with \r\n */
uart_console_write(port, s, count, bcm_console_putchar);
/* and wait for char to be transmitted */
wait_for_xmitr(port);
if (locked)
spin_unlock(&port->lock);
local_irq_restore(flags);
}
/*
* console core request to set up the given console: find the matching
* uart port and configure it.
*/
static int bcm_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= BCM63XX_NR_UARTS)
return -EINVAL;
port = &ports[co->index];
if (!port->membase)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver bcm_uart_driver;
static struct console bcm63xx_console = {
.name = "ttyS",
.write = bcm_console_write,
.device = uart_console_device,
.setup = bcm_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &bcm_uart_driver,
};
static int __init bcm63xx_console_init(void)
{
register_console(&bcm63xx_console);
return 0;
}
console_initcall(bcm63xx_console_init);
static void bcm_early_write(struct console *con, const char *s, unsigned n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, bcm_console_putchar);
wait_for_xmitr(&dev->port);
}
static int __init bcm_early_console_setup(struct earlycon_device *device,
const char *opt)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = bcm_early_write;
return 0;
}
OF_EARLYCON_DECLARE(bcm63xx_uart, "brcm,bcm6345-uart", bcm_early_console_setup);
#define BCM63XX_CONSOLE (&bcm63xx_console)
#else
#define BCM63XX_CONSOLE NULL
#endif /* CONFIG_SERIAL_BCM63XX_CONSOLE */
static struct uart_driver bcm_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "bcm63xx_uart",
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
.nr = BCM63XX_NR_UARTS,
.cons = BCM63XX_CONSOLE,
};
/*
* platform driver probe/remove callback
*/
static int bcm_uart_probe(struct platform_device *pdev)
{
struct resource *res_mem;
struct uart_port *port;
struct clk *clk;
int ret;
if (pdev->dev.of_node) {
pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
if (pdev->id < 0)
pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
}
if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
return -EINVAL;
port = &ports[pdev->id];
if (port->membase)
return -EBUSY;
memset(port, 0, sizeof(*port));
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
port->mapbase = res_mem->start;
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
port->irq = ret;
clk = clk_get(&pdev->dev, "refclk");
if (IS_ERR(clk) && pdev->dev.of_node)
clk = of_clk_get(pdev->dev.of_node, 0);
if (IS_ERR(clk))
return -ENODEV;
port->iotype = UPIO_MEM;
port->ops = &bcm_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->dev = &pdev->dev;
port->fifosize = 16;
port->uartclk = clk_get_rate(clk) / 2;
port->line = pdev->id;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_BCM63XX_CONSOLE);
clk_put(clk);
ret = uart_add_one_port(&bcm_uart_driver, port);
if (ret) {
ports[pdev->id].membase = NULL;
return ret;
}
platform_set_drvdata(pdev, port);
return 0;
}
static int bcm_uart_remove(struct platform_device *pdev)
{
struct uart_port *port;
port = platform_get_drvdata(pdev);
uart_remove_one_port(&bcm_uart_driver, port);
/* mark port as free */
ports[pdev->id].membase = NULL;
return 0;
}
static const struct of_device_id bcm63xx_of_match[] = {
{ .compatible = "brcm,bcm6345-uart" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm63xx_of_match);
/*
* platform driver registration
*/
static struct platform_driver bcm_uart_platform_driver = {
.probe = bcm_uart_probe,
.remove = bcm_uart_remove,
.driver = {
.name = "bcm63xx_uart",
.of_match_table = bcm63xx_of_match,
},
};
static int __init bcm_uart_init(void)
{
int ret;
ret = uart_register_driver(&bcm_uart_driver);
if (ret)
return ret;
ret = platform_driver_register(&bcm_uart_platform_driver);
if (ret)
uart_unregister_driver(&bcm_uart_driver);
return ret;
}
static void __exit bcm_uart_exit(void)
{
platform_driver_unregister(&bcm_uart_platform_driver);
uart_unregister_driver(&bcm_uart_driver);
}
module_init(bcm_uart_init);
module_exit(bcm_uart_exit);
MODULE_AUTHOR("Maxime Bizon <[email protected]>");
MODULE_DESCRIPTION("Broadcom 63xx integrated uart driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/bcm63xx_uart.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Socionext Inc.
*/
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#define USIO_NAME "mlb-usio-uart"
#define USIO_UART_DEV_NAME "ttyUSI"
static struct uart_port mlb_usio_ports[CONFIG_SERIAL_MILBEAUT_USIO_PORTS];
#define RX 0
#define TX 1
static int mlb_usio_irq[CONFIG_SERIAL_MILBEAUT_USIO_PORTS][2];
#define MLB_USIO_REG_SMR 0
#define MLB_USIO_REG_SCR 1
#define MLB_USIO_REG_ESCR 2
#define MLB_USIO_REG_SSR 3
#define MLB_USIO_REG_DR 4
#define MLB_USIO_REG_BGR 6
#define MLB_USIO_REG_FCR 12
#define MLB_USIO_REG_FBYTE 14
#define MLB_USIO_SMR_SOE BIT(0)
#define MLB_USIO_SMR_SBL BIT(3)
#define MLB_USIO_SCR_TXE BIT(0)
#define MLB_USIO_SCR_RXE BIT(1)
#define MLB_USIO_SCR_TBIE BIT(2)
#define MLB_USIO_SCR_TIE BIT(3)
#define MLB_USIO_SCR_RIE BIT(4)
#define MLB_USIO_SCR_UPCL BIT(7)
#define MLB_USIO_ESCR_L_8BIT 0
#define MLB_USIO_ESCR_L_5BIT 1
#define MLB_USIO_ESCR_L_6BIT 2
#define MLB_USIO_ESCR_L_7BIT 3
#define MLB_USIO_ESCR_P BIT(3)
#define MLB_USIO_ESCR_PEN BIT(4)
#define MLB_USIO_ESCR_FLWEN BIT(7)
#define MLB_USIO_SSR_TBI BIT(0)
#define MLB_USIO_SSR_TDRE BIT(1)
#define MLB_USIO_SSR_RDRF BIT(2)
#define MLB_USIO_SSR_ORE BIT(3)
#define MLB_USIO_SSR_FRE BIT(4)
#define MLB_USIO_SSR_PE BIT(5)
#define MLB_USIO_SSR_REC BIT(7)
#define MLB_USIO_SSR_BRK BIT(8)
#define MLB_USIO_FCR_FE1 BIT(0)
#define MLB_USIO_FCR_FE2 BIT(1)
#define MLB_USIO_FCR_FCL1 BIT(2)
#define MLB_USIO_FCR_FCL2 BIT(3)
#define MLB_USIO_FCR_FSET BIT(4)
#define MLB_USIO_FCR_FTIE BIT(9)
#define MLB_USIO_FCR_FDRQ BIT(10)
#define MLB_USIO_FCR_FRIIE BIT(11)
static void mlb_usio_stop_tx(struct uart_port *port)
{
writew(readw(port->membase + MLB_USIO_REG_FCR) & ~MLB_USIO_FCR_FTIE,
port->membase + MLB_USIO_REG_FCR);
writeb(readb(port->membase + MLB_USIO_REG_SCR) & ~MLB_USIO_SCR_TBIE,
port->membase + MLB_USIO_REG_SCR);
}
static void mlb_usio_tx_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
int count;
writew(readw(port->membase + MLB_USIO_REG_FCR) & ~MLB_USIO_FCR_FTIE,
port->membase + MLB_USIO_REG_FCR);
writeb(readb(port->membase + MLB_USIO_REG_SCR) &
~(MLB_USIO_SCR_TIE | MLB_USIO_SCR_TBIE),
port->membase + MLB_USIO_REG_SCR);
if (port->x_char) {
writew(port->x_char, port->membase + MLB_USIO_REG_DR);
port->icount.tx++;
port->x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
mlb_usio_stop_tx(port);
return;
}
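/*
* The low byte of FBYTE holds the TX FIFO fill level (the high byte
* is used for the RX count elsewhere), so this is the number of free
* slots we may write below.
*/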
count = port->fifosize -
(readw(port->membase + MLB_USIO_REG_FBYTE) & 0xff);
do {
writew(xmit->buf[xmit->tail], port->membase + MLB_USIO_REG_DR);
uart_xmit_advance(port, 1);
if (uart_circ_empty(xmit))
break;
} while (--count > 0);
writew(readw(port->membase + MLB_USIO_REG_FCR) & ~MLB_USIO_FCR_FDRQ,
port->membase + MLB_USIO_REG_FCR);
writeb(readb(port->membase + MLB_USIO_REG_SCR) | MLB_USIO_SCR_TBIE,
port->membase + MLB_USIO_REG_SCR);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (uart_circ_empty(xmit))
mlb_usio_stop_tx(port);
}
static void mlb_usio_start_tx(struct uart_port *port)
{
u16 fcr = readw(port->membase + MLB_USIO_REG_FCR);
writew(fcr | MLB_USIO_FCR_FTIE, port->membase + MLB_USIO_REG_FCR);
if (!(fcr & MLB_USIO_FCR_FDRQ))
return;
writeb(readb(port->membase + MLB_USIO_REG_SCR) | MLB_USIO_SCR_TBIE,
port->membase + MLB_USIO_REG_SCR);
if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI)
mlb_usio_tx_chars(port);
}
static void mlb_usio_stop_rx(struct uart_port *port)
{
writeb(readb(port->membase + MLB_USIO_REG_SCR) & ~MLB_USIO_SCR_RIE,
port->membase + MLB_USIO_REG_SCR);
}
static void mlb_usio_enable_ms(struct uart_port *port)
{
writeb(readb(port->membase + MLB_USIO_REG_SCR) |
MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE,
port->membase + MLB_USIO_REG_SCR);
}
static void mlb_usio_rx_chars(struct uart_port *port)
{
struct tty_port *ttyport = &port->state->port;
u8 flag = 0, ch = 0;
u8 status;
int max_count = 2;
while (max_count--) {
status = readb(port->membase + MLB_USIO_REG_SSR);
if (!(status & MLB_USIO_SSR_RDRF))
break;
if (!(status & (MLB_USIO_SSR_ORE | MLB_USIO_SSR_FRE |
MLB_USIO_SSR_PE))) {
ch = readw(port->membase + MLB_USIO_REG_DR);
flag = TTY_NORMAL;
port->icount.rx++;
if (uart_handle_sysrq_char(port, ch))
continue;
uart_insert_char(port, status, MLB_USIO_SSR_ORE,
ch, flag);
continue;
}
if (status & MLB_USIO_SSR_PE)
port->icount.parity++;
if (status & MLB_USIO_SSR_ORE)
port->icount.overrun++;
status &= port->read_status_mask;
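/*
* Note: SSR was read with readb() above, so the BIT(8) break flag
* can never be set here and the TTY_BREAK branch below is
* effectively unreachable as written.
*/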
if (status & MLB_USIO_SSR_BRK) {
flag = TTY_BREAK;
ch = 0;
} else
if (status & MLB_USIO_SSR_PE) {
flag = TTY_PARITY;
ch = 0;
} else
if (status & MLB_USIO_SSR_FRE) {
flag = TTY_FRAME;
ch = 0;
}
if (flag)
uart_insert_char(port, status, MLB_USIO_SSR_ORE,
ch, flag);
writeb(readb(port->membase + MLB_USIO_REG_SSR) |
MLB_USIO_SSR_REC,
port->membase + MLB_USIO_REG_SSR);
max_count = readw(port->membase + MLB_USIO_REG_FBYTE) >> 8;
writew(readw(port->membase + MLB_USIO_REG_FCR) |
MLB_USIO_FCR_FE2 | MLB_USIO_FCR_FRIIE,
port->membase + MLB_USIO_REG_FCR);
}
tty_flip_buffer_push(ttyport);
}
static irqreturn_t mlb_usio_rx_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
spin_lock(&port->lock);
mlb_usio_rx_chars(port);
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
static irqreturn_t mlb_usio_tx_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
spin_lock(&port->lock);
if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI)
mlb_usio_tx_chars(port);
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
static unsigned int mlb_usio_tx_empty(struct uart_port *port)
{
return (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI) ?
TIOCSER_TEMT : 0;
}
static void mlb_usio_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
static unsigned int mlb_usio_get_mctrl(struct uart_port *port)
{
return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}
static void mlb_usio_break_ctl(struct uart_port *port, int break_state)
{
}
static int mlb_usio_startup(struct uart_port *port)
{
const char *portname = to_platform_device(port->dev)->name;
unsigned long flags;
int ret, index = port->line;
unsigned char escr;
ret = request_irq(mlb_usio_irq[index][RX], mlb_usio_rx_irq,
0, portname, port);
if (ret)
return ret;
ret = request_irq(mlb_usio_irq[index][TX], mlb_usio_tx_irq,
0, portname, port);
if (ret) {
free_irq(mlb_usio_irq[index][RX], port);
return ret;
}
escr = readb(port->membase + MLB_USIO_REG_ESCR);
if (of_property_read_bool(port->dev->of_node, "auto-flow-control"))
escr |= MLB_USIO_ESCR_FLWEN;
spin_lock_irqsave(&port->lock, flags);
writeb(0, port->membase + MLB_USIO_REG_SCR);
writeb(escr, port->membase + MLB_USIO_REG_ESCR);
writeb(MLB_USIO_SCR_UPCL, port->membase + MLB_USIO_REG_SCR);
writeb(MLB_USIO_SSR_REC, port->membase + MLB_USIO_REG_SSR);
writew(0, port->membase + MLB_USIO_REG_FCR);
writew(MLB_USIO_FCR_FCL1 | MLB_USIO_FCR_FCL2,
port->membase + MLB_USIO_REG_FCR);
writew(MLB_USIO_FCR_FE1 | MLB_USIO_FCR_FE2 | MLB_USIO_FCR_FRIIE,
port->membase + MLB_USIO_REG_FCR);
writew(0, port->membase + MLB_USIO_REG_FBYTE);
writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE);
writeb(MLB_USIO_SCR_TXE | MLB_USIO_SCR_RIE | MLB_USIO_SCR_TBIE |
MLB_USIO_SCR_RXE, port->membase + MLB_USIO_REG_SCR);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
static void mlb_usio_shutdown(struct uart_port *port)
{
int index = port->line;
free_irq(mlb_usio_irq[index][RX], port);
free_irq(mlb_usio_irq[index][TX], port);
}
static void mlb_usio_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
unsigned int escr, smr = MLB_USIO_SMR_SOE;
unsigned long flags, baud, quot;
switch (termios->c_cflag & CSIZE) {
case CS5:
escr = MLB_USIO_ESCR_L_5BIT;
break;
case CS6:
escr = MLB_USIO_ESCR_L_6BIT;
break;
case CS7:
escr = MLB_USIO_ESCR_L_7BIT;
break;
case CS8:
default:
escr = MLB_USIO_ESCR_L_8BIT;
break;
}
if (termios->c_cflag & CSTOPB)
smr |= MLB_USIO_SMR_SBL;
if (termios->c_cflag & PARENB) {
escr |= MLB_USIO_ESCR_PEN;
if (termios->c_cflag & PARODD)
escr |= MLB_USIO_ESCR_P;
}
/* Enable hardware flow control if requested or forced by the DT */
if (of_property_read_bool(port->dev->of_node, "auto-flow-control") ||
(termios->c_cflag & CRTSCTS))
escr |= MLB_USIO_ESCR_FLWEN;
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);
if (baud > 1)
quot = port->uartclk / baud - 1;
else
quot = 0;
spin_lock_irqsave(&port->lock, flags);
uart_update_timeout(port, termios->c_cflag, baud);
port->read_status_mask = MLB_USIO_SSR_ORE | MLB_USIO_SSR_RDRF |
MLB_USIO_SSR_TDRE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= MLB_USIO_SSR_FRE | MLB_USIO_SSR_PE;
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= MLB_USIO_SSR_FRE | MLB_USIO_SSR_PE;
if ((termios->c_iflag & IGNBRK) && (termios->c_iflag & IGNPAR))
port->ignore_status_mask |= MLB_USIO_SSR_ORE;
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= MLB_USIO_SSR_RDRF;
writeb(0, port->membase + MLB_USIO_REG_SCR);
writeb(MLB_USIO_SCR_UPCL, port->membase + MLB_USIO_REG_SCR);
writeb(MLB_USIO_SSR_REC, port->membase + MLB_USIO_REG_SSR);
writew(0, port->membase + MLB_USIO_REG_FCR);
writeb(smr, port->membase + MLB_USIO_REG_SMR);
writeb(escr, port->membase + MLB_USIO_REG_ESCR);
writew(quot, port->membase + MLB_USIO_REG_BGR);
writew(0, port->membase + MLB_USIO_REG_FCR);
writew(MLB_USIO_FCR_FCL1 | MLB_USIO_FCR_FCL2 | MLB_USIO_FCR_FE1 |
MLB_USIO_FCR_FE2 | MLB_USIO_FCR_FRIIE,
port->membase + MLB_USIO_REG_FCR);
writew(0, port->membase + MLB_USIO_REG_FBYTE);
writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE);
writeb(MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE | MLB_USIO_SCR_TBIE |
MLB_USIO_SCR_TXE, port->membase + MLB_USIO_REG_SCR);
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *mlb_usio_type(struct uart_port *port)
{
return ((port->type == PORT_MLB_USIO) ? USIO_NAME : NULL);
}
static void mlb_usio_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_MLB_USIO;
}
static const struct uart_ops mlb_usio_ops = {
.tx_empty = mlb_usio_tx_empty,
.set_mctrl = mlb_usio_set_mctrl,
.get_mctrl = mlb_usio_get_mctrl,
.stop_tx = mlb_usio_stop_tx,
.start_tx = mlb_usio_start_tx,
.stop_rx = mlb_usio_stop_rx,
.enable_ms = mlb_usio_enable_ms,
.break_ctl = mlb_usio_break_ctl,
.startup = mlb_usio_startup,
.shutdown = mlb_usio_shutdown,
.set_termios = mlb_usio_set_termios,
.type = mlb_usio_type,
.config_port = mlb_usio_config_port,
};
#ifdef CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE
static void mlb_usio_console_putchar(struct uart_port *port, unsigned char c)
{
while (!(readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TDRE))
cpu_relax();
writew(c, port->membase + MLB_USIO_REG_DR);
}
static void mlb_usio_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port = &mlb_usio_ports[co->index];
uart_console_write(port, s, count, mlb_usio_console_putchar);
}
static int __init mlb_usio_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 115200;
int parity = 'n';
int flow = 'n';
int bits = 8;
if (co->index < 0 || co->index >= CONFIG_SERIAL_MILBEAUT_USIO_PORTS)
return -ENODEV;
port = &mlb_usio_ports[co->index];
if (!port->membase)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
if (of_property_read_bool(port->dev->of_node, "auto-flow-control"))
flow = 'r';
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver mlb_usio_uart_driver;
static struct console mlb_usio_console = {
.name = USIO_UART_DEV_NAME,
.write = mlb_usio_console_write,
.device = uart_console_device,
.setup = mlb_usio_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &mlb_usio_uart_driver,
};
static int __init mlb_usio_console_init(void)
{
register_console(&mlb_usio_console);
return 0;
}
console_initcall(mlb_usio_console_init);
static void mlb_usio_early_console_write(struct console *co, const char *s,
u_int count)
{
struct earlycon_device *dev = co->data;
uart_console_write(&dev->port, s, count, mlb_usio_console_putchar);
}
static int __init mlb_usio_early_console_setup(struct earlycon_device *device,
const char *opt)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = mlb_usio_early_console_write;
return 0;
}
OF_EARLYCON_DECLARE(mlb_usio, "socionext,milbeaut-usio-uart",
mlb_usio_early_console_setup);
#define USIO_CONSOLE (&mlb_usio_console)
#else
#define USIO_CONSOLE NULL
#endif
static struct uart_driver mlb_usio_uart_driver = {
.owner = THIS_MODULE,
.driver_name = USIO_NAME,
.dev_name = USIO_UART_DEV_NAME,
.cons = USIO_CONSOLE,
.nr = CONFIG_SERIAL_MILBEAUT_USIO_PORTS,
};
static int mlb_usio_probe(struct platform_device *pdev)
{
struct clk *clk = devm_clk_get(&pdev->dev, NULL);
struct uart_port *port;
struct resource *res;
int index = 0;
int ret;
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Missing clock\n");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&pdev->dev, "Clock enable failed: %d\n", ret);
return ret;
}
of_property_read_u32(pdev->dev.of_node, "index", &index);
port = &mlb_usio_ports[index];
port->private_data = (void *)clk;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "Missing regs\n");
ret = -ENODEV;
goto failed;
}
port->membase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
ret = platform_get_irq_byname(pdev, "rx");
if (ret < 0)
goto failed;
mlb_usio_irq[index][RX] = ret;
ret = platform_get_irq_byname(pdev, "tx");
if (ret < 0)
goto failed;
mlb_usio_irq[index][TX] = ret;
port->irq = mlb_usio_irq[index][RX];
port->uartclk = clk_get_rate(clk);
port->fifosize = 128;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE);
port->iotype = UPIO_MEM32;
port->flags = UPF_BOOT_AUTOCONF | UPF_SPD_VHI;
port->line = index;
port->ops = &mlb_usio_ops;
port->dev = &pdev->dev;
ret = uart_add_one_port(&mlb_usio_uart_driver, port);
if (ret) {
dev_err(&pdev->dev, "Adding port failed: %d\n", ret);
goto failed;
}
platform_set_drvdata(pdev, port);
return 0;
failed:
clk_disable_unprepare(clk);
return ret;
}
static int mlb_usio_remove(struct platform_device *pdev)
{
/* pdev->id is -1 for OF-probed devices, so use the drvdata set in probe */
struct uart_port *port = platform_get_drvdata(pdev);
struct clk *clk = port->private_data;
uart_remove_one_port(&mlb_usio_uart_driver, port);
clk_disable_unprepare(clk);
return 0;
}
static const struct of_device_id mlb_usio_dt_ids[] = {
{ .compatible = "socionext,milbeaut-usio-uart" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mlb_usio_dt_ids);
static struct platform_driver mlb_usio_driver = {
.probe = mlb_usio_probe,
.remove = mlb_usio_remove,
.driver = {
.name = USIO_NAME,
.of_match_table = mlb_usio_dt_ids,
},
};
static int __init mlb_usio_init(void)
{
int ret = uart_register_driver(&mlb_usio_uart_driver);
if (ret) {
pr_err("%s: uart registration failed: %d\n", __func__, ret);
return ret;
}
ret = platform_driver_register(&mlb_usio_driver);
if (ret) {
uart_unregister_driver(&mlb_usio_uart_driver);
pr_err("%s: drv registration failed: %d\n", __func__, ret);
return ret;
}
return 0;
}
static void __exit mlb_usio_exit(void)
{
platform_driver_unregister(&mlb_usio_driver);
uart_unregister_driver(&mlb_usio_uart_driver);
}
module_init(mlb_usio_init);
module_exit(mlb_usio_exit);
MODULE_AUTHOR("SOCIONEXT");
MODULE_DESCRIPTION("MILBEAUT_USIO/UART Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/milbeaut_usio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/console.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#define TCU_MBOX_BYTE(i, x) ((x) << (i * 8))
#define TCU_MBOX_BYTE_V(x, i) (((x) >> (i * 8)) & 0xff)
#define TCU_MBOX_NUM_BYTES(x) ((x) << 24)
#define TCU_MBOX_NUM_BYTES_V(x) (((x) >> 24) & 0x3)
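/*
* A TCU mailbox word carries up to three payload characters in bits
* 0-23 (one per byte lane) and the payload length in bits 24-25.
*/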
struct tegra_tcu {
struct uart_driver driver;
#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
struct console console;
#endif
struct uart_port port;
struct mbox_client tx_client, rx_client;
struct mbox_chan *tx, *rx;
};
static unsigned int tegra_tcu_uart_tx_empty(struct uart_port *port)
{
return TIOCSER_TEMT;
}
static void tegra_tcu_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
static unsigned int tegra_tcu_uart_get_mctrl(struct uart_port *port)
{
return 0;
}
static void tegra_tcu_uart_stop_tx(struct uart_port *port)
{
}
static void tegra_tcu_write_one(struct tegra_tcu *tcu, u32 value,
unsigned int count)
{
void *msg;
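/*
* The packed word itself is passed as the mailbox message "pointer";
* no memory buffer is shared with the receiving firmware.
*/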
value |= TCU_MBOX_NUM_BYTES(count);
msg = (void *)(unsigned long)value;
mbox_send_message(tcu->tx, msg);
mbox_flush(tcu->tx, 1000);
}
static void tegra_tcu_write(struct tegra_tcu *tcu, const char *s,
unsigned int count)
{
unsigned int written = 0, i = 0;
bool insert_nl = false;
u32 value = 0;
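/*
* Pack the string into 3-byte mailbox words, expanding a bare '\n'
* into "\r\n" on the way; any partial word is flushed after the loop.
*/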
while (i < count) {
if (insert_nl) {
value |= TCU_MBOX_BYTE(written++, '\n');
insert_nl = false;
i++;
} else if (s[i] == '\n') {
value |= TCU_MBOX_BYTE(written++, '\r');
insert_nl = true;
} else {
value |= TCU_MBOX_BYTE(written++, s[i++]);
}
if (written == 3) {
tegra_tcu_write_one(tcu, value, 3);
value = written = 0;
}
}
if (written)
tegra_tcu_write_one(tcu, value, written);
}
static void tegra_tcu_uart_start_tx(struct uart_port *port)
{
struct tegra_tcu *tcu = port->private_data;
struct circ_buf *xmit = &port->state->xmit;
unsigned long count;
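/*
* tegra_tcu_write_one() flushes each mailbox message synchronously,
* so the circular buffer can be drained here in contiguous chunks
* without waiting for a TX-done interrupt.
*/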
for (;;) {
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
if (!count)
break;
tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
uart_xmit_advance(port, count);
}
uart_write_wakeup(port);
}
static void tegra_tcu_uart_stop_rx(struct uart_port *port)
{
}
static void tegra_tcu_uart_break_ctl(struct uart_port *port, int ctl)
{
}
static int tegra_tcu_uart_startup(struct uart_port *port)
{
return 0;
}
static void tegra_tcu_uart_shutdown(struct uart_port *port)
{
}
static void tegra_tcu_uart_set_termios(struct uart_port *port,
struct ktermios *new,
const struct ktermios *old)
{
}
static const struct uart_ops tegra_tcu_uart_ops = {
.tx_empty = tegra_tcu_uart_tx_empty,
.set_mctrl = tegra_tcu_uart_set_mctrl,
.get_mctrl = tegra_tcu_uart_get_mctrl,
.stop_tx = tegra_tcu_uart_stop_tx,
.start_tx = tegra_tcu_uart_start_tx,
.stop_rx = tegra_tcu_uart_stop_rx,
.break_ctl = tegra_tcu_uart_break_ctl,
.startup = tegra_tcu_uart_startup,
.shutdown = tegra_tcu_uart_shutdown,
.set_termios = tegra_tcu_uart_set_termios,
};
#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
static void tegra_tcu_console_write(struct console *cons, const char *s,
unsigned int count)
{
struct tegra_tcu *tcu = container_of(cons, struct tegra_tcu, console);
tegra_tcu_write(tcu, s, count);
}
static int tegra_tcu_console_setup(struct console *cons, char *options)
{
return 0;
}
#endif
static void tegra_tcu_receive(struct mbox_client *cl, void *msg)
{
struct tegra_tcu *tcu = container_of(cl, struct tegra_tcu, rx_client);
struct tty_port *port = &tcu->port.state->port;
u32 value = (u32)(unsigned long)msg;
unsigned int num_bytes, i;
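/* RX messages use the same packed word format as the TX path */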
num_bytes = TCU_MBOX_NUM_BYTES_V(value);
for (i = 0; i < num_bytes; i++)
tty_insert_flip_char(port, TCU_MBOX_BYTE_V(value, i),
TTY_NORMAL);
tty_flip_buffer_push(port);
}
static int tegra_tcu_probe(struct platform_device *pdev)
{
struct uart_port *port;
struct tegra_tcu *tcu;
int err;
tcu = devm_kzalloc(&pdev->dev, sizeof(*tcu), GFP_KERNEL);
if (!tcu)
return -ENOMEM;
tcu->tx_client.dev = &pdev->dev;
tcu->rx_client.dev = &pdev->dev;
tcu->rx_client.rx_callback = tegra_tcu_receive;
tcu->tx = mbox_request_channel_byname(&tcu->tx_client, "tx");
if (IS_ERR(tcu->tx)) {
err = PTR_ERR(tcu->tx);
dev_err(&pdev->dev, "failed to get tx mailbox: %d\n", err);
return err;
}
#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
/* setup the console */
strcpy(tcu->console.name, "ttyTCU");
tcu->console.device = uart_console_device;
tcu->console.flags = CON_PRINTBUFFER | CON_ANYTIME;
tcu->console.index = -1;
tcu->console.write = tegra_tcu_console_write;
tcu->console.setup = tegra_tcu_console_setup;
tcu->console.data = &tcu->driver;
#endif
/* setup the driver */
tcu->driver.owner = THIS_MODULE;
tcu->driver.driver_name = "tegra-tcu";
tcu->driver.dev_name = "ttyTCU";
#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
tcu->driver.cons = &tcu->console;
#endif
tcu->driver.nr = 1;
err = uart_register_driver(&tcu->driver);
if (err) {
dev_err(&pdev->dev, "failed to register UART driver: %d\n",
err);
goto free_tx;
}
/* setup the port */
port = &tcu->port;
spin_lock_init(&port->lock);
port->dev = &pdev->dev;
port->type = PORT_TEGRA_TCU;
port->ops = &tegra_tcu_uart_ops;
port->fifosize = 1;
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
port->private_data = tcu;
err = uart_add_one_port(&tcu->driver, port);
if (err) {
dev_err(&pdev->dev, "failed to add UART port: %d\n", err);
goto unregister_uart;
}
/*
* Request RX channel after creating port to ensure tcu->port
* is ready for any immediate incoming bytes.
*/
tcu->rx = mbox_request_channel_byname(&tcu->rx_client, "rx");
if (IS_ERR(tcu->rx)) {
err = PTR_ERR(tcu->rx);
dev_err(&pdev->dev, "failed to get rx mailbox: %d\n", err);
goto remove_uart_port;
}
platform_set_drvdata(pdev, tcu);
#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
register_console(&tcu->console);
#endif
return 0;
remove_uart_port:
uart_remove_one_port(&tcu->driver, &tcu->port);
unregister_uart:
uart_unregister_driver(&tcu->driver);
free_tx:
mbox_free_channel(tcu->tx);
return err;
}
static int tegra_tcu_remove(struct platform_device *pdev)
{
struct tegra_tcu *tcu = platform_get_drvdata(pdev);
#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
unregister_console(&tcu->console);
#endif
mbox_free_channel(tcu->rx);
uart_remove_one_port(&tcu->driver, &tcu->port);
uart_unregister_driver(&tcu->driver);
mbox_free_channel(tcu->tx);
return 0;
}
static const struct of_device_id tegra_tcu_match[] = {
{ .compatible = "nvidia,tegra194-tcu" },
{ }
};
MODULE_DEVICE_TABLE(of, tegra_tcu_match);
static struct platform_driver tegra_tcu_driver = {
.driver = {
.name = "tegra-tcu",
.of_match_table = tegra_tcu_match,
},
.probe = tegra_tcu_probe,
.remove = tegra_tcu_remove,
};
module_platform_driver(tegra_tcu_driver);
MODULE_AUTHOR("Mikko Perttunen <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NVIDIA Tegra Combined UART driver");
| linux-master | drivers/tty/serial/tegra-tcu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RISC-V SBI based earlycon
*
* Copyright (C) 2018 Anup Patel <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/serial_core.h>
#include <asm/sbi.h>
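/*
* This relies on the legacy SBI v0.1 console extension
* (sbi_console_putchar()), which emits one character per SBI call.
*/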
static void sbi_putc(struct uart_port *port, unsigned char c)
{
sbi_console_putchar(c);
}
static void sbi_console_write(struct console *con,
const char *s, unsigned n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, sbi_putc);
}
static int __init early_sbi_setup(struct earlycon_device *device,
const char *opt)
{
device->con->write = sbi_console_write;
return 0;
}
EARLYCON_DECLARE(sbi, early_sbi_setup);
| linux-master | drivers/tty/serial/earlycon-riscv-sbi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver core for Samsung SoC onboard UARTs.
*
* Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
*/
/* Note on 2410 error handling
*
* The s3c2410 manual has a love/hate affair with the contents of the
* UERSTAT register in the UART blocks, and keeps marking some of the
* error bits as reserved. Having checked with the s3c2410x01,
* it copes with BREAKs properly, so I am happy to ignore the RESERVED
* feature from the later versions of the manual.
*
* If it becomes apparent that later versions of the 2410 remove these
* bits, then action will have to be taken to differentiate the versions
* and change the policy on BREAK
*
* BJD, 04-Nov-2004
*/
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/serial_s3c.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/of.h>
#include <asm/irq.h>
/* UART name and device definitions */
#define S3C24XX_SERIAL_NAME "ttySAC"
#define S3C24XX_SERIAL_MAJOR 204
#define S3C24XX_SERIAL_MINOR 64
#ifdef CONFIG_ARM64
#define UART_NR 12
#else
#define UART_NR CONFIG_SERIAL_SAMSUNG_UARTS
#endif
#define S3C24XX_TX_PIO 1
#define S3C24XX_TX_DMA 2
#define S3C24XX_RX_PIO 1
#define S3C24XX_RX_DMA 2
/* flag to ignore all characters coming in */
#define RXSTAT_DUMMY_READ (0x10000000)
enum s3c24xx_port_type {
TYPE_S3C24XX,
TYPE_S3C6400,
TYPE_APPLE_S5L,
};
struct s3c24xx_uart_info {
const char *name;
enum s3c24xx_port_type type;
unsigned int port_type;
unsigned int fifosize;
unsigned long rx_fifomask;
unsigned long rx_fifoshift;
unsigned long rx_fifofull;
unsigned long tx_fifomask;
unsigned long tx_fifoshift;
unsigned long tx_fifofull;
unsigned int def_clk_sel;
unsigned long num_clks;
unsigned long clksel_mask;
unsigned long clksel_shift;
unsigned long ucon_mask;
/* uart port features */
unsigned int has_divslot:1;
};
struct s3c24xx_serial_drv_data {
const struct s3c24xx_uart_info info;
const struct s3c2410_uartcfg def_cfg;
const unsigned int fifosize[UART_NR];
};
struct s3c24xx_uart_dma {
unsigned int rx_chan_id;
unsigned int tx_chan_id;
struct dma_slave_config rx_conf;
struct dma_slave_config tx_conf;
struct dma_chan *rx_chan;
struct dma_chan *tx_chan;
dma_addr_t rx_addr;
dma_addr_t tx_addr;
dma_cookie_t rx_cookie;
dma_cookie_t tx_cookie;
char *rx_buf;
dma_addr_t tx_transfer_addr;
size_t rx_size;
size_t tx_size;
struct dma_async_tx_descriptor *tx_desc;
struct dma_async_tx_descriptor *rx_desc;
int tx_bytes_requested;
int rx_bytes_requested;
};
struct s3c24xx_uart_port {
unsigned char rx_claimed;
unsigned char tx_claimed;
unsigned char rx_enabled;
unsigned char tx_enabled;
unsigned int pm_level;
unsigned long baudclk_rate;
unsigned int min_dma_size;
unsigned int rx_irq;
unsigned int tx_irq;
unsigned int tx_in_progress;
unsigned int tx_mode;
unsigned int rx_mode;
const struct s3c24xx_uart_info *info;
struct clk *clk;
struct clk *baudclk;
struct uart_port port;
const struct s3c24xx_serial_drv_data *drv_data;
/* reference to platform data */
const struct s3c2410_uartcfg *cfg;
struct s3c24xx_uart_dma *dma;
};
static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport);
/* conversion functions */
#define s3c24xx_dev_to_port(__dev) dev_get_drvdata(__dev)
/* register access controls */
#define portaddr(port, reg) ((port)->membase + (reg))
#define portaddrl(port, reg) \
((unsigned long *)(unsigned long)((port)->membase + (reg)))
static u32 rd_reg(const struct uart_port *port, u32 reg)
{
switch (port->iotype) {
case UPIO_MEM:
return readb_relaxed(portaddr(port, reg));
case UPIO_MEM32:
return readl_relaxed(portaddr(port, reg));
default:
return 0;
}
}
#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
static void wr_reg(const struct uart_port *port, u32 reg, u32 val)
{
switch (port->iotype) {
case UPIO_MEM:
writeb_relaxed(val, portaddr(port, reg));
break;
case UPIO_MEM32:
writel_relaxed(val, portaddr(port, reg));
break;
}
}
#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
/* IRQ-safe read-modify-write helpers for setting/clearing register bits. */
static inline void s3c24xx_set_bit(const struct uart_port *port, int idx,
unsigned int reg)
{
unsigned long flags;
u32 val;
local_irq_save(flags);
val = rd_regl(port, reg);
val |= (1 << idx);
wr_regl(port, reg, val);
local_irq_restore(flags);
}
static inline void s3c24xx_clear_bit(const struct uart_port *port, int idx,
unsigned int reg)
{
unsigned long flags;
u32 val;
local_irq_save(flags);
val = rd_regl(port, reg);
val &= ~(1 << idx);
wr_regl(port, reg, val);
local_irq_restore(flags);
}
static inline struct s3c24xx_uart_port *to_ourport(struct uart_port *port)
{
return container_of(port, struct s3c24xx_uart_port, port);
}
/* translate a port to the device name */
static inline const char *s3c24xx_serial_portname(const struct uart_port *port)
{
return to_platform_device(port->dev)->name;
}
static int s3c24xx_serial_txempty_nofifo(const struct uart_port *port)
{
return rd_regl(port, S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXE;
}
static void s3c24xx_serial_rx_enable(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
unsigned int ucon, ufcon;
int count = 10000;
spin_lock_irqsave(&port->lock, flags);
while (--count && !s3c24xx_serial_txempty_nofifo(port))
udelay(100);
ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX;
wr_regl(port, S3C2410_UFCON, ufcon);
ucon = rd_regl(port, S3C2410_UCON);
ucon |= S3C2410_UCON_RXIRQMODE;
wr_regl(port, S3C2410_UCON, ucon);
ourport->rx_enabled = 1;
spin_unlock_irqrestore(&port->lock, flags);
}
static void s3c24xx_serial_rx_disable(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
unsigned int ucon;
spin_lock_irqsave(&port->lock, flags);
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~S3C2410_UCON_RXIRQMODE;
wr_regl(port, S3C2410_UCON, ucon);
ourport->rx_enabled = 0;
spin_unlock_irqrestore(&port->lock, flags);
}
static void s3c24xx_serial_stop_tx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
struct s3c24xx_uart_dma *dma = ourport->dma;
struct dma_tx_state state;
int count;
if (!ourport->tx_enabled)
return;
switch (ourport->info->type) {
case TYPE_S3C6400:
s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM);
break;
case TYPE_APPLE_S5L:
s3c24xx_clear_bit(port, APPLE_S5L_UCON_TXTHRESH_ENA, S3C2410_UCON);
break;
default:
disable_irq_nosync(ourport->tx_irq);
break;
}
if (dma && dma->tx_chan && ourport->tx_in_progress == S3C24XX_TX_DMA) {
dmaengine_pause(dma->tx_chan);
dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
dmaengine_terminate_all(dma->tx_chan);
dma_sync_single_for_cpu(dma->tx_chan->device->dev,
dma->tx_transfer_addr, dma->tx_size,
DMA_TO_DEVICE);
async_tx_ack(dma->tx_desc);
count = dma->tx_bytes_requested - state.residue;
uart_xmit_advance(port, count);
}
ourport->tx_enabled = 0;
ourport->tx_in_progress = 0;
if (port->flags & UPF_CONS_FLOW)
s3c24xx_serial_rx_enable(port);
ourport->tx_mode = 0;
}
static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport);
static void s3c24xx_serial_tx_dma_complete(void *args)
{
struct s3c24xx_uart_port *ourport = args;
struct uart_port *port = &ourport->port;
struct circ_buf *xmit = &port->state->xmit;
struct s3c24xx_uart_dma *dma = ourport->dma;
struct dma_tx_state state;
unsigned long flags;
int count;
dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
count = dma->tx_bytes_requested - state.residue;
async_tx_ack(dma->tx_desc);
dma_sync_single_for_cpu(dma->tx_chan->device->dev,
dma->tx_transfer_addr, dma->tx_size,
DMA_TO_DEVICE);
spin_lock_irqsave(&port->lock, flags);
uart_xmit_advance(port, count);
ourport->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
s3c24xx_serial_start_next_tx(ourport);
spin_unlock_irqrestore(&port->lock, flags);
}
static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
{
const struct uart_port *port = &ourport->port;
u32 ucon;
/* Mask Tx interrupt */
switch (ourport->info->type) {
case TYPE_S3C6400:
s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM);
break;
case TYPE_APPLE_S5L:
WARN_ON(1); // No DMA
break;
default:
disable_irq_nosync(ourport->tx_irq);
break;
}
/* Enable tx dma mode */
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK);
ucon |= S3C64XX_UCON_TXBURST_1;
ucon |= S3C64XX_UCON_TXMODE_DMA;
wr_regl(port, S3C2410_UCON, ucon);
ourport->tx_mode = S3C24XX_TX_DMA;
}
static void enable_tx_pio(struct s3c24xx_uart_port *ourport)
{
const struct uart_port *port = &ourport->port;
u32 ucon, ufcon;
/* Set ufcon txtrig */
ourport->tx_in_progress = S3C24XX_TX_PIO;
ufcon = rd_regl(port, S3C2410_UFCON);
wr_regl(port, S3C2410_UFCON, ufcon);
/* Enable tx pio mode */
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(S3C64XX_UCON_TXMODE_MASK);
ucon |= S3C64XX_UCON_TXMODE_CPU;
wr_regl(port, S3C2410_UCON, ucon);
/* Unmask Tx interrupt */
switch (ourport->info->type) {
case TYPE_S3C6400:
s3c24xx_clear_bit(port, S3C64XX_UINTM_TXD,
S3C64XX_UINTM);
break;
case TYPE_APPLE_S5L:
ucon |= APPLE_S5L_UCON_TXTHRESH_ENA_MSK;
wr_regl(port, S3C2410_UCON, ucon);
break;
default:
enable_irq(ourport->tx_irq);
break;
}
ourport->tx_mode = S3C24XX_TX_PIO;
/*
* The Apple version only has edge triggered TX IRQs, so we need
* to kick off the process by sending some characters here.
*/
if (ourport->info->type == TYPE_APPLE_S5L)
s3c24xx_serial_tx_chars(ourport);
}
static void s3c24xx_serial_start_tx_pio(struct s3c24xx_uart_port *ourport)
{
if (ourport->tx_mode != S3C24XX_TX_PIO)
enable_tx_pio(ourport);
}
static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,
unsigned int count)
{
struct uart_port *port = &ourport->port;
struct circ_buf *xmit = &port->state->xmit;
struct s3c24xx_uart_dma *dma = ourport->dma;
if (ourport->tx_mode != S3C24XX_TX_DMA)
enable_tx_dma(ourport);
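/* round the transfer down to a whole number of cache lines */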
dma->tx_size = count & ~(dma_get_cache_alignment() - 1);
dma->tx_transfer_addr = dma->tx_addr + xmit->tail;
dma_sync_single_for_device(dma->tx_chan->device->dev,
dma->tx_transfer_addr, dma->tx_size,
DMA_TO_DEVICE);
dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan,
dma->tx_transfer_addr, dma->tx_size,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!dma->tx_desc) {
dev_err(ourport->port.dev, "Unable to get desc for Tx\n");
return -EIO;
}
dma->tx_desc->callback = s3c24xx_serial_tx_dma_complete;
dma->tx_desc->callback_param = ourport;
dma->tx_bytes_requested = dma->tx_size;
ourport->tx_in_progress = S3C24XX_TX_DMA;
dma->tx_cookie = dmaengine_submit(dma->tx_desc);
dma_async_issue_pending(dma->tx_chan);
return 0;
}
static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
struct circ_buf *xmit = &port->state->xmit;
unsigned long count;
/* Get data size up to the end of buffer */
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
if (!count) {
s3c24xx_serial_stop_tx(port);
return;
}
if (!ourport->dma || !ourport->dma->tx_chan ||
count < ourport->min_dma_size ||
xmit->tail & (dma_get_cache_alignment() - 1))
s3c24xx_serial_start_tx_pio(ourport);
else
s3c24xx_serial_start_tx_dma(ourport, count);
}
static void s3c24xx_serial_start_tx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
struct circ_buf *xmit = &port->state->xmit;
if (!ourport->tx_enabled) {
if (port->flags & UPF_CONS_FLOW)
s3c24xx_serial_rx_disable(port);
ourport->tx_enabled = 1;
if (!ourport->dma || !ourport->dma->tx_chan)
s3c24xx_serial_start_tx_pio(ourport);
}
if (ourport->dma && ourport->dma->tx_chan) {
if (!uart_circ_empty(xmit) && !ourport->tx_in_progress)
s3c24xx_serial_start_next_tx(ourport);
}
}
static void s3c24xx_uart_copy_rx_to_tty(struct s3c24xx_uart_port *ourport,
struct tty_port *tty, int count)
{
struct s3c24xx_uart_dma *dma = ourport->dma;
int copied;
if (!count)
return;
dma_sync_single_for_cpu(dma->rx_chan->device->dev, dma->rx_addr,
dma->rx_size, DMA_FROM_DEVICE);
ourport->port.icount.rx += count;
if (!tty) {
dev_err(ourport->port.dev, "No tty port\n");
return;
}
copied = tty_insert_flip_string(tty,
((unsigned char *)(ourport->dma->rx_buf)), count);
if (copied != count) {
WARN_ON(1);
dev_err(ourport->port.dev, "RxData copy to tty layer failed\n");
}
}
static void s3c24xx_serial_stop_rx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
struct s3c24xx_uart_dma *dma = ourport->dma;
struct tty_port *t = &port->state->port;
struct dma_tx_state state;
enum dma_status dma_status;
unsigned int received;
if (ourport->rx_enabled) {
dev_dbg(port->dev, "stopping rx\n");
switch (ourport->info->type) {
case TYPE_S3C6400:
s3c24xx_set_bit(port, S3C64XX_UINTM_RXD,
S3C64XX_UINTM);
break;
case TYPE_APPLE_S5L:
s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTO_ENA, S3C2410_UCON);
break;
default:
disable_irq_nosync(ourport->rx_irq);
break;
}
ourport->rx_enabled = 0;
}
if (dma && dma->rx_chan) {
dmaengine_pause(dma->rx_chan);
dma_status = dmaengine_tx_status(dma->rx_chan,
dma->rx_cookie, &state);
if (dma_status == DMA_IN_PROGRESS ||
dma_status == DMA_PAUSED) {
received = dma->rx_bytes_requested - state.residue;
dmaengine_terminate_all(dma->rx_chan);
s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
}
}
}
static inline const struct s3c24xx_uart_info
*s3c24xx_port_to_info(struct uart_port *port)
{
return to_ourport(port)->info;
}
static inline const struct s3c2410_uartcfg
*s3c24xx_port_to_cfg(const struct uart_port *port)
{
const struct s3c24xx_uart_port *ourport;
if (port->dev == NULL)
return NULL;
ourport = container_of(port, struct s3c24xx_uart_port, port);
return ourport->cfg;
}
static int s3c24xx_serial_rx_fifocnt(const struct s3c24xx_uart_port *ourport,
unsigned long ufstat)
{
const struct s3c24xx_uart_info *info = ourport->info;
if (ufstat & info->rx_fifofull)
return ourport->port.fifosize;
return (ufstat & info->rx_fifomask) >> info->rx_fifoshift;
}
static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport);
static void s3c24xx_serial_rx_dma_complete(void *args)
{
struct s3c24xx_uart_port *ourport = args;
struct uart_port *port = &ourport->port;
struct s3c24xx_uart_dma *dma = ourport->dma;
struct tty_port *t = &port->state->port;
struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
struct dma_tx_state state;
unsigned long flags;
int received;
dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
received = dma->rx_bytes_requested - state.residue;
async_tx_ack(dma->rx_desc);
spin_lock_irqsave(&port->lock, flags);
if (received)
s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
if (tty) {
tty_flip_buffer_push(t);
tty_kref_put(tty);
}
s3c64xx_start_rx_dma(ourport);
spin_unlock_irqrestore(&port->lock, flags);
}
static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
{
struct s3c24xx_uart_dma *dma = ourport->dma;
dma_sync_single_for_device(dma->rx_chan->device->dev, dma->rx_addr,
dma->rx_size, DMA_FROM_DEVICE);
dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan,
dma->rx_addr, dma->rx_size, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!dma->rx_desc) {
dev_err(ourport->port.dev, "Unable to get desc for Rx\n");
return;
}
dma->rx_desc->callback = s3c24xx_serial_rx_dma_complete;
dma->rx_desc->callback_param = ourport;
dma->rx_bytes_requested = dma->rx_size;
dma->rx_cookie = dmaengine_submit(dma->rx_desc);
dma_async_issue_pending(dma->rx_chan);
}
/* UERSTAT parity-error bit, not provided by <linux/serial_s3c.h> */
#define S3C2410_UERSTAT_PARITY (0x1000)
static void enable_rx_dma(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
unsigned int ucon;
/* set Rx mode to DMA mode */
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(S3C64XX_UCON_RXBURST_MASK |
S3C64XX_UCON_TIMEOUT_MASK |
S3C64XX_UCON_EMPTYINT_EN |
S3C64XX_UCON_DMASUS_EN |
S3C64XX_UCON_TIMEOUT_EN |
S3C64XX_UCON_RXMODE_MASK);
ucon |= S3C64XX_UCON_RXBURST_1 |
0xf << S3C64XX_UCON_TIMEOUT_SHIFT |
S3C64XX_UCON_EMPTYINT_EN |
S3C64XX_UCON_TIMEOUT_EN |
S3C64XX_UCON_RXMODE_DMA;
wr_regl(port, S3C2410_UCON, ucon);
ourport->rx_mode = S3C24XX_RX_DMA;
}
static void enable_rx_pio(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
unsigned int ucon;
/* set Rx mode to CPU (PIO) mode */
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~S3C64XX_UCON_RXMODE_MASK;
ucon |= S3C64XX_UCON_RXMODE_CPU;
/* Apple types use these bits for IRQ masks */
if (ourport->info->type != TYPE_APPLE_S5L) {
ucon &= ~(S3C64XX_UCON_TIMEOUT_MASK |
S3C64XX_UCON_EMPTYINT_EN |
S3C64XX_UCON_DMASUS_EN |
S3C64XX_UCON_TIMEOUT_EN);
ucon |= 0xf << S3C64XX_UCON_TIMEOUT_SHIFT |
S3C64XX_UCON_TIMEOUT_EN;
}
wr_regl(port, S3C2410_UCON, ucon);
ourport->rx_mode = S3C24XX_RX_PIO;
}
static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport);
static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
{
unsigned int utrstat, received;
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
struct s3c24xx_uart_dma *dma = ourport->dma;
struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
struct tty_port *t = &port->state->port;
struct dma_tx_state state;
utrstat = rd_regl(port, S3C2410_UTRSTAT);
rd_regl(port, S3C2410_UFSTAT);
spin_lock(&port->lock);
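/*
* No receive timeout pending means data is still streaming in: just
* (re)arm the DMA transfer. On a timeout, stop the DMA, push what it
* received so far and drain the remaining FIFO contents by PIO.
*/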
if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) {
s3c64xx_start_rx_dma(ourport);
if (ourport->rx_mode == S3C24XX_RX_PIO)
enable_rx_dma(ourport);
goto finish;
}
if (ourport->rx_mode == S3C24XX_RX_DMA) {
dmaengine_pause(dma->rx_chan);
dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
dmaengine_terminate_all(dma->rx_chan);
received = dma->rx_bytes_requested - state.residue;
s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
enable_rx_pio(ourport);
}
s3c24xx_serial_rx_drain_fifo(ourport);
if (tty)
tty_flip_buffer_push(t);
wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT);
finish:
spin_unlock(&port->lock);
/* drop the reference taken by tty_port_tty_get() on both exit paths;
* tty_kref_put() accepts NULL */
tty_kref_put(tty);
return IRQ_HANDLED;
}
static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
unsigned int ufcon, ufstat, uerstat;
unsigned int fifocnt = 0;
int max_count = port->fifosize;
u8 ch, flag;
while (max_count-- > 0) {
/*
* Receive all characters known to be in FIFO
* before reading FIFO level again
*/
if (fifocnt == 0) {
ufstat = rd_regl(port, S3C2410_UFSTAT);
fifocnt = s3c24xx_serial_rx_fifocnt(ourport, ufstat);
if (fifocnt == 0)
break;
}
fifocnt--;
uerstat = rd_regl(port, S3C2410_UERSTAT);
ch = rd_reg(port, S3C2410_URXH);
if (port->flags & UPF_CONS_FLOW) {
int txe = s3c24xx_serial_txempty_nofifo(port);
if (ourport->rx_enabled) {
if (!txe) {
ourport->rx_enabled = 0;
continue;
}
} else {
if (txe) {
ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX;
wr_regl(port, S3C2410_UFCON, ufcon);
ourport->rx_enabled = 1;
return;
}
continue;
}
}
/* insert the character into the buffer */
flag = TTY_NORMAL;
port->icount.rx++;
if (unlikely(uerstat & S3C2410_UERSTAT_ANY)) {
dev_dbg(port->dev,
"rxerr: port ch=0x%02x, rxs=0x%08x\n",
ch, uerstat);
/* check for break */
if (uerstat & S3C2410_UERSTAT_BREAK) {
dev_dbg(port->dev, "break!\n");
port->icount.brk++;
if (uart_handle_break(port))
continue; /* Ignore character */
}
if (uerstat & S3C2410_UERSTAT_FRAME)
port->icount.frame++;
if (uerstat & S3C2410_UERSTAT_OVERRUN)
port->icount.overrun++;
uerstat &= port->read_status_mask;
if (uerstat & S3C2410_UERSTAT_BREAK)
flag = TTY_BREAK;
else if (uerstat & S3C2410_UERSTAT_PARITY)
flag = TTY_PARITY;
else if (uerstat & (S3C2410_UERSTAT_FRAME |
S3C2410_UERSTAT_OVERRUN))
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, ch))
continue; /* Ignore character */
uart_insert_char(port, uerstat, S3C2410_UERSTAT_OVERRUN,
ch, flag);
}
tty_flip_buffer_push(&port->state->port);
}
static irqreturn_t s3c24xx_serial_rx_chars_pio(void *dev_id)
{
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
spin_lock(&port->lock);
s3c24xx_serial_rx_drain_fifo(ourport);
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
static irqreturn_t s3c24xx_serial_rx_irq(int irq, void *dev_id)
{
struct s3c24xx_uart_port *ourport = dev_id;
if (ourport->dma && ourport->dma->rx_chan)
return s3c24xx_serial_rx_chars_dma(dev_id);
return s3c24xx_serial_rx_chars_pio(dev_id);
}
static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
struct circ_buf *xmit = &port->state->xmit;
int count, dma_count = 0;
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
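/*
* When DMA is usable, send just enough bytes by PIO to advance the
* buffer tail to a cache-line boundary, then hand the large aligned
* remainder to the DMA engine.
*/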
if (ourport->dma && ourport->dma->tx_chan &&
count >= ourport->min_dma_size) {
int align = dma_get_cache_alignment() -
(xmit->tail & (dma_get_cache_alignment() - 1));
if (count - align >= ourport->min_dma_size) {
dma_count = count - align;
count = align;
}
}
if (port->x_char) {
wr_reg(port, S3C2410_UTXH, port->x_char);
port->icount.tx++;
port->x_char = 0;
return;
}
/* if there isn't anything more to transmit, or the uart is now
* stopped, disable the uart and exit
*/
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
s3c24xx_serial_stop_tx(port);
return;
}
/* try and drain the buffer... */
if (count > port->fifosize) {
count = port->fifosize;
dma_count = 0;
}
while (!uart_circ_empty(xmit) && count > 0) {
if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
break;
wr_reg(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
uart_xmit_advance(port, 1);
count--;
}
if (!count && dma_count) {
s3c24xx_serial_start_tx_dma(ourport, dma_count);
return;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (uart_circ_empty(xmit))
s3c24xx_serial_stop_tx(port);
}
static irqreturn_t s3c24xx_serial_tx_irq(int irq, void *id)
{
struct s3c24xx_uart_port *ourport = id;
struct uart_port *port = &ourport->port;
spin_lock(&port->lock);
s3c24xx_serial_tx_chars(ourport);
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
/* interrupt handler for s3c64xx and later SoCs. */
static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id)
{
const struct s3c24xx_uart_port *ourport = id;
const struct uart_port *port = &ourport->port;
unsigned int pend = rd_regl(port, S3C64XX_UINTP);
irqreturn_t ret = IRQ_HANDLED;
if (pend & S3C64XX_UINTM_RXD_MSK) {
ret = s3c24xx_serial_rx_irq(irq, id);
wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_RXD_MSK);
}
if (pend & S3C64XX_UINTM_TXD_MSK) {
ret = s3c24xx_serial_tx_irq(irq, id);
wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_TXD_MSK);
}
return ret;
}
/* interrupt handler for Apple SoCs. */
static irqreturn_t apple_serial_handle_irq(int irq, void *id)
{
const struct s3c24xx_uart_port *ourport = id;
const struct uart_port *port = &ourport->port;
unsigned int pend = rd_regl(port, S3C2410_UTRSTAT);
irqreturn_t ret = IRQ_NONE;
if (pend & (APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO)) {
wr_regl(port, S3C2410_UTRSTAT,
APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO);
ret = s3c24xx_serial_rx_irq(irq, id);
}
if (pend & APPLE_S5L_UTRSTAT_TXTHRESH) {
wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_TXTHRESH);
ret = s3c24xx_serial_tx_irq(irq, id);
}
return ret;
}
static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
unsigned long ufstat = rd_regl(port, S3C2410_UFSTAT);
unsigned long ufcon = rd_regl(port, S3C2410_UFCON);
if (ufcon & S3C2410_UFCON_FIFOMODE) {
if ((ufstat & info->tx_fifomask) != 0 ||
(ufstat & info->tx_fifofull))
return 0;
return 1;
}
return s3c24xx_serial_txempty_nofifo(port);
}
/* no modem control lines */
static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
{
unsigned int umstat = rd_reg(port, S3C2410_UMSTAT);
if (umstat & S3C2410_UMSTAT_CTS)
return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
else
return TIOCM_CAR | TIOCM_DSR;
}
static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int umcon = rd_regl(port, S3C2410_UMCON);
unsigned int ucon = rd_regl(port, S3C2410_UCON);
if (mctrl & TIOCM_RTS)
umcon |= S3C2410_UMCOM_RTS_LOW;
else
umcon &= ~S3C2410_UMCOM_RTS_LOW;
wr_regl(port, S3C2410_UMCON, umcon);
if (mctrl & TIOCM_LOOP)
ucon |= S3C2410_UCON_LOOPBACK;
else
ucon &= ~S3C2410_UCON_LOOPBACK;
wr_regl(port, S3C2410_UCON, ucon);
}
static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
{
unsigned long flags;
unsigned int ucon;
spin_lock_irqsave(&port->lock, flags);
ucon = rd_regl(port, S3C2410_UCON);
if (break_state)
ucon |= S3C2410_UCON_SBREAK;
else
ucon &= ~S3C2410_UCON_SBREAK;
wr_regl(port, S3C2410_UCON, ucon);
spin_unlock_irqrestore(&port->lock, flags);
}
static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
{
struct s3c24xx_uart_dma *dma = p->dma;
struct dma_slave_caps dma_caps;
const char *reason = NULL;
int ret;
/* Default slave configuration parameters */
dma->rx_conf.direction = DMA_DEV_TO_MEM;
dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
dma->rx_conf.src_maxburst = 1;
dma->tx_conf.direction = DMA_MEM_TO_DEV;
dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
dma->tx_conf.dst_maxburst = 1;
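/*
 * Both directions target the one-byte-wide UART data registers
 * (URXH/UTXH), hence the 1-byte bus width and maxburst of 1 above.
 */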
dma->rx_chan = dma_request_chan(p->port.dev, "rx");
if (IS_ERR(dma->rx_chan)) {
reason = "DMA RX channel request failed";
ret = PTR_ERR(dma->rx_chan);
goto err_warn;
}
ret = dma_get_slave_caps(dma->rx_chan, &dma_caps);
if (ret < 0 ||
dma_caps.residue_granularity < DMA_RESIDUE_GRANULARITY_BURST) {
reason = "insufficient DMA RX engine capabilities";
ret = -EOPNOTSUPP;
goto err_release_rx;
}
dmaengine_slave_config(dma->rx_chan, &dma->rx_conf);
dma->tx_chan = dma_request_chan(p->port.dev, "tx");
if (IS_ERR(dma->tx_chan)) {
reason = "DMA TX channel request failed";
ret = PTR_ERR(dma->tx_chan);
goto err_release_rx;
}
ret = dma_get_slave_caps(dma->tx_chan, &dma_caps);
if (ret < 0 ||
dma_caps.residue_granularity < DMA_RESIDUE_GRANULARITY_BURST) {
reason = "insufficient DMA TX engine capabilities";
ret = -EOPNOTSUPP;
goto err_release_tx;
}
dmaengine_slave_config(dma->tx_chan, &dma->tx_conf);
/* RX buffer */
dma->rx_size = PAGE_SIZE;
dma->rx_buf = kmalloc(dma->rx_size, GFP_KERNEL);
if (!dma->rx_buf) {
ret = -ENOMEM;
goto err_release_tx;
}
dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf,
dma->rx_size, DMA_FROM_DEVICE);
if (dma_mapping_error(dma->rx_chan->device->dev, dma->rx_addr)) {
reason = "DMA mapping error for RX buffer";
ret = -EIO;
goto err_free_rx;
}
/* TX buffer */
dma->tx_addr = dma_map_single(dma->tx_chan->device->dev,
p->port.state->xmit.buf, UART_XMIT_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dma->tx_chan->device->dev, dma->tx_addr)) {
reason = "DMA mapping error for TX buffer";
ret = -EIO;
goto err_unmap_rx;
}
return 0;
err_unmap_rx:
dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
dma->rx_size, DMA_FROM_DEVICE);
err_free_rx:
kfree(dma->rx_buf);
err_release_tx:
dma_release_channel(dma->tx_chan);
err_release_rx:
dma_release_channel(dma->rx_chan);
err_warn:
if (reason)
dev_warn(p->port.dev, "%s, DMA will not be used\n", reason);
return ret;
}
static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
{
struct s3c24xx_uart_dma *dma = p->dma;
if (dma->rx_chan) {
dmaengine_terminate_all(dma->rx_chan);
dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
dma->rx_size, DMA_FROM_DEVICE);
kfree(dma->rx_buf);
dma_release_channel(dma->rx_chan);
dma->rx_chan = NULL;
}
if (dma->tx_chan) {
dmaengine_terminate_all(dma->tx_chan);
dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
dma_release_channel(dma->tx_chan);
dma->tx_chan = NULL;
}
}
static void s3c24xx_serial_shutdown(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (ourport->tx_claimed) {
free_irq(ourport->tx_irq, ourport);
ourport->tx_enabled = 0;
ourport->tx_claimed = 0;
ourport->tx_mode = 0;
}
if (ourport->rx_claimed) {
free_irq(ourport->rx_irq, ourport);
ourport->rx_claimed = 0;
ourport->rx_enabled = 0;
}
if (ourport->dma)
s3c24xx_serial_release_dma(ourport);
ourport->tx_in_progress = 0;
}
static void s3c64xx_serial_shutdown(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
ourport->tx_enabled = 0;
ourport->tx_mode = 0;
ourport->rx_enabled = 0;
free_irq(port->irq, ourport);
wr_regl(port, S3C64XX_UINTP, 0xf);
wr_regl(port, S3C64XX_UINTM, 0xf);
if (ourport->dma)
s3c24xx_serial_release_dma(ourport);
ourport->tx_in_progress = 0;
}
static void apple_s5l_serial_shutdown(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned int ucon;
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK |
APPLE_S5L_UCON_RXTHRESH_ENA_MSK |
APPLE_S5L_UCON_RXTO_ENA_MSK);
wr_regl(port, S3C2410_UCON, ucon);
wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_ALL_FLAGS);
free_irq(port->irq, ourport);
ourport->tx_enabled = 0;
ourport->tx_mode = 0;
ourport->rx_enabled = 0;
if (ourport->dma)
s3c24xx_serial_release_dma(ourport);
ourport->tx_in_progress = 0;
}
static int s3c24xx_serial_startup(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
int ret;
ourport->rx_enabled = 1;
ret = request_irq(ourport->rx_irq, s3c24xx_serial_rx_irq, 0,
s3c24xx_serial_portname(port), ourport);
if (ret != 0) {
dev_err(port->dev, "cannot get irq %d\n", ourport->rx_irq);
return ret;
}
ourport->rx_claimed = 1;
dev_dbg(port->dev, "requesting tx irq...\n");
ourport->tx_enabled = 1;
ret = request_irq(ourport->tx_irq, s3c24xx_serial_tx_irq, 0,
s3c24xx_serial_portname(port), ourport);
if (ret) {
dev_err(port->dev, "cannot get irq %d\n", ourport->tx_irq);
goto err;
}
ourport->tx_claimed = 1;
/* the port reset code should have done the correct
* register setup for the port controls
*/
return ret;
err:
s3c24xx_serial_shutdown(port);
return ret;
}
static int s3c64xx_serial_startup(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
unsigned int ufcon;
int ret;
wr_regl(port, S3C64XX_UINTM, 0xf);
if (ourport->dma) {
ret = s3c24xx_serial_request_dma(ourport);
if (ret < 0) {
devm_kfree(port->dev, ourport->dma);
ourport->dma = NULL;
}
}
ret = request_irq(port->irq, s3c64xx_serial_handle_irq, IRQF_SHARED,
s3c24xx_serial_portname(port), ourport);
if (ret) {
dev_err(port->dev, "cannot get irq %d\n", port->irq);
return ret;
}
/* For compatibility with s3c24xx SoCs */
ourport->rx_enabled = 1;
ourport->tx_enabled = 0;
spin_lock_irqsave(&port->lock, flags);
ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
if (!uart_console(port))
ufcon |= S3C2410_UFCON_RESETTX;
wr_regl(port, S3C2410_UFCON, ufcon);
enable_rx_pio(ourport);
spin_unlock_irqrestore(&port->lock, flags);
/* Enable Rx Interrupt */
s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
return ret;
}
static int apple_s5l_serial_startup(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
unsigned int ufcon;
int ret;
wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_ALL_FLAGS);
ret = request_irq(port->irq, apple_serial_handle_irq, 0,
s3c24xx_serial_portname(port), ourport);
if (ret) {
dev_err(port->dev, "cannot get irq %d\n", port->irq);
return ret;
}
/* For compatibility with s3c24xx SoCs */
ourport->rx_enabled = 1;
ourport->tx_enabled = 0;
spin_lock_irqsave(&port->lock, flags);
ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
if (!uart_console(port))
ufcon |= S3C2410_UFCON_RESETTX;
wr_regl(port, S3C2410_UFCON, ufcon);
enable_rx_pio(ourport);
spin_unlock_irqrestore(&port->lock, flags);
/* Enable Rx Interrupt */
s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTO_ENA, S3C2410_UCON);
return ret;
}
/* power management control */
static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
unsigned int old)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
int timeout = 10000;
ourport->pm_level = level;
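/*
 * The level values follow the serial core's uart_ops.pm convention:
 * 0 means powered on, 3 means powered off. Before gating the clocks,
 * the transmitter is drained with a bounded wait of roughly one
 * second (10000 polls of 100us each).
 */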
switch (level) {
case 3:
while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
udelay(100);
if (!IS_ERR(ourport->baudclk))
clk_disable_unprepare(ourport->baudclk);
clk_disable_unprepare(ourport->clk);
break;
case 0:
clk_prepare_enable(ourport->clk);
if (!IS_ERR(ourport->baudclk))
clk_prepare_enable(ourport->baudclk);
break;
default:
dev_err(port->dev, "s3c24xx_serial: unknown pm %d\n", level);
}
}
/* baud rate calculation
*
* The UARTs on the S3C2410/S3C2440 can take their clocks from a number
* of different sources, including the peripheral clock ("pclk") and an
* external clock ("uclk"). The S3C2440 also adds the core clock ("fclk")
* with a programmable extra divisor.
*
* The following code goes through the clock sources, and calculates the
* baud clocks (and the resultant actual baud rates) and then tries to
* pick the closest one and select that.
*
*/
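/*
 * Illustrative numbers (assumed, not taken from any board): on a
 * non-divslot UART with rate = 66 MHz and req_baud = 115200,
 * quot = (66000000 + 8 * 115200) / (16 * 115200) = 36 and the actual
 * baud is 66000000 / (36 * 16) = 114583; UBRDIV is then written with
 * quot - 1 = 35.
 */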
#define MAX_CLK_NAME_LENGTH 15
static inline int s3c24xx_serial_getsource(struct uart_port *port)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
unsigned int ucon;
if (info->num_clks == 1)
return 0;
ucon = rd_regl(port, S3C2410_UCON);
ucon &= info->clksel_mask;
return ucon >> info->clksel_shift;
}
static void s3c24xx_serial_setsource(struct uart_port *port,
unsigned int clk_sel)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
unsigned int ucon;
if (info->num_clks == 1)
return;
ucon = rd_regl(port, S3C2410_UCON);
if ((ucon & info->clksel_mask) >> info->clksel_shift == clk_sel)
return;
ucon &= ~info->clksel_mask;
ucon |= clk_sel << info->clksel_shift;
wr_regl(port, S3C2410_UCON, ucon);
}
static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
unsigned int req_baud, struct clk **best_clk,
unsigned int *clk_num)
{
const struct s3c24xx_uart_info *info = ourport->info;
struct clk *clk;
unsigned long rate;
unsigned int cnt, baud, quot, best_quot = 0;
char clkname[MAX_CLK_NAME_LENGTH];
int calc_deviation, deviation = (1 << 30) - 1;
for (cnt = 0; cnt < info->num_clks; cnt++) {
/* Keep selected clock if provided */
if (ourport->cfg->clk_sel &&
!(ourport->cfg->clk_sel & (1 << cnt)))
continue;
sprintf(clkname, "clk_uart_baud%d", cnt);
clk = clk_get(ourport->port.dev, clkname);
if (IS_ERR(clk))
continue;
rate = clk_get_rate(clk);
if (!rate) {
dev_err(ourport->port.dev,
"Failed to get clock rate for %s.\n", clkname);
clk_put(clk);
continue;
}
if (ourport->info->has_divslot) {
unsigned long div = rate / req_baud;
/* The UDIVSLOT register on the newer UARTs allows us to
* get a divisor adjustment of 1/16th on the baud clock.
*
* We don't keep the UDIVSLOT value (the 16ths we
* calculated by not multiplying the baud by 16) as it
* is easy enough to recalculate.
*/
quot = div / 16;
baud = rate / div;
} else {
quot = (rate + (8 * req_baud)) / (16 * req_baud);
baud = rate / (quot * 16);
}
quot--;
calc_deviation = abs(req_baud - baud);
if (calc_deviation < deviation) {
/*
* If we find a better clk, release the previous one, if
* any.
*/
if (!IS_ERR(*best_clk))
clk_put(*best_clk);
*best_clk = clk;
best_quot = quot;
*clk_num = cnt;
deviation = calc_deviation;
} else {
clk_put(clk);
}
}
return best_quot;
}
/* udivslot_table[]
*
* This table takes the fractional value of the baud divisor and gives
* the recommended setting for the UDIVSLOT register.
*/
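/*
 * Each entry spreads N extra baud-clock slots over the 16 bit
 * positions of one divisor cycle; index 8 (0x5555), for instance, has
 * eight bits set and lengthens every other slot. The index used below
 * is the fractional part of the divisor in sixteenths (div & 15).
 */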
static const u16 udivslot_table[16] = {
[0] = 0x0000,
[1] = 0x0080,
[2] = 0x0808,
[3] = 0x0888,
[4] = 0x2222,
[5] = 0x4924,
[6] = 0x4A52,
[7] = 0x54AA,
[8] = 0x5555,
[9] = 0xD555,
[10] = 0xD5D5,
[11] = 0xDDD5,
[12] = 0xDDDD,
[13] = 0xDFDD,
[14] = 0xDFDF,
[15] = 0xFFDF,
};
static void s3c24xx_serial_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
const struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
struct s3c24xx_uart_port *ourport = to_ourport(port);
struct clk *clk = ERR_PTR(-EINVAL);
unsigned long flags;
unsigned int baud, quot, clk_sel = 0;
unsigned int ulcon;
unsigned int umcon;
unsigned int udivslot = 0;
/*
* We don't support modem control lines.
*/
termios->c_cflag &= ~(HUPCL | CMSPAR);
termios->c_cflag |= CLOCAL;
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, 3000000);
quot = s3c24xx_serial_getclk(ourport, baud, &clk, &clk_sel);
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
quot = port->custom_divisor;
if (IS_ERR(clk))
return;
/* check to see if we need to change clock source */
if (ourport->baudclk != clk) {
clk_prepare_enable(clk);
s3c24xx_serial_setsource(port, clk_sel);
if (!IS_ERR(ourport->baudclk)) {
clk_disable_unprepare(ourport->baudclk);
ourport->baudclk = ERR_PTR(-EINVAL);
}
ourport->baudclk = clk;
ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
}
if (ourport->info->has_divslot) {
unsigned int div = ourport->baudclk_rate / baud;
if (cfg->has_fracval) {
udivslot = (div & 15);
dev_dbg(port->dev, "fracval = %04x\n", udivslot);
} else {
udivslot = udivslot_table[div & 15];
dev_dbg(port->dev, "udivslot = %04x (div %d)\n",
udivslot, div & 15);
}
}
switch (termios->c_cflag & CSIZE) {
case CS5:
dev_dbg(port->dev, "config: 5bits/char\n");
ulcon = S3C2410_LCON_CS5;
break;
case CS6:
dev_dbg(port->dev, "config: 6bits/char\n");
ulcon = S3C2410_LCON_CS6;
break;
case CS7:
dev_dbg(port->dev, "config: 7bits/char\n");
ulcon = S3C2410_LCON_CS7;
break;
case CS8:
default:
dev_dbg(port->dev, "config: 8bits/char\n");
ulcon = S3C2410_LCON_CS8;
break;
}
/* preserve original lcon IR settings */
ulcon |= (cfg->ulcon & S3C2410_LCON_IRM);
if (termios->c_cflag & CSTOPB)
ulcon |= S3C2410_LCON_STOPB;
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & PARODD)
ulcon |= S3C2410_LCON_PODD;
else
ulcon |= S3C2410_LCON_PEVEN;
} else {
ulcon |= S3C2410_LCON_PNONE;
}
spin_lock_irqsave(&port->lock, flags);
dev_dbg(port->dev,
"setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
ulcon, quot, udivslot);
wr_regl(port, S3C2410_ULCON, ulcon);
wr_regl(port, S3C2410_UBRDIV, quot);
port->status &= ~UPSTAT_AUTOCTS;
umcon = rd_regl(port, S3C2410_UMCON);
if (termios->c_cflag & CRTSCTS) {
umcon |= S3C2410_UMCOM_AFC;
/* Disable RTS when RX FIFO contains 63 bytes */
umcon &= ~S3C2412_UMCON_AFC_8;
port->status = UPSTAT_AUTOCTS;
} else {
umcon &= ~S3C2410_UMCOM_AFC;
}
wr_regl(port, S3C2410_UMCON, umcon);
if (ourport->info->has_divslot)
wr_regl(port, S3C2443_DIVSLOT, udivslot);
dev_dbg(port->dev,
"uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n",
rd_regl(port, S3C2410_ULCON),
rd_regl(port, S3C2410_UCON),
rd_regl(port, S3C2410_UFCON));
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
/*
* Which character status flags are we interested in?
*/
port->read_status_mask = S3C2410_UERSTAT_OVERRUN;
if (termios->c_iflag & INPCK)
port->read_status_mask |= S3C2410_UERSTAT_FRAME |
S3C2410_UERSTAT_PARITY;
/*
* Which character status flags should we ignore?
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= S3C2410_UERSTAT_OVERRUN;
if (termios->c_iflag & IGNBRK && termios->c_iflag & IGNPAR)
port->ignore_status_mask |= S3C2410_UERSTAT_FRAME;
/*
* Ignore all characters if CREAD is not set.
*/
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= RXSTAT_DUMMY_READ;
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *s3c24xx_serial_type(struct uart_port *port)
{
const struct s3c24xx_uart_port *ourport = to_ourport(port);
switch (ourport->info->type) {
case TYPE_S3C24XX:
return "S3C24XX";
case TYPE_S3C6400:
return "S3C6400/10";
case TYPE_APPLE_S5L:
return "APPLE S5L";
default:
return NULL;
}
}
static void s3c24xx_serial_config_port(struct uart_port *port, int flags)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
if (flags & UART_CONFIG_TYPE)
port->type = info->port_type;
}
/*
* verify the new serial_struct (for TIOCSSERIAL).
*/
static int
s3c24xx_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
if (ser->type != PORT_UNKNOWN && ser->type != info->port_type)
return -EINVAL;
return 0;
}
#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
static struct console s3c24xx_serial_console;
static void __init s3c24xx_serial_register_console(void)
{
register_console(&s3c24xx_serial_console);
}
static void s3c24xx_serial_unregister_console(void)
{
if (console_is_registered(&s3c24xx_serial_console))
unregister_console(&s3c24xx_serial_console);
}
#define S3C24XX_SERIAL_CONSOLE &s3c24xx_serial_console
#else
static inline void s3c24xx_serial_register_console(void) { }
static inline void s3c24xx_serial_unregister_console(void) { }
#define S3C24XX_SERIAL_CONSOLE NULL
#endif
#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_CONSOLE_POLL)
static int s3c24xx_serial_get_poll_char(struct uart_port *port);
static void s3c24xx_serial_put_poll_char(struct uart_port *port,
unsigned char c);
#endif
static const struct uart_ops s3c24xx_serial_ops = {
.pm = s3c24xx_serial_pm,
.tx_empty = s3c24xx_serial_tx_empty,
.get_mctrl = s3c24xx_serial_get_mctrl,
.set_mctrl = s3c24xx_serial_set_mctrl,
.stop_tx = s3c24xx_serial_stop_tx,
.start_tx = s3c24xx_serial_start_tx,
.stop_rx = s3c24xx_serial_stop_rx,
.break_ctl = s3c24xx_serial_break_ctl,
.startup = s3c24xx_serial_startup,
.shutdown = s3c24xx_serial_shutdown,
.set_termios = s3c24xx_serial_set_termios,
.type = s3c24xx_serial_type,
.config_port = s3c24xx_serial_config_port,
.verify_port = s3c24xx_serial_verify_port,
#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_CONSOLE_POLL)
.poll_get_char = s3c24xx_serial_get_poll_char,
.poll_put_char = s3c24xx_serial_put_poll_char,
#endif
};
static const struct uart_ops s3c64xx_serial_ops = {
.pm = s3c24xx_serial_pm,
.tx_empty = s3c24xx_serial_tx_empty,
.get_mctrl = s3c24xx_serial_get_mctrl,
.set_mctrl = s3c24xx_serial_set_mctrl,
.stop_tx = s3c24xx_serial_stop_tx,
.start_tx = s3c24xx_serial_start_tx,
.stop_rx = s3c24xx_serial_stop_rx,
.break_ctl = s3c24xx_serial_break_ctl,
.startup = s3c64xx_serial_startup,
.shutdown = s3c64xx_serial_shutdown,
.set_termios = s3c24xx_serial_set_termios,
.type = s3c24xx_serial_type,
.config_port = s3c24xx_serial_config_port,
.verify_port = s3c24xx_serial_verify_port,
#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_CONSOLE_POLL)
.poll_get_char = s3c24xx_serial_get_poll_char,
.poll_put_char = s3c24xx_serial_put_poll_char,
#endif
};
static const struct uart_ops apple_s5l_serial_ops = {
.pm = s3c24xx_serial_pm,
.tx_empty = s3c24xx_serial_tx_empty,
.get_mctrl = s3c24xx_serial_get_mctrl,
.set_mctrl = s3c24xx_serial_set_mctrl,
.stop_tx = s3c24xx_serial_stop_tx,
.start_tx = s3c24xx_serial_start_tx,
.stop_rx = s3c24xx_serial_stop_rx,
.break_ctl = s3c24xx_serial_break_ctl,
.startup = apple_s5l_serial_startup,
.shutdown = apple_s5l_serial_shutdown,
.set_termios = s3c24xx_serial_set_termios,
.type = s3c24xx_serial_type,
.config_port = s3c24xx_serial_config_port,
.verify_port = s3c24xx_serial_verify_port,
#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_CONSOLE_POLL)
.poll_get_char = s3c24xx_serial_get_poll_char,
.poll_put_char = s3c24xx_serial_put_poll_char,
#endif
};
static struct uart_driver s3c24xx_uart_drv = {
.owner = THIS_MODULE,
.driver_name = "s3c2410_serial",
.nr = UART_NR,
.cons = S3C24XX_SERIAL_CONSOLE,
.dev_name = S3C24XX_SERIAL_NAME,
.major = S3C24XX_SERIAL_MAJOR,
.minor = S3C24XX_SERIAL_MINOR,
};
static struct s3c24xx_uart_port s3c24xx_serial_ports[UART_NR];
static void s3c24xx_serial_init_port_default(int index)
{
struct uart_port *port = &s3c24xx_serial_ports[index].port;
spin_lock_init(&port->lock);
port->iotype = UPIO_MEM;
port->uartclk = 0;
port->fifosize = 16;
port->ops = &s3c24xx_serial_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->line = index;
}
/* s3c24xx_serial_resetport
*
* reset the FIFOs and the other settings.
*/
static void s3c24xx_serial_resetport(struct uart_port *port,
const struct s3c2410_uartcfg *cfg)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
unsigned long ucon = rd_regl(port, S3C2410_UCON);
ucon &= (info->clksel_mask | info->ucon_mask);
wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
/* reset both fifos */
wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
wr_regl(port, S3C2410_UFCON, cfg->ufcon);
/* some delay is required after fifo reset */
udelay(1);
}
static int s3c24xx_serial_enable_baudclk(struct s3c24xx_uart_port *ourport)
{
struct device *dev = ourport->port.dev;
const struct s3c24xx_uart_info *info = ourport->info;
char clk_name[MAX_CLK_NAME_LENGTH];
unsigned int clk_sel;
struct clk *clk;
int clk_num;
int ret;
clk_sel = ourport->cfg->clk_sel ? : info->def_clk_sel;
for (clk_num = 0; clk_num < info->num_clks; clk_num++) {
if (!(clk_sel & (1 << clk_num)))
continue;
sprintf(clk_name, "clk_uart_baud%d", clk_num);
clk = clk_get(dev, clk_name);
if (IS_ERR(clk))
continue;
ret = clk_prepare_enable(clk);
if (ret) {
clk_put(clk);
continue;
}
ourport->baudclk = clk;
ourport->baudclk_rate = clk_get_rate(clk);
s3c24xx_serial_setsource(&ourport->port, clk_num);
return 0;
}
return -EINVAL;
}
/* s3c24xx_serial_init_port
*
* initialise a single serial port from the platform device given
*/
static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
struct platform_device *platdev)
{
struct uart_port *port = &ourport->port;
const struct s3c2410_uartcfg *cfg = ourport->cfg;
struct resource *res;
int ret;
if (platdev == NULL)
return -ENODEV;
if (port->mapbase != 0)
return -EINVAL;
/* setup info for port */
port->dev = &platdev->dev;
port->uartclk = 1;
if (cfg->uart_flags & UPF_CONS_FLOW) {
dev_dbg(port->dev, "enabling flow control\n");
port->flags |= UPF_CONS_FLOW;
}
/* sort out the physical and virtual addresses for each UART */
res = platform_get_resource(platdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(port->dev, "failed to find memory resource for uart\n");
return -EINVAL;
}
dev_dbg(port->dev, "resource %pR)\n", res);
port->membase = devm_ioremap_resource(port->dev, res);
if (IS_ERR(port->membase)) {
dev_err(port->dev, "failed to remap controller address\n");
return -EBUSY;
}
port->mapbase = res->start;
ret = platform_get_irq(platdev, 0);
if (ret < 0) {
port->irq = 0;
} else {
port->irq = ret;
ourport->rx_irq = ret;
ourport->tx_irq = ret + 1;
}
switch (ourport->info->type) {
case TYPE_S3C24XX:
ret = platform_get_irq(platdev, 1);
if (ret > 0)
ourport->tx_irq = ret;
break;
default:
break;
}
/*
* DMA is currently supported only on DT platforms that specify
* DMA properties.
*/
if (platdev->dev.of_node && of_find_property(platdev->dev.of_node,
"dmas", NULL)) {
ourport->dma = devm_kzalloc(port->dev,
sizeof(*ourport->dma),
GFP_KERNEL);
if (!ourport->dma) {
ret = -ENOMEM;
goto err;
}
}
ourport->clk = clk_get(&platdev->dev, "uart");
if (IS_ERR(ourport->clk)) {
pr_err("%s: Controller clock not found\n",
dev_name(&platdev->dev));
ret = PTR_ERR(ourport->clk);
goto err;
}
ret = clk_prepare_enable(ourport->clk);
if (ret) {
pr_err("uart: clock failed to prepare+enable: %d\n", ret);
clk_put(ourport->clk);
goto err;
}
ret = s3c24xx_serial_enable_baudclk(ourport);
if (ret)
pr_warn("uart: failed to enable baudclk\n");
/* Keep all interrupts masked and cleared */
switch (ourport->info->type) {
case TYPE_S3C6400:
wr_regl(port, S3C64XX_UINTM, 0xf);
wr_regl(port, S3C64XX_UINTP, 0xf);
wr_regl(port, S3C64XX_UINTSP, 0xf);
break;
case TYPE_APPLE_S5L: {
unsigned int ucon;
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK |
APPLE_S5L_UCON_RXTHRESH_ENA_MSK |
APPLE_S5L_UCON_RXTO_ENA_MSK);
wr_regl(port, S3C2410_UCON, ucon);
wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_ALL_FLAGS);
break;
}
default:
break;
}
dev_dbg(port->dev, "port: map=%pa, mem=%p, irq=%d (%d,%d), clock=%u\n",
&port->mapbase, port->membase, port->irq,
ourport->rx_irq, ourport->tx_irq, port->uartclk);
/* reset the fifos (and setup the uart) */
s3c24xx_serial_resetport(port, cfg);
return 0;
err:
port->mapbase = 0;
return ret;
}
/* Device driver serial port probe */
static int probe_index;
static inline const struct s3c24xx_serial_drv_data *
s3c24xx_get_driver_data(struct platform_device *pdev)
{
if (dev_of_node(&pdev->dev))
return of_device_get_match_data(&pdev->dev);
return (struct s3c24xx_serial_drv_data *)
platform_get_device_id(pdev)->driver_data;
}
static int s3c24xx_serial_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct s3c24xx_uart_port *ourport;
int index = probe_index;
int ret, prop = 0;
if (np) {
ret = of_alias_get_id(np, "serial");
if (ret >= 0)
index = ret;
}
if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
dev_err(&pdev->dev, "serial%d out of range\n", index);
return -EINVAL;
}
ourport = &s3c24xx_serial_ports[index];
s3c24xx_serial_init_port_default(index);
ourport->drv_data = s3c24xx_get_driver_data(pdev);
if (!ourport->drv_data) {
dev_err(&pdev->dev, "could not find driver data\n");
return -ENODEV;
}
ourport->baudclk = ERR_PTR(-EINVAL);
ourport->info = &ourport->drv_data->info;
ourport->cfg = (dev_get_platdata(&pdev->dev)) ?
dev_get_platdata(&pdev->dev) :
&ourport->drv_data->def_cfg;
switch (ourport->info->type) {
case TYPE_S3C24XX:
ourport->port.ops = &s3c24xx_serial_ops;
break;
case TYPE_S3C6400:
ourport->port.ops = &s3c64xx_serial_ops;
break;
case TYPE_APPLE_S5L:
ourport->port.ops = &apple_s5l_serial_ops;
break;
}
if (np) {
of_property_read_u32(np,
"samsung,uart-fifosize", &ourport->port.fifosize);
if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
switch (prop) {
case 1:
ourport->port.iotype = UPIO_MEM;
break;
case 4:
ourport->port.iotype = UPIO_MEM32;
break;
default:
dev_warn(&pdev->dev, "unsupported reg-io-width (%d)\n",
prop);
return -EINVAL;
}
}
}
if (ourport->drv_data->fifosize[index])
ourport->port.fifosize = ourport->drv_data->fifosize[index];
else if (ourport->info->fifosize)
ourport->port.fifosize = ourport->info->fifosize;
ourport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SAMSUNG_CONSOLE);
/*
* DMA transfers must be aligned at least to cache line size,
* so find minimal transfer size suitable for DMA mode
*/
ourport->min_dma_size = max_t(int, ourport->port.fifosize,
dma_get_cache_alignment());
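/* With an assumed 64-byte cache line and the default 16-byte FIFO,
 * min_dma_size works out to max(16, 64) = 64 bytes.
 */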
dev_dbg(&pdev->dev, "%s: initialising port %p...\n", __func__, ourport);
ret = s3c24xx_serial_init_port(ourport, pdev);
if (ret < 0)
return ret;
if (!s3c24xx_uart_drv.state) {
ret = uart_register_driver(&s3c24xx_uart_drv);
if (ret < 0) {
pr_err("Failed to register Samsung UART driver\n");
return ret;
}
}
dev_dbg(&pdev->dev, "%s: adding port\n", __func__);
uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
platform_set_drvdata(pdev, &ourport->port);
/*
* Deactivate the clock enabled in s3c24xx_serial_init_port(). If the
* port has already been started (for example as console), the pm
* callback holds its own enable reference, so the enable counts
* overlap and the clock stays on in that case.
*/
clk_disable_unprepare(ourport->clk);
if (!IS_ERR(ourport->baudclk))
clk_disable_unprepare(ourport->baudclk);
probe_index++;
return 0;
}
static int s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
if (port)
uart_remove_one_port(&s3c24xx_uart_drv, port);
uart_unregister_driver(&s3c24xx_uart_drv);
return 0;
}
/* UART power management code */
#ifdef CONFIG_PM_SLEEP
static int s3c24xx_serial_suspend(struct device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(dev);
if (port)
uart_suspend_port(&s3c24xx_uart_drv, port);
return 0;
}
static int s3c24xx_serial_resume(struct device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(dev);
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (port) {
clk_prepare_enable(ourport->clk);
if (!IS_ERR(ourport->baudclk))
clk_prepare_enable(ourport->baudclk);
s3c24xx_serial_resetport(port, s3c24xx_port_to_cfg(port));
if (!IS_ERR(ourport->baudclk))
clk_disable_unprepare(ourport->baudclk);
clk_disable_unprepare(ourport->clk);
uart_resume_port(&s3c24xx_uart_drv, port);
}
return 0;
}
static int s3c24xx_serial_resume_noirq(struct device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(dev);
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (port) {
/* restore IRQ mask */
switch (ourport->info->type) {
case TYPE_S3C6400: {
unsigned int uintm = 0xf;
if (ourport->tx_enabled)
uintm &= ~S3C64XX_UINTM_TXD_MSK;
if (ourport->rx_enabled)
uintm &= ~S3C64XX_UINTM_RXD_MSK;
clk_prepare_enable(ourport->clk);
if (!IS_ERR(ourport->baudclk))
clk_prepare_enable(ourport->baudclk);
wr_regl(port, S3C64XX_UINTM, uintm);
if (!IS_ERR(ourport->baudclk))
clk_disable_unprepare(ourport->baudclk);
clk_disable_unprepare(ourport->clk);
break;
}
case TYPE_APPLE_S5L: {
unsigned int ucon;
int ret;
ret = clk_prepare_enable(ourport->clk);
if (ret) {
dev_err(dev, "clk_enable clk failed: %d\n", ret);
return ret;
}
if (!IS_ERR(ourport->baudclk)) {
ret = clk_prepare_enable(ourport->baudclk);
if (ret) {
dev_err(dev, "clk_enable baudclk failed: %d\n", ret);
clk_disable_unprepare(ourport->clk);
return ret;
}
}
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK |
APPLE_S5L_UCON_RXTHRESH_ENA_MSK |
APPLE_S5L_UCON_RXTO_ENA_MSK);
if (ourport->tx_enabled)
ucon |= APPLE_S5L_UCON_TXTHRESH_ENA_MSK;
if (ourport->rx_enabled)
ucon |= APPLE_S5L_UCON_RXTHRESH_ENA_MSK |
APPLE_S5L_UCON_RXTO_ENA_MSK;
wr_regl(port, S3C2410_UCON, ucon);
if (!IS_ERR(ourport->baudclk))
clk_disable_unprepare(ourport->baudclk);
clk_disable_unprepare(ourport->clk);
break;
}
default:
break;
}
}
return 0;
}
static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(s3c24xx_serial_suspend, s3c24xx_serial_resume)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, s3c24xx_serial_resume_noirq)
};
#define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops)
#else /* !CONFIG_PM_SLEEP */
#define SERIAL_SAMSUNG_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
/* Console code */
#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
static struct uart_port *cons_uart;
static int
s3c24xx_serial_console_txrdy(struct uart_port *port, unsigned int ufcon)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
unsigned long ufstat, utrstat;
if (ufcon & S3C2410_UFCON_FIFOMODE) {
/* fifo mode - check amount of data in fifo registers... */
ufstat = rd_regl(port, S3C2410_UFSTAT);
return (ufstat & info->tx_fifofull) ? 0 : 1;
}
/* in non-fifo mode, we go and use the tx buffer empty */
utrstat = rd_regl(port, S3C2410_UTRSTAT);
return (utrstat & S3C2410_UTRSTAT_TXE) ? 1 : 0;
}
static bool
s3c24xx_port_configured(unsigned int ucon)
{
/* consider the serial port configured if the tx/rx mode is set */
return (ucon & 0xf) != 0;
}
#ifdef CONFIG_CONSOLE_POLL
/*
* Console polling routines for writing and reading from the uart while
* in an interrupt or debug context.
*/
static int s3c24xx_serial_get_poll_char(struct uart_port *port)
{
const struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned int ufstat;
ufstat = rd_regl(port, S3C2410_UFSTAT);
if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
return NO_POLL_CHAR;
return rd_reg(port, S3C2410_URXH);
}
static void s3c24xx_serial_put_poll_char(struct uart_port *port,
unsigned char c)
{
unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
unsigned int ucon = rd_regl(port, S3C2410_UCON);
/* not possible to xmit on unconfigured port */
if (!s3c24xx_port_configured(ucon))
return;
while (!s3c24xx_serial_console_txrdy(port, ufcon))
cpu_relax();
wr_reg(port, S3C2410_UTXH, c);
}
#endif /* CONFIG_CONSOLE_POLL */
static void
s3c24xx_serial_console_putchar(struct uart_port *port, unsigned char ch)
{
unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
while (!s3c24xx_serial_console_txrdy(port, ufcon))
cpu_relax();
wr_reg(port, S3C2410_UTXH, ch);
}
static void
s3c24xx_serial_console_write(struct console *co, const char *s,
unsigned int count)
{
unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
unsigned long flags;
bool locked = true;
/* not possible to xmit on unconfigured port */
if (!s3c24xx_port_configured(ucon))
return;
if (cons_uart->sysrq)
locked = false;
else if (oops_in_progress)
locked = spin_trylock_irqsave(&cons_uart->lock, flags);
else
spin_lock_irqsave(&cons_uart->lock, flags);
uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
if (locked)
spin_unlock_irqrestore(&cons_uart->lock, flags);
}
/* Shouldn't be __init, as it can be instantiated from another module */
static void
s3c24xx_serial_get_options(struct uart_port *port, int *baud,
int *parity, int *bits)
{
struct clk *clk;
unsigned int ulcon;
unsigned int ucon;
unsigned int ubrdiv;
unsigned long rate;
unsigned int clk_sel;
char clk_name[MAX_CLK_NAME_LENGTH];
ulcon = rd_regl(port, S3C2410_ULCON);
ucon = rd_regl(port, S3C2410_UCON);
ubrdiv = rd_regl(port, S3C2410_UBRDIV);
if (s3c24xx_port_configured(ucon)) {
switch (ulcon & S3C2410_LCON_CSMASK) {
case S3C2410_LCON_CS5:
*bits = 5;
break;
case S3C2410_LCON_CS6:
*bits = 6;
break;
case S3C2410_LCON_CS7:
*bits = 7;
break;
case S3C2410_LCON_CS8:
default:
*bits = 8;
break;
}
switch (ulcon & S3C2410_LCON_PMASK) {
case S3C2410_LCON_PEVEN:
*parity = 'e';
break;
case S3C2410_LCON_PODD:
*parity = 'o';
break;
case S3C2410_LCON_PNONE:
default:
*parity = 'n';
}
/* now calculate the baud rate */
clk_sel = s3c24xx_serial_getsource(port);
sprintf(clk_name, "clk_uart_baud%d", clk_sel);
clk = clk_get(port->dev, clk_name);
if (!IS_ERR(clk))
rate = clk_get_rate(clk);
else
rate = 1;
*baud = rate / (16 * (ubrdiv + 1));
dev_dbg(port->dev, "calculated baud %d\n", *baud);
}
}
/* Shouldn't be __init, as it can be instantiated from another module */
static int
s3c24xx_serial_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
/* is this a valid port */
if (co->index == -1 || co->index >= UART_NR)
co->index = 0;
port = &s3c24xx_serial_ports[co->index].port;
/* is the port configured? */
if (port->mapbase == 0x0)
return -ENODEV;
cons_uart = port;
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
* console support.
*/
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
s3c24xx_serial_get_options(port, &baud, &parity, &bits);
dev_dbg(port->dev, "baud %d\n", baud);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct console s3c24xx_serial_console = {
.name = S3C24XX_SERIAL_NAME,
.device = uart_console_device,
.flags = CON_PRINTBUFFER,
.index = -1,
.write = s3c24xx_serial_console_write,
.setup = s3c24xx_serial_console_setup,
.data = &s3c24xx_uart_drv,
};
#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */
#if defined(CONFIG_CPU_S3C6400) || defined(CONFIG_CPU_S3C6410)
static const struct s3c24xx_serial_drv_data s3c6400_serial_drv_data = {
.info = {
.name = "Samsung S3C6400 UART",
.type = TYPE_S3C6400,
.port_type = PORT_S3C6400,
.fifosize = 64,
.has_divslot = 1,
.rx_fifomask = S3C2440_UFSTAT_RXMASK,
.rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
.rx_fifofull = S3C2440_UFSTAT_RXFULL,
.tx_fifofull = S3C2440_UFSTAT_TXFULL,
.tx_fifomask = S3C2440_UFSTAT_TXMASK,
.tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
.def_clk_sel = S3C2410_UCON_CLKSEL2,
.num_clks = 4,
.clksel_mask = S3C6400_UCON_CLKMASK,
.clksel_shift = S3C6400_UCON_CLKSHIFT,
},
.def_cfg = {
.ucon = S3C2410_UCON_DEFAULT,
.ufcon = S3C2410_UFCON_DEFAULT,
},
};
#define S3C6400_SERIAL_DRV_DATA (&s3c6400_serial_drv_data)
#else
#define S3C6400_SERIAL_DRV_DATA NULL
#endif
#ifdef CONFIG_CPU_S5PV210
static const struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
.info = {
.name = "Samsung S5PV210 UART",
.type = TYPE_S3C6400,
.port_type = PORT_S3C6400,
.has_divslot = 1,
.rx_fifomask = S5PV210_UFSTAT_RXMASK,
.rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
.rx_fifofull = S5PV210_UFSTAT_RXFULL,
.tx_fifofull = S5PV210_UFSTAT_TXFULL,
.tx_fifomask = S5PV210_UFSTAT_TXMASK,
.tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
.def_clk_sel = S3C2410_UCON_CLKSEL0,
.num_clks = 2,
.clksel_mask = S5PV210_UCON_CLKMASK,
.clksel_shift = S5PV210_UCON_CLKSHIFT,
},
.def_cfg = {
.ucon = S5PV210_UCON_DEFAULT,
.ufcon = S5PV210_UFCON_DEFAULT,
},
.fifosize = { 256, 64, 16, 16 },
};
#define S5PV210_SERIAL_DRV_DATA (&s5pv210_serial_drv_data)
#else
#define S5PV210_SERIAL_DRV_DATA NULL
#endif
#if defined(CONFIG_ARCH_EXYNOS)
#define EXYNOS_COMMON_SERIAL_DRV_DATA() \
.info = { \
.name = "Samsung Exynos UART", \
.type = TYPE_S3C6400, \
.port_type = PORT_S3C6400, \
.has_divslot = 1, \
.rx_fifomask = S5PV210_UFSTAT_RXMASK, \
.rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
.rx_fifofull = S5PV210_UFSTAT_RXFULL, \
.tx_fifofull = S5PV210_UFSTAT_TXFULL, \
.tx_fifomask = S5PV210_UFSTAT_TXMASK, \
.tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \
.def_clk_sel = S3C2410_UCON_CLKSEL0, \
.num_clks = 1, \
.clksel_mask = 0, \
.clksel_shift = 0, \
}, \
.def_cfg = { \
.ucon = S5PV210_UCON_DEFAULT, \
.ufcon = S5PV210_UFCON_DEFAULT, \
.has_fracval = 1, \
} \
static const struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
EXYNOS_COMMON_SERIAL_DRV_DATA(),
.fifosize = { 256, 64, 16, 16 },
};
static const struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
EXYNOS_COMMON_SERIAL_DRV_DATA(),
.fifosize = { 64, 256, 16, 256 },
};
static const struct s3c24xx_serial_drv_data exynos850_serial_drv_data = {
EXYNOS_COMMON_SERIAL_DRV_DATA(),
.fifosize = { 256, 64, 64, 64 },
};
#define EXYNOS4210_SERIAL_DRV_DATA (&exynos4210_serial_drv_data)
#define EXYNOS5433_SERIAL_DRV_DATA (&exynos5433_serial_drv_data)
#define EXYNOS850_SERIAL_DRV_DATA (&exynos850_serial_drv_data)
#else
#define EXYNOS4210_SERIAL_DRV_DATA NULL
#define EXYNOS5433_SERIAL_DRV_DATA NULL
#define EXYNOS850_SERIAL_DRV_DATA NULL
#endif
#ifdef CONFIG_ARCH_APPLE
static const struct s3c24xx_serial_drv_data s5l_serial_drv_data = {
.info = {
.name = "Apple S5L UART",
.type = TYPE_APPLE_S5L,
.port_type = PORT_8250,
.fifosize = 16,
.rx_fifomask = S3C2410_UFSTAT_RXMASK,
.rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
.rx_fifofull = S3C2410_UFSTAT_RXFULL,
.tx_fifofull = S3C2410_UFSTAT_TXFULL,
.tx_fifomask = S3C2410_UFSTAT_TXMASK,
.tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
.def_clk_sel = S3C2410_UCON_CLKSEL0,
.num_clks = 1,
.clksel_mask = 0,
.clksel_shift = 0,
.ucon_mask = APPLE_S5L_UCON_MASK,
},
.def_cfg = {
.ucon = APPLE_S5L_UCON_DEFAULT,
.ufcon = S3C2410_UFCON_DEFAULT,
},
};
#define S5L_SERIAL_DRV_DATA (&s5l_serial_drv_data)
#else
#define S5L_SERIAL_DRV_DATA NULL
#endif
#if defined(CONFIG_ARCH_ARTPEC)
static const struct s3c24xx_serial_drv_data artpec8_serial_drv_data = {
.info = {
.name = "Axis ARTPEC-8 UART",
.type = TYPE_S3C6400,
.port_type = PORT_S3C6400,
.fifosize = 64,
.has_divslot = 1,
.rx_fifomask = S5PV210_UFSTAT_RXMASK,
.rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
.rx_fifofull = S5PV210_UFSTAT_RXFULL,
.tx_fifofull = S5PV210_UFSTAT_TXFULL,
.tx_fifomask = S5PV210_UFSTAT_TXMASK,
.tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
.def_clk_sel = S3C2410_UCON_CLKSEL0,
.num_clks = 1,
.clksel_mask = 0,
.clksel_shift = 0,
},
.def_cfg = {
.ucon = S5PV210_UCON_DEFAULT,
.ufcon = S5PV210_UFCON_DEFAULT,
.has_fracval = 1,
}
};
#define ARTPEC8_SERIAL_DRV_DATA (&artpec8_serial_drv_data)
#else
#define ARTPEC8_SERIAL_DRV_DATA (NULL)
#endif
static const struct platform_device_id s3c24xx_serial_driver_ids[] = {
{
.name = "s3c6400-uart",
.driver_data = (kernel_ulong_t)S3C6400_SERIAL_DRV_DATA,
}, {
.name = "s5pv210-uart",
.driver_data = (kernel_ulong_t)S5PV210_SERIAL_DRV_DATA,
}, {
.name = "exynos4210-uart",
.driver_data = (kernel_ulong_t)EXYNOS4210_SERIAL_DRV_DATA,
}, {
.name = "exynos5433-uart",
.driver_data = (kernel_ulong_t)EXYNOS5433_SERIAL_DRV_DATA,
}, {
.name = "s5l-uart",
.driver_data = (kernel_ulong_t)S5L_SERIAL_DRV_DATA,
}, {
.name = "exynos850-uart",
.driver_data = (kernel_ulong_t)EXYNOS850_SERIAL_DRV_DATA,
}, {
.name = "artpec8-uart",
.driver_data = (kernel_ulong_t)ARTPEC8_SERIAL_DRV_DATA,
},
{ },
};
MODULE_DEVICE_TABLE(platform, s3c24xx_serial_driver_ids);
#ifdef CONFIG_OF
static const struct of_device_id s3c24xx_uart_dt_match[] = {
{ .compatible = "samsung,s3c6400-uart",
.data = S3C6400_SERIAL_DRV_DATA },
{ .compatible = "samsung,s5pv210-uart",
.data = S5PV210_SERIAL_DRV_DATA },
{ .compatible = "samsung,exynos4210-uart",
.data = EXYNOS4210_SERIAL_DRV_DATA },
{ .compatible = "samsung,exynos5433-uart",
.data = EXYNOS5433_SERIAL_DRV_DATA },
{ .compatible = "apple,s5l-uart",
.data = S5L_SERIAL_DRV_DATA },
{ .compatible = "samsung,exynos850-uart",
.data = EXYNOS850_SERIAL_DRV_DATA },
{ .compatible = "axis,artpec8-uart",
.data = ARTPEC8_SERIAL_DRV_DATA },
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
#endif
static struct platform_driver samsung_serial_driver = {
.probe = s3c24xx_serial_probe,
.remove = s3c24xx_serial_remove,
.id_table = s3c24xx_serial_driver_ids,
.driver = {
.name = "samsung-uart",
.pm = SERIAL_SAMSUNG_PM_OPS,
.of_match_table = of_match_ptr(s3c24xx_uart_dt_match),
},
};
static int __init samsung_serial_init(void)
{
int ret;
s3c24xx_serial_register_console();
ret = platform_driver_register(&samsung_serial_driver);
if (ret) {
s3c24xx_serial_unregister_console();
return ret;
}
return 0;
}
static void __exit samsung_serial_exit(void)
{
platform_driver_unregister(&samsung_serial_driver);
s3c24xx_serial_unregister_console();
}
module_init(samsung_serial_init);
module_exit(samsung_serial_exit);
#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
/*
* Early console.
*/
static void wr_reg_barrier(const struct uart_port *port, u32 reg, u32 val)
{
switch (port->iotype) {
case UPIO_MEM:
writeb(val, portaddr(port, reg));
break;
case UPIO_MEM32:
writel(val, portaddr(port, reg));
break;
}
}
struct samsung_early_console_data {
u32 txfull_mask;
u32 rxfifo_mask;
};
static void samsung_early_busyuart(const struct uart_port *port)
{
while (!(readl(port->membase + S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXFE))
;
}
static void samsung_early_busyuart_fifo(const struct uart_port *port)
{
const struct samsung_early_console_data *data = port->private_data;
while (readl(port->membase + S3C2410_UFSTAT) & data->txfull_mask)
;
}
static void samsung_early_putc(struct uart_port *port, unsigned char c)
{
if (readl(port->membase + S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE)
samsung_early_busyuart_fifo(port);
else
samsung_early_busyuart(port);
wr_reg_barrier(port, S3C2410_UTXH, c);
}
static void samsung_early_write(struct console *con, const char *s,
unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, samsung_early_putc);
}
static int samsung_early_read(struct console *con, char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
const struct samsung_early_console_data *data = dev->port.private_data;
int ch, ufstat, num_read = 0;
while (num_read < n) {
ufstat = rd_regl(&dev->port, S3C2410_UFSTAT);
if (!(ufstat & data->rxfifo_mask))
break;
ch = rd_reg(&dev->port, S3C2410_URXH);
if (ch == NO_POLL_CHAR)
break;
s[num_read++] = ch;
}
return num_read;
}
static int __init samsung_early_console_setup(struct earlycon_device *device,
const char *opt)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = samsung_early_write;
device->con->read = samsung_early_read;
return 0;
}
/* S3C2410 */
static struct samsung_early_console_data s3c2410_early_console_data = {
.txfull_mask = S3C2410_UFSTAT_TXFULL,
.rxfifo_mask = S3C2410_UFSTAT_RXFULL | S3C2410_UFSTAT_RXMASK,
};
static int __init s3c2410_early_console_setup(struct earlycon_device *device,
const char *opt)
{
device->port.private_data = &s3c2410_early_console_data;
return samsung_early_console_setup(device, opt);
}
OF_EARLYCON_DECLARE(s3c2410, "samsung,s3c2410-uart",
s3c2410_early_console_setup);
/* S3C2412, S3C2440, S3C64xx */
static struct samsung_early_console_data s3c2440_early_console_data = {
.txfull_mask = S3C2440_UFSTAT_TXFULL,
.rxfifo_mask = S3C2440_UFSTAT_RXFULL | S3C2440_UFSTAT_RXMASK,
};
static int __init s3c2440_early_console_setup(struct earlycon_device *device,
const char *opt)
{
device->port.private_data = &s3c2440_early_console_data;
return samsung_early_console_setup(device, opt);
}
OF_EARLYCON_DECLARE(s3c2412, "samsung,s3c2412-uart",
s3c2440_early_console_setup);
OF_EARLYCON_DECLARE(s3c2440, "samsung,s3c2440-uart",
s3c2440_early_console_setup);
OF_EARLYCON_DECLARE(s3c6400, "samsung,s3c6400-uart",
s3c2440_early_console_setup);
/* S5PV210, Exynos */
static struct samsung_early_console_data s5pv210_early_console_data = {
.txfull_mask = S5PV210_UFSTAT_TXFULL,
.rxfifo_mask = S5PV210_UFSTAT_RXFULL | S5PV210_UFSTAT_RXMASK,
};
static int __init s5pv210_early_console_setup(struct earlycon_device *device,
const char *opt)
{
device->port.private_data = &s5pv210_early_console_data;
return samsung_early_console_setup(device, opt);
}
OF_EARLYCON_DECLARE(s5pv210, "samsung,s5pv210-uart",
s5pv210_early_console_setup);
OF_EARLYCON_DECLARE(exynos4210, "samsung,exynos4210-uart",
s5pv210_early_console_setup);
OF_EARLYCON_DECLARE(artpec8, "axis,artpec8-uart",
s5pv210_early_console_setup);
/* Apple S5L */
static int __init apple_s5l_early_console_setup(struct earlycon_device *device,
const char *opt)
{
/* Close enough to S3C2410 for earlycon... */
device->port.private_data = &s3c2410_early_console_data;
#ifdef CONFIG_ARM64
/* ... but we need to override the existing fixmap entry as nGnRnE */
__set_fixmap(FIX_EARLYCON_MEM_BASE, device->port.mapbase,
__pgprot(PROT_DEVICE_nGnRnE));
#endif
return samsung_early_console_setup(device, opt);
}
OF_EARLYCON_DECLARE(s5l, "apple,s5l-uart", apple_s5l_early_console_setup);
#endif
MODULE_ALIAS("platform:samsung-uart");
MODULE_DESCRIPTION("Samsung SoC Serial port driver");
MODULE_AUTHOR("Ben Dooks <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/tty/serial/samsung_tty.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* MA35D1 serial driver
* Copyright (C) 2023 Nuvoton Technology Corp.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/iopoll.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
#include <linux/units.h>
#define MA35_UART_NR 17
#define MA35_RBR_REG 0x00
#define MA35_THR_REG 0x00
#define MA35_IER_REG 0x04
#define MA35_FCR_REG 0x08
#define MA35_LCR_REG 0x0C
#define MA35_MCR_REG 0x10
#define MA35_MSR_REG 0x14
#define MA35_FSR_REG 0x18
#define MA35_ISR_REG 0x1C
#define MA35_TOR_REG 0x20
#define MA35_BAUD_REG 0x24
#define MA35_ALTCTL_REG 0x2C
#define MA35_FUN_SEL_REG 0x30
#define MA35_WKCTL_REG 0x40
#define MA35_WKSTS_REG 0x44
/* MA35_IER_REG - Interrupt Enable Register */
#define MA35_IER_RDA_IEN BIT(0) /* RBR Available Interrupt Enable */
#define MA35_IER_THRE_IEN BIT(1) /* THR Empty Interrupt Enable */
#define MA35_IER_RLS_IEN BIT(2) /* RX Line Status Interrupt Enable */
#define MA35_IER_RTO_IEN BIT(4) /* RX Time-out Interrupt Enable */
#define MA35_IER_BUFERR_IEN BIT(5) /* Buffer Error Interrupt Enable */
#define MA35_IER_TIME_OUT_EN BIT(11) /* RX Buffer Time-out Counter Enable */
#define MA35_IER_AUTO_RTS BIT(12) /* nRTS Auto-flow Control Enable */
#define MA35_IER_AUTO_CTS BIT(13) /* nCTS Auto-flow Control Enable */
/* MA35_FCR_REG - FIFO Control Register */
#define MA35_FCR_RFR BIT(1) /* RX Field Software Reset */
#define MA35_FCR_TFR BIT(2) /* TX Field Software Reset */
#define MA35_FCR_RFITL_MASK GENMASK(7, 4) /* RX FIFO Interrupt Trigger Level */
#define MA35_FCR_RFITL_1BYTE FIELD_PREP(MA35_FCR_RFITL_MASK, 0)
#define MA35_FCR_RFITL_4BYTES FIELD_PREP(MA35_FCR_RFITL_MASK, 1)
#define MA35_FCR_RFITL_8BYTES FIELD_PREP(MA35_FCR_RFITL_MASK, 2)
#define MA35_FCR_RFITL_14BYTES FIELD_PREP(MA35_FCR_RFITL_MASK, 3)
#define MA35_FCR_RFITL_30BYTES FIELD_PREP(MA35_FCR_RFITL_MASK, 4)
#define MA35_FCR_RTSTL_MASK GENMASK(19, 16) /* nRTS Trigger Level */
#define MA35_FCR_RTSTL_1BYTE FIELD_PREP(MA35_FCR_RTSTL_MASK, 0)
#define MA35_FCR_RTSTL_4BYTES FIELD_PREP(MA35_FCR_RTSTL_MASK, 1)
#define MA35_FCR_RTSTL_8BYTES FIELD_PREP(MA35_FCR_RTSTL_MASK, 2)
#define MA35_FCR_RTSTL_14BYTES FIELD_PREP(MA35_FCR_RTSTL_MASK, 3)
#define MA35_FCR_RTSTLL_30BYTES FIELD_PREP(MA35_FCR_RTSTL_MASK, 4)
/* MA35_LCR_REG - Line Control Register */
#define MA35_LCR_NSB BIT(2) /* Number of "STOP Bit" */
#define MA35_LCR_PBE BIT(3) /* Parity Bit Enable */
#define MA35_LCR_EPE BIT(4) /* Even Parity Enable */
#define MA35_LCR_SPE BIT(5) /* Stick Parity Enable */
#define MA35_LCR_BREAK BIT(6) /* Break Control */
#define MA35_LCR_WLS_MASK GENMASK(1, 0) /* Word Length Selection */
#define MA35_LCR_WLS_5BITS FIELD_PREP(MA35_LCR_WLS_MASK, 0)
#define MA35_LCR_WLS_6BITS FIELD_PREP(MA35_LCR_WLS_MASK, 1)
#define MA35_LCR_WLS_7BITS FIELD_PREP(MA35_LCR_WLS_MASK, 2)
#define MA35_LCR_WLS_8BITS FIELD_PREP(MA35_LCR_WLS_MASK, 3)
/* MA35_MCR_REG - Modem Control Register */
#define MA35_MCR_RTS_CTRL BIT(1) /* nRTS Signal Control */
#define MA35_MCR_RTSACTLV BIT(9) /* nRTS Pin Active Level */
#define MA35_MCR_RTSSTS BIT(13) /* nRTS Pin Status (Read Only) */
/* MA35_MSR_REG - Modem Status Register */
#define MA35_MSR_CTSDETF BIT(0) /* Detect nCTS State Change Flag */
#define MA35_MSR_CTSSTS BIT(4) /* nCTS Pin Status (Read Only) */
#define MA35_MSR_CTSACTLV BIT(8) /* nCTS Pin Active Level */
/* MA35_FSR_REG - FIFO Status Register */
#define MA35_FSR_RX_OVER_IF BIT(0) /* RX Overflow Error Interrupt Flag */
#define MA35_FSR_PEF BIT(4) /* Parity Error Flag */
#define MA35_FSR_FEF BIT(5) /* Framing Error Flag */
#define MA35_FSR_BIF BIT(6) /* Break Interrupt Flag */
#define MA35_FSR_RX_EMPTY BIT(14) /* Receiver FIFO Empty (Read Only) */
#define MA35_FSR_RX_FULL BIT(15) /* Receiver FIFO Full (Read Only) */
#define MA35_FSR_TX_EMPTY BIT(22) /* Transmitter FIFO Empty (Read Only) */
#define MA35_FSR_TX_FULL BIT(23) /* Transmitter FIFO Full (Read Only) */
#define MA35_FSR_TX_OVER_IF BIT(24) /* TX Overflow Error Interrupt Flag */
#define MA35_FSR_TE_FLAG BIT(28) /* Transmitter Empty Flag (Read Only) */
#define MA35_FSR_RXPTR_MSK GENMASK(13, 8) /* RX FIFO Pointer mask */
#define MA35_FSR_TXPTR_MSK GENMASK(21, 16) /* TX FIFO Pointer mask */
/* MA35_ISR_REG - Interrupt Status Register */
#define MA35_ISR_RDA_IF BIT(0) /* RBR Available Interrupt Flag */
#define MA35_ISR_THRE_IF BIT(1) /* THR Empty Interrupt Flag */
#define MA35_ISR_RLSIF BIT(2) /* Receive Line Interrupt Flag */
#define MA35_ISR_MODEMIF BIT(3) /* MODEM Interrupt Flag */
#define MA35_ISR_RXTO_IF BIT(4) /* RX Time-out Interrupt Flag */
#define MA35_ISR_BUFEIF BIT(5) /* Buffer Error Interrupt Flag */
#define MA35_ISR_WK_IF BIT(6) /* UART Wake-up Interrupt Flag */
#define MA35_ISR_RDAINT BIT(8) /* RBR Available Interrupt Indicator */
#define MA35_ISR_THRE_INT BIT(9) /* THR Empty Interrupt Indicator */
#define MA35_ISR_ALL 0xFFFFFFFF
/* MA35_BAUD_REG - Baud Rate Divider Register */
#define MA35_BAUD_MODE_MASK GENMASK(29, 28)
#define MA35_BAUD_MODE0 FIELD_PREP(MA35_BAUD_MODE_MASK, 0)
#define MA35_BAUD_MODE1 FIELD_PREP(MA35_BAUD_MODE_MASK, 2)
#define MA35_BAUD_MODE2 FIELD_PREP(MA35_BAUD_MODE_MASK, 3)
#define MA35_BAUD_MASK GENMASK(15, 0)
/* MA35_ALTCTL_REG - Alternate Control/Status Register */
#define MA35_ALTCTL_RS485AUD BIT(10) /* RS-485 Auto Direction Function */
/* MA35_FUN_SEL_REG - Function Select Register */
#define MA35_FUN_SEL_MASK GENMASK(2, 0)
#define MA35_FUN_SEL_UART FIELD_PREP(MA35_FUN_SEL_MASK, 0)
#define MA35_FUN_SEL_RS485 FIELD_PREP(MA35_FUN_SEL_MASK, 3)
/* The constraints for the MA35D1 UART baud rate divider */
#define MA35_BAUD_DIV_MAX 0xFFFF
#define MA35_BAUD_DIV_MIN 11
/* UART FIFO depth */
#define MA35_UART_FIFO_DEPTH 32
/* UART console clock */
#define MA35_UART_CONSOLE_CLK (24 * HZ_PER_MHZ)
/* UART register ioremap size */
#define MA35_UART_REG_SIZE 0x100
/* Rx Timeout */
#define MA35_UART_RX_TOUT 0x40
#define MA35_IER_CONFIG (MA35_IER_RTO_IEN | MA35_IER_RDA_IEN | \
MA35_IER_TIME_OUT_EN | MA35_IER_BUFERR_IEN)
#define MA35_ISR_IF_CHECK (MA35_ISR_RDA_IF | MA35_ISR_RXTO_IF | \
MA35_ISR_THRE_INT | MA35_ISR_BUFEIF)
#define MA35_FSR_TX_BOTH_EMPTY (MA35_FSR_TE_FLAG | MA35_FSR_TX_EMPTY)
static struct uart_driver ma35d1serial_reg;
struct uart_ma35d1_port {
struct uart_port port;
struct clk *clk;
u16 capabilities; /* port capabilities */
u8 ier;
u8 lcr;
u8 mcr;
u32 baud_rate;
u32 console_baud_rate;
u32 console_line;
u32 console_int;
};
static struct uart_ma35d1_port ma35d1serial_ports[MA35_UART_NR];
static struct uart_ma35d1_port *to_ma35d1_uart_port(struct uart_port *uart)
{
return container_of(uart, struct uart_ma35d1_port, port);
}
static u32 serial_in(struct uart_ma35d1_port *p, u32 offset)
{
return readl_relaxed(p->port.membase + offset);
}
static void serial_out(struct uart_ma35d1_port *p, u32 offset, u32 value)
{
writel_relaxed(value, p->port.membase + offset);
}
static void __stop_tx(struct uart_ma35d1_port *p)
{
u32 ier;
ier = serial_in(p, MA35_IER_REG);
if (ier & MA35_IER_THRE_IEN)
serial_out(p, MA35_IER_REG, ier & ~MA35_IER_THRE_IEN);
}
static void ma35d1serial_stop_tx(struct uart_port *port)
{
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
__stop_tx(up);
}
static void transmit_chars(struct uart_ma35d1_port *up)
{
u32 count;
u8 ch;
if (uart_tx_stopped(&up->port)) {
ma35d1serial_stop_tx(&up->port);
return;
}
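/*
 * Free TX FIFO space is the FIFO depth (32) minus the fill level
 * reported by the hardware TX pointer field in FSR.
 */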
count = MA35_UART_FIFO_DEPTH - FIELD_GET(MA35_FSR_TXPTR_MSK,
serial_in(up, MA35_FSR_REG));
uart_port_tx_limited(&up->port, ch, count,
!(serial_in(up, MA35_FSR_REG) & MA35_FSR_TX_FULL),
serial_out(up, MA35_THR_REG, ch),
({}));
}
static void ma35d1serial_start_tx(struct uart_port *port)
{
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
u32 ier;
ier = serial_in(up, MA35_IER_REG);
serial_out(up, MA35_IER_REG, ier & ~MA35_IER_THRE_IEN);
transmit_chars(up);
serial_out(up, MA35_IER_REG, ier | MA35_IER_THRE_IEN);
}
static void ma35d1serial_stop_rx(struct uart_port *port)
{
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
u32 ier;
ier = serial_in(up, MA35_IER_REG);
ier &= ~MA35_IER_RDA_IEN;
serial_out(up, MA35_IER_REG, ier);
}
static void receive_chars(struct uart_ma35d1_port *up)
{
int max_count = 256;
u8 ch, flag;
u32 fsr;
fsr = serial_in(up, MA35_FSR_REG);
do {
flag = TTY_NORMAL;
up->port.icount.rx++;
if (unlikely(fsr & (MA35_FSR_BIF | MA35_FSR_FEF |
MA35_FSR_PEF | MA35_FSR_RX_OVER_IF))) {
if (fsr & MA35_FSR_BIF) {
up->port.icount.brk++;
if (uart_handle_break(&up->port))
continue;
}
if (fsr & MA35_FSR_FEF)
up->port.icount.frame++;
if (fsr & MA35_FSR_PEF)
up->port.icount.parity++;
if (fsr & MA35_FSR_RX_OVER_IF)
up->port.icount.overrun++;
serial_out(up, MA35_FSR_REG,
fsr & (MA35_FSR_BIF | MA35_FSR_FEF |
MA35_FSR_PEF | MA35_FSR_RX_OVER_IF));
if (fsr & MA35_FSR_BIF)
flag = TTY_BREAK;
else if (fsr & MA35_FSR_PEF)
flag = TTY_PARITY;
else if (fsr & MA35_FSR_FEF)
flag = TTY_FRAME;
}
ch = serial_in(up, MA35_RBR_REG);
if (uart_handle_sysrq_char(&up->port, ch))
continue;
spin_lock(&up->port.lock);
uart_insert_char(&up->port, fsr, MA35_FSR_RX_OVER_IF, ch, flag);
spin_unlock(&up->port.lock);
fsr = serial_in(up, MA35_FSR_REG);
} while (!(fsr & MA35_FSR_RX_EMPTY) && (max_count-- > 0));
spin_lock(&up->port.lock);
tty_flip_buffer_push(&up->port.state->port);
spin_unlock(&up->port.lock);
}
static irqreturn_t ma35d1serial_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
u32 isr, fsr;
isr = serial_in(up, MA35_ISR_REG);
fsr = serial_in(up, MA35_FSR_REG);
if (!(isr & MA35_ISR_IF_CHECK))
return IRQ_NONE;
if (isr & (MA35_ISR_RDA_IF | MA35_ISR_RXTO_IF))
receive_chars(up);
if (isr & MA35_ISR_THRE_INT)
transmit_chars(up);
if (fsr & MA35_FSR_TX_OVER_IF)
serial_out(up, MA35_FSR_REG, MA35_FSR_TX_OVER_IF);
return IRQ_HANDLED;
}
static u32 ma35d1serial_tx_empty(struct uart_port *port)
{
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
u32 fsr;
fsr = serial_in(up, MA35_FSR_REG);
if ((fsr & MA35_FSR_TX_BOTH_EMPTY) == MA35_FSR_TX_BOTH_EMPTY)
return TIOCSER_TEMT;
else
return 0;
}
static u32 ma35d1serial_get_mctrl(struct uart_port *port)
{
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
u32 status;
u32 ret = 0;
status = serial_in(up, MA35_MSR_REG);
if (!(status & MA35_MSR_CTSSTS))
ret |= TIOCM_CTS;
return ret;
}
static void ma35d1serial_set_mctrl(struct uart_port *port, u32 mctrl)
{
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
u32 mcr, msr, ier;
mcr = serial_in(up, MA35_MCR_REG);
mcr &= ~MA35_MCR_RTS_CTRL;
if (mctrl & TIOCM_RTS)
mcr |= MA35_MCR_RTSACTLV;
else
mcr &= ~MA35_MCR_RTSACTLV;
if (up->mcr & UART_MCR_AFE) {
ier = serial_in(up, MA35_IER_REG);
ier |= MA35_IER_AUTO_RTS | MA35_IER_AUTO_CTS;
serial_out(up, MA35_IER_REG, ier);
up->port.flags |= UPF_HARD_FLOW;
} else {
ier = serial_in(up, MA35_IER_REG);
ier &= ~(MA35_IER_AUTO_RTS | MA35_IER_AUTO_CTS);
serial_out(up, MA35_IER_REG, ier);
up->port.flags &= ~UPF_HARD_FLOW;
}
msr = serial_in(up, MA35_MSR_REG);
msr |= MA35_MSR_CTSACTLV;
serial_out(up, MA35_MSR_REG, msr);
serial_out(up, MA35_MCR_REG, mcr);
}
static void ma35d1serial_break_ctl(struct uart_port *port, int break_state)
{
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
unsigned long flags;
u32 lcr;
spin_lock_irqsave(&up->port.lock, flags);
lcr = serial_in(up, MA35_LCR_REG);
if (break_state != 0)
lcr |= MA35_LCR_BREAK;
else
lcr &= ~MA35_LCR_BREAK;
serial_out(up, MA35_LCR_REG, lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static int ma35d1serial_startup(struct uart_port *port)
{
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
u32 fcr;
int retval;
/* Reset FIFO */
serial_out(up, MA35_FCR_REG, MA35_FCR_TFR | MA35_FCR_RFR);
/* Clear pending interrupts */
serial_out(up, MA35_ISR_REG, MA35_ISR_ALL);
retval = request_irq(port->irq, ma35d1serial_interrupt, 0,
dev_name(port->dev), port);
if (retval) {
dev_err(up->port.dev, "request irq failed.\n");
return retval;
}
fcr = serial_in(up, MA35_FCR_REG);
fcr |= MA35_FCR_RFITL_4BYTES | MA35_FCR_RTSTL_8BYTES;
serial_out(up, MA35_FCR_REG, fcr);
serial_out(up, MA35_LCR_REG, MA35_LCR_WLS_8BITS);
serial_out(up, MA35_TOR_REG, MA35_UART_RX_TOUT);
serial_out(up, MA35_IER_REG, MA35_IER_CONFIG);
return 0;
}
static void ma35d1serial_shutdown(struct uart_port *port)
{
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
serial_out(up, MA35_IER_REG, 0);
free_irq(port->irq, port);
}
static void ma35d1serial_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
unsigned long flags;
u32 baud, quot;
u32 lcr = 0;
lcr = UART_LCR_WLEN(tty_get_char_size(termios->c_cflag));
if (termios->c_cflag & CSTOPB)
lcr |= MA35_LCR_NSB;
if (termios->c_cflag & PARENB)
lcr |= MA35_LCR_PBE;
if (!(termios->c_cflag & PARODD))
lcr |= MA35_LCR_EPE;
if (termios->c_cflag & CMSPAR)
lcr |= MA35_LCR_SPE;
baud = uart_get_baud_rate(port, termios, old,
port->uartclk / MA35_BAUD_DIV_MAX,
port->uartclk / MA35_BAUD_DIV_MIN);
/* MA35D1 UART baud rate equation: baudrate = UART_CLK / (quot + 2) */
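	/*
	 * Worked example (assuming a 24 MHz UART clock): at 115200 baud,
	 * quot = 24000000 / 115200 - 2 = 206, and the resulting rate is
	 * 24000000 / (206 + 2) ~= 115385, within 0.2% of the target.
	 */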
quot = (port->uartclk / baud) - 2;
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
spin_lock_irqsave(&up->port.lock, flags);
up->port.read_status_mask = MA35_FSR_RX_OVER_IF;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= MA35_FSR_FEF | MA35_FSR_PEF;
if (termios->c_iflag & (BRKINT | PARMRK))
up->port.read_status_mask |= MA35_FSR_BIF;
	/* Characters to ignore */
up->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= MA35_FSR_FEF | MA35_FSR_PEF;
if (termios->c_iflag & IGNBRK) {
up->port.ignore_status_mask |= MA35_FSR_BIF;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= MA35_FSR_RX_OVER_IF;
}
if (termios->c_cflag & CRTSCTS)
up->mcr |= UART_MCR_AFE;
else
up->mcr &= ~UART_MCR_AFE;
uart_update_timeout(port, termios->c_cflag, baud);
ma35d1serial_set_mctrl(&up->port, up->port.mctrl);
serial_out(up, MA35_BAUD_REG, MA35_BAUD_MODE2 | FIELD_PREP(MA35_BAUD_MASK, quot));
serial_out(up, MA35_LCR_REG, lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static const char *ma35d1serial_type(struct uart_port *port)
{
return "ma35d1-uart";
}
static void ma35d1serial_config_port(struct uart_port *port, int flags)
{
/*
* Driver core for serial ports forces a non-zero value for port type.
* Write an arbitrary value here to accommodate the serial core driver,
	 * as the ID part of the UAPI is redundant.
*/
port->type = 1;
}
static int ma35d1serial_verify_port(struct uart_port *port, struct serial_struct *ser)
{
if (port->type != PORT_UNKNOWN && ser->type != 1)
return -EINVAL;
return 0;
}
static const struct uart_ops ma35d1serial_ops = {
.tx_empty = ma35d1serial_tx_empty,
.set_mctrl = ma35d1serial_set_mctrl,
.get_mctrl = ma35d1serial_get_mctrl,
.stop_tx = ma35d1serial_stop_tx,
.start_tx = ma35d1serial_start_tx,
.stop_rx = ma35d1serial_stop_rx,
.break_ctl = ma35d1serial_break_ctl,
.startup = ma35d1serial_startup,
.shutdown = ma35d1serial_shutdown,
.set_termios = ma35d1serial_set_termios,
.type = ma35d1serial_type,
.config_port = ma35d1serial_config_port,
.verify_port = ma35d1serial_verify_port,
};
static const struct of_device_id ma35d1_serial_of_match[] = {
{ .compatible = "nuvoton,ma35d1-uart" },
{},
};
MODULE_DEVICE_TABLE(of, ma35d1_serial_of_match);
#ifdef CONFIG_SERIAL_NUVOTON_MA35D1_CONSOLE
static struct device_node *ma35d1serial_uart_nodes[MA35_UART_NR];
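/*
 * Poll FSR until the transmitter reports empty, giving up after roughly
 * 10 ms so a wedged transmitter cannot stall console output forever.
 */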
static void wait_for_xmitr(struct uart_ma35d1_port *up)
{
unsigned int reg = 0;
read_poll_timeout_atomic(serial_in, reg, reg & MA35_FSR_TX_EMPTY,
1, 10000, false,
up, MA35_FSR_REG);
}
static void ma35d1serial_console_putchar(struct uart_port *port, unsigned char ch)
{
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
wait_for_xmitr(up);
serial_out(up, MA35_THR_REG, ch);
}
/*
 * Print a string to the serial port, trying not to disturb
* any possible real use of the port...
*
* The console_lock must be held when we get here.
*/
static void ma35d1serial_console_write(struct console *co, const char *s, u32 count)
{
struct uart_ma35d1_port *up = &ma35d1serial_ports[co->index];
unsigned long flags;
int locked = 1;
u32 ier;
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock_irqsave(&up->port.lock, flags);
else
spin_lock_irqsave(&up->port.lock, flags);
/*
* First save the IER then disable the interrupts
*/
ier = serial_in(up, MA35_IER_REG);
serial_out(up, MA35_IER_REG, 0);
uart_console_write(&up->port, s, count, ma35d1serial_console_putchar);
wait_for_xmitr(up);
serial_out(up, MA35_IER_REG, ier);
if (locked)
spin_unlock_irqrestore(&up->port.lock, flags);
}
static int __init ma35d1serial_console_setup(struct console *co, char *options)
{
struct device_node *np;
struct uart_ma35d1_port *p;
u32 val32[4];
struct uart_port *port;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if ((co->index < 0) || (co->index >= MA35_UART_NR)) {
pr_debug("Console Port%x out of range\n", co->index);
return -EINVAL;
}
np = ma35d1serial_uart_nodes[co->index];
p = &ma35d1serial_ports[co->index];
if (!np || !p)
return -ENODEV;
if (of_property_read_u32_array(np, "reg", val32, ARRAY_SIZE(val32)) != 0)
return -EINVAL;
p->port.iobase = val32[1];
p->port.membase = ioremap(p->port.iobase, MA35_UART_REG_SIZE);
if (!p->port.membase)
return -ENOMEM;
p->port.ops = &ma35d1serial_ops;
p->port.line = 0;
p->port.uartclk = MA35_UART_CONSOLE_CLK;
port = &ma35d1serial_ports[co->index].port;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct console ma35d1serial_console = {
.name = "ttyNVT",
.write = ma35d1serial_console_write,
.device = uart_console_device,
.setup = ma35d1serial_console_setup,
.flags = CON_PRINTBUFFER | CON_ENABLED,
.index = -1,
.data = &ma35d1serial_reg,
};
static void ma35d1serial_console_init_port(void)
{
u32 i = 0;
struct device_node *np;
for_each_matching_node(np, ma35d1_serial_of_match) {
if (ma35d1serial_uart_nodes[i] == NULL) {
of_node_get(np);
ma35d1serial_uart_nodes[i] = np;
i++;
if (i == MA35_UART_NR)
break;
}
}
}
static int __init ma35d1serial_console_init(void)
{
ma35d1serial_console_init_port();
register_console(&ma35d1serial_console);
return 0;
}
console_initcall(ma35d1serial_console_init);
#define MA35D1SERIAL_CONSOLE (&ma35d1serial_console)
#else
#define MA35D1SERIAL_CONSOLE NULL
#endif
static struct uart_driver ma35d1serial_reg = {
.owner = THIS_MODULE,
.driver_name = "serial",
.dev_name = "ttyNVT",
.major = TTY_MAJOR,
.minor = 64,
.cons = MA35D1SERIAL_CONSOLE,
.nr = MA35_UART_NR,
};
/*
 * Register a serial port attached to a platform device.  Ports are
 * expected to come up with at least UPF_BOOT_AUTOCONF set.
 */
static int ma35d1serial_probe(struct platform_device *pdev)
{
struct resource *res_mem;
struct uart_ma35d1_port *up;
int ret = 0;
if (pdev->dev.of_node) {
ret = of_alias_get_id(pdev->dev.of_node, "serial");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret);
return ret;
}
}
up = &ma35d1serial_ports[ret];
up->port.line = ret;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_mem)
return -ENODEV;
up->port.iobase = res_mem->start;
up->port.membase = ioremap(up->port.iobase, MA35_UART_REG_SIZE);
up->port.ops = &ma35d1serial_ops;
spin_lock_init(&up->port.lock);
up->clk = of_clk_get(pdev->dev.of_node, 0);
if (IS_ERR(up->clk)) {
ret = PTR_ERR(up->clk);
dev_err(&pdev->dev, "failed to get core clk: %d\n", ret);
goto err_iounmap;
}
ret = clk_prepare_enable(up->clk);
if (ret)
goto err_iounmap;
if (up->port.line != 0)
up->port.uartclk = clk_get_rate(up->clk);
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_clk_disable;
up->port.irq = ret;
up->port.dev = &pdev->dev;
up->port.flags = UPF_BOOT_AUTOCONF;
platform_set_drvdata(pdev, up);
ret = uart_add_one_port(&ma35d1serial_reg, &up->port);
if (ret < 0)
goto err_free_irq;
return 0;
err_free_irq:
free_irq(up->port.irq, &up->port);
err_clk_disable:
clk_disable_unprepare(up->clk);
err_iounmap:
iounmap(up->port.membase);
return ret;
}
/*
* Remove serial ports registered against a platform device.
*/
static int ma35d1serial_remove(struct platform_device *dev)
{
struct uart_port *port = platform_get_drvdata(dev);
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
uart_remove_one_port(&ma35d1serial_reg, port);
clk_disable_unprepare(up->clk);
return 0;
}
static int ma35d1serial_suspend(struct platform_device *dev, pm_message_t state)
{
struct uart_port *port = platform_get_drvdata(dev);
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
uart_suspend_port(&ma35d1serial_reg, &up->port);
if (up->port.line == 0) {
up->console_baud_rate = serial_in(up, MA35_BAUD_REG);
up->console_line = serial_in(up, MA35_LCR_REG);
up->console_int = serial_in(up, MA35_IER_REG);
}
return 0;
}
static int ma35d1serial_resume(struct platform_device *dev)
{
struct uart_port *port = platform_get_drvdata(dev);
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
if (up->port.line == 0) {
serial_out(up, MA35_BAUD_REG, up->console_baud_rate);
serial_out(up, MA35_LCR_REG, up->console_line);
serial_out(up, MA35_IER_REG, up->console_int);
}
uart_resume_port(&ma35d1serial_reg, &up->port);
return 0;
}
static struct platform_driver ma35d1serial_driver = {
.probe = ma35d1serial_probe,
.remove = ma35d1serial_remove,
.suspend = ma35d1serial_suspend,
.resume = ma35d1serial_resume,
.driver = {
.name = "ma35d1-uart",
.of_match_table = of_match_ptr(ma35d1_serial_of_match),
},
};
static int __init ma35d1serial_init(void)
{
int ret;
ret = uart_register_driver(&ma35d1serial_reg);
if (ret)
return ret;
ret = platform_driver_register(&ma35d1serial_driver);
if (ret)
uart_unregister_driver(&ma35d1serial_reg);
return ret;
}
static void __exit ma35d1serial_exit(void)
{
platform_driver_unregister(&ma35d1serial_driver);
uart_unregister_driver(&ma35d1serial_reg);
}
module_init(ma35d1serial_init);
module_exit(ma35d1serial_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MA35D1 serial driver");
| linux-master | drivers/tty/serial/ma35d1_serial.c |
// SPDX-License-Identifier: GPL-2.0
/* suncore.c
*
* Common SUN serial routines. Based entirely
* upon drivers/sbus/char/sunserial.c which is:
*
* Copyright (C) 1997 Eddie C. Dost ([email protected])
*
* Adaptation to new UART layer is:
*
* Copyright (C) 2002 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/serial_core.h>
#include <linux/sunserialcore.h>
#include <linux/init.h>
#include <asm/prom.h>
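/*
 * All Sun serial drivers share one pool of minor numbers under TTY_MAJOR,
 * starting at minor 64 (the traditional ttyS0).  Each registration carves
 * 'count' minors out of the pool, and name_base is set so device names
 * continue where the previous driver left off.
 */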
static int sunserial_current_minor = 64;
int sunserial_register_minors(struct uart_driver *drv, int count)
{
int err = 0;
drv->minor = sunserial_current_minor;
drv->nr += count;
/* Register the driver on the first call */
if (drv->nr == count)
err = uart_register_driver(drv);
if (err == 0) {
sunserial_current_minor += count;
drv->tty_driver->name_base = drv->minor - 64;
}
return err;
}
EXPORT_SYMBOL(sunserial_register_minors);
void sunserial_unregister_minors(struct uart_driver *drv, int count)
{
drv->nr -= count;
sunserial_current_minor -= count;
if (drv->nr == 0)
uart_unregister_driver(drv);
}
EXPORT_SYMBOL(sunserial_unregister_minors);
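/*
 * Decide whether this driver's port 'line' is the firmware console.  PROM
 * console options beginning with 'b' select channel B, i.e. the odd line
 * of a chip pair, which is what the (line & 1) test below checks.
 */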
int sunserial_console_match(struct console *con, struct device_node *dp,
struct uart_driver *drv, int line, bool ignore_line)
{
if (!con)
return 0;
drv->cons = con;
if (of_console_device != dp)
return 0;
if (!ignore_line) {
int off = 0;
if (of_console_options &&
*of_console_options == 'b')
off = 1;
if ((line & 1) != off)
return 0;
}
if (!console_set_on_cmdline) {
con->index = line;
add_preferred_console(con->name, line, NULL);
}
return 1;
}
EXPORT_SYMBOL(sunserial_console_match);
void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
{
const char *mode, *s;
char mode_prop[] = "ttyX-mode";
int baud, bits, stop, cflag;
char parity;
if (of_node_name_eq(uart_dp, "rsc") ||
of_node_name_eq(uart_dp, "rsc-console") ||
of_node_name_eq(uart_dp, "rsc-control")) {
mode = of_get_property(uart_dp,
"ssp-console-modes", NULL);
if (!mode)
mode = "115200,8,n,1,-";
} else if (of_node_name_eq(uart_dp, "lom-console")) {
mode = "9600,8,n,1,-";
} else {
struct device_node *dp;
char c;
c = 'a';
if (of_console_options)
c = *of_console_options;
mode_prop[3] = c;
dp = of_find_node_by_path("/options");
mode = of_get_property(dp, mode_prop, NULL);
if (!mode)
mode = "9600,8,n,1,-";
of_node_put(dp);
}
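	/*
	 * The mode string is "<baud>,<bits>,<parity>,<stop>,<handshake>",
	 * e.g. "9600,8,n,1,-"; the parser below walks it field by field.
	 */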
cflag = CREAD | HUPCL | CLOCAL;
s = mode;
baud = simple_strtoul(s, NULL, 0);
s = strchr(s, ',');
bits = simple_strtoul(++s, NULL, 0);
s = strchr(s, ',');
parity = *(++s);
s = strchr(s, ',');
stop = simple_strtoul(++s, NULL, 0);
s = strchr(s, ',');
/* XXX handshake is not handled here. */
switch (baud) {
case 150: cflag |= B150; break;
case 300: cflag |= B300; break;
case 600: cflag |= B600; break;
case 1200: cflag |= B1200; break;
case 2400: cflag |= B2400; break;
case 4800: cflag |= B4800; break;
case 9600: cflag |= B9600; break;
case 19200: cflag |= B19200; break;
case 38400: cflag |= B38400; break;
case 57600: cflag |= B57600; break;
case 115200: cflag |= B115200; break;
case 230400: cflag |= B230400; break;
case 460800: cflag |= B460800; break;
default: baud = 9600; cflag |= B9600; break;
}
switch (bits) {
case 5: cflag |= CS5; break;
case 6: cflag |= CS6; break;
case 7: cflag |= CS7; break;
case 8: cflag |= CS8; break;
default: cflag |= CS8; break;
}
switch (parity) {
case 'o': cflag |= (PARENB | PARODD); break;
case 'e': cflag |= PARENB; break;
case 'n': default: break;
}
switch (stop) {
case 2: cflag |= CSTOPB; break;
case 1: default: break;
}
con->cflag = cflag;
}
/* Sun serial MOUSE auto baud rate detection. */
static struct mouse_baud_cflag {
int baud;
unsigned int cflag;
} mouse_baud_table[] = {
{ 1200, B1200 },
{ 2400, B2400 },
{ 4800, B4800 },
{ 9600, B9600 },
{ -1, ~0 },
{ -1, ~0 },
};
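/*
 * Note the two sentinel entries: suncore_mouse_baud_cflag_next() scans for
 * the current cflag and then advances one slot, so when nothing matches
 * (the scan stops on the first sentinel) the advance lands on the second
 * sentinel and the search safely wraps back to 1200 baud.
 */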
unsigned int suncore_mouse_baud_cflag_next(unsigned int cflag, int *new_baud)
{
int i;
for (i = 0; mouse_baud_table[i].baud != -1; i++)
if (mouse_baud_table[i].cflag == (cflag & CBAUD))
break;
i += 1;
if (mouse_baud_table[i].baud == -1)
i = 0;
*new_baud = mouse_baud_table[i].baud;
return mouse_baud_table[i].cflag;
}
EXPORT_SYMBOL(suncore_mouse_baud_cflag_next);
/* Basically, when the baud rate is wrong the mouse spits out
* breaks to us.
*/
int suncore_mouse_baud_detection(unsigned char ch, int is_break)
{
static int mouse_got_break = 0;
static int ctr = 0;
if (is_break) {
/* Let a few normal bytes go by before we jump the gun
* and say we need to try another baud rate.
*/
if (mouse_got_break && ctr < 8)
return 1;
/* Ok, we need to try another baud. */
ctr = 0;
mouse_got_break = 1;
return 2;
}
if (mouse_got_break) {
ctr++;
if (ch == 0x87) {
/* Correct baud rate determined. */
mouse_got_break = 0;
}
return 1;
}
return 0;
}
EXPORT_SYMBOL(suncore_mouse_baud_detection);
static int __init suncore_init(void)
{
return 0;
}
device_initcall(suncore_init);
#if 0 /* ..def MODULE ; never supported as such */
MODULE_AUTHOR("Eddie C. Dost, David S. Miller");
MODULE_DESCRIPTION("Sun serial common layer");
MODULE_LICENSE("GPL");
#endif
| linux-master | drivers/tty/serial/suncore.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for AMBA serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright 1999 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
* This is a generic driver for ARM AMBA-type serial ports. They
* have a lot of 16550-like features, but are not register compatible.
* Note that although they do have CTS, DCD and DSR inputs, they do
* not have an RI input, nor do they have DTR or RTS outputs. If
* required, these have to be supplied via some other means (eg, GPIO)
* and hooked into this driver.
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/io.h>
#define UART_NR 8
#define SERIAL_AMBA_MAJOR 204
#define SERIAL_AMBA_MINOR 16
#define SERIAL_AMBA_NR UART_NR
#define AMBA_ISR_PASS_LIMIT 256
#define UART_RX_DATA(s) (((s) & UART01x_FR_RXFE) == 0)
#define UART_TX_READY(s) (((s) & UART01x_FR_TXFF) == 0)
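/*
 * RSR is an 8-bit register, so bit 8 can never come from the hardware; it
 * is OR-ed into every saved status so that ignore_status_mask can swallow
 * all received characters when CREAD is clear.
 */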
#define UART_DUMMY_RSR_RX 256
#define UART_PORT_SIZE 64
/*
* We wrap our port structure around the generic uart_port.
*/
struct uart_amba_port {
struct uart_port port;
struct clk *clk;
struct amba_device *dev;
struct amba_pl010_data *data;
unsigned int old_status;
};
static void pl010_stop_tx(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
unsigned int cr;
cr = readb(uap->port.membase + UART010_CR);
cr &= ~UART010_CR_TIE;
writel(cr, uap->port.membase + UART010_CR);
}
static void pl010_start_tx(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
unsigned int cr;
cr = readb(uap->port.membase + UART010_CR);
cr |= UART010_CR_TIE;
writel(cr, uap->port.membase + UART010_CR);
}
static void pl010_stop_rx(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
unsigned int cr;
cr = readb(uap->port.membase + UART010_CR);
cr &= ~(UART010_CR_RIE | UART010_CR_RTIE);
writel(cr, uap->port.membase + UART010_CR);
}
static void pl010_disable_ms(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int cr;
cr = readb(uap->port.membase + UART010_CR);
cr &= ~UART010_CR_MSIE;
writel(cr, uap->port.membase + UART010_CR);
}
static void pl010_enable_ms(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
unsigned int cr;
cr = readb(uap->port.membase + UART010_CR);
cr |= UART010_CR_MSIE;
writel(cr, uap->port.membase + UART010_CR);
}
static void pl010_rx_chars(struct uart_port *port)
{
unsigned int status, rsr, max_count = 256;
u8 ch, flag;
status = readb(port->membase + UART01x_FR);
while (UART_RX_DATA(status) && max_count--) {
ch = readb(port->membase + UART01x_DR);
flag = TTY_NORMAL;
port->icount.rx++;
/*
* Note that the error handling code is
* out of the main execution path
*/
rsr = readb(port->membase + UART01x_RSR) | UART_DUMMY_RSR_RX;
if (unlikely(rsr & UART01x_RSR_ANY)) {
writel(0, port->membase + UART01x_ECR);
if (rsr & UART01x_RSR_BE) {
rsr &= ~(UART01x_RSR_FE | UART01x_RSR_PE);
port->icount.brk++;
if (uart_handle_break(port))
goto ignore_char;
} else if (rsr & UART01x_RSR_PE)
port->icount.parity++;
else if (rsr & UART01x_RSR_FE)
port->icount.frame++;
if (rsr & UART01x_RSR_OE)
port->icount.overrun++;
rsr &= port->read_status_mask;
if (rsr & UART01x_RSR_BE)
flag = TTY_BREAK;
else if (rsr & UART01x_RSR_PE)
flag = TTY_PARITY;
else if (rsr & UART01x_RSR_FE)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, ch))
goto ignore_char;
uart_insert_char(port, rsr, UART01x_RSR_OE, ch, flag);
ignore_char:
status = readb(port->membase + UART01x_FR);
}
tty_flip_buffer_push(&port->state->port);
}
static void pl010_tx_chars(struct uart_port *port)
{
u8 ch;
uart_port_tx_limited(port, ch, port->fifosize >> 1,
true,
writel(ch, port->membase + UART01x_DR),
({}));
}
static void pl010_modem_status(struct uart_amba_port *uap)
{
struct uart_port *port = &uap->port;
unsigned int status, delta;
writel(0, port->membase + UART010_ICR);
status = readb(port->membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
delta = status ^ uap->old_status;
uap->old_status = status;
if (!delta)
return;
if (delta & UART01x_FR_DCD)
uart_handle_dcd_change(port, status & UART01x_FR_DCD);
if (delta & UART01x_FR_DSR)
port->icount.dsr++;
if (delta & UART01x_FR_CTS)
uart_handle_cts_change(port, status & UART01x_FR_CTS);
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
static irqreturn_t pl010_int(int irq, void *dev_id)
{
struct uart_amba_port *uap = dev_id;
struct uart_port *port = &uap->port;
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
int handled = 0;
spin_lock(&port->lock);
status = readb(port->membase + UART010_IIR);
if (status) {
do {
if (status & (UART010_IIR_RTIS | UART010_IIR_RIS))
pl010_rx_chars(port);
if (status & UART010_IIR_MIS)
pl010_modem_status(uap);
if (status & UART010_IIR_TIS)
pl010_tx_chars(port);
if (pass_counter-- == 0)
break;
status = readb(port->membase + UART010_IIR);
} while (status & (UART010_IIR_RTIS | UART010_IIR_RIS |
UART010_IIR_TIS));
handled = 1;
}
spin_unlock(&port->lock);
return IRQ_RETVAL(handled);
}
static unsigned int pl010_tx_empty(struct uart_port *port)
{
unsigned int status = readb(port->membase + UART01x_FR);
return status & UART01x_FR_BUSY ? 0 : TIOCSER_TEMT;
}
static unsigned int pl010_get_mctrl(struct uart_port *port)
{
unsigned int result = 0;
unsigned int status;
status = readb(port->membase + UART01x_FR);
if (status & UART01x_FR_DCD)
result |= TIOCM_CAR;
if (status & UART01x_FR_DSR)
result |= TIOCM_DSR;
if (status & UART01x_FR_CTS)
result |= TIOCM_CTS;
return result;
}
static void pl010_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
if (uap->data)
uap->data->set_mctrl(uap->dev, port->membase, mctrl);
}
static void pl010_break_ctl(struct uart_port *port, int break_state)
{
unsigned long flags;
unsigned int lcr_h;
spin_lock_irqsave(&port->lock, flags);
lcr_h = readb(port->membase + UART010_LCRH);
if (break_state == -1)
lcr_h |= UART01x_LCRH_BRK;
else
lcr_h &= ~UART01x_LCRH_BRK;
writel(lcr_h, port->membase + UART010_LCRH);
spin_unlock_irqrestore(&port->lock, flags);
}
static int pl010_startup(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
int retval;
/*
* Try to enable the clock producer.
*/
retval = clk_prepare_enable(uap->clk);
if (retval)
goto out;
port->uartclk = clk_get_rate(uap->clk);
/*
* Allocate the IRQ
*/
retval = request_irq(port->irq, pl010_int, 0, "uart-pl010", uap);
if (retval)
goto clk_dis;
/*
* initialise the old status of the modem signals
*/
uap->old_status = readb(port->membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
/*
* Finally, enable interrupts
*/
writel(UART01x_CR_UARTEN | UART010_CR_RIE | UART010_CR_RTIE,
port->membase + UART010_CR);
return 0;
clk_dis:
clk_disable_unprepare(uap->clk);
out:
return retval;
}
static void pl010_shutdown(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
/*
* Free the interrupt
*/
free_irq(port->irq, uap);
/*
* disable all interrupts, disable the port
*/
writel(0, port->membase + UART010_CR);
/* disable break condition and fifos */
writel(readb(port->membase + UART010_LCRH) &
~(UART01x_LCRH_BRK | UART01x_LCRH_FEN),
port->membase + UART010_LCRH);
/*
* Shut down the clock producer
*/
clk_disable_unprepare(uap->clk);
}
static void
pl010_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
unsigned int lcr_h, old_cr;
unsigned long flags;
unsigned int baud, quot;
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
quot = uart_get_divisor(port, baud);
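	/*
	 * Worked example (assuming a 14.7456 MHz uartclk): at 115200 baud,
	 * quot = 14745600 / (16 * 115200) = 8, and 7 is programmed into the
	 * divisor latches below because the hardware divides by one more
	 * than the programmed value.
	 */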
switch (termios->c_cflag & CSIZE) {
case CS5:
lcr_h = UART01x_LCRH_WLEN_5;
break;
case CS6:
lcr_h = UART01x_LCRH_WLEN_6;
break;
case CS7:
lcr_h = UART01x_LCRH_WLEN_7;
break;
default: // CS8
lcr_h = UART01x_LCRH_WLEN_8;
break;
}
if (termios->c_cflag & CSTOPB)
lcr_h |= UART01x_LCRH_STP2;
if (termios->c_cflag & PARENB) {
lcr_h |= UART01x_LCRH_PEN;
if (!(termios->c_cflag & PARODD))
lcr_h |= UART01x_LCRH_EPS;
}
if (port->fifosize > 1)
lcr_h |= UART01x_LCRH_FEN;
spin_lock_irqsave(&port->lock, flags);
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
port->read_status_mask = UART01x_RSR_OE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= UART01x_RSR_BE;
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE;
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= UART01x_RSR_BE;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= UART01x_RSR_OE;
}
/*
* Ignore all characters if CREAD is not set.
*/
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= UART_DUMMY_RSR_RX;
old_cr = readb(port->membase + UART010_CR) & ~UART010_CR_MSIE;
if (UART_ENABLE_MS(port, termios->c_cflag))
old_cr |= UART010_CR_MSIE;
/* Set baud rate */
quot -= 1;
writel((quot & 0xf00) >> 8, port->membase + UART010_LCRM);
writel(quot & 0xff, port->membase + UART010_LCRL);
/*
* ----------v----------v----------v----------v-----
* NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
* ----------^----------^----------^----------^-----
*/
writel(lcr_h, port->membase + UART010_LCRH);
writel(old_cr, port->membase + UART010_CR);
spin_unlock_irqrestore(&port->lock, flags);
}
static void pl010_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
spin_lock_irq(&port->lock);
pl010_enable_ms(port);
spin_unlock_irq(&port->lock);
} else {
port->flags &= ~UPF_HARDPPS_CD;
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
spin_lock_irq(&port->lock);
pl010_disable_ms(port);
spin_unlock_irq(&port->lock);
}
}
}
static const char *pl010_type(struct uart_port *port)
{
return port->type == PORT_AMBA ? "AMBA" : NULL;
}
/*
* Release the memory region(s) being used by 'port'
*/
static void pl010_release_port(struct uart_port *port)
{
release_mem_region(port->mapbase, UART_PORT_SIZE);
}
/*
* Request the memory region(s) being used by 'port'
*/
static int pl010_request_port(struct uart_port *port)
{
return request_mem_region(port->mapbase, UART_PORT_SIZE, "uart-pl010")
!= NULL ? 0 : -EBUSY;
}
/*
* Configure/autoconfigure the port.
*/
static void pl010_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_AMBA;
pl010_request_port(port);
}
}
/*
* verify the new serial_struct (for TIOCSSERIAL).
*/
static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
{
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
ret = -EINVAL;
if (ser->irq < 0 || ser->irq >= nr_irqs)
ret = -EINVAL;
if (ser->baud_base < 9600)
ret = -EINVAL;
return ret;
}
static const struct uart_ops amba_pl010_pops = {
.tx_empty = pl010_tx_empty,
.set_mctrl = pl010_set_mctrl,
.get_mctrl = pl010_get_mctrl,
.stop_tx = pl010_stop_tx,
.start_tx = pl010_start_tx,
.stop_rx = pl010_stop_rx,
.enable_ms = pl010_enable_ms,
.break_ctl = pl010_break_ctl,
.startup = pl010_startup,
.shutdown = pl010_shutdown,
.set_termios = pl010_set_termios,
.set_ldisc = pl010_set_ldisc,
.type = pl010_type,
.release_port = pl010_release_port,
.request_port = pl010_request_port,
.config_port = pl010_config_port,
.verify_port = pl010_verify_port,
};
static struct uart_amba_port *amba_ports[UART_NR];
#ifdef CONFIG_SERIAL_AMBA_PL010_CONSOLE
static void pl010_console_putchar(struct uart_port *port, unsigned char ch)
{
unsigned int status;
do {
status = readb(port->membase + UART01x_FR);
barrier();
} while (!UART_TX_READY(status));
writel(ch, port->membase + UART01x_DR);
}
static void
pl010_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_amba_port *uap = amba_ports[co->index];
struct uart_port *port = &uap->port;
unsigned int status, old_cr;
clk_enable(uap->clk);
/*
* First save the CR then disable the interrupts
*/
old_cr = readb(port->membase + UART010_CR);
writel(UART01x_CR_UARTEN, port->membase + UART010_CR);
uart_console_write(port, s, count, pl010_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the TCR
*/
do {
status = readb(port->membase + UART01x_FR);
barrier();
} while (status & UART01x_FR_BUSY);
writel(old_cr, port->membase + UART010_CR);
clk_disable(uap->clk);
}
static void __init
pl010_console_get_options(struct uart_amba_port *uap, int *baud,
int *parity, int *bits)
{
if (readb(uap->port.membase + UART010_CR) & UART01x_CR_UARTEN) {
unsigned int lcr_h, quot;
lcr_h = readb(uap->port.membase + UART010_LCRH);
*parity = 'n';
if (lcr_h & UART01x_LCRH_PEN) {
if (lcr_h & UART01x_LCRH_EPS)
*parity = 'e';
else
*parity = 'o';
}
if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
*bits = 7;
else
*bits = 8;
quot = readb(uap->port.membase + UART010_LCRL) |
readb(uap->port.membase + UART010_LCRM) << 8;
*baud = uap->port.uartclk / (16 * (quot + 1));
}
}
static int __init pl010_console_setup(struct console *co, char *options)
{
struct uart_amba_port *uap;
int baud = 38400;
int bits = 8;
int parity = 'n';
int flow = 'n';
int ret;
/*
	 * Check whether an invalid uart number has been specified and,
	 * if so, fall back to the first port.
*/
if (co->index >= UART_NR)
co->index = 0;
uap = amba_ports[co->index];
if (!uap)
return -ENODEV;
ret = clk_prepare(uap->clk);
if (ret)
return ret;
uap->port.uartclk = clk_get_rate(uap->clk);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
pl010_console_get_options(uap, &baud, &parity, &bits);
return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
static struct uart_driver amba_reg;
static struct console amba_console = {
.name = "ttyAM",
.write = pl010_console_write,
.device = uart_console_device,
.setup = pl010_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &amba_reg,
};
#define AMBA_CONSOLE &amba_console
#else
#define AMBA_CONSOLE NULL
#endif
static DEFINE_MUTEX(amba_reg_lock);
static struct uart_driver amba_reg = {
.owner = THIS_MODULE,
.driver_name = "ttyAM",
.dev_name = "ttyAM",
.major = SERIAL_AMBA_MAJOR,
.minor = SERIAL_AMBA_MINOR,
.nr = UART_NR,
.cons = AMBA_CONSOLE,
};
static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
void __iomem *base;
int i, ret;
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
if (amba_ports[i] == NULL)
break;
if (i == ARRAY_SIZE(amba_ports))
return -EBUSY;
uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
GFP_KERNEL);
if (!uap)
return -ENOMEM;
base = devm_ioremap(&dev->dev, dev->res.start,
resource_size(&dev->res));
if (!base)
return -ENOMEM;
uap->clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(uap->clk))
return PTR_ERR(uap->clk);
uap->port.dev = &dev->dev;
uap->port.mapbase = dev->res.start;
uap->port.membase = base;
uap->port.iotype = UPIO_MEM;
uap->port.irq = dev->irq[0];
uap->port.fifosize = 16;
uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL010_CONSOLE);
uap->port.ops = &amba_pl010_pops;
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = i;
uap->dev = dev;
uap->data = dev_get_platdata(&dev->dev);
amba_ports[i] = uap;
amba_set_drvdata(dev, uap);
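	/*
	 * Register the uart_driver lazily on the first successful probe;
	 * amba_reg_lock keeps two ports probing in parallel from both
	 * calling uart_register_driver().
	 */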
mutex_lock(&amba_reg_lock);
if (!amba_reg.state) {
ret = uart_register_driver(&amba_reg);
if (ret < 0) {
mutex_unlock(&amba_reg_lock);
dev_err(uap->port.dev,
"Failed to register AMBA-PL010 driver\n");
return ret;
}
}
mutex_unlock(&amba_reg_lock);
ret = uart_add_one_port(&amba_reg, &uap->port);
if (ret)
amba_ports[i] = NULL;
return ret;
}
static void pl010_remove(struct amba_device *dev)
{
struct uart_amba_port *uap = amba_get_drvdata(dev);
int i;
bool busy = false;
uart_remove_one_port(&amba_reg, &uap->port);
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
if (amba_ports[i] == uap)
amba_ports[i] = NULL;
else if (amba_ports[i])
busy = true;
if (!busy)
uart_unregister_driver(&amba_reg);
}
#ifdef CONFIG_PM_SLEEP
static int pl010_suspend(struct device *dev)
{
struct uart_amba_port *uap = dev_get_drvdata(dev);
if (uap)
uart_suspend_port(&amba_reg, &uap->port);
return 0;
}
static int pl010_resume(struct device *dev)
{
struct uart_amba_port *uap = dev_get_drvdata(dev);
if (uap)
uart_resume_port(&amba_reg, &uap->port);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(pl010_dev_pm_ops, pl010_suspend, pl010_resume);
static const struct amba_id pl010_ids[] = {
{
.id = 0x00041010,
.mask = 0x000fffff,
},
{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, pl010_ids);
static struct amba_driver pl010_driver = {
.drv = {
.name = "uart-pl010",
.pm = &pl010_dev_pm_ops,
},
.id_table = pl010_ids,
.probe = pl010_probe,
.remove = pl010_remove,
};
static int __init pl010_init(void)
{
printk(KERN_INFO "Serial: AMBA driver\n");
return amba_driver_register(&pl010_driver);
}
static void __exit pl010_exit(void)
{
amba_driver_unregister(&pl010_driver);
}
module_init(pl010_init);
module_exit(pl010_exit);
MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/amba-pl010.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Zilog serial chips found on SGI workstations and
* servers. This driver could actually be made more generic.
*
* This is based on the drivers/serial/sunzilog.c code as of 2.6.0-test7 and the
* old drivers/sgi/char/sgiserial.c code which itself is based of the original
* drivers/sbus/char/zs.c code. A lot of code has been simply moved over
* directly from there but much has been rewritten. Credits therefore go out
* to David S. Miller, Eddie C. Dost, Pete Zaitcev, Ted Ts'o and Alex Buell
* for their work there.
*
* Copyright (C) 2002 Ralf Baechle ([email protected])
* Copyright (C) 2002 David S. Miller ([email protected])
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/serial.h>
#include <linux/sysrq.h>
#include <linux/console.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/sgialib.h>
#include <asm/sgi/ioc.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <linux/serial_core.h>
#include "ip22zilog.h"
/*
* On IP22 we need to delay after register accesses but we do not need to
* flush writes.
*/
#define ZSDELAY() udelay(5)
#define ZSDELAY_LONG() udelay(20)
#define ZS_WSYNC(channel) do { } while (0)
#define NUM_IP22ZILOG 1
#define NUM_CHANNELS (NUM_IP22ZILOG * 2)
#define ZS_CLOCK 3672000 /* Zilog input clock rate. */
#define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */
/*
* We wrap our port structure around the generic uart_port.
*/
struct uart_ip22zilog_port {
struct uart_port port;
/* IRQ servicing chain. */
struct uart_ip22zilog_port *next;
/* Current values of Zilog write registers. */
unsigned char curregs[NUM_ZSREGS];
unsigned int flags;
#define IP22ZILOG_FLAG_IS_CONS 0x00000004
#define IP22ZILOG_FLAG_IS_KGDB 0x00000008
#define IP22ZILOG_FLAG_MODEM_STATUS 0x00000010
#define IP22ZILOG_FLAG_IS_CHANNEL_A 0x00000020
#define IP22ZILOG_FLAG_REGS_HELD 0x00000040
#define IP22ZILOG_FLAG_TX_STOPPED 0x00000080
#define IP22ZILOG_FLAG_TX_ACTIVE 0x00000100
#define IP22ZILOG_FLAG_RESET_DONE 0x00000200
unsigned int tty_break;
unsigned char parity_mask;
unsigned char prev_status;
};
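/*
 * curregs[] is the classic Zilog shadow-register array: most of the chip's
 * write registers cannot be read back, so every update goes through this
 * software copy and is flushed to the hardware by __load_zsregs().
 */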
#define ZILOG_CHANNEL_FROM_PORT(PORT) ((struct zilog_channel *)((PORT)->membase))
#define UART_ZILOG(PORT) ((struct uart_ip22zilog_port *)(PORT))
#define IP22ZILOG_GET_CURR_REG(PORT, REGNUM) \
(UART_ZILOG(PORT)->curregs[REGNUM])
#define IP22ZILOG_SET_CURR_REG(PORT, REGNUM, REGVAL) \
((UART_ZILOG(PORT)->curregs[REGNUM]) = (REGVAL))
#define ZS_IS_CONS(UP) ((UP)->flags & IP22ZILOG_FLAG_IS_CONS)
#define ZS_IS_KGDB(UP) ((UP)->flags & IP22ZILOG_FLAG_IS_KGDB)
#define ZS_WANTS_MODEM_STATUS(UP) ((UP)->flags & IP22ZILOG_FLAG_MODEM_STATUS)
#define ZS_IS_CHANNEL_A(UP) ((UP)->flags & IP22ZILOG_FLAG_IS_CHANNEL_A)
#define ZS_REGS_HELD(UP) ((UP)->flags & IP22ZILOG_FLAG_REGS_HELD)
#define ZS_TX_STOPPED(UP) ((UP)->flags & IP22ZILOG_FLAG_TX_STOPPED)
#define ZS_TX_ACTIVE(UP) ((UP)->flags & IP22ZILOG_FLAG_TX_ACTIVE)
/* Reading and writing Zilog8530 registers. The delays are to make this
* driver work on the IP22 which needs a settling delay after each chip
* register access, other machines handle this in hardware via auxiliary
* flip-flops which implement the settle time we do in software.
*
* The port lock must be held and local IRQs must be disabled
* when {read,write}_zsreg is invoked.
*/
static unsigned char read_zsreg(struct zilog_channel *channel,
unsigned char reg)
{
unsigned char retval;
writeb(reg, &channel->control);
ZSDELAY();
retval = readb(&channel->control);
ZSDELAY();
return retval;
}
static void write_zsreg(struct zilog_channel *channel,
unsigned char reg, unsigned char value)
{
writeb(reg, &channel->control);
ZSDELAY();
writeb(value, &channel->control);
ZSDELAY();
}
static void ip22zilog_clear_fifo(struct zilog_channel *channel)
{
int i;
for (i = 0; i < 32; i++) {
unsigned char regval;
regval = readb(&channel->control);
ZSDELAY();
if (regval & Rx_CH_AV)
break;
regval = read_zsreg(channel, R1);
readb(&channel->data);
ZSDELAY();
if (regval & (PAR_ERR | Rx_OVR | CRC_ERR)) {
writeb(ERR_RES, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
}
}
}
/* This function must only be called when the TX is not busy. The UART
* port lock must be held and local interrupts disabled.
*/
static void __load_zsregs(struct zilog_channel *channel, unsigned char *regs)
{
int i;
/* Let pending transmits finish. */
for (i = 0; i < 1000; i++) {
unsigned char stat = read_zsreg(channel, R1);
if (stat & ALL_SNT)
break;
udelay(100);
}
writeb(ERR_RES, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
ip22zilog_clear_fifo(channel);
/* Disable all interrupts. */
write_zsreg(channel, R1,
regs[R1] & ~(RxINT_MASK | TxINT_ENAB | EXT_INT_ENAB));
/* Set parity, sync config, stop bits, and clock divisor. */
write_zsreg(channel, R4, regs[R4]);
/* Set misc. TX/RX control bits. */
write_zsreg(channel, R10, regs[R10]);
/* Set TX/RX controls sans the enable bits. */
write_zsreg(channel, R3, regs[R3] & ~RxENAB);
write_zsreg(channel, R5, regs[R5] & ~TxENAB);
/* Synchronous mode config. */
write_zsreg(channel, R6, regs[R6]);
write_zsreg(channel, R7, regs[R7]);
/* Don't mess with the interrupt vector (R2, unused by us) and
	 * master interrupt control (R9).  We make sure these are set up
	 * properly at probe time and then never touch them again.
*/
/* Disable baud generator. */
write_zsreg(channel, R14, regs[R14] & ~BRENAB);
/* Clock mode control. */
write_zsreg(channel, R11, regs[R11]);
/* Lower and upper byte of baud rate generator divisor. */
write_zsreg(channel, R12, regs[R12]);
write_zsreg(channel, R13, regs[R13]);
/* Now rewrite R14, with BRENAB (if set). */
write_zsreg(channel, R14, regs[R14]);
/* External status interrupt control. */
write_zsreg(channel, R15, regs[R15]);
/* Reset external status interrupts. */
write_zsreg(channel, R0, RES_EXT_INT);
write_zsreg(channel, R0, RES_EXT_INT);
/* Rewrite R3/R5, this time without enables masked. */
write_zsreg(channel, R3, regs[R3]);
write_zsreg(channel, R5, regs[R5]);
/* Rewrite R1, this time without IRQ enabled masked. */
write_zsreg(channel, R1, regs[R1]);
}
/* Reprogram the Zilog channel HW registers with the copies found in the
* software state struct. If the transmitter is busy, we defer this update
* until the next TX complete interrupt. Else, we do it right now.
*
* The UART port lock must be held and local interrupts disabled.
*/
static void ip22zilog_maybe_update_regs(struct uart_ip22zilog_port *up,
struct zilog_channel *channel)
{
if (!ZS_REGS_HELD(up)) {
if (ZS_TX_ACTIVE(up)) {
up->flags |= IP22ZILOG_FLAG_REGS_HELD;
} else {
__load_zsregs(channel, up->curregs);
}
}
}
#define Rx_BRK 0x0100 /* BREAK event software flag. */
#define Rx_SYS 0x0200 /* SysRq event software flag. */
static bool ip22zilog_receive_chars(struct uart_ip22zilog_port *up,
struct zilog_channel *channel)
{
unsigned int r1;
u8 ch, flag;
bool push = up->port.state != NULL;
for (;;) {
ch = readb(&channel->control);
ZSDELAY();
if (!(ch & Rx_CH_AV))
break;
r1 = read_zsreg(channel, R1);
if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
writeb(ERR_RES, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
}
ch = readb(&channel->data);
ZSDELAY();
ch &= up->parity_mask;
/* Handle the null char got when BREAK is removed. */
if (!ch)
r1 |= up->tty_break;
/* A real serial line, record the character and status. */
flag = TTY_NORMAL;
up->port.icount.rx++;
if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR | Rx_SYS | Rx_BRK)) {
up->tty_break = 0;
if (r1 & (Rx_SYS | Rx_BRK)) {
up->port.icount.brk++;
if (r1 & Rx_SYS)
continue;
r1 &= ~(PAR_ERR | CRC_ERR);
}
else if (r1 & PAR_ERR)
up->port.icount.parity++;
else if (r1 & CRC_ERR)
up->port.icount.frame++;
if (r1 & Rx_OVR)
up->port.icount.overrun++;
r1 &= up->port.read_status_mask;
if (r1 & Rx_BRK)
flag = TTY_BREAK;
else if (r1 & PAR_ERR)
flag = TTY_PARITY;
else if (r1 & CRC_ERR)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&up->port, ch))
continue;
if (push)
uart_insert_char(&up->port, r1, Rx_OVR, ch, flag);
}
return push;
}
static void ip22zilog_status_handle(struct uart_ip22zilog_port *up,
struct zilog_channel *channel)
{
unsigned char status;
status = readb(&channel->control);
ZSDELAY();
writeb(RES_EXT_INT, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
if (up->curregs[R15] & BRKIE) {
if ((status & BRK_ABRT) && !(up->prev_status & BRK_ABRT)) {
if (uart_handle_break(&up->port))
up->tty_break = Rx_SYS;
else
up->tty_break = Rx_BRK;
}
}
if (ZS_WANTS_MODEM_STATUS(up)) {
if (status & SYNC)
up->port.icount.dsr++;
/* The Zilog just gives us an interrupt when DCD/CTS/etc. change.
		 * But it does not tell us which bit has changed; we have to keep
		 * track of that ourselves.
*/
if ((status ^ up->prev_status) ^ DCD)
uart_handle_dcd_change(&up->port,
(status & DCD));
if ((status ^ up->prev_status) ^ CTS)
uart_handle_cts_change(&up->port,
(status & CTS));
wake_up_interruptible(&up->port.state->port.delta_msr_wait);
}
up->prev_status = status;
}
static void ip22zilog_transmit_chars(struct uart_ip22zilog_port *up,
struct zilog_channel *channel)
{
struct circ_buf *xmit;
if (ZS_IS_CONS(up)) {
unsigned char status = readb(&channel->control);
ZSDELAY();
/* TX still busy? Just wait for the next TX done interrupt.
*
* It can occur because of how we do serial console writes. It would
* be nice to transmit console writes just like we normally would for
	 * a TTY line (i.e. buffered and TX interrupt driven).  That is not
* easy because console writes cannot sleep. One solution might be
* to poll on enough port->xmit space becoming free. -DaveM
*/
if (!(status & Tx_BUF_EMP))
return;
}
up->flags &= ~IP22ZILOG_FLAG_TX_ACTIVE;
if (ZS_REGS_HELD(up)) {
__load_zsregs(channel, up->curregs);
up->flags &= ~IP22ZILOG_FLAG_REGS_HELD;
}
if (ZS_TX_STOPPED(up)) {
up->flags &= ~IP22ZILOG_FLAG_TX_STOPPED;
goto ack_tx_int;
}
if (up->port.x_char) {
up->flags |= IP22ZILOG_FLAG_TX_ACTIVE;
writeb(up->port.x_char, &channel->data);
ZSDELAY();
ZS_WSYNC(channel);
up->port.icount.tx++;
up->port.x_char = 0;
return;
}
if (up->port.state == NULL)
goto ack_tx_int;
xmit = &up->port.state->xmit;
if (uart_circ_empty(xmit))
goto ack_tx_int;
if (uart_tx_stopped(&up->port))
goto ack_tx_int;
up->flags |= IP22ZILOG_FLAG_TX_ACTIVE;
writeb(xmit->buf[xmit->tail], &channel->data);
ZSDELAY();
ZS_WSYNC(channel);
uart_xmit_advance(&up->port, 1);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
return;
ack_tx_int:
writeb(RES_Tx_P, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
}
static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
{
struct uart_ip22zilog_port *up = dev_id;
while (up) {
struct zilog_channel *channel
= ZILOG_CHANNEL_FROM_PORT(&up->port);
unsigned char r3;
bool push = false;
spin_lock(&up->port.lock);
r3 = read_zsreg(channel, R3);
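		/*
		 * The R3 value just read (via channel A) carries the
		 * interrupt-pending bits for both channels, so this single
		 * read drives the A and B handling below.
		 */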
/* Channel A */
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
if (r3 & CHARxIP)
push = ip22zilog_receive_chars(up, channel);
if (r3 & CHAEXT)
ip22zilog_status_handle(up, channel);
if (r3 & CHATxIP)
ip22zilog_transmit_chars(up, channel);
}
spin_unlock(&up->port.lock);
if (push)
tty_flip_buffer_push(&up->port.state->port);
/* Channel B */
up = up->next;
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
push = false;
spin_lock(&up->port.lock);
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
ZS_WSYNC(channel);
if (r3 & CHBRxIP)
push = ip22zilog_receive_chars(up, channel);
if (r3 & CHBEXT)
ip22zilog_status_handle(up, channel);
if (r3 & CHBTxIP)
ip22zilog_transmit_chars(up, channel);
}
spin_unlock(&up->port.lock);
if (push)
tty_flip_buffer_push(&up->port.state->port);
up = up->next;
}
return IRQ_HANDLED;
}
/* A convenient way to quickly get R0 status.  Locking is left to the
 * caller: some paths already hold the port lock, others take it around
 * this helper.
 */
static __inline__ unsigned char ip22zilog_read_channel_status(struct uart_port *port)
{
struct zilog_channel *channel;
unsigned char status;
channel = ZILOG_CHANNEL_FROM_PORT(port);
status = readb(&channel->control);
ZSDELAY();
return status;
}
/* The port lock is not held. */
static unsigned int ip22zilog_tx_empty(struct uart_port *port)
{
unsigned long flags;
unsigned char status;
unsigned int ret;
spin_lock_irqsave(&port->lock, flags);
status = ip22zilog_read_channel_status(port);
spin_unlock_irqrestore(&port->lock, flags);
if (status & Tx_BUF_EMP)
ret = TIOCSER_TEMT;
else
ret = 0;
return ret;
}
/* The port lock is held and interrupts are disabled. */
static unsigned int ip22zilog_get_mctrl(struct uart_port *port)
{
unsigned char status;
unsigned int ret;
status = ip22zilog_read_channel_status(port);
ret = 0;
if (status & DCD)
ret |= TIOCM_CAR;
if (status & SYNC)
ret |= TIOCM_DSR;
if (status & CTS)
ret |= TIOCM_CTS;
return ret;
}
/* The port lock is held and interrupts are disabled. */
static void ip22zilog_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_ip22zilog_port *up =
container_of(port, struct uart_ip22zilog_port, port);
struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char set_bits, clear_bits;
set_bits = clear_bits = 0;
if (mctrl & TIOCM_RTS)
set_bits |= RTS;
else
clear_bits |= RTS;
if (mctrl & TIOCM_DTR)
set_bits |= DTR;
else
clear_bits |= DTR;
/* NOTE: Not subject to 'transmitter active' rule. */
up->curregs[R5] |= set_bits;
up->curregs[R5] &= ~clear_bits;
write_zsreg(channel, R5, up->curregs[R5]);
}
/* The port lock is held and interrupts are disabled. */
static void ip22zilog_stop_tx(struct uart_port *port)
{
struct uart_ip22zilog_port *up =
container_of(port, struct uart_ip22zilog_port, port);
up->flags |= IP22ZILOG_FLAG_TX_STOPPED;
}
/* The port lock is held and interrupts are disabled. */
static void ip22zilog_start_tx(struct uart_port *port)
{
struct uart_ip22zilog_port *up =
container_of(port, struct uart_ip22zilog_port, port);
struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char status;
up->flags |= IP22ZILOG_FLAG_TX_ACTIVE;
up->flags &= ~IP22ZILOG_FLAG_TX_STOPPED;
status = readb(&channel->control);
ZSDELAY();
/* TX busy? Just wait for the TX done interrupt. */
if (!(status & Tx_BUF_EMP))
return;
/* Send the first character to jump-start the TX done
* IRQ sending engine.
*/
if (port->x_char) {
writeb(port->x_char, &channel->data);
ZSDELAY();
ZS_WSYNC(channel);
port->icount.tx++;
port->x_char = 0;
} else {
struct circ_buf *xmit = &port->state->xmit;
if (uart_circ_empty(xmit))
return;
writeb(xmit->buf[xmit->tail], &channel->data);
ZSDELAY();
ZS_WSYNC(channel);
uart_xmit_advance(port, 1);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
}
}
/* The port lock is held and interrupts are disabled. */
static void ip22zilog_stop_rx(struct uart_port *port)
{
struct uart_ip22zilog_port *up = UART_ZILOG(port);
struct zilog_channel *channel;
if (ZS_IS_CONS(up))
return;
channel = ZILOG_CHANNEL_FROM_PORT(port);
/* Disable all RX interrupts. */
up->curregs[R1] &= ~RxINT_MASK;
ip22zilog_maybe_update_regs(up, channel);
}
/* The port lock is held. */
static void ip22zilog_enable_ms(struct uart_port *port)
{
struct uart_ip22zilog_port *up =
container_of(port, struct uart_ip22zilog_port, port);
struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char new_reg;
new_reg = up->curregs[R15] | (DCDIE | SYNCIE | CTSIE);
if (new_reg != up->curregs[R15]) {
up->curregs[R15] = new_reg;
/* NOTE: Not subject to 'transmitter active' rule. */
write_zsreg(channel, R15, up->curregs[R15]);
}
}
/* The port lock is not held. */
static void ip22zilog_break_ctl(struct uart_port *port, int break_state)
{
struct uart_ip22zilog_port *up =
container_of(port, struct uart_ip22zilog_port, port);
struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char set_bits, clear_bits, new_reg;
unsigned long flags;
set_bits = clear_bits = 0;
if (break_state)
set_bits |= SND_BRK;
else
clear_bits |= SND_BRK;
spin_lock_irqsave(&port->lock, flags);
new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
if (new_reg != up->curregs[R5]) {
up->curregs[R5] = new_reg;
/* NOTE: Not subject to 'transmitter active' rule. */
write_zsreg(channel, R5, up->curregs[R5]);
}
spin_unlock_irqrestore(&port->lock, flags);
}
static void __ip22zilog_reset(struct uart_ip22zilog_port *up)
{
struct zilog_channel *channel;
int i;
if (up->flags & IP22ZILOG_FLAG_RESET_DONE)
return;
/* Let pending transmits finish. */
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
for (i = 0; i < 1000; i++) {
unsigned char stat = read_zsreg(channel, R1);
if (stat & ALL_SNT)
break;
udelay(100);
}
if (!ZS_IS_CHANNEL_A(up)) {
up++;
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
}
write_zsreg(channel, R9, FHWRES);
ZSDELAY_LONG();
(void) read_zsreg(channel, R0);
up->flags |= IP22ZILOG_FLAG_RESET_DONE;
up->next->flags |= IP22ZILOG_FLAG_RESET_DONE;
}
static void __ip22zilog_startup(struct uart_ip22zilog_port *up)
{
struct zilog_channel *channel;
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
__ip22zilog_reset(up);
__load_zsregs(channel, up->curregs);
/* set master interrupt enable */
write_zsreg(channel, R9, up->curregs[R9]);
up->prev_status = readb(&channel->control);
/* Enable receiver and transmitter. */
up->curregs[R3] |= RxENAB;
up->curregs[R5] |= TxENAB;
up->curregs[R1] |= EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
ip22zilog_maybe_update_regs(up, channel);
}
static int ip22zilog_startup(struct uart_port *port)
{
struct uart_ip22zilog_port *up = UART_ZILOG(port);
unsigned long flags;
if (ZS_IS_CONS(up))
return 0;
spin_lock_irqsave(&port->lock, flags);
__ip22zilog_startup(up);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
/*
* The test for ZS_IS_CONS is explained by the following e-mail:
*****
* From: Russell King <[email protected]>
* Date: Sun, 8 Dec 2002 10:18:38 +0000
*
* On Sun, Dec 08, 2002 at 02:43:36AM -0500, Pete Zaitcev wrote:
* > I boot my 2.5 boxes using "console=ttyS0,9600" argument,
* > and I noticed that something is not right with reference
* > counting in this case. It seems that when the console
* > is open by kernel initially, this is not accounted
* > as an open, and uart_startup is not called.
*
* That is correct. We are unable to call uart_startup when the serial
* console is initialised because it may need to allocate memory (as
* request_irq does) and the memory allocators may not have been
* initialised.
*
* 1. initialise the port into a state where it can send characters in the
* console write method.
*
* 2. don't do the actual hardware shutdown in your shutdown() method (but
* do the normal software shutdown - ie, free irqs etc)
*****
*/
static void ip22zilog_shutdown(struct uart_port *port)
{
struct uart_ip22zilog_port *up = UART_ZILOG(port);
struct zilog_channel *channel;
unsigned long flags;
if (ZS_IS_CONS(up))
return;
spin_lock_irqsave(&port->lock, flags);
channel = ZILOG_CHANNEL_FROM_PORT(port);
/* Disable receiver and transmitter. */
up->curregs[R3] &= ~RxENAB;
up->curregs[R5] &= ~TxENAB;
/* Disable all interrupts and BRK assertion. */
up->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
up->curregs[R5] &= ~SND_BRK;
ip22zilog_maybe_update_regs(up, channel);
spin_unlock_irqrestore(&port->lock, flags);
}
/* Shared by TTY driver and serial console setup. The port lock is held
* and local interrupts are disabled.
*/
static void
ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
unsigned int iflag, int brg)
{
up->curregs[R10] = NRZ;
up->curregs[R11] = TCBR | RCBR;
/* Program BAUD and clock source. */
up->curregs[R4] &= ~XCLK_MASK;
up->curregs[R4] |= X16CLK;
up->curregs[R12] = brg & 0xff;
up->curregs[R13] = (brg >> 8) & 0xff;
up->curregs[R14] = BRENAB;
/* Character size, stop bits, and parity. */
up->curregs[3] &= ~RxN_MASK;
up->curregs[5] &= ~TxN_MASK;
switch (cflag & CSIZE) {
case CS5:
up->curregs[3] |= Rx5;
up->curregs[5] |= Tx5;
up->parity_mask = 0x1f;
break;
case CS6:
up->curregs[3] |= Rx6;
up->curregs[5] |= Tx6;
up->parity_mask = 0x3f;
break;
case CS7:
up->curregs[3] |= Rx7;
up->curregs[5] |= Tx7;
up->parity_mask = 0x7f;
break;
case CS8:
default:
up->curregs[3] |= Rx8;
up->curregs[5] |= Tx8;
up->parity_mask = 0xff;
break;
}
up->curregs[4] &= ~0x0c;
if (cflag & CSTOPB)
up->curregs[4] |= SB2;
else
up->curregs[4] |= SB1;
if (cflag & PARENB)
up->curregs[4] |= PAR_ENAB;
else
up->curregs[4] &= ~PAR_ENAB;
if (!(cflag & PARODD))
up->curregs[4] |= PAR_EVEN;
else
up->curregs[4] &= ~PAR_EVEN;
up->port.read_status_mask = Rx_OVR;
if (iflag & INPCK)
up->port.read_status_mask |= CRC_ERR | PAR_ERR;
if (iflag & (IGNBRK | BRKINT | PARMRK))
up->port.read_status_mask |= BRK_ABRT;
up->port.ignore_status_mask = 0;
if (iflag & IGNPAR)
up->port.ignore_status_mask |= CRC_ERR | PAR_ERR;
if (iflag & IGNBRK) {
up->port.ignore_status_mask |= BRK_ABRT;
if (iflag & IGNPAR)
up->port.ignore_status_mask |= Rx_OVR;
}
if ((cflag & CREAD) == 0)
up->port.ignore_status_mask = 0xff;
}
/* The port lock is not held. */
static void
ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
struct uart_ip22zilog_port *up =
container_of(port, struct uart_ip22zilog_port, port);
unsigned long flags;
int baud, brg;
baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
spin_lock_irqsave(&up->port.lock, flags);
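	/*
	 * Worked example, assuming the usual Zilog rounding formula
	 * BPS_TO_BRG(bps, freq) = (freq + bps) / (2 * bps) - 2: the BRG
	 * input is ZS_CLOCK / 16 = 229500 Hz, so 9600 baud gives brg = 10,
	 * and the generator's divide-by-(brg + 2) yields 9562.5 baud, well
	 * within tolerance.
	 */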
brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
ip22zilog_convert_to_zs(up, termios->c_cflag, termios->c_iflag, brg);
if (UART_ENABLE_MS(&up->port, termios->c_cflag))
up->flags |= IP22ZILOG_FLAG_MODEM_STATUS;
else
up->flags &= ~IP22ZILOG_FLAG_MODEM_STATUS;
ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
uart_update_timeout(port, termios->c_cflag, baud);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static const char *ip22zilog_type(struct uart_port *port)
{
return "IP22-Zilog";
}
/* We do not request/release mappings of the registers here, this
* happens at early serial probe time.
*/
static void ip22zilog_release_port(struct uart_port *port)
{
}
static int ip22zilog_request_port(struct uart_port *port)
{
return 0;
}
/* These do not need to do anything interesting either. */
static void ip22zilog_config_port(struct uart_port *port, int flags)
{
}
/* We do not support letting the user mess with the divisor, IRQ, etc. */
static int ip22zilog_verify_port(struct uart_port *port, struct serial_struct *ser)
{
return -EINVAL;
}
static const struct uart_ops ip22zilog_pops = {
.tx_empty = ip22zilog_tx_empty,
.set_mctrl = ip22zilog_set_mctrl,
.get_mctrl = ip22zilog_get_mctrl,
.stop_tx = ip22zilog_stop_tx,
.start_tx = ip22zilog_start_tx,
.stop_rx = ip22zilog_stop_rx,
.enable_ms = ip22zilog_enable_ms,
.break_ctl = ip22zilog_break_ctl,
.startup = ip22zilog_startup,
.shutdown = ip22zilog_shutdown,
.set_termios = ip22zilog_set_termios,
.type = ip22zilog_type,
.release_port = ip22zilog_release_port,
.request_port = ip22zilog_request_port,
.config_port = ip22zilog_config_port,
.verify_port = ip22zilog_verify_port,
};
static struct uart_ip22zilog_port *ip22zilog_port_table;
static struct zilog_layout **ip22zilog_chip_regs;
static struct uart_ip22zilog_port *ip22zilog_irq_chain;
static int zilog_irq = -1;
static void * __init alloc_one_table(unsigned long size)
{
return kzalloc(size, GFP_KERNEL);
}
static void __init ip22zilog_alloc_tables(void)
{
ip22zilog_port_table = (struct uart_ip22zilog_port *)
alloc_one_table(NUM_CHANNELS * sizeof(struct uart_ip22zilog_port));
ip22zilog_chip_regs = (struct zilog_layout **)
alloc_one_table(NUM_IP22ZILOG * sizeof(struct zilog_layout *));
if (ip22zilog_port_table == NULL || ip22zilog_chip_regs == NULL) {
panic("IP22-Zilog: Cannot allocate IP22-Zilog tables.");
}
}
/* Get the address of the registers for IP22-Zilog instance CHIP. */
static struct zilog_layout * __init get_zs(int chip)
{
unsigned long base;
if (chip < 0 || chip >= NUM_IP22ZILOG) {
panic("IP22-Zilog: Illegal chip number %d in get_zs.", chip);
}
/* Not probe-able, hard code it. */
base = (unsigned long) &sgioc->uart;
zilog_irq = SGI_SERIAL_IRQ;
request_mem_region(base, 8, "IP22-Zilog");
return (struct zilog_layout *) base;
}
#define ZS_PUT_CHAR_MAX_DELAY 2000 /* 10 ms */
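/* 2000 polls of 5 us each in ip22zilog_put_char() gives the 10 ms bound. */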
#ifdef CONFIG_SERIAL_IP22_ZILOG_CONSOLE
static void ip22zilog_put_char(struct uart_port *port, unsigned char ch)
{
struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
int loops = ZS_PUT_CHAR_MAX_DELAY;
	/* This is a timed polling loop, so do not replace the explicit
	 * udelay() with ZSDELAY(), as ZSDELAY() is a NOP on some
	 * platforms. -DaveM
	 */
do {
unsigned char val = readb(&channel->control);
if (val & Tx_BUF_EMP) {
ZSDELAY();
break;
}
udelay(5);
} while (--loops);
writeb(ch, &channel->data);
ZSDELAY();
ZS_WSYNC(channel);
}
static void
ip22zilog_console_write(struct console *con, const char *s, unsigned int count)
{
struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index];
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, count, ip22zilog_put_char);
udelay(2);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static int __init ip22zilog_console_setup(struct console *con, char *options)
{
struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index];
unsigned long flags;
int baud = 9600, bits = 8;
int parity = 'n';
int flow = 'n';
up->flags |= IP22ZILOG_FLAG_IS_CONS;
printk(KERN_INFO "Console: ttyS%d (IP22-Zilog)\n", con->index);
spin_lock_irqsave(&up->port.lock, flags);
up->curregs[R15] |= BRKIE;
__ip22zilog_startup(up);
spin_unlock_irqrestore(&up->port.lock, flags);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&up->port, con, baud, parity, bits, flow);
}
static struct uart_driver ip22zilog_reg;
static struct console ip22zilog_console = {
.name = "ttyS",
.write = ip22zilog_console_write,
.device = uart_console_device,
.setup = ip22zilog_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &ip22zilog_reg,
};
#endif /* CONFIG_SERIAL_IP22_ZILOG_CONSOLE */
static struct uart_driver ip22zilog_reg = {
.owner = THIS_MODULE,
.driver_name = "serial",
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
.nr = NUM_CHANNELS,
#ifdef CONFIG_SERIAL_IP22_ZILOG_CONSOLE
.cons = &ip22zilog_console,
#endif
};
static void __init ip22zilog_prepare(void)
{
unsigned char sysrq_on = IS_ENABLED(CONFIG_SERIAL_IP22_ZILOG_CONSOLE);
struct uart_ip22zilog_port *up;
struct zilog_layout *rp;
int channel, chip;
/*
* Temporary fix.
*/
for (channel = 0; channel < NUM_CHANNELS; channel++)
spin_lock_init(&ip22zilog_port_table[channel].port.lock);
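	/*
	 * Chain the ports from the last channel down to the first so the
	 * shared interrupt handler can walk every port starting from
	 * ip22zilog_irq_chain.
	 */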
ip22zilog_irq_chain = &ip22zilog_port_table[NUM_CHANNELS - 1];
up = &ip22zilog_port_table[0];
for (channel = NUM_CHANNELS - 1 ; channel > 0; channel--)
up[channel].next = &up[channel - 1];
up[channel].next = NULL;
for (chip = 0; chip < NUM_IP22ZILOG; chip++) {
if (!ip22zilog_chip_regs[chip]) {
ip22zilog_chip_regs[chip] = rp = get_zs(chip);
up[(chip * 2) + 0].port.membase = (char *) &rp->channelB;
up[(chip * 2) + 1].port.membase = (char *) &rp->channelA;
/* In theory mapbase is the physical address ... */
up[(chip * 2) + 0].port.mapbase =
(unsigned long) ioremap((unsigned long) &rp->channelB, 8);
up[(chip * 2) + 1].port.mapbase =
(unsigned long) ioremap((unsigned long) &rp->channelA, 8);
}
/* Channel A */
up[(chip * 2) + 0].port.iotype = UPIO_MEM;
up[(chip * 2) + 0].port.irq = zilog_irq;
up[(chip * 2) + 0].port.uartclk = ZS_CLOCK;
up[(chip * 2) + 0].port.fifosize = 1;
up[(chip * 2) + 0].port.has_sysrq = sysrq_on;
up[(chip * 2) + 0].port.ops = &ip22zilog_pops;
up[(chip * 2) + 0].port.type = PORT_IP22ZILOG;
up[(chip * 2) + 0].port.flags = 0;
up[(chip * 2) + 0].port.line = (chip * 2) + 0;
up[(chip * 2) + 0].flags = 0;
/* Channel B */
up[(chip * 2) + 1].port.iotype = UPIO_MEM;
up[(chip * 2) + 1].port.irq = zilog_irq;
up[(chip * 2) + 1].port.uartclk = ZS_CLOCK;
up[(chip * 2) + 1].port.fifosize = 1;
up[(chip * 2) + 1].port.has_sysrq = sysrq_on;
up[(chip * 2) + 1].port.ops = &ip22zilog_pops;
up[(chip * 2) + 1].port.type = PORT_IP22ZILOG;
up[(chip * 2) + 1].port.line = (chip * 2) + 1;
up[(chip * 2) + 1].flags |= IP22ZILOG_FLAG_IS_CHANNEL_A;
}
for (channel = 0; channel < NUM_CHANNELS; channel++) {
struct uart_ip22zilog_port *up = &ip22zilog_port_table[channel];
int brg;
/* Normal serial TTY. */
up->parity_mask = 0xff;
up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
up->curregs[R3] = RxENAB | Rx8;
up->curregs[R5] = TxENAB | Tx8;
up->curregs[R9] = NV | MIE;
up->curregs[R10] = NRZ;
up->curregs[R11] = TCBR | RCBR;
brg = BPS_TO_BRG(9600, ZS_CLOCK / ZS_CLOCK_DIVISOR);
up->curregs[R12] = (brg & 0xff);
up->curregs[R13] = (brg >> 8) & 0xff;
up->curregs[R14] = BRENAB;
}
}
static int __init ip22zilog_ports_init(void)
{
int ret;
printk(KERN_INFO "Serial: IP22 Zilog driver (%d chips).\n", NUM_IP22ZILOG);
ip22zilog_prepare();
if (request_irq(zilog_irq, ip22zilog_interrupt, 0,
"IP22-Zilog", ip22zilog_irq_chain)) {
panic("IP22-Zilog: Unable to register zs interrupt handler.\n");
}
ret = uart_register_driver(&ip22zilog_reg);
if (ret == 0) {
int i;
for (i = 0; i < NUM_CHANNELS; i++) {
struct uart_ip22zilog_port *up = &ip22zilog_port_table[i];
uart_add_one_port(&ip22zilog_reg, &up->port);
}
}
return ret;
}
static int __init ip22zilog_init(void)
{
/* IP22 Zilog setup is hard coded, no probing to do. */
ip22zilog_alloc_tables();
ip22zilog_ports_init();
return 0;
}
static void __exit ip22zilog_exit(void)
{
int i;
struct uart_ip22zilog_port *up;
for (i = 0; i < NUM_CHANNELS; i++) {
up = &ip22zilog_port_table[i];
uart_remove_one_port(&ip22zilog_reg, &up->port);
}
/* Free IO mem */
up = &ip22zilog_port_table[0];
for (i = 0; i < NUM_IP22ZILOG; i++) {
if (up[(i * 2) + 0].port.mapbase) {
iounmap((void*)up[(i * 2) + 0].port.mapbase);
up[(i * 2) + 0].port.mapbase = 0;
}
if (up[(i * 2) + 1].port.mapbase) {
iounmap((void*)up[(i * 2) + 1].port.mapbase);
up[(i * 2) + 1].port.mapbase = 0;
}
}
uart_unregister_driver(&ip22zilog_reg);
}
module_init(ip22zilog_init);
module_exit(ip22zilog_exit);
/* David wrote it but I'm to blame for the bugs ... */
MODULE_AUTHOR("Ralf Baechle <[email protected]>");
MODULE_DESCRIPTION("SGI Zilog serial port driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/ip22zilog.c |
// SPDX-License-Identifier: GPL-2.0+
/****************************************************************************/
/*
* mcf.c -- Freescale ColdFire UART driver
*
* (C) Copyright 2003-2007, Greg Ungerer <[email protected]>
*/
/****************************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/nettel.h>
/****************************************************************************/
/*
* Some boards implement the DTR/DCD lines using GPIO lines, most
* don't. Dummy out the access macros for those that don't. Those
 * that do should define these macros somewhere in their board
 * specific include files.
*/
#if !defined(mcf_getppdcd)
#define mcf_getppdcd(p) (1)
#endif
#if !defined(mcf_getppdtr)
#define mcf_getppdtr(p) (1)
#endif
#if !defined(mcf_setppdtr)
#define mcf_setppdtr(p, v) do { } while (0)
#endif
/****************************************************************************/
/*
* Local per-uart structure.
*/
struct mcf_uart {
struct uart_port port;
unsigned int sigs; /* Local copy of line sigs */
unsigned char imr; /* Local IMR mirror */
};
/****************************************************************************/
static unsigned int mcf_tx_empty(struct uart_port *port)
{
return (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXEMPTY) ?
TIOCSER_TEMT : 0;
}
/****************************************************************************/
static unsigned int mcf_get_mctrl(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned int sigs;
sigs = (readb(port->membase + MCFUART_UIPR) & MCFUART_UIPR_CTS) ?
0 : TIOCM_CTS;
sigs |= (pp->sigs & TIOCM_RTS);
sigs |= (mcf_getppdcd(port->line) ? TIOCM_CD : 0);
sigs |= (mcf_getppdtr(port->line) ? TIOCM_DTR : 0);
return sigs;
}
/****************************************************************************/
static void mcf_set_mctrl(struct uart_port *port, unsigned int sigs)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
pp->sigs = sigs;
mcf_setppdtr(port->line, (sigs & TIOCM_DTR));
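	/*
	 * The output port has separate bit-set (UOP1) and bit-clear (UOP0)
	 * command registers, so the same RTS mask asserts or negates RTS
	 * depending on which register it is written to.
	 */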
if (sigs & TIOCM_RTS)
writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP1);
else
writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP0);
}
/****************************************************************************/
static void mcf_start_tx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
if (port->rs485.flags & SER_RS485_ENABLED) {
/* Enable Transmitter */
writeb(MCFUART_UCR_TXENABLE, port->membase + MCFUART_UCR);
/* Manually assert RTS */
writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP1);
}
pp->imr |= MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
}
/****************************************************************************/
static void mcf_stop_tx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
pp->imr &= ~MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
}
/****************************************************************************/
static void mcf_stop_rx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
pp->imr &= ~MCFUART_UIR_RXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
}
/****************************************************************************/
static void mcf_break_ctl(struct uart_port *port, int break_state)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (break_state == -1)
writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR);
else
writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR);
spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
static int mcf_startup(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* Reset UART, get it into known state... */
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
/* Enable the UART transmitter and receiver */
writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
port->membase + MCFUART_UCR);
/* Enable RX interrupts now */
pp->imr = MCFUART_UIR_RXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
/****************************************************************************/
static void mcf_shutdown(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* Disable all interrupts now */
pp->imr = 0;
writeb(pp->imr, port->membase + MCFUART_UIMR);
/* Disable UART transmitter and receiver */
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, baudclk;
#if defined(CONFIG_M5272)
unsigned int baudfr;
#endif
unsigned char mr1, mr2;
baud = uart_get_baud_rate(port, termios, old, 0, 230400);
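	/*
	 * The UART baud clock is the bus clock divided by 32 times the
	 * divisor; adding 16 before the divide rounds to the nearest
	 * divisor. The 5272 variant instead truncates and keeps a
	 * fractional part for its fraction-precision divider (UFPD).
	 */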
#if defined(CONFIG_M5272)
baudclk = (MCF_BUSCLK / baud) / 32;
baudfr = (((MCF_BUSCLK / baud) + 1) / 2) % 16;
#else
baudclk = ((MCF_BUSCLK / baud) + 16) / 32;
#endif
mr1 = MCFUART_MR1_RXIRQRDY | MCFUART_MR1_RXERRCHAR;
mr2 = 0;
switch (termios->c_cflag & CSIZE) {
case CS5: mr1 |= MCFUART_MR1_CS5; break;
case CS6: mr1 |= MCFUART_MR1_CS6; break;
case CS7: mr1 |= MCFUART_MR1_CS7; break;
case CS8:
default: mr1 |= MCFUART_MR1_CS8; break;
}
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & CMSPAR) {
if (termios->c_cflag & PARODD)
mr1 |= MCFUART_MR1_PARITYMARK;
else
mr1 |= MCFUART_MR1_PARITYSPACE;
} else {
if (termios->c_cflag & PARODD)
mr1 |= MCFUART_MR1_PARITYODD;
else
mr1 |= MCFUART_MR1_PARITYEVEN;
}
} else {
mr1 |= MCFUART_MR1_PARITYNONE;
}
/*
* FIXME: port->read_status_mask and port->ignore_status_mask
* need to be initialized based on termios settings for
* INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
*/
if (termios->c_cflag & CSTOPB)
mr2 |= MCFUART_MR2_STOP2;
else
mr2 |= MCFUART_MR2_STOP1;
if (termios->c_cflag & CRTSCTS) {
mr1 |= MCFUART_MR1_RXRTS;
mr2 |= MCFUART_MR2_TXCTS;
}
spin_lock_irqsave(&port->lock, flags);
if (port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
mr2 |= MCFUART_MR2_TXRTS;
}
uart_update_timeout(port, termios->c_cflag, baud);
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR);
writeb(mr1, port->membase + MCFUART_UMR);
writeb(mr2, port->membase + MCFUART_UMR);
writeb((baudclk & 0xff00) >> 8, port->membase + MCFUART_UBG1);
writeb((baudclk & 0xff), port->membase + MCFUART_UBG2);
#if defined(CONFIG_M5272)
writeb((baudfr & 0x0f), port->membase + MCFUART_UFPD);
#endif
writeb(MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER,
port->membase + MCFUART_UCSR);
writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
port->membase + MCFUART_UCR);
spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
static void mcf_rx_chars(struct mcf_uart *pp)
{
struct uart_port *port = &pp->port;
u8 status, ch, flag;
while ((status = readb(port->membase + MCFUART_USR)) & MCFUART_USR_RXREADY) {
ch = readb(port->membase + MCFUART_URB);
flag = TTY_NORMAL;
port->icount.rx++;
if (status & MCFUART_USR_RXERR) {
writeb(MCFUART_UCR_CMDRESETERR,
port->membase + MCFUART_UCR);
if (status & MCFUART_USR_RXBREAK) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
} else if (status & MCFUART_USR_RXPARITY) {
port->icount.parity++;
} else if (status & MCFUART_USR_RXOVERRUN) {
port->icount.overrun++;
} else if (status & MCFUART_USR_RXFRAMING) {
port->icount.frame++;
}
status &= port->read_status_mask;
if (status & MCFUART_USR_RXBREAK)
flag = TTY_BREAK;
else if (status & MCFUART_USR_RXPARITY)
flag = TTY_PARITY;
else if (status & MCFUART_USR_RXFRAMING)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, ch))
continue;
uart_insert_char(port, status, MCFUART_USR_RXOVERRUN, ch, flag);
}
tty_flip_buffer_push(&port->state->port);
}
/****************************************************************************/
static void mcf_tx_chars(struct mcf_uart *pp)
{
struct uart_port *port = &pp->port;
bool pending;
u8 ch;
pending = uart_port_tx(port, ch,
readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY,
writeb(ch, port->membase + MCFUART_UTB));
/* Disable TX to negate RTS automatically */
if (!pending && (port->rs485.flags & SER_RS485_ENABLED))
writeb(MCFUART_UCR_TXDISABLE, port->membase + MCFUART_UCR);
}
/****************************************************************************/
static irqreturn_t mcf_interrupt(int irq, void *data)
{
struct uart_port *port = data;
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned int isr;
irqreturn_t ret = IRQ_NONE;
isr = readb(port->membase + MCFUART_UISR) & pp->imr;
spin_lock(&port->lock);
if (isr & MCFUART_UIR_RXREADY) {
mcf_rx_chars(pp);
ret = IRQ_HANDLED;
}
if (isr & MCFUART_UIR_TXREADY) {
mcf_tx_chars(pp);
ret = IRQ_HANDLED;
}
spin_unlock(&port->lock);
return ret;
}
/****************************************************************************/
static void mcf_config_port(struct uart_port *port, int flags)
{
port->type = PORT_MCF;
port->fifosize = MCFUART_TXFIFOSIZE;
/* Clear mask, so no surprise interrupts. */
writeb(0, port->membase + MCFUART_UIMR);
if (request_irq(port->irq, mcf_interrupt, 0, "UART", port))
printk(KERN_ERR "MCF: unable to attach ColdFire UART %d "
"interrupt vector=%d\n", port->line, port->irq);
}
/****************************************************************************/
static const char *mcf_type(struct uart_port *port)
{
return (port->type == PORT_MCF) ? "ColdFire UART" : NULL;
}
/****************************************************************************/
static int mcf_request_port(struct uart_port *port)
{
/* UARTs always present */
return 0;
}
/****************************************************************************/
static void mcf_release_port(struct uart_port *port)
{
/* Nothing to release... */
}
/****************************************************************************/
static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
{
if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_MCF))
return -EINVAL;
return 0;
}
/****************************************************************************/
/* Enable or disable the RS485 support */
static int mcf_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
unsigned char mr1, mr2;
/* Get mode registers */
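	/*
	 * MR1 and MR2 share one address; the UART's internal mode-register
	 * pointer advances on each access, so two consecutive reads return
	 * MR1 then MR2 (assuming the pointer currently selects MR1).
	 */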
mr1 = readb(port->membase + MCFUART_UMR);
mr2 = readb(port->membase + MCFUART_UMR);
if (rs485->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
/* Automatically negate RTS after TX completes */
mr2 |= MCFUART_MR2_TXRTS;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
mr2 &= ~MCFUART_MR2_TXRTS;
}
writeb(mr1, port->membase + MCFUART_UMR);
writeb(mr2, port->membase + MCFUART_UMR);
return 0;
}
static const struct serial_rs485 mcf_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
};
/****************************************************************************/
/*
* Define the basic serial functions we support.
*/
static const struct uart_ops mcf_uart_ops = {
.tx_empty = mcf_tx_empty,
.get_mctrl = mcf_get_mctrl,
.set_mctrl = mcf_set_mctrl,
.start_tx = mcf_start_tx,
.stop_tx = mcf_stop_tx,
.stop_rx = mcf_stop_rx,
.break_ctl = mcf_break_ctl,
.startup = mcf_startup,
.shutdown = mcf_shutdown,
.set_termios = mcf_set_termios,
.type = mcf_type,
.request_port = mcf_request_port,
.release_port = mcf_release_port,
.config_port = mcf_config_port,
.verify_port = mcf_verify_port,
};
static struct mcf_uart mcf_ports[4];
#define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
/****************************************************************************/
#if defined(CONFIG_SERIAL_MCF_CONSOLE)
/****************************************************************************/
int __init early_mcf_setup(struct mcf_platform_uart *platp)
{
struct uart_port *port;
int i;
for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
port = &mcf_ports[i].port;
port->line = i;
port->type = PORT_MCF;
port->mapbase = platp[i].mapbase;
port->membase = (platp[i].membase) ? platp[i].membase :
(unsigned char __iomem *) port->mapbase;
port->iotype = SERIAL_IO_MEM;
port->irq = platp[i].irq;
port->uartclk = MCF_BUSCLK;
port->flags = UPF_BOOT_AUTOCONF;
port->rs485_config = mcf_config_rs485;
port->rs485_supported = mcf_rs485_supported;
port->ops = &mcf_uart_ops;
}
return 0;
}
/****************************************************************************/
static void mcf_console_putc(struct console *co, const char c)
{
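	/*
	 * Bounded busy-wait: poll TXREADY at most 0x10000 times before and
	 * after writing so a wedged transmitter cannot hang the console
	 * indefinitely.
	 */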
	struct uart_port *port = &mcf_ports[co->index].port;
int i;
for (i = 0; (i < 0x10000); i++) {
if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY)
break;
}
writeb(c, port->membase + MCFUART_UTB);
for (i = 0; (i < 0x10000); i++) {
if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY)
break;
}
}
/****************************************************************************/
static void mcf_console_write(struct console *co, const char *s, unsigned int count)
{
for (; (count); count--, s++) {
mcf_console_putc(co, *s);
if (*s == '\n')
mcf_console_putc(co, '\r');
}
}
/****************************************************************************/
static int __init mcf_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = CONFIG_SERIAL_MCF_BAUDRATE;
int bits = 8;
int parity = 'n';
int flow = 'n';
if ((co->index < 0) || (co->index >= MCF_MAXPORTS))
co->index = 0;
port = &mcf_ports[co->index].port;
	if (!port->membase)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
/****************************************************************************/
static struct uart_driver mcf_driver;
static struct console mcf_console = {
.name = "ttyS",
.write = mcf_console_write,
.device = uart_console_device,
.setup = mcf_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &mcf_driver,
};
static int __init mcf_console_init(void)
{
register_console(&mcf_console);
return 0;
}
console_initcall(mcf_console_init);
#define MCF_CONSOLE &mcf_console
/****************************************************************************/
#else
/****************************************************************************/
#define MCF_CONSOLE NULL
/****************************************************************************/
#endif /* CONFIG_SERIAL_MCF_CONSOLE */
/****************************************************************************/
/*
* Define the mcf UART driver structure.
*/
static struct uart_driver mcf_driver = {
.owner = THIS_MODULE,
.driver_name = "mcf",
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
.nr = MCF_MAXPORTS,
.cons = MCF_CONSOLE,
};
/****************************************************************************/
static int mcf_probe(struct platform_device *pdev)
{
struct mcf_platform_uart *platp = dev_get_platdata(&pdev->dev);
struct uart_port *port;
int i;
for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
port = &mcf_ports[i].port;
port->line = i;
port->type = PORT_MCF;
port->mapbase = platp[i].mapbase;
port->membase = (platp[i].membase) ? platp[i].membase :
(unsigned char __iomem *) platp[i].mapbase;
port->dev = &pdev->dev;
port->iotype = SERIAL_IO_MEM;
port->irq = platp[i].irq;
port->uartclk = MCF_BUSCLK;
port->ops = &mcf_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->rs485_config = mcf_config_rs485;
port->rs485_supported = mcf_rs485_supported;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MCF_CONSOLE);
uart_add_one_port(&mcf_driver, port);
}
return 0;
}
/****************************************************************************/
static int mcf_remove(struct platform_device *pdev)
{
struct uart_port *port;
int i;
for (i = 0; (i < MCF_MAXPORTS); i++) {
port = &mcf_ports[i].port;
if (port)
uart_remove_one_port(&mcf_driver, port);
}
return 0;
}
/****************************************************************************/
static struct platform_driver mcf_platform_driver = {
.probe = mcf_probe,
.remove = mcf_remove,
.driver = {
.name = "mcfuart",
},
};
/****************************************************************************/
static int __init mcf_init(void)
{
int rc;
printk("ColdFire internal UART serial driver\n");
rc = uart_register_driver(&mcf_driver);
if (rc)
return rc;
rc = platform_driver_register(&mcf_platform_driver);
if (rc) {
uart_unregister_driver(&mcf_driver);
return rc;
}
return 0;
}
/****************************************************************************/
static void __exit mcf_exit(void)
{
platform_driver_unregister(&mcf_platform_driver);
uart_unregister_driver(&mcf_driver);
}
/****************************************************************************/
module_init(mcf_init);
module_exit(mcf_exit);
MODULE_AUTHOR("Greg Ungerer <[email protected]>");
MODULE_DESCRIPTION("Freescale ColdFire UART driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mcfuart");
/****************************************************************************/
| linux-master | drivers/tty/serial/mcf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* serial_tegra.c
*
* High-speed serial driver for NVIDIA Tegra SoCs
*
* Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
*
* Author: Laxman Dewangan <[email protected]>
*/
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#define TEGRA_UART_TYPE "TEGRA_UART"
#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
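/* BYTES_TO_ALIGN() gives the offset of (x) from a 4-byte boundary. */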
#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
#define TEGRA_UART_IER_EORD 0x20
#define TEGRA_UART_MCR_RTS_EN 0x40
#define TEGRA_UART_MCR_CTS_EN 0x20
#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
UART_LSR_PE | UART_LSR_FE)
#define TEGRA_UART_IRDA_CSR 0x08
#define TEGRA_UART_SIR_ENABLED 0x80
#define TEGRA_UART_TX_PIO 1
#define TEGRA_UART_TX_DMA 2
#define TEGRA_UART_MIN_DMA 16
#define TEGRA_UART_FIFO_SIZE 32
/*
 * The Tx FIFO trigger level setting in the Tegra UART is encoded
 * in the reverse way compared to a conventional UART.
*/
#define TEGRA_UART_TX_TRIG_16B 0x00
#define TEGRA_UART_TX_TRIG_8B 0x10
#define TEGRA_UART_TX_TRIG_4B 0x20
#define TEGRA_UART_TX_TRIG_1B 0x30
#define TEGRA_UART_MAXIMUM 8
/* Default UART setting when started: 115200, no parity, 1 stop bit, 8 data bits */
#define TEGRA_UART_DEFAULT_BAUD 115200
#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
/* Tx transfer mode */
#define TEGRA_TX_PIO 1
#define TEGRA_TX_DMA 2
#define TEGRA_UART_FCR_IIR_FIFO_EN 0x40
/**
* struct tegra_uart_chip_data: SOC specific data.
*
* @tx_fifo_full_status: Status flag available for checking tx fifo full.
 * @allow_txfifo_reset_fifo_mode: allow Tx FIFO reset with FIFO mode or not.
 *			Tegra30 does not allow this.
* @support_clk_src_div: Clock source support the clock divider.
* @fifo_mode_enable_status: Is FIFO mode enabled?
* @uart_max_port: Maximum number of UART ports
* @max_dma_burst_bytes: Maximum size of DMA bursts
* @error_tolerance_low_range: Lowest number in the error tolerance range
* @error_tolerance_high_range: Highest number in the error tolerance range
*/
struct tegra_uart_chip_data {
bool tx_fifo_full_status;
bool allow_txfifo_reset_fifo_mode;
bool support_clk_src_div;
bool fifo_mode_enable_status;
int uart_max_port;
int max_dma_burst_bytes;
int error_tolerance_low_range;
int error_tolerance_high_range;
};
struct tegra_baud_tolerance {
u32 lower_range_baud;
u32 upper_range_baud;
s32 tolerance;
};
struct tegra_uart_port {
struct uart_port uport;
const struct tegra_uart_chip_data *cdata;
struct clk *uart_clk;
struct reset_control *rst;
unsigned int current_baud;
/* Register shadow */
unsigned long fcr_shadow;
unsigned long mcr_shadow;
unsigned long lcr_shadow;
unsigned long ier_shadow;
bool rts_active;
int tx_in_progress;
unsigned int tx_bytes;
bool enable_modem_interrupt;
bool rx_timeout;
int rx_in_progress;
int symb_bit;
struct dma_chan *rx_dma_chan;
struct dma_chan *tx_dma_chan;
dma_addr_t rx_dma_buf_phys;
dma_addr_t tx_dma_buf_phys;
unsigned char *rx_dma_buf_virt;
unsigned char *tx_dma_buf_virt;
struct dma_async_tx_descriptor *tx_dma_desc;
struct dma_async_tx_descriptor *rx_dma_desc;
dma_cookie_t tx_cookie;
dma_cookie_t rx_cookie;
unsigned int tx_bytes_requested;
unsigned int rx_bytes_requested;
struct tegra_baud_tolerance *baud_tolerance;
int n_adjustable_baud_rates;
int required_rate;
int configured_rate;
bool use_rx_pio;
bool use_tx_pio;
bool rx_dma_active;
};
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
bool dma_to_memory);
static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
unsigned long reg)
{
return readl(tup->uport.membase + (reg << tup->uport.regshift));
}
static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
unsigned long reg)
{
writel(val, tup->uport.membase + (reg << tup->uport.regshift));
}
static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
{
return container_of(u, struct tegra_uart_port, uport);
}
static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
/*
* RI - Ring detector is active
 * CD/DCD/CAR - Carrier detect is always active. For some reason
 * Linux has different names for carrier detect.
 * DSR - Data Set Ready is always reported active, as the hardware
 * doesn't support it; it is unclear whether Linux supports it yet.
* CTS - Clear to send. Always set to active, as the hardware handles
* CTS automatically.
*/
if (tup->enable_modem_interrupt)
return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
return TIOCM_CTS;
}
static void set_rts(struct tegra_uart_port *tup, bool active)
{
unsigned long mcr;
mcr = tup->mcr_shadow;
if (active)
mcr |= TEGRA_UART_MCR_RTS_EN;
else
mcr &= ~TEGRA_UART_MCR_RTS_EN;
if (mcr != tup->mcr_shadow) {
tegra_uart_write(tup, mcr, UART_MCR);
tup->mcr_shadow = mcr;
}
}
static void set_dtr(struct tegra_uart_port *tup, bool active)
{
unsigned long mcr;
mcr = tup->mcr_shadow;
if (active)
mcr |= UART_MCR_DTR;
else
mcr &= ~UART_MCR_DTR;
if (mcr != tup->mcr_shadow) {
tegra_uart_write(tup, mcr, UART_MCR);
tup->mcr_shadow = mcr;
}
}
static void set_loopbk(struct tegra_uart_port *tup, bool active)
{
unsigned long mcr = tup->mcr_shadow;
if (active)
mcr |= UART_MCR_LOOP;
else
mcr &= ~UART_MCR_LOOP;
if (mcr != tup->mcr_shadow) {
tegra_uart_write(tup, mcr, UART_MCR);
tup->mcr_shadow = mcr;
}
}
static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
int enable;
tup->rts_active = !!(mctrl & TIOCM_RTS);
set_rts(tup, tup->rts_active);
enable = !!(mctrl & TIOCM_DTR);
set_dtr(tup, enable);
enable = !!(mctrl & TIOCM_LOOP);
set_loopbk(tup, enable);
}
static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
unsigned long lcr;
lcr = tup->lcr_shadow;
if (break_ctl)
lcr |= UART_LCR_SBC;
else
lcr &= ~UART_LCR_SBC;
tegra_uart_write(tup, lcr, UART_LCR);
tup->lcr_shadow = lcr;
}
/**
* tegra_uart_wait_cycle_time: Wait for N UART clock periods
*
* @tup: Tegra serial port data structure.
* @cycles: Number of clock periods to wait.
*
 * Tegra UARTs oversample at 16X the baud/bit rate, so the UART
 * clock runs at 16X the current baud rate.
*/
static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
unsigned int cycles)
{
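	/*
	 * One UART clock period is 1 / (baud * 16) seconds, so the delay in
	 * microseconds is cycles * 1000000 / (baud * 16), rounded up.
	 */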
if (tup->current_baud)
udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
}
/* Wait for a symbol-time. */
static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
unsigned int syms)
{
if (tup->current_baud)
udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
tup->current_baud));
}
static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
{
unsigned long iir;
unsigned int tmout = 100;
do {
iir = tegra_uart_read(tup, UART_IIR);
if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
return 0;
udelay(1);
} while (--tmout);
return -ETIMEDOUT;
}
static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
unsigned long fcr = tup->fcr_shadow;
unsigned int lsr, tmout = 10000;
if (tup->rts_active)
set_rts(tup, false);
if (tup->cdata->allow_txfifo_reset_fifo_mode) {
fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
tegra_uart_write(tup, fcr, UART_FCR);
} else {
fcr &= ~UART_FCR_ENABLE_FIFO;
tegra_uart_write(tup, fcr, UART_FCR);
udelay(60);
fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
tegra_uart_write(tup, fcr, UART_FCR);
fcr |= UART_FCR_ENABLE_FIFO;
tegra_uart_write(tup, fcr, UART_FCR);
if (tup->cdata->fifo_mode_enable_status)
tegra_uart_wait_fifo_mode_enabled(tup);
}
/* Dummy read to ensure the write is posted */
tegra_uart_read(tup, UART_SCR);
/*
* For all tegra devices (up to t210), there is a hardware issue that
* requires software to wait for 32 UART clock periods for the flush
* to propagate, otherwise data could be lost.
*/
tegra_uart_wait_cycle_time(tup, 32);
do {
lsr = tegra_uart_read(tup, UART_LSR);
if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
break;
udelay(1);
} while (--tmout);
if (tup->rts_active)
set_rts(tup, true);
}
static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
unsigned int baud, long rate)
{
int i;
for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
if (baud >= tup->baud_tolerance[i].lower_range_baud &&
baud <= tup->baud_tolerance[i].upper_range_baud)
return (rate + (rate *
tup->baud_tolerance[i].tolerance) / 10000);
}
return rate;
}
static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
{
long diff;
diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
/ tup->required_rate;
if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
diff > (tup->cdata->error_tolerance_high_range * 100)) {
dev_err(tup->uport.dev,
"configured baud rate is out of range by %ld", diff);
return -EIO;
}
return 0;
}
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
{
unsigned long rate;
unsigned int divisor;
unsigned long lcr;
unsigned long flags;
int ret;
if (tup->current_baud == baud)
return 0;
if (tup->cdata->support_clk_src_div) {
rate = baud * 16;
tup->required_rate = rate;
if (tup->n_adjustable_baud_rates)
rate = tegra_get_tolerance_rate(tup, baud, rate);
ret = clk_set_rate(tup->uart_clk, rate);
if (ret < 0) {
dev_err(tup->uport.dev,
"clk_set_rate() failed for rate %lu\n", rate);
return ret;
}
tup->configured_rate = clk_get_rate(tup->uart_clk);
divisor = 1;
ret = tegra_check_rate_in_range(tup);
if (ret < 0)
return ret;
} else {
rate = clk_get_rate(tup->uart_clk);
divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
}
spin_lock_irqsave(&tup->uport.lock, flags);
lcr = tup->lcr_shadow;
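	/*
	 * Standard 16550-style divisor-latch access: with LCR.DLAB set, the
	 * TX/RX and IER registers become DLL and DLM so the 16-bit divisor
	 * can be written; DLAB is then cleared to restore normal operation.
	 */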
lcr |= UART_LCR_DLAB;
tegra_uart_write(tup, lcr, UART_LCR);
tegra_uart_write(tup, divisor & 0xFF, UART_TX);
tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
lcr &= ~UART_LCR_DLAB;
tegra_uart_write(tup, lcr, UART_LCR);
/* Dummy read to ensure the write is posted */
tegra_uart_read(tup, UART_SCR);
spin_unlock_irqrestore(&tup->uport.lock, flags);
tup->current_baud = baud;
/* wait two character intervals at new rate */
tegra_uart_wait_sym_time(tup, 2);
return 0;
}
static u8 tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
unsigned long lsr)
{
u8 flag = TTY_NORMAL;
if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
if (lsr & UART_LSR_OE) {
/* Overrun error */
flag = TTY_OVERRUN;
tup->uport.icount.overrun++;
dev_dbg(tup->uport.dev, "Got overrun errors\n");
} else if (lsr & UART_LSR_PE) {
/* Parity error */
flag = TTY_PARITY;
tup->uport.icount.parity++;
dev_dbg(tup->uport.dev, "Got Parity errors\n");
} else if (lsr & UART_LSR_FE) {
flag = TTY_FRAME;
tup->uport.icount.frame++;
dev_dbg(tup->uport.dev, "Got frame errors\n");
} else if (lsr & UART_LSR_BI) {
/*
* Break error
* If FIFO read error without any data, reset Rx FIFO
*/
if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
if (tup->uport.ignore_status_mask & UART_LSR_BI)
return TTY_BREAK;
flag = TTY_BREAK;
tup->uport.icount.brk++;
dev_dbg(tup->uport.dev, "Got Break\n");
}
uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
}
return flag;
}
static int tegra_uart_request_port(struct uart_port *u)
{
return 0;
}
static void tegra_uart_release_port(struct uart_port *u)
{
/* Nothing to do here */
}
static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
{
struct circ_buf *xmit = &tup->uport.state->xmit;
int i;
for (i = 0; i < max_bytes; i++) {
BUG_ON(uart_circ_empty(xmit));
if (tup->cdata->tx_fifo_full_status) {
unsigned long lsr = tegra_uart_read(tup, UART_LSR);
if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
break;
}
tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
uart_xmit_advance(&tup->uport, 1);
}
}
static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
unsigned int bytes)
{
if (bytes > TEGRA_UART_MIN_DMA)
bytes = TEGRA_UART_MIN_DMA;
tup->tx_in_progress = TEGRA_UART_TX_PIO;
tup->tx_bytes = bytes;
tup->ier_shadow |= UART_IER_THRI;
tegra_uart_write(tup, tup->ier_shadow, UART_IER);
}
static void tegra_uart_tx_dma_complete(void *args)
{
struct tegra_uart_port *tup = args;
struct circ_buf *xmit = &tup->uport.state->xmit;
struct dma_tx_state state;
unsigned long flags;
unsigned int count;
dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
spin_lock_irqsave(&tup->uport.lock, flags);
uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&tup->uport);
tegra_uart_start_next_tx(tup);
spin_unlock_irqrestore(&tup->uport.lock, flags);
}
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
unsigned long count)
{
struct circ_buf *xmit = &tup->uport.state->xmit;
dma_addr_t tx_phys_addr;
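	/*
	 * Trim the DMA transfer to a multiple of 16 bytes to match the Tx
	 * DMA burst size; any remainder is picked up by a later pass.
	 */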
tup->tx_bytes = count & ~(0xF);
tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
tup->tx_bytes, DMA_TO_DEVICE);
tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!tup->tx_dma_desc) {
dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
return -EIO;
}
tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
tup->tx_dma_desc->callback_param = tup;
tup->tx_in_progress = TEGRA_UART_TX_DMA;
tup->tx_bytes_requested = tup->tx_bytes;
tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
dma_async_issue_pending(tup->tx_dma_chan);
return 0;
}
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
{
unsigned long tail;
unsigned long count;
struct circ_buf *xmit = &tup->uport.state->xmit;
if (!tup->current_baud)
return;
tail = (unsigned long)&xmit->buf[xmit->tail];
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
if (!count)
return;
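	/*
	 * Short runs go out by PIO; an unaligned buffer tail is first
	 * drained by PIO so the subsequent DMA transfer starts on a
	 * 4-byte boundary; everything else uses DMA.
	 */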
if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
tegra_uart_start_pio_tx(tup, count);
else if (BYTES_TO_ALIGN(tail) > 0)
tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
else
tegra_uart_start_tx_dma(tup, count);
}
/* Called by serial core driver with u->lock taken. */
static void tegra_uart_start_tx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
struct circ_buf *xmit = &u->state->xmit;
if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
tegra_uart_start_next_tx(tup);
}
static unsigned int tegra_uart_tx_empty(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
unsigned int ret = 0;
unsigned long flags;
spin_lock_irqsave(&u->lock, flags);
if (!tup->tx_in_progress) {
unsigned long lsr = tegra_uart_read(tup, UART_LSR);
if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
ret = TIOCSER_TEMT;
}
spin_unlock_irqrestore(&u->lock, flags);
return ret;
}
static void tegra_uart_stop_tx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
struct dma_tx_state state;
unsigned int count;
if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
return;
dmaengine_pause(tup->tx_dma_chan);
dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
dmaengine_terminate_all(tup->tx_dma_chan);
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
}
static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
{
struct circ_buf *xmit = &tup->uport.state->xmit;
tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
tup->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&tup->uport);
tegra_uart_start_next_tx(tup);
}
static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
struct tty_port *port)
{
do {
unsigned long lsr = 0;
u8 ch, flag = TTY_NORMAL;
lsr = tegra_uart_read(tup, UART_LSR);
if (!(lsr & UART_LSR_DR))
break;
flag = tegra_uart_decode_rx_error(tup, lsr);
if (flag != TTY_NORMAL)
continue;
ch = (unsigned char) tegra_uart_read(tup, UART_RX);
tup->uport.icount.rx++;
if (uart_handle_sysrq_char(&tup->uport, ch))
continue;
if (tup->uport.ignore_status_mask & UART_LSR_DR)
continue;
tty_insert_flip_char(port, ch, flag);
} while (1);
}
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
struct tty_port *port,
unsigned int count)
{
int copied;
/* If count is zero, then there is no data to be copied */
if (!count)
return;
tup->uport.icount.rx += count;
if (tup->uport.ignore_status_mask & UART_LSR_DR)
return;
dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
count, DMA_FROM_DEVICE);
copied = tty_insert_flip_string(port,
((unsigned char *)(tup->rx_dma_buf_virt)), count);
if (copied != count) {
WARN_ON(1);
dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
}
dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
count, DMA_TO_DEVICE);
}
static void do_handle_rx_pio(struct tegra_uart_port *tup)
{
struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
struct tty_port *port = &tup->uport.state->port;
tegra_uart_handle_rx_pio(tup, port);
if (tty) {
tty_flip_buffer_push(port);
tty_kref_put(tty);
}
}
static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
unsigned int residue)
{
struct tty_port *port = &tup->uport.state->port;
unsigned int count;
async_tx_ack(tup->rx_dma_desc);
count = tup->rx_bytes_requested - residue;
/* If we are here, DMA is stopped */
tegra_uart_copy_rx_to_tty(tup, port, count);
do_handle_rx_pio(tup);
}
static void tegra_uart_rx_dma_complete(void *args)
{
struct tegra_uart_port *tup = args;
struct uart_port *u = &tup->uport;
unsigned long flags;
struct dma_tx_state state;
enum dma_status status;
spin_lock_irqsave(&u->lock, flags);
status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
if (status == DMA_IN_PROGRESS) {
dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
goto done;
}
/* Deactivate flow control to stop sender */
if (tup->rts_active)
set_rts(tup, false);
tup->rx_dma_active = false;
tegra_uart_rx_buffer_push(tup, 0);
tegra_uart_start_rx_dma(tup);
/* Activate flow control to start transfer */
if (tup->rts_active)
set_rts(tup, true);
done:
spin_unlock_irqrestore(&u->lock, flags);
}
static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
{
struct dma_tx_state state;
if (!tup->rx_dma_active) {
do_handle_rx_pio(tup);
return;
}
dmaengine_pause(tup->rx_dma_chan);
dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
dmaengine_terminate_all(tup->rx_dma_chan);
tegra_uart_rx_buffer_push(tup, state.residue);
tup->rx_dma_active = false;
}
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
/* Deactivate flow control to stop sender */
if (tup->rts_active)
set_rts(tup, false);
tegra_uart_terminate_rx_dma(tup);
if (tup->rts_active)
set_rts(tup, true);
}
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
if (tup->rx_dma_active)
return 0;
tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!tup->rx_dma_desc) {
dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
return -EIO;
}
tup->rx_dma_active = true;
tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
tup->rx_dma_desc->callback_param = tup;
tup->rx_bytes_requested = count;
tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
dma_async_issue_pending(tup->rx_dma_chan);
return 0;
}
static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
unsigned long msr;
msr = tegra_uart_read(tup, UART_MSR);
if (!(msr & UART_MSR_ANY_DELTA))
return;
if (msr & UART_MSR_TERI)
tup->uport.icount.rng++;
if (msr & UART_MSR_DDSR)
tup->uport.icount.dsr++;
	/* We may only get DDCD during HW init and reset */
if (msr & UART_MSR_DDCD)
uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
	/* uart_handle_cts_change() will start/stop TX accordingly */
if (msr & UART_MSR_DCTS)
uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
}
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
struct tegra_uart_port *tup = data;
struct uart_port *u = &tup->uport;
unsigned long iir;
unsigned long ier;
bool is_rx_start = false;
bool is_rx_int = false;
unsigned long flags;
spin_lock_irqsave(&u->lock, flags);
while (1) {
iir = tegra_uart_read(tup, UART_IIR);
if (iir & UART_IIR_NO_INT) {
if (!tup->use_rx_pio && is_rx_int) {
tegra_uart_handle_rx_dma(tup);
if (tup->rx_in_progress) {
ier = tup->ier_shadow;
ier |= (UART_IER_RLSI | UART_IER_RTOIE |
TEGRA_UART_IER_EORD | UART_IER_RDI);
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
}
} else if (is_rx_start) {
tegra_uart_start_rx_dma(tup);
}
spin_unlock_irqrestore(&u->lock, flags);
return IRQ_HANDLED;
}
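		/* Decode the 3-bit interrupt ID from IIR bits [3:1]. */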
switch ((iir >> 1) & 0x7) {
case 0: /* Modem signal change interrupt */
tegra_uart_handle_modem_signal_change(u);
break;
case 1: /* Transmit interrupt only triggered when using PIO */
tup->ier_shadow &= ~UART_IER_THRI;
tegra_uart_write(tup, tup->ier_shadow, UART_IER);
tegra_uart_handle_tx_pio(tup);
break;
case 4: /* End of data */
case 6: /* Rx timeout */
if (!tup->use_rx_pio) {
is_rx_int = tup->rx_in_progress;
/* Disable Rx interrupts */
ier = tup->ier_shadow;
ier &= ~(UART_IER_RDI | UART_IER_RLSI |
UART_IER_RTOIE | TEGRA_UART_IER_EORD);
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
break;
}
fallthrough;
case 2: /* Receive */
if (!tup->use_rx_pio) {
is_rx_start = tup->rx_in_progress;
tup->ier_shadow &= ~UART_IER_RDI;
tegra_uart_write(tup, tup->ier_shadow,
UART_IER);
} else {
do_handle_rx_pio(tup);
}
break;
case 3: /* Receive error */
tegra_uart_decode_rx_error(tup,
tegra_uart_read(tup, UART_LSR));
break;
case 5: /* break nothing to handle */
case 7: /* break nothing to handle */
break;
}
}
}
static void tegra_uart_stop_rx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
struct tty_port *port = &tup->uport.state->port;
unsigned long ier;
if (tup->rts_active)
set_rts(tup, false);
if (!tup->rx_in_progress)
return;
tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */
ier = tup->ier_shadow;
ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
TEGRA_UART_IER_EORD);
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
tup->rx_in_progress = 0;
if (!tup->use_rx_pio)
tegra_uart_terminate_rx_dma(tup);
else
tegra_uart_handle_rx_pio(tup, port);
}
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
{
unsigned long flags;
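	/* One 10-bit character frame takes 10 * 1000000 / baud microseconds. */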
unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
unsigned long wait_time;
unsigned long lsr;
unsigned long msr;
unsigned long mcr;
/* Disable interrupts */
tegra_uart_write(tup, 0, UART_IER);
lsr = tegra_uart_read(tup, UART_LSR);
if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
msr = tegra_uart_read(tup, UART_MSR);
mcr = tegra_uart_read(tup, UART_MCR);
if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
dev_err(tup->uport.dev,
"Tx Fifo not empty, CTS disabled, waiting\n");
/* Wait for Tx fifo to be empty */
while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
wait_time = min(fifo_empty_time, 100lu);
udelay(wait_time);
fifo_empty_time -= wait_time;
if (!fifo_empty_time) {
msr = tegra_uart_read(tup, UART_MSR);
mcr = tegra_uart_read(tup, UART_MCR);
if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
(msr & UART_MSR_CTS))
dev_err(tup->uport.dev,
"Slave not ready\n");
break;
}
lsr = tegra_uart_read(tup, UART_LSR);
}
}
spin_lock_irqsave(&tup->uport.lock, flags);
/* Reset the Rx and Tx FIFOs */
tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
tup->current_baud = 0;
spin_unlock_irqrestore(&tup->uport.lock, flags);
tup->rx_in_progress = 0;
tup->tx_in_progress = 0;
if (!tup->use_rx_pio)
tegra_uart_dma_channel_free(tup, true);
if (!tup->use_tx_pio)
tegra_uart_dma_channel_free(tup, false);
clk_disable_unprepare(tup->uart_clk);
}
static int tegra_uart_hw_init(struct tegra_uart_port *tup)
{
int ret;
tup->fcr_shadow = 0;
tup->mcr_shadow = 0;
tup->lcr_shadow = 0;
tup->ier_shadow = 0;
tup->current_baud = 0;
ret = clk_prepare_enable(tup->uart_clk);
if (ret) {
dev_err(tup->uport.dev, "could not enable clk\n");
return ret;
}
/* Reset the UART controller to clear all previous status.*/
reset_control_assert(tup->rst);
udelay(10);
reset_control_deassert(tup->rst);
tup->rx_in_progress = 0;
tup->tx_in_progress = 0;
/*
* Set the trigger level
*
* For PIO mode:
*
 * For receive, this will interrupt the CPU after that many bytes have
 * been received; for the remaining bytes the receive timeout
 * interrupt is received. Rx high watermark is set to 4.
 *
 * For transmit, if the transmit interrupt is enabled, this will
 * interrupt the CPU when the number of entries in the FIFO reaches the
 * low watermark. Tx low watermark is set to 16 bytes.
 *
 * For DMA mode:
 *
 * Set the Tx trigger to 16. This should match the DMA burst size
 * programmed in the DMA registers.
*/
tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
if (tup->use_rx_pio) {
tup->fcr_shadow |= UART_FCR_R_TRIG_11;
} else {
if (tup->cdata->max_dma_burst_bytes == 8)
tup->fcr_shadow |= UART_FCR_R_TRIG_10;
else
tup->fcr_shadow |= UART_FCR_R_TRIG_01;
}
tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
/* Dummy read to ensure the write is posted */
tegra_uart_read(tup, UART_SCR);
if (tup->cdata->fifo_mode_enable_status) {
ret = tegra_uart_wait_fifo_mode_enabled(tup);
if (ret < 0) {
clk_disable_unprepare(tup->uart_clk);
dev_err(tup->uport.dev,
"Failed to enable FIFO mode: %d\n", ret);
return ret;
}
} else {
/*
* For all tegra devices (up to t210), there is a hardware
* issue that requires software to wait for 3 UART clock
* periods after enabling the TX fifo, otherwise data could
* be lost.
*/
tegra_uart_wait_cycle_time(tup, 3);
}
/*
* Initialize the UART with default configuration
* (115200, N, 8, 1) so that the receive DMA buffer may be
* enqueued
*/
ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
if (ret < 0) {
clk_disable_unprepare(tup->uart_clk);
dev_err(tup->uport.dev, "Failed to set baud rate\n");
return ret;
}
if (!tup->use_rx_pio) {
tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
tup->fcr_shadow |= UART_FCR_DMA_SELECT;
tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
} else {
tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
}
tup->rx_in_progress = 1;
/*
* Enable IE_RXS for the receive status interrupts like line errors.
* Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
*
 * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs
 * when data is sitting in the FIFO and couldn't be transferred to the
 * DMA because the DMA size alignment (4 bytes) is not met. EORD is
 * triggered when there is a pause in the incoming data stream lasting
 * 4 character times.
 *
 * For pauses in the data which are not aligned to 4 bytes, we get
 * both EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first,
 * then EORD.
*/
tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
/*
* If using DMA mode, enable EORD interrupt to notify about RX
* completion.
*/
if (!tup->use_rx_pio)
tup->ier_shadow |= TEGRA_UART_IER_EORD;
tegra_uart_write(tup, tup->ier_shadow, UART_IER);
return 0;
}
static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
bool dma_to_memory)
{
if (dma_to_memory) {
dmaengine_terminate_all(tup->rx_dma_chan);
dma_release_channel(tup->rx_dma_chan);
dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
tup->rx_dma_chan = NULL;
tup->rx_dma_buf_phys = 0;
tup->rx_dma_buf_virt = NULL;
} else {
dmaengine_terminate_all(tup->tx_dma_chan);
dma_release_channel(tup->tx_dma_chan);
dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
UART_XMIT_SIZE, DMA_TO_DEVICE);
tup->tx_dma_chan = NULL;
tup->tx_dma_buf_phys = 0;
tup->tx_dma_buf_virt = NULL;
}
}
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
bool dma_to_memory)
{
struct dma_chan *dma_chan;
unsigned char *dma_buf;
dma_addr_t dma_phys;
int ret;
struct dma_slave_config dma_sconfig;
dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
if (IS_ERR(dma_chan)) {
ret = PTR_ERR(dma_chan);
dev_err(tup->uport.dev,
"DMA channel alloc failed: %d\n", ret);
return ret;
}
if (dma_to_memory) {
dma_buf = dma_alloc_coherent(tup->uport.dev,
TEGRA_UART_RX_DMA_BUFFER_SIZE,
&dma_phys, GFP_KERNEL);
if (!dma_buf) {
dev_err(tup->uport.dev,
"Not able to allocate the dma buffer\n");
dma_release_channel(dma_chan);
return -ENOMEM;
}
dma_sync_single_for_device(tup->uport.dev, dma_phys,
TEGRA_UART_RX_DMA_BUFFER_SIZE,
DMA_TO_DEVICE);
dma_sconfig.src_addr = tup->uport.mapbase;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
tup->rx_dma_chan = dma_chan;
tup->rx_dma_buf_virt = dma_buf;
tup->rx_dma_buf_phys = dma_phys;
} else {
dma_phys = dma_map_single(tup->uport.dev,
tup->uport.state->xmit.buf, UART_XMIT_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(tup->uport.dev, dma_phys)) {
dev_err(tup->uport.dev, "dma_map_single tx failed\n");
dma_release_channel(dma_chan);
return -ENOMEM;
}
dma_buf = tup->uport.state->xmit.buf;
dma_sconfig.dst_addr = tup->uport.mapbase;
dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_sconfig.dst_maxburst = 16;
tup->tx_dma_chan = dma_chan;
tup->tx_dma_buf_virt = dma_buf;
tup->tx_dma_buf_phys = dma_phys;
}
ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
if (ret < 0) {
dev_err(tup->uport.dev,
"Dma slave config failed, err = %d\n", ret);
tegra_uart_dma_channel_free(tup, dma_to_memory);
return ret;
}
return 0;
}
static int tegra_uart_startup(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
int ret;
if (!tup->use_tx_pio) {
ret = tegra_uart_dma_channel_allocate(tup, false);
if (ret < 0) {
dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
ret);
return ret;
}
}
if (!tup->use_rx_pio) {
ret = tegra_uart_dma_channel_allocate(tup, true);
if (ret < 0) {
dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
ret);
goto fail_rx_dma;
}
}
ret = tegra_uart_hw_init(tup);
if (ret < 0) {
dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
goto fail_hw_init;
}
ret = request_irq(u->irq, tegra_uart_isr, 0,
dev_name(u->dev), tup);
if (ret < 0) {
dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
goto fail_request_irq;
}
return 0;
fail_request_irq:
/* tup->uart_clk is already enabled in tegra_uart_hw_init */
clk_disable_unprepare(tup->uart_clk);
fail_hw_init:
if (!tup->use_rx_pio)
tegra_uart_dma_channel_free(tup, true);
fail_rx_dma:
if (!tup->use_tx_pio)
tegra_uart_dma_channel_free(tup, false);
return ret;
}
/*
* Flush any TX data submitted for DMA and PIO. Called when the
* TX circular buffer is reset.
*/
static void tegra_uart_flush_buffer(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
tup->tx_bytes = 0;
if (tup->tx_dma_chan)
dmaengine_terminate_all(tup->tx_dma_chan);
}
static void tegra_uart_shutdown(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
tegra_uart_hw_deinit(tup);
free_irq(u->irq, tup);
}
static void tegra_uart_enable_ms(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
if (tup->enable_modem_interrupt) {
tup->ier_shadow |= UART_IER_MSI;
tegra_uart_write(tup, tup->ier_shadow, UART_IER);
}
}
static void tegra_uart_set_termios(struct uart_port *u,
struct ktermios *termios,
const struct ktermios *oldtermios)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
unsigned int baud;
unsigned long flags;
unsigned int lcr;
unsigned char char_bits;
struct clk *parent_clk = clk_get_parent(tup->uart_clk);
unsigned long parent_clk_rate = clk_get_rate(parent_clk);
int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
int ret;
max_divider *= 16;
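/*
 * A sketch of the resulting baud window: the divisor latch is 15 bits
 * wide when the chip supports a clock source divider and 16 bits
 * otherwise, and the 16x oversampling bounds the rate requested from
 * uart_get_baud_rate() below to
 * parent_clk_rate / max_divider .. parent_clk_rate / 16.
 * With a hypothetical 408 MHz parent clock and a 15-bit divisor, that
 * is roughly 778 baud .. 25.5 Mbaud.
 */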
spin_lock_irqsave(&u->lock, flags);
/* Configuration is changing, so it is safe to stop any RX now */
if (tup->rts_active)
set_rts(tup, false);
/* Clear all interrupts as configuration is going to be changed */
tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
tegra_uart_read(tup, UART_IER);
tegra_uart_write(tup, 0, UART_IER);
tegra_uart_read(tup, UART_IER);
/* Parity */
lcr = tup->lcr_shadow;
lcr &= ~UART_LCR_PARITY;
/* CMSPAR isn't supported by this driver */
termios->c_cflag &= ~CMSPAR;
if ((termios->c_cflag & PARENB) == PARENB) {
if (termios->c_cflag & PARODD) {
lcr |= UART_LCR_PARITY;
lcr &= ~UART_LCR_EPAR;
lcr &= ~UART_LCR_SPAR;
} else {
lcr |= UART_LCR_PARITY;
lcr |= UART_LCR_EPAR;
lcr &= ~UART_LCR_SPAR;
}
}
char_bits = tty_get_char_size(termios->c_cflag);
lcr &= ~UART_LCR_WLEN8;
lcr |= UART_LCR_WLEN(char_bits);
/* Stop bits */
if (termios->c_cflag & CSTOPB)
lcr |= UART_LCR_STOP;
else
lcr &= ~UART_LCR_STOP;
tegra_uart_write(tup, lcr, UART_LCR);
tup->lcr_shadow = lcr;
tup->symb_bit = tty_get_frame_size(termios->c_cflag);
/* Baud rate. */
baud = uart_get_baud_rate(u, termios, oldtermios,
parent_clk_rate/max_divider,
parent_clk_rate/16);
spin_unlock_irqrestore(&u->lock, flags);
ret = tegra_set_baudrate(tup, baud);
if (ret < 0) {
dev_err(tup->uport.dev, "Failed to set baud rate\n");
return;
}
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
spin_lock_irqsave(&u->lock, flags);
/* Flow control */
if (termios->c_cflag & CRTSCTS) {
tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
/* if the upper layer has asked for RTS to be active, assert it here */
if (tup->rts_active)
set_rts(tup, true);
} else {
tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
}
/* update the port timeout based on new settings */
uart_update_timeout(u, termios->c_cflag, baud);
/* Make sure all writes have completed */
tegra_uart_read(tup, UART_IER);
/* Re-enable interrupt */
tegra_uart_write(tup, tup->ier_shadow, UART_IER);
tegra_uart_read(tup, UART_IER);
tup->uport.ignore_status_mask = 0;
/* Ignore all characters if CREAD is not set */
if ((termios->c_cflag & CREAD) == 0)
tup->uport.ignore_status_mask |= UART_LSR_DR;
if (termios->c_iflag & IGNBRK)
tup->uport.ignore_status_mask |= UART_LSR_BI;
spin_unlock_irqrestore(&u->lock, flags);
}
static const char *tegra_uart_type(struct uart_port *u)
{
return TEGRA_UART_TYPE;
}
static const struct uart_ops tegra_uart_ops = {
.tx_empty = tegra_uart_tx_empty,
.set_mctrl = tegra_uart_set_mctrl,
.get_mctrl = tegra_uart_get_mctrl,
.stop_tx = tegra_uart_stop_tx,
.start_tx = tegra_uart_start_tx,
.stop_rx = tegra_uart_stop_rx,
.flush_buffer = tegra_uart_flush_buffer,
.enable_ms = tegra_uart_enable_ms,
.break_ctl = tegra_uart_break_ctl,
.startup = tegra_uart_startup,
.shutdown = tegra_uart_shutdown,
.set_termios = tegra_uart_set_termios,
.type = tegra_uart_type,
.request_port = tegra_uart_request_port,
.release_port = tegra_uart_release_port,
};
static struct uart_driver tegra_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "tegra_hsuart",
.dev_name = "ttyTHS",
.cons = NULL,
.nr = TEGRA_UART_MAXIMUM,
};
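/*
 * Note: tegra_uart_init() below may raise .nr to the matched chip's
 * uart_max_port before the driver is registered.
 */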
static int tegra_uart_parse_dt(struct platform_device *pdev,
struct tegra_uart_port *tup)
{
struct device_node *np = pdev->dev.of_node;
int port;
int ret;
int index;
u32 pval;
int count;
int n_entries;
port = of_alias_get_id(np, "serial");
if (port < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
return port;
}
tup->uport.line = port;
tup->enable_modem_interrupt = of_property_read_bool(np,
"nvidia,enable-modem-interrupt");
index = of_property_match_string(np, "dma-names", "rx");
if (index < 0) {
tup->use_rx_pio = true;
dev_info(&pdev->dev, "RX in PIO mode\n");
}
index = of_property_match_string(np, "dma-names", "tx");
if (index < 0) {
tup->use_tx_pio = true;
dev_info(&pdev->dev, "TX in PIO mode\n");
}
n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
if (n_entries > 0) {
tup->n_adjustable_baud_rates = n_entries / 3;
tup->baud_tolerance =
devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
sizeof(*tup->baud_tolerance), GFP_KERNEL);
if (!tup->baud_tolerance)
return -ENOMEM;
for (count = 0, index = 0; count < n_entries; count += 3,
index++) {
ret =
of_property_read_u32_index(np,
"nvidia,adjust-baud-rates",
count, &pval);
if (!ret)
tup->baud_tolerance[index].lower_range_baud =
pval;
ret =
of_property_read_u32_index(np,
"nvidia,adjust-baud-rates",
count + 1, &pval);
if (!ret)
tup->baud_tolerance[index].upper_range_baud =
pval;
ret =
of_property_read_u32_index(np,
"nvidia,adjust-baud-rates",
count + 2, &pval);
if (!ret)
tup->baud_tolerance[index].tolerance =
(s32)pval;
}
} else {
tup->n_adjustable_baud_rates = 0;
}
return 0;
}
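/*
 * A minimal device-tree sketch of the "nvidia,adjust-baud-rates"
 * property parsed above; each entry is a <lower_baud upper_baud
 * tolerance> triplet, and the node name and values here are purely
 * illustrative:
 *
 *	serial@70006000 {
 *		nvidia,adjust-baud-rates = <1000000 4000000 136>;
 *	};
 */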
static struct tegra_uart_chip_data tegra20_uart_chip_data = {
.tx_fifo_full_status = false,
.allow_txfifo_reset_fifo_mode = true,
.support_clk_src_div = false,
.fifo_mode_enable_status = false,
.uart_max_port = 5,
.max_dma_burst_bytes = 4,
.error_tolerance_low_range = -4,
.error_tolerance_high_range = 4,
};
static struct tegra_uart_chip_data tegra30_uart_chip_data = {
.tx_fifo_full_status = true,
.allow_txfifo_reset_fifo_mode = false,
.support_clk_src_div = true,
.fifo_mode_enable_status = false,
.uart_max_port = 5,
.max_dma_burst_bytes = 4,
.error_tolerance_low_range = -4,
.error_tolerance_high_range = 4,
};
static struct tegra_uart_chip_data tegra186_uart_chip_data = {
.tx_fifo_full_status = true,
.allow_txfifo_reset_fifo_mode = false,
.support_clk_src_div = true,
.fifo_mode_enable_status = true,
.uart_max_port = 8,
.max_dma_burst_bytes = 8,
.error_tolerance_low_range = 0,
.error_tolerance_high_range = 4,
};
static struct tegra_uart_chip_data tegra194_uart_chip_data = {
.tx_fifo_full_status = true,
.allow_txfifo_reset_fifo_mode = false,
.support_clk_src_div = true,
.fifo_mode_enable_status = true,
.uart_max_port = 8,
.max_dma_burst_bytes = 8,
.error_tolerance_low_range = -2,
.error_tolerance_high_range = 2,
};
static const struct of_device_id tegra_uart_of_match[] = {
{
.compatible = "nvidia,tegra30-hsuart",
.data = &tegra30_uart_chip_data,
}, {
.compatible = "nvidia,tegra20-hsuart",
.data = &tegra20_uart_chip_data,
}, {
.compatible = "nvidia,tegra186-hsuart",
.data = &tegra186_uart_chip_data,
}, {
.compatible = "nvidia,tegra194-hsuart",
.data = &tegra194_uart_chip_data,
}, {
},
};
MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
static int tegra_uart_probe(struct platform_device *pdev)
{
struct tegra_uart_port *tup;
struct uart_port *u;
struct resource *resource;
int ret;
const struct tegra_uart_chip_data *cdata;
cdata = of_device_get_match_data(&pdev->dev);
if (!cdata) {
dev_err(&pdev->dev, "Error: No device match found\n");
return -ENODEV;
}
tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
if (!tup) {
dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
return -ENOMEM;
}
ret = tegra_uart_parse_dt(pdev, tup);
if (ret < 0)
return ret;
u = &tup->uport;
u->dev = &pdev->dev;
u->ops = &tegra_uart_ops;
u->type = PORT_TEGRA;
u->fifosize = 32;
tup->cdata = cdata;
platform_set_drvdata(pdev, tup);
u->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &resource);
if (IS_ERR(u->membase))
return PTR_ERR(u->membase);
u->mapbase = resource->start;
tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(tup->uart_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(tup->uart_clk), "Couldn't get the clock\n");
tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
if (IS_ERR(tup->rst)) {
dev_err(&pdev->dev, "Couldn't get the reset\n");
return PTR_ERR(tup->rst);
}
u->iotype = UPIO_MEM32;
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
u->irq = ret;
u->regshift = 2;
ret = uart_add_one_port(&tegra_uart_driver, u);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
return ret;
}
return ret;
}
static int tegra_uart_remove(struct platform_device *pdev)
{
struct tegra_uart_port *tup = platform_get_drvdata(pdev);
struct uart_port *u = &tup->uport;
uart_remove_one_port(&tegra_uart_driver, u);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int tegra_uart_suspend(struct device *dev)
{
struct tegra_uart_port *tup = dev_get_drvdata(dev);
struct uart_port *u = &tup->uport;
return uart_suspend_port(&tegra_uart_driver, u);
}
static int tegra_uart_resume(struct device *dev)
{
struct tegra_uart_port *tup = dev_get_drvdata(dev);
struct uart_port *u = &tup->uport;
return uart_resume_port(&tegra_uart_driver, u);
}
#endif
static const struct dev_pm_ops tegra_uart_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
};
static struct platform_driver tegra_uart_platform_driver = {
.probe = tegra_uart_probe,
.remove = tegra_uart_remove,
.driver = {
.name = "serial-tegra",
.of_match_table = tegra_uart_of_match,
.pm = &tegra_uart_pm_ops,
},
};
static int __init tegra_uart_init(void)
{
int ret;
struct device_node *node;
const struct of_device_id *match = NULL;
const struct tegra_uart_chip_data *cdata = NULL;
node = of_find_matching_node(NULL, tegra_uart_of_match);
if (node)
match = of_match_node(tegra_uart_of_match, node);
of_node_put(node);
if (match)
cdata = match->data;
if (cdata)
tegra_uart_driver.nr = cdata->uart_max_port;
ret = uart_register_driver(&tegra_uart_driver);
if (ret < 0) {
pr_err("Could not register %s driver\n",
tegra_uart_driver.driver_name);
return ret;
}
ret = platform_driver_register(&tegra_uart_platform_driver);
if (ret < 0) {
pr_err("Uart platform driver register failed, e = %d\n", ret);
uart_unregister_driver(&tegra_uart_driver);
return ret;
}
return 0;
}
static void __exit tegra_uart_exit(void)
{
pr_info("Unloading tegra uart driver\n");
platform_driver_unregister(&tegra_uart_platform_driver);
uart_unregister_driver(&tegra_uart_driver);
}
module_init(tegra_uart_init);
module_exit(tegra_uart_exit);
MODULE_ALIAS("platform:serial-tegra");
MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/tty/serial/serial-tegra.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * Sunplus SoC UART driver
 *
 * Author: Hammer Hsieh <[email protected]>
 *
 * Note 1: This is an 8250-like UART, but it is not register compatible
 * with the 8250.
 *
 * Note 2: On some buses a read-back must follow a write to guarantee
 * that the write has reached the hardware. The startup and shutdown
 * functions in this driver deliberately write without reading back:
 * on the Sunplus bus, posted writes between the memory bus and the
 * peripheral bus are followed by a specific command after the last
 * write. The memory bus recognizes that command and returns a "done"
 * signal to the master device, and only then does the master issue the
 * next write. A read before the next write is therefore unnecessary.
 */
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/irq.h>
/* Register offsets */
#define SUP_UART_DATA 0x00
#define SUP_UART_LSR 0x04
#define SUP_UART_MSR 0x08
#define SUP_UART_LCR 0x0C
#define SUP_UART_MCR 0x10
#define SUP_UART_DIV_L 0x14
#define SUP_UART_DIV_H 0x18
#define SUP_UART_ISC 0x1C
#define SUP_UART_TX_RESIDUE 0x20
#define SUP_UART_RX_RESIDUE 0x24
/* Line Status Register bits */
#define SUP_UART_LSR_BC BIT(5) /* break condition status */
#define SUP_UART_LSR_FE BIT(4) /* frame error status */
#define SUP_UART_LSR_OE BIT(3) /* overrun error status */
#define SUP_UART_LSR_PE BIT(2) /* parity error status */
#define SUP_UART_LSR_RX BIT(1) /* 1: receive fifo not empty */
#define SUP_UART_LSR_TX BIT(0) /* 1: transmit fifo is not full */
#define SUP_UART_LSR_TX_NOT_FULL 1
#define SUP_UART_LSR_BRK_ERROR_BITS GENMASK(5, 2)
/* Line Control Register bits */
#define SUP_UART_LCR_SBC BIT(5) /* select break condition */
/* Modem Control Register bits */
#define SUP_UART_MCR_RI BIT(3) /* ring indicator */
#define SUP_UART_MCR_DCD BIT(2) /* data carrier detect */
/* Interrupt Status/Control Register bits */
#define SUP_UART_ISC_RXM BIT(5) /* RX interrupt enable */
#define SUP_UART_ISC_TXM BIT(4) /* TX interrupt enable */
#define SUP_UART_ISC_RX BIT(1) /* RX interrupt status */
#define SUP_UART_ISC_TX BIT(0) /* TX interrupt status */
#define SUP_DUMMY_READ BIT(16) /* drop bytes received on a !CREAD port */
#define SUP_UART_NR 5
struct sunplus_uart_port {
struct uart_port port;
struct clk *clk;
struct reset_control *rstc;
};
static void sp_uart_put_char(struct uart_port *port, unsigned int ch)
{
writel(ch, port->membase + SUP_UART_DATA);
}
static u32 sunplus_tx_buf_not_full(struct uart_port *port)
{
unsigned int lsr = readl(port->membase + SUP_UART_LSR);
return (lsr & SUP_UART_LSR_TX) ? SUP_UART_LSR_TX_NOT_FULL : 0;
}
static unsigned int sunplus_tx_empty(struct uart_port *port)
{
unsigned int lsr = readl(port->membase + SUP_UART_LSR);
return (lsr & UART_LSR_TEMT) ? TIOCSER_TEMT : 0;
}
static void sunplus_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int mcr = readl(port->membase + SUP_UART_MCR);
if (mctrl & TIOCM_DTR)
mcr |= UART_MCR_DTR;
else
mcr &= ~UART_MCR_DTR;
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
else
mcr &= ~UART_MCR_RTS;
if (mctrl & TIOCM_CAR)
mcr |= SUP_UART_MCR_DCD;
else
mcr &= ~SUP_UART_MCR_DCD;
if (mctrl & TIOCM_RI)
mcr |= SUP_UART_MCR_RI;
else
mcr &= ~SUP_UART_MCR_RI;
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
else
mcr &= ~UART_MCR_LOOP;
writel(mcr, port->membase + SUP_UART_MCR);
}
static unsigned int sunplus_get_mctrl(struct uart_port *port)
{
unsigned int mcr, ret = 0;
mcr = readl(port->membase + SUP_UART_MCR);
if (mcr & UART_MCR_DTR)
ret |= TIOCM_DTR;
if (mcr & UART_MCR_RTS)
ret |= TIOCM_RTS;
if (mcr & SUP_UART_MCR_DCD)
ret |= TIOCM_CAR;
if (mcr & SUP_UART_MCR_RI)
ret |= TIOCM_RI;
if (mcr & UART_MCR_LOOP)
ret |= TIOCM_LOOP;
return ret;
}
static void sunplus_stop_tx(struct uart_port *port)
{
unsigned int isc;
isc = readl(port->membase + SUP_UART_ISC);
isc &= ~SUP_UART_ISC_TXM;
writel(isc, port->membase + SUP_UART_ISC);
}
static void sunplus_start_tx(struct uart_port *port)
{
unsigned int isc;
isc = readl(port->membase + SUP_UART_ISC);
isc |= SUP_UART_ISC_TXM;
writel(isc, port->membase + SUP_UART_ISC);
}
static void sunplus_stop_rx(struct uart_port *port)
{
unsigned int isc;
isc = readl(port->membase + SUP_UART_ISC);
isc &= ~SUP_UART_ISC_RXM;
writel(isc, port->membase + SUP_UART_ISC);
}
static void sunplus_break_ctl(struct uart_port *port, int ctl)
{
unsigned long flags;
unsigned int lcr;
spin_lock_irqsave(&port->lock, flags);
lcr = readl(port->membase + SUP_UART_LCR);
if (ctl)
lcr |= SUP_UART_LCR_SBC; /* start break */
else
lcr &= ~SUP_UART_LCR_SBC; /* stop break */
writel(lcr, port->membase + SUP_UART_LCR);
spin_unlock_irqrestore(&port->lock, flags);
}
static void transmit_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
if (port->x_char) {
sp_uart_put_char(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
sunplus_stop_tx(port);
return;
}
do {
sp_uart_put_char(port, xmit->buf[xmit->tail]);
uart_xmit_advance(port, 1);
if (uart_circ_empty(xmit))
break;
} while (sunplus_tx_buf_not_full(port));
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (uart_circ_empty(xmit))
sunplus_stop_tx(port);
}
static void receive_chars(struct uart_port *port)
{
unsigned int lsr = readl(port->membase + SUP_UART_LSR);
u8 ch, flag;
do {
ch = readl(port->membase + SUP_UART_DATA);
flag = TTY_NORMAL;
port->icount.rx++;
if (unlikely(lsr & SUP_UART_LSR_BRK_ERROR_BITS)) {
if (lsr & SUP_UART_LSR_BC) {
lsr &= ~(SUP_UART_LSR_FE | SUP_UART_LSR_PE);
port->icount.brk++;
flag = TTY_BREAK;
if (uart_handle_break(port))
goto ignore_char;
} else if (lsr & SUP_UART_LSR_PE) {
port->icount.parity++;
flag = TTY_PARITY;
} else if (lsr & SUP_UART_LSR_FE) {
port->icount.frame++;
flag = TTY_FRAME;
}
if (lsr & SUP_UART_LSR_OE)
port->icount.overrun++;
}
if (port->ignore_status_mask & SUP_DUMMY_READ)
goto ignore_char;
if (uart_handle_sysrq_char(port, ch))
goto ignore_char;
uart_insert_char(port, lsr, SUP_UART_LSR_OE, ch, flag);
ignore_char:
lsr = readl(port->membase + SUP_UART_LSR);
} while (lsr & SUP_UART_LSR_RX);
tty_flip_buffer_push(&port->state->port);
}
static irqreturn_t sunplus_uart_irq(int irq, void *args)
{
struct uart_port *port = args;
unsigned int isc;
spin_lock(&port->lock);
isc = readl(port->membase + SUP_UART_ISC);
if (isc & SUP_UART_ISC_RX)
receive_chars(port);
if (isc & SUP_UART_ISC_TX)
transmit_chars(port);
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
static int sunplus_startup(struct uart_port *port)
{
unsigned long flags;
unsigned int isc = 0;
int ret;
ret = request_irq(port->irq, sunplus_uart_irq, 0, "sunplus_uart", port);
if (ret)
return ret;
spin_lock_irqsave(&port->lock, flags);
/*
 * The ISC register holds the interrupt enables in Bit[7:4] and the
 * interrupt status in Bit[3:0]. The status bits are cleared on read,
 * so only the enable bits are written here.
 */
isc |= SUP_UART_ISC_RXM;
writel(isc, port->membase + SUP_UART_ISC);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
static void sunplus_shutdown(struct uart_port *port)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/*
 * The ISC register holds the interrupt enables in Bit[7:4] and the
 * interrupt status in Bit[3:0]. The status bits are cleared on read,
 * so only the enable bits are written here.
 */
writel(0, port->membase + SUP_UART_ISC); /* disable all interrupts */
spin_unlock_irqrestore(&port->lock, flags);
free_irq(port->irq, port);
}
static void sunplus_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *oldtermios)
{
u32 ext, div, div_l, div_h, baud, lcr;
u32 clk = port->uartclk;
unsigned long flags;
baud = uart_get_baud_rate(port, termios, oldtermios, 0, port->uartclk / 16);
/* baud rate = uartclk / (16 * (divisor + 1) + divisor_ext) */
clk += baud >> 1;
div = clk / baud;
ext = div & 0x0F;
div = (div >> 4) - 1;
div_l = (div & 0xFF) | (ext << 12);
div_h = div >> 8;
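/*
 * Worked example (hypothetical 25 MHz uartclk, 115200 baud):
 * div = (25000000 + 57600) / 115200 = 217, ext = 217 & 0xF = 9,
 * div = (217 >> 4) - 1 = 12, so the hardware divides by
 * 16 * (12 + 1) + 9 = 217 and the actual rate is ~115207 baud.
 */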
switch (termios->c_cflag & CSIZE) {
case CS5:
lcr = UART_LCR_WLEN5;
break;
case CS6:
lcr = UART_LCR_WLEN6;
break;
case CS7:
lcr = UART_LCR_WLEN7;
break;
default:
lcr = UART_LCR_WLEN8;
break;
}
if (termios->c_cflag & CSTOPB)
lcr |= UART_LCR_STOP;
if (termios->c_cflag & PARENB) {
lcr |= UART_LCR_PARITY;
if (!(termios->c_cflag & PARODD))
lcr |= UART_LCR_EPAR;
}
spin_lock_irqsave(&port->lock, flags);
uart_update_timeout(port, termios->c_cflag, baud);
port->read_status_mask = 0;
if (termios->c_iflag & INPCK)
port->read_status_mask |= SUP_UART_LSR_PE | SUP_UART_LSR_FE;
if (termios->c_iflag & (BRKINT | PARMRK))
port->read_status_mask |= SUP_UART_LSR_BC;
/* Characters to ignore */
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= SUP_UART_LSR_FE | SUP_UART_LSR_PE;
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= SUP_UART_LSR_BC;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= SUP_UART_LSR_OE;
}
/* Ignore all characters if CREAD is not set */
if ((termios->c_cflag & CREAD) == 0) {
port->ignore_status_mask |= SUP_DUMMY_READ;
/* flush rx data FIFO */
writel(0, port->membase + SUP_UART_RX_RESIDUE);
}
/* Settings for baud rate divisor and lcr */
writel(div_h, port->membase + SUP_UART_DIV_H);
writel(div_l, port->membase + SUP_UART_DIV_L);
writel(lcr, port->membase + SUP_UART_LCR);
spin_unlock_irqrestore(&port->lock, flags);
}
static void sunplus_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
int new = termios->c_line;
if (new == N_PPS)
port->flags |= UPF_HARDPPS_CD;
else
port->flags &= ~UPF_HARDPPS_CD;
}
static const char *sunplus_type(struct uart_port *port)
{
return port->type == PORT_SUNPLUS ? "sunplus_uart" : NULL;
}
static void sunplus_config_port(struct uart_port *port, int type)
{
if (type & UART_CONFIG_TYPE)
port->type = PORT_SUNPLUS;
}
static int sunplus_verify_port(struct uart_port *port, struct serial_struct *ser)
{
if (ser->type != PORT_UNKNOWN && ser->type != PORT_SUNPLUS)
return -EINVAL;
return 0;
}
#if defined(CONFIG_SERIAL_SUNPLUS_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
static void wait_for_xmitr(struct uart_port *port)
{
unsigned int val;
int ret;
/* Wait while FIFO is full or timeout */
ret = readl_poll_timeout_atomic(port->membase + SUP_UART_LSR, val,
(val & SUP_UART_LSR_TX), 1, 10000);
if (ret == -ETIMEDOUT) {
dev_err(port->dev, "Timeout waiting while UART TX FULL\n");
return;
}
}
#endif
#ifdef CONFIG_CONSOLE_POLL
static void sunplus_poll_put_char(struct uart_port *port, unsigned char data)
{
wait_for_xmitr(port);
sp_uart_put_char(port, data);
}
static int sunplus_poll_get_char(struct uart_port *port)
{
unsigned int lsr = readl(port->membase + SUP_UART_LSR);
if (!(lsr & SUP_UART_LSR_RX))
return NO_POLL_CHAR;
return readl(port->membase + SUP_UART_DATA);
}
#endif
static const struct uart_ops sunplus_uart_ops = {
.tx_empty = sunplus_tx_empty,
.set_mctrl = sunplus_set_mctrl,
.get_mctrl = sunplus_get_mctrl,
.stop_tx = sunplus_stop_tx,
.start_tx = sunplus_start_tx,
.stop_rx = sunplus_stop_rx,
.break_ctl = sunplus_break_ctl,
.startup = sunplus_startup,
.shutdown = sunplus_shutdown,
.set_termios = sunplus_set_termios,
.set_ldisc = sunplus_set_ldisc,
.type = sunplus_type,
.config_port = sunplus_config_port,
.verify_port = sunplus_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_put_char = sunplus_poll_put_char,
.poll_get_char = sunplus_poll_get_char,
#endif
};
#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE
static struct sunplus_uart_port *sunplus_console_ports[SUP_UART_NR];
static void sunplus_uart_console_putchar(struct uart_port *port,
unsigned char ch)
{
wait_for_xmitr(port);
sp_uart_put_char(port, ch);
}
static void sunplus_console_write(struct console *co,
const char *s,
unsigned int count)
{
unsigned long flags;
int locked = 1;
local_irq_save(flags);
if (sunplus_console_ports[co->index]->port.sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock(&sunplus_console_ports[co->index]->port.lock);
else
spin_lock(&sunplus_console_ports[co->index]->port.lock);
uart_console_write(&sunplus_console_ports[co->index]->port, s, count,
sunplus_uart_console_putchar);
if (locked)
spin_unlock(&sunplus_console_ports[co->index]->port.lock);
local_irq_restore(flags);
}
static int __init sunplus_console_setup(struct console *co, char *options)
{
struct sunplus_uart_port *sup;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= SUP_UART_NR)
return -EINVAL;
sup = sunplus_console_ports[co->index];
if (!sup)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&sup->port, co, baud, parity, bits, flow);
}
static struct uart_driver sunplus_uart_driver;
static struct console sunplus_uart_console = {
.name = "ttySUP",
.write = sunplus_console_write,
.device = uart_console_device,
.setup = sunplus_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &sunplus_uart_driver
};
#define SERIAL_SUNPLUS_CONSOLE (&sunplus_uart_console)
#else
#define SERIAL_SUNPLUS_CONSOLE NULL
#endif
static struct uart_driver sunplus_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "sunplus_uart",
.dev_name = "ttySUP",
.major = TTY_MAJOR,
.minor = 64,
.nr = SUP_UART_NR,
.cons = SERIAL_SUNPLUS_CONSOLE,
};
static void sunplus_uart_disable_unprepare(void *data)
{
clk_disable_unprepare(data);
}
static void sunplus_uart_reset_control_assert(void *data)
{
reset_control_assert(data);
}
static int sunplus_uart_probe(struct platform_device *pdev)
{
struct sunplus_uart_port *sup;
struct uart_port *port;
struct resource *res;
int ret, irq;
pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
if (pdev->id < 0 || pdev->id >= SUP_UART_NR)
return -EINVAL;
sup = devm_kzalloc(&pdev->dev, sizeof(*sup), GFP_KERNEL);
if (!sup)
return -ENOMEM;
sup->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(sup->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(sup->clk), "clk not found\n");
ret = clk_prepare_enable(sup->clk);
if (ret)
return ret;
ret = devm_add_action_or_reset(&pdev->dev, sunplus_uart_disable_unprepare, sup->clk);
if (ret)
return ret;
sup->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(sup->rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(sup->rstc), "rstc not found\n");
port = &sup->port;
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(port->membase))
return dev_err_probe(&pdev->dev, PTR_ERR(port->membase), "membase not found\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
port->mapbase = res->start;
port->uartclk = clk_get_rate(sup->clk);
port->line = pdev->id;
port->irq = irq;
port->dev = &pdev->dev;
port->iotype = UPIO_MEM;
port->ops = &sunplus_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->fifosize = 128;
ret = reset_control_deassert(sup->rstc);
if (ret)
return ret;
ret = devm_add_action_or_reset(&pdev->dev, sunplus_uart_reset_control_assert, sup->rstc);
if (ret)
return ret;
#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE
sunplus_console_ports[sup->port.line] = sup;
#endif
platform_set_drvdata(pdev, &sup->port);
ret = uart_add_one_port(&sunplus_uart_driver, &sup->port);
#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE
if (ret)
sunplus_console_ports[sup->port.line] = NULL;
#endif
return ret;
}
static int sunplus_uart_remove(struct platform_device *pdev)
{
struct sunplus_uart_port *sup = platform_get_drvdata(pdev);
uart_remove_one_port(&sunplus_uart_driver, &sup->port);
return 0;
}
static int __maybe_unused sunplus_uart_suspend(struct device *dev)
{
struct sunplus_uart_port *sup = dev_get_drvdata(dev);
if (!uart_console(&sup->port))
uart_suspend_port(&sunplus_uart_driver, &sup->port);
return 0;
}
static int __maybe_unused sunplus_uart_resume(struct device *dev)
{
struct sunplus_uart_port *sup = dev_get_drvdata(dev);
if (!uart_console(&sup->port))
uart_resume_port(&sunplus_uart_driver, &sup->port);
return 0;
}
static const struct dev_pm_ops sunplus_uart_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sunplus_uart_suspend, sunplus_uart_resume)
};
static const struct of_device_id sp_uart_of_match[] = {
{ .compatible = "sunplus,sp7021-uart" },
{}
};
MODULE_DEVICE_TABLE(of, sp_uart_of_match);
static struct platform_driver sunplus_uart_platform_driver = {
.probe = sunplus_uart_probe,
.remove = sunplus_uart_remove,
.driver = {
.name = "sunplus_uart",
.of_match_table = sp_uart_of_match,
.pm = &sunplus_uart_pm_ops,
}
};
static int __init sunplus_uart_init(void)
{
int ret;
ret = uart_register_driver(&sunplus_uart_driver);
if (ret)
return ret;
ret = platform_driver_register(&sunplus_uart_platform_driver);
if (ret)
uart_unregister_driver(&sunplus_uart_driver);
return ret;
}
module_init(sunplus_uart_init);
static void __exit sunplus_uart_exit(void)
{
platform_driver_unregister(&sunplus_uart_platform_driver);
uart_unregister_driver(&sunplus_uart_driver);
}
module_exit(sunplus_uart_exit);
#ifdef CONFIG_SERIAL_EARLYCON
static void sunplus_uart_putc(struct uart_port *port, unsigned char c)
{
unsigned int val;
int ret;
ret = readl_poll_timeout_atomic(port->membase + SUP_UART_LSR, val,
(val & UART_LSR_TEMT), 1, 10000);
if (ret)
return;
writel(c, port->membase + SUP_UART_DATA);
}
static void sunplus_uart_early_write(struct console *con, const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, sunplus_uart_putc);
}
static int __init
sunplus_uart_early_setup(struct earlycon_device *dev, const char *opt)
{
if (!(dev->port.membase || dev->port.iobase))
return -ENODEV;
dev->con->write = sunplus_uart_early_write;
return 0;
}
OF_EARLYCON_DECLARE(sunplus_uart, "sunplus,sp7021-uart", sunplus_uart_early_setup);
#endif
MODULE_DESCRIPTION("Sunplus UART driver");
MODULE_AUTHOR("Hammer Hsieh <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/tty/serial/sunplus-uart.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* SC16IS7xx tty serial driver - Copyright (C) 2014 GridPoint
* Author: Jon Ringle <[email protected]>
*
* Based on max310x.c, by Alexander Shiyan <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>
#define SC16IS7XX_NAME "sc16is7xx"
#define SC16IS7XX_MAX_DEVS 8
/* SC16IS7XX register definitions */
#define SC16IS7XX_RHR_REG (0x00) /* RX FIFO */
#define SC16IS7XX_THR_REG (0x00) /* TX FIFO */
#define SC16IS7XX_IER_REG (0x01) /* Interrupt enable */
#define SC16IS7XX_IIR_REG (0x02) /* Interrupt Identification */
#define SC16IS7XX_FCR_REG (0x02) /* FIFO control */
#define SC16IS7XX_LCR_REG (0x03) /* Line Control */
#define SC16IS7XX_MCR_REG (0x04) /* Modem Control */
#define SC16IS7XX_LSR_REG (0x05) /* Line Status */
#define SC16IS7XX_MSR_REG (0x06) /* Modem Status */
#define SC16IS7XX_SPR_REG (0x07) /* Scratch Pad */
#define SC16IS7XX_TXLVL_REG (0x08) /* TX FIFO level */
#define SC16IS7XX_RXLVL_REG (0x09) /* RX FIFO level */
#define SC16IS7XX_IODIR_REG (0x0a) /* I/O Direction
* - only on 75x/76x
*/
#define SC16IS7XX_IOSTATE_REG (0x0b) /* I/O State
* - only on 75x/76x
*/
#define SC16IS7XX_IOINTENA_REG (0x0c) /* I/O Interrupt Enable
* - only on 75x/76x
*/
#define SC16IS7XX_IOCONTROL_REG (0x0e) /* I/O Control
* - only on 75x/76x
*/
#define SC16IS7XX_EFCR_REG (0x0f) /* Extra Features Control */
/* TCR/TLR Register set: Only if ((MCR[2] == 1) && (EFR[4] == 1)) */
#define SC16IS7XX_TCR_REG (0x06) /* Transmit control */
#define SC16IS7XX_TLR_REG (0x07) /* Trigger level */
/* Special Register set: Only if ((LCR[7] == 1) && (LCR != 0xBF)) */
#define SC16IS7XX_DLL_REG (0x00) /* Divisor Latch Low */
#define SC16IS7XX_DLH_REG (0x01) /* Divisor Latch High */
/* Enhanced Register set: Only if (LCR == 0xBF) */
#define SC16IS7XX_EFR_REG (0x02) /* Enhanced Features */
#define SC16IS7XX_XON1_REG (0x04) /* Xon1 word */
#define SC16IS7XX_XON2_REG (0x05) /* Xon2 word */
#define SC16IS7XX_XOFF1_REG (0x06) /* Xoff1 word */
#define SC16IS7XX_XOFF2_REG (0x07) /* Xoff2 word */
/* IER register bits */
#define SC16IS7XX_IER_RDI_BIT (1 << 0) /* Enable RX data interrupt */
#define SC16IS7XX_IER_THRI_BIT (1 << 1) /* Enable TX holding register
* interrupt */
#define SC16IS7XX_IER_RLSI_BIT (1 << 2) /* Enable RX line status
* interrupt */
#define SC16IS7XX_IER_MSI_BIT (1 << 3) /* Enable Modem status
* interrupt */
/* IER register bits - write only if (EFR[4] == 1) */
#define SC16IS7XX_IER_SLEEP_BIT (1 << 4) /* Enable Sleep mode */
#define SC16IS7XX_IER_XOFFI_BIT (1 << 5) /* Enable Xoff interrupt */
#define SC16IS7XX_IER_RTSI_BIT (1 << 6) /* Enable nRTS interrupt */
#define SC16IS7XX_IER_CTSI_BIT (1 << 7) /* Enable nCTS interrupt */
/* FCR register bits */
#define SC16IS7XX_FCR_FIFO_BIT (1 << 0) /* Enable FIFO */
#define SC16IS7XX_FCR_RXRESET_BIT (1 << 1) /* Reset RX FIFO */
#define SC16IS7XX_FCR_TXRESET_BIT (1 << 2) /* Reset TX FIFO */
#define SC16IS7XX_FCR_RXLVLL_BIT (1 << 6) /* RX Trigger level LSB */
#define SC16IS7XX_FCR_RXLVLH_BIT (1 << 7) /* RX Trigger level MSB */
/* FCR register bits - write only if (EFR[4] == 1) */
#define SC16IS7XX_FCR_TXLVLL_BIT (1 << 4) /* TX Trigger level LSB */
#define SC16IS7XX_FCR_TXLVLH_BIT (1 << 5) /* TX Trigger level MSB */
/* IIR register bits */
#define SC16IS7XX_IIR_NO_INT_BIT (1 << 0) /* No interrupts pending */
#define SC16IS7XX_IIR_ID_MASK 0x3e /* Mask for the interrupt ID */
#define SC16IS7XX_IIR_THRI_SRC 0x02 /* TX holding register empty */
#define SC16IS7XX_IIR_RDI_SRC 0x04 /* RX data interrupt */
#define SC16IS7XX_IIR_RLSE_SRC 0x06 /* RX line status error */
#define SC16IS7XX_IIR_RTOI_SRC 0x0c /* RX time-out interrupt */
#define SC16IS7XX_IIR_MSI_SRC 0x00 /* Modem status interrupt
* - only on 75x/76x
*/
#define SC16IS7XX_IIR_INPIN_SRC 0x30 /* Input pin change of state
* - only on 75x/76x
*/
#define SC16IS7XX_IIR_XOFFI_SRC 0x10 /* Received Xoff */
#define SC16IS7XX_IIR_CTSRTS_SRC 0x20 /* nCTS,nRTS change of state
* from active (LOW)
* to inactive (HIGH)
*/
/* LCR register bits */
#define SC16IS7XX_LCR_LENGTH0_BIT (1 << 0) /* Word length bit 0 */
#define SC16IS7XX_LCR_LENGTH1_BIT (1 << 1) /* Word length bit 1
*
* Word length bits table:
* 00 -> 5 bit words
* 01 -> 6 bit words
* 10 -> 7 bit words
* 11 -> 8 bit words
*/
#define SC16IS7XX_LCR_STOPLEN_BIT (1 << 2) /* STOP length bit
*
* STOP length bit table:
* 0 -> 1 stop bit
* 1 -> 1-1.5 stop bits if
* word length is 5,
* 2 stop bits otherwise
*/
#define SC16IS7XX_LCR_PARITY_BIT (1 << 3) /* Parity bit enable */
#define SC16IS7XX_LCR_EVENPARITY_BIT (1 << 4) /* Even parity bit enable */
#define SC16IS7XX_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */
#define SC16IS7XX_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */
#define SC16IS7XX_LCR_DLAB_BIT (1 << 7) /* Divisor Latch enable */
#define SC16IS7XX_LCR_WORD_LEN_5 (0x00)
#define SC16IS7XX_LCR_WORD_LEN_6 (0x01)
#define SC16IS7XX_LCR_WORD_LEN_7 (0x02)
#define SC16IS7XX_LCR_WORD_LEN_8 (0x03)
#define SC16IS7XX_LCR_CONF_MODE_A SC16IS7XX_LCR_DLAB_BIT /* Special
* reg set */
#define SC16IS7XX_LCR_CONF_MODE_B 0xBF /* Enhanced
* reg set */
/* MCR register bits */
#define SC16IS7XX_MCR_DTR_BIT (1 << 0) /* DTR complement
* - only on 75x/76x
*/
#define SC16IS7XX_MCR_RTS_BIT (1 << 1) /* RTS complement */
#define SC16IS7XX_MCR_TCRTLR_BIT (1 << 2) /* TCR/TLR register enable */
#define SC16IS7XX_MCR_LOOP_BIT (1 << 4) /* Enable loopback test mode */
#define SC16IS7XX_MCR_XONANY_BIT (1 << 5) /* Enable Xon Any
* - write enabled
* if (EFR[4] == 1)
*/
#define SC16IS7XX_MCR_IRDA_BIT (1 << 6) /* Enable IrDA mode
* - write enabled
* if (EFR[4] == 1)
*/
#define SC16IS7XX_MCR_CLKSEL_BIT (1 << 7) /* Divide clock by 4
* - write enabled
* if (EFR[4] == 1)
*/
/* LSR register bits */
#define SC16IS7XX_LSR_DR_BIT (1 << 0) /* Receiver data ready */
#define SC16IS7XX_LSR_OE_BIT (1 << 1) /* Overrun Error */
#define SC16IS7XX_LSR_PE_BIT (1 << 2) /* Parity Error */
#define SC16IS7XX_LSR_FE_BIT (1 << 3) /* Frame Error */
#define SC16IS7XX_LSR_BI_BIT (1 << 4) /* Break Interrupt */
#define SC16IS7XX_LSR_BRK_ERROR_MASK 0x1E /* BI, FE, PE, OE bits */
#define SC16IS7XX_LSR_THRE_BIT (1 << 5) /* TX holding register empty */
#define SC16IS7XX_LSR_TEMT_BIT (1 << 6) /* Transmitter empty */
#define SC16IS7XX_LSR_FIFOE_BIT (1 << 7) /* Fifo Error */
/* MSR register bits */
#define SC16IS7XX_MSR_DCTS_BIT (1 << 0) /* Delta CTS Clear To Send */
#define SC16IS7XX_MSR_DDSR_BIT (1 << 1) /* Delta DSR Data Set Ready
* or (IO4)
* - only on 75x/76x
*/
#define SC16IS7XX_MSR_DRI_BIT (1 << 2) /* Delta RI Ring Indicator
* or (IO7)
* - only on 75x/76x
*/
#define SC16IS7XX_MSR_DCD_BIT (1 << 3) /* Delta CD Carrier Detect
* or (IO6)
* - only on 75x/76x
*/
#define SC16IS7XX_MSR_CTS_BIT (1 << 4) /* CTS */
#define SC16IS7XX_MSR_DSR_BIT (1 << 5) /* DSR (IO4)
* - only on 75x/76x
*/
#define SC16IS7XX_MSR_RI_BIT (1 << 6) /* RI (IO7)
* - only on 75x/76x
*/
#define SC16IS7XX_MSR_CD_BIT (1 << 7) /* CD (IO6)
* - only on 75x/76x
*/
#define SC16IS7XX_MSR_DELTA_MASK 0x0F /* Any of the delta bits! */
/*
* TCR register bits
* TCR trigger levels are available from 0 to 60 characters with a granularity
* of four.
* The programmer must program the TCR such that TCR[3:0] > TCR[7:4]. There is
* no built-in hardware check to make sure this condition is met. Also, the TCR
* must be programmed with this condition before auto RTS or software flow
* control is enabled to avoid spurious operation of the device.
*/
#define SC16IS7XX_TCR_RX_HALT(words) ((((words) / 4) & 0x0f) << 0)
#define SC16IS7XX_TCR_RX_RESUME(words) ((((words) / 4) & 0x0f) << 4)
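/*
 * For example (illustrative values): halting RX at 48 characters and
 * resuming at 24 would be programmed as
 *	SC16IS7XX_TCR_RX_HALT(48) | SC16IS7XX_TCR_RX_RESUME(24)	-> 0x6C
 * which satisfies the TCR[3:0] > TCR[7:4] rule above.
 */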
/*
* TLR register bits
* If TLR[3:0] or TLR[7:4] are logical 0, the selectable trigger levels via the
* FIFO Control Register (FCR) are used for the transmit and receive FIFO
* trigger levels. Trigger levels from 4 characters to 60 characters are
* available with a granularity of four.
*
* When the trigger level setting in TLR is zero, the SC16IS740/750/760 uses the
* trigger level setting defined in FCR. If TLR has a non-zero trigger level
* value, the trigger level defined in FCR is discarded. This applies to both
* the transmit FIFO and the receive FIFO trigger level setting.
*
* When TLR is used for RX trigger level control, FCR[7:6] should be left at the
* default state, that is, '00'.
*/
#define SC16IS7XX_TLR_TX_TRIGGER(words) ((((words) / 4) & 0x0f) << 0)
#define SC16IS7XX_TLR_RX_TRIGGER(words) ((((words) / 4) & 0x0f) << 4)
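/*
 * For example (illustrative values): a 16-character TX trigger and a
 * 60-character RX trigger would be programmed as
 *	SC16IS7XX_TLR_TX_TRIGGER(16) | SC16IS7XX_TLR_RX_TRIGGER(60)	-> 0xF4
 */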
/* IOControl register bits (Only 750/760) */
#define SC16IS7XX_IOCONTROL_LATCH_BIT (1 << 0) /* Enable input latching */
#define SC16IS7XX_IOCONTROL_MODEM_A_BIT (1 << 1) /* Enable GPIO[7:4] as modem A pins */
#define SC16IS7XX_IOCONTROL_MODEM_B_BIT (1 << 2) /* Enable GPIO[3:0] as modem B pins */
#define SC16IS7XX_IOCONTROL_SRESET_BIT (1 << 3) /* Software Reset */
/* EFCR register bits */
#define SC16IS7XX_EFCR_9BIT_MODE_BIT (1 << 0) /* Enable 9-bit or Multidrop
* mode (RS485) */
#define SC16IS7XX_EFCR_RXDISABLE_BIT (1 << 1) /* Disable receiver */
#define SC16IS7XX_EFCR_TXDISABLE_BIT (1 << 2) /* Disable transmitter */
#define SC16IS7XX_EFCR_AUTO_RS485_BIT (1 << 4) /* Auto RS485 RTS direction */
#define SC16IS7XX_EFCR_RTS_INVERT_BIT (1 << 5) /* RTS output inversion */
#define SC16IS7XX_EFCR_IRDA_MODE_BIT (1 << 7) /* IrDA mode
* 0 = rate up to 115.2 kbit/s
* - Only 750/760
* 1 = rate up to 1.152 Mbit/s
* - Only 760
*/
/* EFR register bits */
#define SC16IS7XX_EFR_AUTORTS_BIT (1 << 6) /* Auto RTS flow ctrl enable */
#define SC16IS7XX_EFR_AUTOCTS_BIT (1 << 7) /* Auto CTS flow ctrl enable */
#define SC16IS7XX_EFR_XOFF2_DETECT_BIT (1 << 5) /* Enable Xoff2 detection */
#define SC16IS7XX_EFR_ENABLE_BIT (1 << 4) /* Enable enhanced functions
* and writing to IER[7:4],
* FCR[5:4], MCR[7:5]
*/
#define SC16IS7XX_EFR_SWFLOW3_BIT (1 << 3) /* SWFLOW bit 3 */
#define SC16IS7XX_EFR_SWFLOW2_BIT (1 << 2) /* SWFLOW bit 2
*
* SWFLOW bits 3 & 2 table:
* 00 -> no transmitter flow
* control
* 01 -> transmitter generates
* XON2 and XOFF2
* 10 -> transmitter generates
* XON1 and XOFF1
* 11 -> transmitter generates
* XON1, XON2, XOFF1 and
* XOFF2
*/
#define SC16IS7XX_EFR_SWFLOW1_BIT (1 << 1) /* SWFLOW bit 1 */
#define SC16IS7XX_EFR_SWFLOW0_BIT (1 << 0) /* SWFLOW bit 0
*
* SWFLOW bits 1 & 0 table:
* 00 -> no received flow
* control
* 01 -> receiver compares
* XON2 and XOFF2
* 10 -> receiver compares
* XON1 and XOFF1
* 11 -> receiver compares
* XON1, XON2, XOFF1 and
* XOFF2
*/
#define SC16IS7XX_EFR_FLOWCTRL_BITS (SC16IS7XX_EFR_AUTORTS_BIT | \
SC16IS7XX_EFR_AUTOCTS_BIT | \
SC16IS7XX_EFR_XOFF2_DETECT_BIT | \
SC16IS7XX_EFR_SWFLOW3_BIT | \
SC16IS7XX_EFR_SWFLOW2_BIT | \
SC16IS7XX_EFR_SWFLOW1_BIT | \
SC16IS7XX_EFR_SWFLOW0_BIT)
/* Misc definitions */
#define SC16IS7XX_FIFO_SIZE (64)
#define SC16IS7XX_REG_SHIFT 2
#define SC16IS7XX_GPIOS_PER_BANK 4
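/*
 * The regmap address used by the sc16is7xx_port_*() helpers below
 * encodes both the register and the UART channel:
 *	addr = (reg << SC16IS7XX_REG_SHIFT) | line
 * e.g. the LSR (0x05) of channel 1 lives at (0x05 << 2) | 1 = 0x15.
 */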
struct sc16is7xx_devtype {
char name[10];
int nr_gpio;
int nr_uart;
};
#define SC16IS7XX_RECONF_MD (1 << 0)
#define SC16IS7XX_RECONF_IER (1 << 1)
#define SC16IS7XX_RECONF_RS485 (1 << 2)
struct sc16is7xx_one_config {
unsigned int flags;
u8 ier_mask;
u8 ier_val;
};
struct sc16is7xx_one {
struct uart_port port;
u8 line;
struct kthread_work tx_work;
struct kthread_work reg_work;
struct kthread_delayed_work ms_work;
struct sc16is7xx_one_config config;
bool irda_mode;
unsigned int old_mctrl;
};
struct sc16is7xx_port {
const struct sc16is7xx_devtype *devtype;
struct regmap *regmap;
struct clk *clk;
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio;
unsigned long gpio_valid_mask;
#endif
u8 mctrl_mask;
unsigned char buf[SC16IS7XX_FIFO_SIZE];
struct kthread_worker kworker;
struct task_struct *kworker_task;
struct mutex efr_lock;
struct sc16is7xx_one p[];
};
static unsigned long sc16is7xx_lines;
static struct uart_driver sc16is7xx_uart = {
.owner = THIS_MODULE,
.dev_name = "ttySC",
.nr = SC16IS7XX_MAX_DEVS,
};
static void sc16is7xx_ier_set(struct uart_port *port, u8 bit);
static void sc16is7xx_stop_tx(struct uart_port *port);
#define to_sc16is7xx_port(p,e) ((container_of((p), struct sc16is7xx_port, e)))
#define to_sc16is7xx_one(p,e) ((container_of((p), struct sc16is7xx_one, e)))
static int sc16is7xx_line(struct uart_port *port)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
return one->line;
}
static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
unsigned int val = 0;
const u8 line = sc16is7xx_line(port);
regmap_read(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, &val);
return val;
}
static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
const u8 line = sc16is7xx_line(port);
regmap_write(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, val);
}
static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
const u8 line = sc16is7xx_line(port);
u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | line;
regcache_cache_bypass(s->regmap, true);
regmap_raw_read(s->regmap, addr, s->buf, rxlen);
regcache_cache_bypass(s->regmap, false);
}
static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
const u8 line = sc16is7xx_line(port);
u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | line;
/*
 * Don't send zero-length data; at least on SPI it confuses the chip,
 * making it report wrong TXLVL data.
 */
if (unlikely(!to_send))
return;
regcache_cache_bypass(s->regmap, true);
regmap_raw_write(s->regmap, addr, s->buf, to_send);
regcache_cache_bypass(s->regmap, false);
}
static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
u8 mask, u8 val)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
const u8 line = sc16is7xx_line(port);
regmap_update_bits(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line,
mask, val);
}
static int sc16is7xx_alloc_line(void)
{
int i;
BUILD_BUG_ON(SC16IS7XX_MAX_DEVS > BITS_PER_LONG);
for (i = 0; i < SC16IS7XX_MAX_DEVS; i++)
if (!test_and_set_bit(i, &sc16is7xx_lines))
break;
return i;
}
static void sc16is7xx_power(struct uart_port *port, int on)
{
sc16is7xx_port_update(port, SC16IS7XX_IER_REG,
SC16IS7XX_IER_SLEEP_BIT,
on ? 0 : SC16IS7XX_IER_SLEEP_BIT);
}
static const struct sc16is7xx_devtype sc16is74x_devtype = {
.name = "SC16IS74X",
.nr_gpio = 0,
.nr_uart = 1,
};
static const struct sc16is7xx_devtype sc16is750_devtype = {
.name = "SC16IS750",
.nr_gpio = 8,
.nr_uart = 1,
};
static const struct sc16is7xx_devtype sc16is752_devtype = {
.name = "SC16IS752",
.nr_gpio = 8,
.nr_uart = 2,
};
static const struct sc16is7xx_devtype sc16is760_devtype = {
.name = "SC16IS760",
.nr_gpio = 8,
.nr_uart = 1,
};
static const struct sc16is7xx_devtype sc16is762_devtype = {
.name = "SC16IS762",
.nr_gpio = 8,
.nr_uart = 2,
};
static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
{
switch (reg >> SC16IS7XX_REG_SHIFT) {
case SC16IS7XX_RHR_REG:
case SC16IS7XX_IIR_REG:
case SC16IS7XX_LSR_REG:
case SC16IS7XX_MSR_REG:
case SC16IS7XX_TXLVL_REG:
case SC16IS7XX_RXLVL_REG:
case SC16IS7XX_IOSTATE_REG:
case SC16IS7XX_IOCONTROL_REG:
return true;
default:
break;
}
return false;
}
static bool sc16is7xx_regmap_precious(struct device *dev, unsigned int reg)
{
switch (reg >> SC16IS7XX_REG_SHIFT) {
case SC16IS7XX_RHR_REG:
return true;
default:
break;
}
return false;
}
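/*
 * The volatile registers above simply bypass the regmap cache; RHR is
 * additionally marked precious because reading it pops a byte from the
 * RX FIFO, so incidental reads (e.g. regmap debugfs dumps) must be
 * avoided.
 */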
static int sc16is7xx_set_baud(struct uart_port *port, int baud)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
u8 lcr;
u8 prescaler = 0;
unsigned long clk = port->uartclk, div = clk / 16 / baud;
if (div > 0xffff) {
prescaler = SC16IS7XX_MCR_CLKSEL_BIT;
div /= 4;
}
/* In an amazing feat of design, the Enhanced Features Register shares
* the address of the Interrupt Identification Register, and is
* switched in by writing a magic value (0xbf) to the Line Control
* Register. Any interrupt firing during this time will see the EFR
* where it expects the IIR to be, leading to "Unexpected interrupt"
* messages.
*
* Prevent this possibility by claiming a mutex while accessing the
* EFR, and claiming the same mutex from within the interrupt handler.
* This is similar to disabling the interrupt, but that doesn't work
* because the bulk of the interrupt processing is run as a workqueue
* job in thread context.
*/
mutex_lock(&s->efr_lock);
lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
/* Open the LCR divisors for configuration */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_CONF_MODE_B);
/* Enable enhanced features */
regcache_cache_bypass(s->regmap, true);
sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
SC16IS7XX_EFR_ENABLE_BIT,
SC16IS7XX_EFR_ENABLE_BIT);
regcache_cache_bypass(s->regmap, false);
/* Put LCR back to the normal mode */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
mutex_unlock(&s->efr_lock);
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_CLKSEL_BIT,
prescaler);
/* Open the LCR divisors for configuration */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_CONF_MODE_A);
/* Write the new divisor */
regcache_cache_bypass(s->regmap, true);
sc16is7xx_port_write(port, SC16IS7XX_DLH_REG, div / 256);
sc16is7xx_port_write(port, SC16IS7XX_DLL_REG, div % 256);
regcache_cache_bypass(s->regmap, false);
/* Put LCR back to the normal mode */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
return DIV_ROUND_CLOSEST(clk / 16, div);
}
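/*
 * Worked example (hypothetical 14.7456 MHz crystal, 9600 baud):
 * div = 14745600 / 16 / 9600 = 96, which fits in 16 bits, so no
 * prescaler is needed; DLH:DLL is programmed with 0x0060 and the
 * function returns DIV_ROUND_CLOSEST(921600, 96) = 9600 exactly.
 */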
static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
unsigned int iir)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
unsigned int lsr = 0, bytes_read, i;
bool read_lsr = (iir == SC16IS7XX_IIR_RLSE_SRC);
u8 ch, flag;
if (unlikely(rxlen >= sizeof(s->buf))) {
dev_warn_ratelimited(port->dev,
"ttySC%i: Possible RX FIFO overrun: %d\n",
port->line, rxlen);
port->icount.buf_overrun++;
/* Ensure sanity of RX level */
rxlen = sizeof(s->buf);
}
while (rxlen) {
/* Only read lsr if there are possible errors in FIFO */
if (read_lsr) {
lsr = sc16is7xx_port_read(port, SC16IS7XX_LSR_REG);
if (!(lsr & SC16IS7XX_LSR_FIFOE_BIT))
read_lsr = false; /* No errors left in FIFO */
} else {
lsr = 0;
}
if (read_lsr) {
s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
bytes_read = 1;
} else {
sc16is7xx_fifo_read(port, rxlen);
bytes_read = rxlen;
}
lsr &= SC16IS7XX_LSR_BRK_ERROR_MASK;
port->icount.rx++;
flag = TTY_NORMAL;
if (unlikely(lsr)) {
if (lsr & SC16IS7XX_LSR_BI_BIT) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
} else if (lsr & SC16IS7XX_LSR_PE_BIT)
port->icount.parity++;
else if (lsr & SC16IS7XX_LSR_FE_BIT)
port->icount.frame++;
else if (lsr & SC16IS7XX_LSR_OE_BIT)
port->icount.overrun++;
lsr &= port->read_status_mask;
if (lsr & SC16IS7XX_LSR_BI_BIT)
flag = TTY_BREAK;
else if (lsr & SC16IS7XX_LSR_PE_BIT)
flag = TTY_PARITY;
else if (lsr & SC16IS7XX_LSR_FE_BIT)
flag = TTY_FRAME;
else if (lsr & SC16IS7XX_LSR_OE_BIT)
flag = TTY_OVERRUN;
}
for (i = 0; i < bytes_read; ++i) {
ch = s->buf[i];
if (uart_handle_sysrq_char(port, ch))
continue;
if (lsr & port->ignore_status_mask)
continue;
uart_insert_char(port, lsr, SC16IS7XX_LSR_OE_BIT, ch,
flag);
}
rxlen -= bytes_read;
}
tty_flip_buffer_push(&port->state->port);
}
static void sc16is7xx_handle_tx(struct uart_port *port)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct circ_buf *xmit = &port->state->xmit;
unsigned int txlen, to_send, i;
unsigned long flags;
if (unlikely(port->x_char)) {
sc16is7xx_port_write(port, SC16IS7XX_THR_REG, port->x_char);
port->icount.tx++;
port->x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
spin_lock_irqsave(&port->lock, flags);
sc16is7xx_stop_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
return;
}
/* Get length of data pending in circular buffer */
to_send = uart_circ_chars_pending(xmit);
if (likely(to_send)) {
/* Limit to size of TX FIFO */
txlen = sc16is7xx_port_read(port, SC16IS7XX_TXLVL_REG);
if (txlen > SC16IS7XX_FIFO_SIZE) {
dev_err_ratelimited(port->dev,
"chip reports %d free bytes in TX fifo, but it only has %d",
txlen, SC16IS7XX_FIFO_SIZE);
txlen = 0;
}
to_send = (to_send > txlen) ? txlen : to_send;
/* Convert to linear buffer */
for (i = 0; i < to_send; ++i) {
s->buf[i] = xmit->buf[xmit->tail];
uart_xmit_advance(port, 1);
}
sc16is7xx_fifo_write(port, to_send);
}
spin_lock_irqsave(&port->lock, flags);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (uart_circ_empty(xmit))
sc16is7xx_stop_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
}
static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
{
u8 msr = sc16is7xx_port_read(port, SC16IS7XX_MSR_REG);
unsigned int mctrl = 0;
mctrl |= (msr & SC16IS7XX_MSR_CTS_BIT) ? TIOCM_CTS : 0;
mctrl |= (msr & SC16IS7XX_MSR_DSR_BIT) ? TIOCM_DSR : 0;
mctrl |= (msr & SC16IS7XX_MSR_CD_BIT) ? TIOCM_CAR : 0;
mctrl |= (msr & SC16IS7XX_MSR_RI_BIT) ? TIOCM_RNG : 0;
return mctrl;
}
static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
{
struct uart_port *port = &one->port;
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
unsigned int status, changed;
lockdep_assert_held_once(&s->efr_lock);
status = sc16is7xx_get_hwmctrl(port);
changed = status ^ one->old_mctrl;
if (changed == 0)
return;
one->old_mctrl = status;
spin_lock_irqsave(&port->lock, flags);
if ((changed & TIOCM_RNG) && (status & TIOCM_RNG))
port->icount.rng++;
if (changed & TIOCM_DSR)
port->icount.dsr++;
if (changed & TIOCM_CAR)
uart_handle_dcd_change(port, status & TIOCM_CAR);
if (changed & TIOCM_CTS)
uart_handle_cts_change(port, status & TIOCM_CTS);
wake_up_interruptible(&port->state->port.delta_msr_wait);
spin_unlock_irqrestore(&port->lock, flags);
}
static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
{
struct uart_port *port = &s->p[portno].port;
do {
unsigned int iir, rxlen;
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
if (iir & SC16IS7XX_IIR_NO_INT_BIT)
return false;
iir &= SC16IS7XX_IIR_ID_MASK;
switch (iir) {
case SC16IS7XX_IIR_RDI_SRC:
case SC16IS7XX_IIR_RLSE_SRC:
case SC16IS7XX_IIR_RTOI_SRC:
case SC16IS7XX_IIR_XOFFI_SRC:
rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
if (rxlen)
sc16is7xx_handle_rx(port, rxlen, iir);
break;
/* CTSRTS interrupt comes only when CTS goes inactive */
case SC16IS7XX_IIR_CTSRTS_SRC:
case SC16IS7XX_IIR_MSI_SRC:
sc16is7xx_update_mlines(one);
break;
case SC16IS7XX_IIR_THRI_SRC:
sc16is7xx_handle_tx(port);
break;
default:
dev_err_ratelimited(port->dev,
"ttySC%i: Unexpected interrupt: %x",
port->line, iir);
break;
}
} while (0);
return true;
}
static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
{
struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
mutex_lock(&s->efr_lock);
while (1) {
bool keep_polling = false;
int i;
for (i = 0; i < s->devtype->nr_uart; ++i)
keep_polling |= sc16is7xx_port_irq(s, i);
if (!keep_polling)
break;
}
mutex_unlock(&s->efr_lock);
return IRQ_HANDLED;
}
static void sc16is7xx_tx_proc(struct kthread_work *ws)
{
struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
if ((port->rs485.flags & SER_RS485_ENABLED) &&
(port->rs485.delay_rts_before_send > 0))
msleep(port->rs485.delay_rts_before_send);
mutex_lock(&s->efr_lock);
sc16is7xx_handle_tx(port);
mutex_unlock(&s->efr_lock);
spin_lock_irqsave(&port->lock, flags);
sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
spin_unlock_irqrestore(&port->lock, flags);
}
static void sc16is7xx_reconf_rs485(struct uart_port *port)
{
const u32 mask = SC16IS7XX_EFCR_AUTO_RS485_BIT |
SC16IS7XX_EFCR_RTS_INVERT_BIT;
u32 efcr = 0;
struct serial_rs485 *rs485 = &port->rs485;
unsigned long irqflags;
spin_lock_irqsave(&port->lock, irqflags);
if (rs485->flags & SER_RS485_ENABLED) {
efcr |= SC16IS7XX_EFCR_AUTO_RS485_BIT;
if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT;
}
spin_unlock_irqrestore(&port->lock, irqflags);
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr);
}
static void sc16is7xx_reg_proc(struct kthread_work *ws)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(ws, reg_work);
struct sc16is7xx_one_config config;
unsigned long irqflags;
spin_lock_irqsave(&one->port.lock, irqflags);
config = one->config;
memset(&one->config, 0, sizeof(one->config));
spin_unlock_irqrestore(&one->port.lock, irqflags);
if (config.flags & SC16IS7XX_RECONF_MD) {
u8 mcr = 0;
/* Device ignores RTS setting when hardware flow is enabled */
if (one->port.mctrl & TIOCM_RTS)
mcr |= SC16IS7XX_MCR_RTS_BIT;
if (one->port.mctrl & TIOCM_DTR)
mcr |= SC16IS7XX_MCR_DTR_BIT;
if (one->port.mctrl & TIOCM_LOOP)
mcr |= SC16IS7XX_MCR_LOOP_BIT;
sc16is7xx_port_update(&one->port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_RTS_BIT |
SC16IS7XX_MCR_DTR_BIT |
SC16IS7XX_MCR_LOOP_BIT,
mcr);
}
if (config.flags & SC16IS7XX_RECONF_IER)
sc16is7xx_port_update(&one->port, SC16IS7XX_IER_REG,
config.ier_mask, config.ier_val);
if (config.flags & SC16IS7XX_RECONF_RS485)
sc16is7xx_reconf_rs485(&one->port);
}
static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
lockdep_assert_held_once(&port->lock);
one->config.flags |= SC16IS7XX_RECONF_IER;
one->config.ier_mask |= bit;
one->config.ier_val &= ~bit;
kthread_queue_work(&s->kworker, &one->reg_work);
}
static void sc16is7xx_ier_set(struct uart_port *port, u8 bit)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
lockdep_assert_held_once(&port->lock);
one->config.flags |= SC16IS7XX_RECONF_IER;
one->config.ier_mask |= bit;
one->config.ier_val |= bit;
kthread_queue_work(&s->kworker, &one->reg_work);
}
static void sc16is7xx_stop_tx(struct uart_port *port)
{
sc16is7xx_ier_clear(port, SC16IS7XX_IER_THRI_BIT);
}
static void sc16is7xx_stop_rx(struct uart_port *port)
{
sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
}
static void sc16is7xx_ms_proc(struct kthread_work *ws)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(ws, ms_work.work);
struct sc16is7xx_port *s = dev_get_drvdata(one->port.dev);
if (one->port.state) {
mutex_lock(&s->efr_lock);
sc16is7xx_update_mlines(one);
mutex_unlock(&s->efr_lock);
kthread_queue_delayed_work(&s->kworker, &one->ms_work, HZ);
}
}
static void sc16is7xx_enable_ms(struct uart_port *port)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
lockdep_assert_held_once(&port->lock);
kthread_queue_delayed_work(&s->kworker, &one->ms_work, 0);
}
static void sc16is7xx_start_tx(struct uart_port *port)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
kthread_queue_work(&s->kworker, &one->tx_work);
}
static void sc16is7xx_throttle(struct uart_port *port)
{
unsigned long flags;
/*
* Hardware flow control is enabled and thus the device ignores RTS
* value set in MCR register. Stop reading data from RX FIFO so the
* AutoRTS feature will de-activate RTS output.
*/
spin_lock_irqsave(&port->lock, flags);
sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
spin_unlock_irqrestore(&port->lock, flags);
}
static void sc16is7xx_unthrottle(struct uart_port *port)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
sc16is7xx_ier_set(port, SC16IS7XX_IER_RDI_BIT);
spin_unlock_irqrestore(&port->lock, flags);
}
static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
{
unsigned int lsr;
lsr = sc16is7xx_port_read(port, SC16IS7XX_LSR_REG);
return (lsr & SC16IS7XX_LSR_TEMT_BIT) ? TIOCSER_TEMT : 0;
}
static unsigned int sc16is7xx_get_mctrl(struct uart_port *port)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
/* Called with the port lock held, so we can only return the cached value */
return one->old_mctrl;
}
static void sc16is7xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
one->config.flags |= SC16IS7XX_RECONF_MD;
kthread_queue_work(&s->kworker, &one->reg_work);
}
static void sc16is7xx_break_ctl(struct uart_port *port, int break_state)
{
sc16is7xx_port_update(port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_TXBREAK_BIT,
break_state ? SC16IS7XX_LCR_TXBREAK_BIT : 0);
}
static void sc16is7xx_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
unsigned int lcr, flow = 0;
int baud;
unsigned long flags;
kthread_cancel_delayed_work_sync(&one->ms_work);
/* Mask termios capabilities we don't support */
termios->c_cflag &= ~CMSPAR;
/* Word size */
switch (termios->c_cflag & CSIZE) {
case CS5:
lcr = SC16IS7XX_LCR_WORD_LEN_5;
break;
case CS6:
lcr = SC16IS7XX_LCR_WORD_LEN_6;
break;
case CS7:
lcr = SC16IS7XX_LCR_WORD_LEN_7;
break;
case CS8:
lcr = SC16IS7XX_LCR_WORD_LEN_8;
break;
default:
lcr = SC16IS7XX_LCR_WORD_LEN_8;
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= CS8;
break;
}
/* Parity */
if (termios->c_cflag & PARENB) {
lcr |= SC16IS7XX_LCR_PARITY_BIT;
if (!(termios->c_cflag & PARODD))
lcr |= SC16IS7XX_LCR_EVENPARITY_BIT;
}
/* Stop bits */
if (termios->c_cflag & CSTOPB)
lcr |= SC16IS7XX_LCR_STOPLEN_BIT; /* 2 stops */
/* Set read status mask */
port->read_status_mask = SC16IS7XX_LSR_OE_BIT;
if (termios->c_iflag & INPCK)
port->read_status_mask |= SC16IS7XX_LSR_PE_BIT |
SC16IS7XX_LSR_FE_BIT;
if (termios->c_iflag & (BRKINT | PARMRK))
port->read_status_mask |= SC16IS7XX_LSR_BI_BIT;
/* Set status ignore mask */
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNBRK)
port->ignore_status_mask |= SC16IS7XX_LSR_BI_BIT;
if (!(termios->c_cflag & CREAD))
port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
/* As above, claim the mutex while accessing the EFR. */
mutex_lock(&s->efr_lock);
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_CONF_MODE_B);
/* Configure flow control */
regcache_cache_bypass(s->regmap, true);
sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]);
sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]);
port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
if (termios->c_cflag & CRTSCTS) {
flow |= SC16IS7XX_EFR_AUTOCTS_BIT |
SC16IS7XX_EFR_AUTORTS_BIT;
port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
}
if (termios->c_iflag & IXON)
flow |= SC16IS7XX_EFR_SWFLOW3_BIT;
if (termios->c_iflag & IXOFF)
flow |= SC16IS7XX_EFR_SWFLOW1_BIT;
sc16is7xx_port_update(port,
SC16IS7XX_EFR_REG,
SC16IS7XX_EFR_FLOWCTRL_BITS,
flow);
regcache_cache_bypass(s->regmap, false);
/* Update LCR register */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
mutex_unlock(&s->efr_lock);
/* Get baud rate generator configuration */
baud = uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / 4 / 0xffff,
port->uartclk / 16);
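/*
 * The limits above follow from the 16x oversampling divisor: the
 * highest rate is uartclk / 16 (divisor 1), and the lowest corresponds
 * to the 16-bit divider at its maximum (0xffff) combined with the
 * additional /4 clock prescaler (see sc16is7xx_set_baud()).
 */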
/* Setup baudrate generator */
baud = sc16is7xx_set_baud(port, baud);
spin_lock_irqsave(&port->lock, flags);
/* Update timeout according to new baud rate */
uart_update_timeout(port, termios->c_cflag, baud);
if (UART_ENABLE_MS(port, termios->c_cflag))
sc16is7xx_enable_ms(port);
spin_unlock_irqrestore(&port->lock, flags);
}
static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
if (rs485->flags & SER_RS485_ENABLED) {
/*
* The RTS signal is handled by hardware; its timing can't be
* influenced. However, it is sometimes useful to delay TX even
* without RTS control, therefore we try to handle
* .delay_rts_before_send.
*/
if (rs485->delay_rts_after_send)
return -EINVAL;
}
one->config.flags |= SC16IS7XX_RECONF_RS485;
kthread_queue_work(&s->kworker, &one->reg_work);
return 0;
}
static int sc16is7xx_startup(struct uart_port *port)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
unsigned int val;
unsigned long flags;
sc16is7xx_power(port, 1);
/* Reset FIFOs */
val = SC16IS7XX_FCR_RXRESET_BIT | SC16IS7XX_FCR_TXRESET_BIT;
sc16is7xx_port_write(port, SC16IS7XX_FCR_REG, val);
udelay(5);
sc16is7xx_port_write(port, SC16IS7XX_FCR_REG,
SC16IS7XX_FCR_FIFO_BIT);
/* Enable EFR */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_CONF_MODE_B);
regcache_cache_bypass(s->regmap, true);
/* Enable write access to enhanced features and internal clock div */
sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
SC16IS7XX_EFR_ENABLE_BIT,
SC16IS7XX_EFR_ENABLE_BIT);
/* Enable TCR/TLR */
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_TCRTLR_BIT,
SC16IS7XX_MCR_TCRTLR_BIT);
/* Configure flow control levels */
/* Flow control halt level 48, resume level 24 */
sc16is7xx_port_write(port, SC16IS7XX_TCR_REG,
SC16IS7XX_TCR_RX_RESUME(24) |
SC16IS7XX_TCR_RX_HALT(48));
regcache_cache_bypass(s->regmap, false);
/* Now, initialize the UART */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8);
/* Enable IrDA mode if requested in DT */
/* This bit must be written with LCR[7] = 0 */
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_IRDA_BIT,
one->irda_mode ?
SC16IS7XX_MCR_IRDA_BIT : 0);
/* Enable the Rx and Tx FIFO */
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG,
SC16IS7XX_EFCR_RXDISABLE_BIT |
SC16IS7XX_EFCR_TXDISABLE_BIT,
0);
/* Enable RX, CTS change and modem lines interrupts */
val = SC16IS7XX_IER_RDI_BIT | SC16IS7XX_IER_CTSI_BIT |
SC16IS7XX_IER_MSI_BIT;
sc16is7xx_port_write(port, SC16IS7XX_IER_REG, val);
/* Enable modem status polling */
spin_lock_irqsave(&port->lock, flags);
sc16is7xx_enable_ms(port);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
static void sc16is7xx_shutdown(struct uart_port *port)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
kthread_cancel_delayed_work_sync(&one->ms_work);
/* Disable all interrupts */
sc16is7xx_port_write(port, SC16IS7XX_IER_REG, 0);
/* Disable TX/RX */
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG,
SC16IS7XX_EFCR_RXDISABLE_BIT |
SC16IS7XX_EFCR_TXDISABLE_BIT,
SC16IS7XX_EFCR_RXDISABLE_BIT |
SC16IS7XX_EFCR_TXDISABLE_BIT);
sc16is7xx_power(port, 0);
kthread_flush_worker(&s->kworker);
}
static const char *sc16is7xx_type(struct uart_port *port)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
return (port->type == PORT_SC16IS7XX) ? s->devtype->name : NULL;
}
static int sc16is7xx_request_port(struct uart_port *port)
{
/* Do nothing */
return 0;
}
static void sc16is7xx_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_SC16IS7XX;
}
static int sc16is7xx_verify_port(struct uart_port *port,
struct serial_struct *s)
{
if ((s->type != PORT_UNKNOWN) && (s->type != PORT_SC16IS7XX))
return -EINVAL;
if (s->irq != port->irq)
return -EINVAL;
return 0;
}
static void sc16is7xx_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
sc16is7xx_power(port, (state == UART_PM_STATE_ON) ? 1 : 0);
}
static void sc16is7xx_null_void(struct uart_port *port)
{
/* Do nothing */
}
static const struct uart_ops sc16is7xx_ops = {
.tx_empty = sc16is7xx_tx_empty,
.set_mctrl = sc16is7xx_set_mctrl,
.get_mctrl = sc16is7xx_get_mctrl,
.stop_tx = sc16is7xx_stop_tx,
.start_tx = sc16is7xx_start_tx,
.throttle = sc16is7xx_throttle,
.unthrottle = sc16is7xx_unthrottle,
.stop_rx = sc16is7xx_stop_rx,
.enable_ms = sc16is7xx_enable_ms,
.break_ctl = sc16is7xx_break_ctl,
.startup = sc16is7xx_startup,
.shutdown = sc16is7xx_shutdown,
.set_termios = sc16is7xx_set_termios,
.type = sc16is7xx_type,
.request_port = sc16is7xx_request_port,
.release_port = sc16is7xx_null_void,
.config_port = sc16is7xx_config_port,
.verify_port = sc16is7xx_verify_port,
.pm = sc16is7xx_pm,
};
#ifdef CONFIG_GPIOLIB
static int sc16is7xx_gpio_get(struct gpio_chip *chip, unsigned offset)
{
unsigned int val;
struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
val = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG);
return !!(val & BIT(offset));
}
static void sc16is7xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset),
val ? BIT(offset) : 0);
}
static int sc16is7xx_gpio_direction_input(struct gpio_chip *chip,
unsigned offset)
{
struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset), 0);
return 0;
}
static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int val)
{
struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
u8 state = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG);
if (val)
state |= BIT(offset);
else
state &= ~BIT(offset);
/*
* If we write IOSTATE first, and then IODIR, the output value is not
* transferred to the corresponding I/O pin.
* The datasheet states that each register bit will be transferred to
* the corresponding I/O pin programmed as output when writing to
* IOSTATE. Therefore, configure direction first with IODIR, and then
* set value after with IOSTATE.
*/
sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset),
BIT(offset));
sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
return 0;
}
static int sc16is7xx_gpio_init_valid_mask(struct gpio_chip *chip,
unsigned long *valid_mask,
unsigned int ngpios)
{
struct sc16is7xx_port *s = gpiochip_get_data(chip);
*valid_mask = s->gpio_valid_mask;
return 0;
}
static int sc16is7xx_setup_gpio_chip(struct sc16is7xx_port *s)
{
struct device *dev = s->p[0].port.dev;
if (!s->devtype->nr_gpio)
return 0;
switch (s->mctrl_mask) {
case 0:
s->gpio_valid_mask = GENMASK(7, 0);
break;
case SC16IS7XX_IOCONTROL_MODEM_A_BIT:
s->gpio_valid_mask = GENMASK(3, 0);
break;
case SC16IS7XX_IOCONTROL_MODEM_B_BIT:
s->gpio_valid_mask = GENMASK(7, 4);
break;
default:
break;
}
if (s->gpio_valid_mask == 0)
return 0;
s->gpio.owner = THIS_MODULE;
s->gpio.parent = dev;
s->gpio.label = dev_name(dev);
s->gpio.init_valid_mask = sc16is7xx_gpio_init_valid_mask;
s->gpio.direction_input = sc16is7xx_gpio_direction_input;
s->gpio.get = sc16is7xx_gpio_get;
s->gpio.direction_output = sc16is7xx_gpio_direction_output;
s->gpio.set = sc16is7xx_gpio_set;
s->gpio.base = -1;
s->gpio.ngpio = s->devtype->nr_gpio;
s->gpio.can_sleep = 1;
return gpiochip_add_data(&s->gpio, s);
}
#endif
/*
* Configure ports designated to operate as modem control lines.
*/
static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
{
int i;
int ret;
int count;
u32 mctrl_port[2];
struct device *dev = s->p[0].port.dev;
count = device_property_count_u32(dev, "nxp,modem-control-line-ports");
if (count < 0 || count > ARRAY_SIZE(mctrl_port))
return 0;
ret = device_property_read_u32_array(dev, "nxp,modem-control-line-ports",
mctrl_port, count);
if (ret)
return ret;
s->mctrl_mask = 0;
for (i = 0; i < count; i++) {
/* Use GPIO lines as modem control lines */
if (mctrl_port[i] == 0)
s->mctrl_mask |= SC16IS7XX_IOCONTROL_MODEM_A_BIT;
else if (mctrl_port[i] == 1)
s->mctrl_mask |= SC16IS7XX_IOCONTROL_MODEM_B_BIT;
}
if (s->mctrl_mask)
regmap_update_bits(
s->regmap,
SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
SC16IS7XX_IOCONTROL_MODEM_A_BIT |
SC16IS7XX_IOCONTROL_MODEM_B_BIT, s->mctrl_mask);
return 0;
}
static const struct serial_rs485 sc16is7xx_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
.delay_rts_before_send = 1,
.delay_rts_after_send = 1, /* Not supported but keep returning -EINVAL */
};
static int sc16is7xx_probe(struct device *dev,
const struct sc16is7xx_devtype *devtype,
struct regmap *regmap, int irq)
{
unsigned long freq = 0, *pfreq = dev_get_platdata(dev);
unsigned int val;
u32 uartclk = 0;
int i, ret;
struct sc16is7xx_port *s;
if (IS_ERR(regmap))
return PTR_ERR(regmap);
/*
* This device does not have an identification register that would
* tell us if we are really connected to the correct device.
* The best we can do is to check if communication is at all possible.
*/
ret = regmap_read(regmap,
SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
if (ret < 0)
return -EPROBE_DEFER;
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
if (!s) {
dev_err(dev, "Error allocating port structure\n");
return -ENOMEM;
}
/* Always ask for fixed clock rate from a property. */
device_property_read_u32(dev, "clock-frequency", &uartclk);
s->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(s->clk))
return PTR_ERR(s->clk);
ret = clk_prepare_enable(s->clk);
if (ret)
return ret;
freq = clk_get_rate(s->clk);
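/*
 * No rate available from a clock provider: fall back to the
 * "clock-frequency" property, and let legacy platform data override
 * it when both are given.
 */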
if (freq == 0) {
if (uartclk)
freq = uartclk;
if (pfreq)
freq = *pfreq;
if (freq)
dev_dbg(dev, "Clock frequency: %luHz\n", freq);
else
return -EINVAL;
}
s->regmap = regmap;
s->devtype = devtype;
dev_set_drvdata(dev, s);
mutex_init(&s->efr_lock);
kthread_init_worker(&s->kworker);
s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
"sc16is7xx");
if (IS_ERR(s->kworker_task)) {
ret = PTR_ERR(s->kworker_task);
goto out_clk;
}
sched_set_fifo(s->kworker_task);
/* reset device, purging any pending irq / data */
regmap_write(s->regmap, SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
SC16IS7XX_IOCONTROL_SRESET_BIT);
for (i = 0; i < devtype->nr_uart; ++i) {
s->p[i].line = i;
/* Initialize port data */
s->p[i].port.dev = dev;
s->p[i].port.irq = irq;
s->p[i].port.type = PORT_SC16IS7XX;
s->p[i].port.fifosize = SC16IS7XX_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iobase = i;
/*
* Use all ones as membase to make sure uart_configure_port() in
* serial_core.c does not abort for SPI/I2C devices where the
* membase address is not applicable.
*/
s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.uartclk = freq;
s->p[i].port.rs485_config = sc16is7xx_config_rs485;
s->p[i].port.rs485_supported = sc16is7xx_rs485_supported;
s->p[i].port.ops = &sc16is7xx_ops;
s->p[i].old_mctrl = 0;
s->p[i].port.line = sc16is7xx_alloc_line();
if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
ret = -ENOMEM;
goto out_ports;
}
ret = uart_get_rs485_mode(&s->p[i].port);
if (ret)
goto out_ports;
/* Disable all interrupts */
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_IER_REG, 0);
/* Disable TX/RX */
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFCR_REG,
SC16IS7XX_EFCR_RXDISABLE_BIT |
SC16IS7XX_EFCR_TXDISABLE_BIT);
/* Initialize kthread work structs */
kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
kthread_init_delayed_work(&s->p[i].ms_work, sc16is7xx_ms_proc);
/* Register port */
uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
/* Enable EFR */
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_CONF_MODE_B);
regcache_cache_bypass(s->regmap, true);
/* Enable write access to enhanced features */
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFR_REG,
SC16IS7XX_EFR_ENABLE_BIT);
regcache_cache_bypass(s->regmap, false);
/* Restore access to general registers */
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG, 0x00);
/* Go to suspend mode */
sc16is7xx_power(&s->p[i].port, 0);
}
if (dev->of_node) {
struct property *prop;
const __be32 *p;
u32 u;
of_property_for_each_u32(dev->of_node, "irda-mode-ports",
prop, p, u)
if (u < devtype->nr_uart)
s->p[u].irda_mode = true;
}
ret = sc16is7xx_setup_mctrl_ports(s);
if (ret)
goto out_ports;
#ifdef CONFIG_GPIOLIB
ret = sc16is7xx_setup_gpio_chip(s);
if (ret)
goto out_ports;
#endif
/*
* Setup interrupt. We first try to acquire the IRQ line as level IRQ.
* If that succeeds, we can allow sharing the interrupt as well.
* In case the interrupt controller doesn't support that, we fall
* back to a non-shared falling-edge trigger.
*/
ret = devm_request_threaded_irq(dev, irq, NULL, sc16is7xx_irq,
IRQF_TRIGGER_LOW | IRQF_SHARED |
IRQF_ONESHOT,
dev_name(dev), s);
if (!ret)
return 0;
ret = devm_request_threaded_irq(dev, irq, NULL, sc16is7xx_irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), s);
if (!ret)
return 0;
#ifdef CONFIG_GPIOLIB
if (s->gpio_valid_mask)
gpiochip_remove(&s->gpio);
#endif
out_ports:
for (i--; i >= 0; i--) {
uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
clear_bit(s->p[i].port.line, &sc16is7xx_lines);
}
kthread_stop(s->kworker_task);
out_clk:
clk_disable_unprepare(s->clk);
return ret;
}
static void sc16is7xx_remove(struct device *dev)
{
struct sc16is7xx_port *s = dev_get_drvdata(dev);
int i;
#ifdef CONFIG_GPIOLIB
if (s->gpio_valid_mask)
gpiochip_remove(&s->gpio);
#endif
for (i = 0; i < s->devtype->nr_uart; i++) {
kthread_cancel_delayed_work_sync(&s->p[i].ms_work);
uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
clear_bit(s->p[i].port.line, &sc16is7xx_lines);
sc16is7xx_power(&s->p[i].port, 0);
}
kthread_flush_worker(&s->kworker);
kthread_stop(s->kworker_task);
clk_disable_unprepare(s->clk);
}
static const struct of_device_id __maybe_unused sc16is7xx_dt_ids[] = {
{ .compatible = "nxp,sc16is740", .data = &sc16is74x_devtype, },
{ .compatible = "nxp,sc16is741", .data = &sc16is74x_devtype, },
{ .compatible = "nxp,sc16is750", .data = &sc16is750_devtype, },
{ .compatible = "nxp,sc16is752", .data = &sc16is752_devtype, },
{ .compatible = "nxp,sc16is760", .data = &sc16is760_devtype, },
{ .compatible = "nxp,sc16is762", .data = &sc16is762_devtype, },
{ }
};
MODULE_DEVICE_TABLE(of, sc16is7xx_dt_ids);
static struct regmap_config regcfg = {
.reg_bits = 7,
.pad_bits = 1,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.volatile_reg = sc16is7xx_regmap_volatile,
.precious_reg = sc16is7xx_regmap_precious,
};
#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
static int sc16is7xx_spi_probe(struct spi_device *spi)
{
const struct sc16is7xx_devtype *devtype;
struct regmap *regmap;
int ret;
/* Setup SPI bus */
spi->bits_per_word = 8;
/* only supports mode 0 on SC16IS762 */
spi->mode = spi->mode ? : SPI_MODE_0;
spi->max_speed_hz = spi->max_speed_hz ? : 15000000;
ret = spi_setup(spi);
if (ret)
return ret;
if (spi->dev.of_node) {
devtype = device_get_match_data(&spi->dev);
if (!devtype)
return -ENODEV;
} else {
const struct spi_device_id *id_entry = spi_get_device_id(spi);
devtype = (struct sc16is7xx_devtype *)id_entry->driver_data;
}
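/*
 * The register address field also encodes the channel index (see the
 * SC16IS7XX_REG_SHIFT packing), so the highest addressable register
 * depends on how many UART channels this device has.
 */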
regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
(devtype->nr_uart - 1);
regmap = devm_regmap_init_spi(spi, ®cfg);
return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq);
}
static void sc16is7xx_spi_remove(struct spi_device *spi)
{
sc16is7xx_remove(&spi->dev);
}
static const struct spi_device_id sc16is7xx_spi_id_table[] = {
{ "sc16is74x", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is740", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is741", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is750", (kernel_ulong_t)&sc16is750_devtype, },
{ "sc16is752", (kernel_ulong_t)&sc16is752_devtype, },
{ "sc16is760", (kernel_ulong_t)&sc16is760_devtype, },
{ "sc16is762", (kernel_ulong_t)&sc16is762_devtype, },
{ }
};
MODULE_DEVICE_TABLE(spi, sc16is7xx_spi_id_table);
static struct spi_driver sc16is7xx_spi_uart_driver = {
.driver = {
.name = SC16IS7XX_NAME,
.of_match_table = sc16is7xx_dt_ids,
},
.probe = sc16is7xx_spi_probe,
.remove = sc16is7xx_spi_remove,
.id_table = sc16is7xx_spi_id_table,
};
MODULE_ALIAS("spi:sc16is7xx");
#endif
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
static int sc16is7xx_i2c_probe(struct i2c_client *i2c)
{
const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
const struct sc16is7xx_devtype *devtype;
struct regmap *regmap;
if (i2c->dev.of_node) {
devtype = device_get_match_data(&i2c->dev);
if (!devtype)
return -ENODEV;
} else {
devtype = (struct sc16is7xx_devtype *)id->driver_data;
}
regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
(devtype->nr_uart - 1);
regmap = devm_regmap_init_i2c(i2c, ®cfg);
return sc16is7xx_probe(&i2c->dev, devtype, regmap, i2c->irq);
}
static void sc16is7xx_i2c_remove(struct i2c_client *client)
{
sc16is7xx_remove(&client->dev);
}
static const struct i2c_device_id sc16is7xx_i2c_id_table[] = {
{ "sc16is74x", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is740", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is741", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is750", (kernel_ulong_t)&sc16is750_devtype, },
{ "sc16is752", (kernel_ulong_t)&sc16is752_devtype, },
{ "sc16is760", (kernel_ulong_t)&sc16is760_devtype, },
{ "sc16is762", (kernel_ulong_t)&sc16is762_devtype, },
{ }
};
MODULE_DEVICE_TABLE(i2c, sc16is7xx_i2c_id_table);
static struct i2c_driver sc16is7xx_i2c_uart_driver = {
.driver = {
.name = SC16IS7XX_NAME,
.of_match_table = sc16is7xx_dt_ids,
},
.probe = sc16is7xx_i2c_probe,
.remove = sc16is7xx_i2c_remove,
.id_table = sc16is7xx_i2c_id_table,
};
#endif
static int __init sc16is7xx_init(void)
{
int ret;
ret = uart_register_driver(&sc16is7xx_uart);
if (ret) {
pr_err("Registering UART driver failed\n");
return ret;
}
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
goto err_i2c;
}
#endif
#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx spi --> %d\n", ret);
goto err_spi;
}
#endif
return ret;
#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
err_spi:
#endif
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
i2c_del_driver(&sc16is7xx_i2c_uart_driver);
err_i2c:
#endif
uart_unregister_driver(&sc16is7xx_uart);
return ret;
}
module_init(sc16is7xx_init);
static void __exit sc16is7xx_exit(void)
{
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
i2c_del_driver(&sc16is7xx_i2c_uart_driver);
#endif
#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
spi_unregister_driver(&sc16is7xx_spi_uart_driver);
#endif
uart_unregister_driver(&sc16is7xx_uart);
}
module_exit(sc16is7xx_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jon Ringle <[email protected]>");
MODULE_DESCRIPTION("SC16IS7XX serial driver");
| linux-master | drivers/tty/serial/sc16is7xx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MPS2 UART driver
*
* Copyright (C) 2015 ARM Limited
*
* Author: Vladimir Murzin <[email protected]>
*
* TODO: support for SysRq
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/tty_flip.h>
#include <linux/types.h>
#include <linux/idr.h>
#define SERIAL_NAME "ttyMPS"
#define DRIVER_NAME "mps2-uart"
#define MAKE_NAME(x) (DRIVER_NAME # x)
#define UARTn_DATA 0x00
#define UARTn_STATE 0x04
#define UARTn_STATE_TX_FULL BIT(0)
#define UARTn_STATE_RX_FULL BIT(1)
#define UARTn_STATE_TX_OVERRUN BIT(2)
#define UARTn_STATE_RX_OVERRUN BIT(3)
#define UARTn_CTRL 0x08
#define UARTn_CTRL_TX_ENABLE BIT(0)
#define UARTn_CTRL_RX_ENABLE BIT(1)
#define UARTn_CTRL_TX_INT_ENABLE BIT(2)
#define UARTn_CTRL_RX_INT_ENABLE BIT(3)
#define UARTn_CTRL_TX_OVERRUN_INT_ENABLE BIT(4)
#define UARTn_CTRL_RX_OVERRUN_INT_ENABLE BIT(5)
#define UARTn_INT 0x0c
#define UARTn_INT_TX BIT(0)
#define UARTn_INT_RX BIT(1)
#define UARTn_INT_TX_OVERRUN BIT(2)
#define UARTn_INT_RX_OVERRUN BIT(3)
#define UARTn_BAUDDIV 0x10
#define UARTn_BAUDDIV_MASK GENMASK(20, 0)
/*
* Helpers to make typical enable/disable operations more readable.
*/
#define UARTn_CTRL_TX_GRP (UARTn_CTRL_TX_ENABLE |\
UARTn_CTRL_TX_INT_ENABLE |\
UARTn_CTRL_TX_OVERRUN_INT_ENABLE)
#define UARTn_CTRL_RX_GRP (UARTn_CTRL_RX_ENABLE |\
UARTn_CTRL_RX_INT_ENABLE |\
UARTn_CTRL_RX_OVERRUN_INT_ENABLE)
#define MPS2_MAX_PORTS 3
#define UART_PORT_COMBINED_IRQ BIT(0)
struct mps2_uart_port {
struct uart_port port;
struct clk *clk;
unsigned int tx_irq;
unsigned int rx_irq;
unsigned int flags;
};
static inline struct mps2_uart_port *to_mps2_port(struct uart_port *port)
{
return container_of(port, struct mps2_uart_port, port);
}
static void mps2_uart_write8(struct uart_port *port, u8 val, unsigned int off)
{
struct mps2_uart_port *mps_port = to_mps2_port(port);
writeb(val, mps_port->port.membase + off);
}
static u8 mps2_uart_read8(struct uart_port *port, unsigned int off)
{
struct mps2_uart_port *mps_port = to_mps2_port(port);
return readb(mps_port->port.membase + off);
}
static void mps2_uart_write32(struct uart_port *port, u32 val, unsigned int off)
{
struct mps2_uart_port *mps_port = to_mps2_port(port);
writel_relaxed(val, mps_port->port.membase + off);
}
static unsigned int mps2_uart_tx_empty(struct uart_port *port)
{
u8 status = mps2_uart_read8(port, UARTn_STATE);
return (status & UARTn_STATE_TX_FULL) ? 0 : TIOCSER_TEMT;
}
static void mps2_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
static unsigned int mps2_uart_get_mctrl(struct uart_port *port)
{
return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR;
}
static void mps2_uart_stop_tx(struct uart_port *port)
{
u8 control = mps2_uart_read8(port, UARTn_CTRL);
control &= ~UARTn_CTRL_TX_INT_ENABLE;
mps2_uart_write8(port, control, UARTn_CTRL);
}
static void mps2_uart_tx_chars(struct uart_port *port)
{
u8 ch;
uart_port_tx(port, ch,
mps2_uart_tx_empty(port),
mps2_uart_write8(port, ch, UARTn_DATA));
}
static void mps2_uart_start_tx(struct uart_port *port)
{
u8 control = mps2_uart_read8(port, UARTn_CTRL);
control |= UARTn_CTRL_TX_INT_ENABLE;
mps2_uart_write8(port, control, UARTn_CTRL);
/*
* We have just unmasked the TX IRQ and kick-start transmission by
* polling; if there is enough data to fill the internal write buffer
* in one go, the TX IRQ will assert, at which point we switch to
* fully interrupt-driven TX.
*/
mps2_uart_tx_chars(port);
}
static void mps2_uart_stop_rx(struct uart_port *port)
{
u8 control = mps2_uart_read8(port, UARTn_CTRL);
control &= ~UARTn_CTRL_RX_GRP;
mps2_uart_write8(port, control, UARTn_CTRL);
}
static void mps2_uart_break_ctl(struct uart_port *port, int ctl)
{
}
static void mps2_uart_rx_chars(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
while (mps2_uart_read8(port, UARTn_STATE) & UARTn_STATE_RX_FULL) {
u8 rxdata = mps2_uart_read8(port, UARTn_DATA);
port->icount.rx++;
tty_insert_flip_char(tport, rxdata, TTY_NORMAL);
}
tty_flip_buffer_push(tport);
}
static irqreturn_t mps2_uart_rxirq(int irq, void *data)
{
struct uart_port *port = data;
u8 irqflag = mps2_uart_read8(port, UARTn_INT);
if (unlikely(!(irqflag & UARTn_INT_RX)))
return IRQ_NONE;
spin_lock(&port->lock);
mps2_uart_write8(port, UARTn_INT_RX, UARTn_INT);
mps2_uart_rx_chars(port);
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
static irqreturn_t mps2_uart_txirq(int irq, void *data)
{
struct uart_port *port = data;
u8 irqflag = mps2_uart_read8(port, UARTn_INT);
if (unlikely(!(irqflag & UARTn_INT_TX)))
return IRQ_NONE;
spin_lock(&port->lock);
mps2_uart_write8(port, UARTn_INT_TX, UARTn_INT);
mps2_uart_tx_chars(port);
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
static irqreturn_t mps2_uart_oerrirq(int irq, void *data)
{
irqreturn_t handled = IRQ_NONE;
struct uart_port *port = data;
u8 irqflag = mps2_uart_read8(port, UARTn_INT);
spin_lock(&port->lock);
if (irqflag & UARTn_INT_RX_OVERRUN) {
struct tty_port *tport = &port->state->port;
mps2_uart_write8(port, UARTn_INT_RX_OVERRUN, UARTn_INT);
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
tty_flip_buffer_push(tport);
handled = IRQ_HANDLED;
}
/*
* This has never been seen in practice, and it should never happen
* since we check whether there is enough room in the TX buffer before
* sending data. Keep this check anyway in case something unexpected
* happens.
*/
if (irqflag & UARTn_INT_TX_OVERRUN) {
mps2_uart_write8(port, UARTn_INT_TX_OVERRUN, UARTn_INT);
handled = IRQ_HANDLED;
}
spin_unlock(&port->lock);
return handled;
}
static irqreturn_t mps2_uart_combinedirq(int irq, void *data)
{
if (mps2_uart_rxirq(irq, data) == IRQ_HANDLED)
return IRQ_HANDLED;
if (mps2_uart_txirq(irq, data) == IRQ_HANDLED)
return IRQ_HANDLED;
if (mps2_uart_oerrirq(irq, data) == IRQ_HANDLED)
return IRQ_HANDLED;
return IRQ_NONE;
}
static int mps2_uart_startup(struct uart_port *port)
{
struct mps2_uart_port *mps_port = to_mps2_port(port);
u8 control = mps2_uart_read8(port, UARTn_CTRL);
int ret;
control &= ~(UARTn_CTRL_RX_GRP | UARTn_CTRL_TX_GRP);
mps2_uart_write8(port, control, UARTn_CTRL);
if (mps_port->flags & UART_PORT_COMBINED_IRQ) {
ret = request_irq(port->irq, mps2_uart_combinedirq, 0,
MAKE_NAME(-combined), mps_port);
if (ret) {
dev_err(port->dev, "failed to register combinedirq (%d)\n", ret);
return ret;
}
} else {
ret = request_irq(port->irq, mps2_uart_oerrirq, IRQF_SHARED,
MAKE_NAME(-overrun), mps_port);
if (ret) {
dev_err(port->dev, "failed to register oerrirq (%d)\n", ret);
return ret;
}
ret = request_irq(mps_port->rx_irq, mps2_uart_rxirq, 0,
MAKE_NAME(-rx), mps_port);
if (ret) {
dev_err(port->dev, "failed to register rxirq (%d)\n", ret);
goto err_free_oerrirq;
}
ret = request_irq(mps_port->tx_irq, mps2_uart_txirq, 0,
MAKE_NAME(-tx), mps_port);
if (ret) {
dev_err(port->dev, "failed to register txirq (%d)\n", ret);
goto err_free_rxirq;
}
}
control |= UARTn_CTRL_RX_GRP | UARTn_CTRL_TX_GRP;
mps2_uart_write8(port, control, UARTn_CTRL);
return 0;
err_free_rxirq:
free_irq(mps_port->rx_irq, mps_port);
err_free_oerrirq:
free_irq(port->irq, mps_port);
return ret;
}
static void mps2_uart_shutdown(struct uart_port *port)
{
struct mps2_uart_port *mps_port = to_mps2_port(port);
u8 control = mps2_uart_read8(port, UARTn_CTRL);
control &= ~(UARTn_CTRL_RX_GRP | UARTn_CTRL_TX_GRP);
mps2_uart_write8(port, control, UARTn_CTRL);
if (!(mps_port->flags & UART_PORT_COMBINED_IRQ)) {
free_irq(mps_port->rx_irq, mps_port);
free_irq(mps_port->tx_irq, mps_port);
}
free_irq(port->irq, mps_port);
}
static void
mps2_uart_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, bauddiv;
termios->c_cflag &= ~(CRTSCTS | CMSPAR);
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= CS8;
termios->c_cflag &= ~PARENB;
termios->c_cflag &= ~CSTOPB;
baud = uart_get_baud_rate(port, termios, old,
DIV_ROUND_CLOSEST(port->uartclk, UARTn_BAUDDIV_MASK),
DIV_ROUND_CLOSEST(port->uartclk, 16));
bauddiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
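/*
 * The divider feeds the baud rate generator directly; for example
 * (illustrative numbers only), a 25 MHz reference clock at 115200 baud
 * gives bauddiv = DIV_ROUND_CLOSEST(25000000, 115200) = 217.
 */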
spin_lock_irqsave(&port->lock, flags);
uart_update_timeout(port, termios->c_cflag, baud);
mps2_uart_write32(port, bauddiv, UARTn_BAUDDIV);
spin_unlock_irqrestore(&port->lock, flags);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
}
static const char *mps2_uart_type(struct uart_port *port)
{
return (port->type == PORT_MPS2UART) ? DRIVER_NAME : NULL;
}
static void mps2_uart_release_port(struct uart_port *port)
{
}
static int mps2_uart_request_port(struct uart_port *port)
{
return 0;
}
static void mps2_uart_config_port(struct uart_port *port, int type)
{
if (type & UART_CONFIG_TYPE && !mps2_uart_request_port(port))
port->type = PORT_MPS2UART;
}
static int mps2_uart_verify_port(struct uart_port *port, struct serial_struct *serinfo)
{
return -EINVAL;
}
static const struct uart_ops mps2_uart_pops = {
.tx_empty = mps2_uart_tx_empty,
.set_mctrl = mps2_uart_set_mctrl,
.get_mctrl = mps2_uart_get_mctrl,
.stop_tx = mps2_uart_stop_tx,
.start_tx = mps2_uart_start_tx,
.stop_rx = mps2_uart_stop_rx,
.break_ctl = mps2_uart_break_ctl,
.startup = mps2_uart_startup,
.shutdown = mps2_uart_shutdown,
.set_termios = mps2_uart_set_termios,
.type = mps2_uart_type,
.release_port = mps2_uart_release_port,
.request_port = mps2_uart_request_port,
.config_port = mps2_uart_config_port,
.verify_port = mps2_uart_verify_port,
};
static DEFINE_IDR(ports_idr);
#ifdef CONFIG_SERIAL_MPS2_UART_CONSOLE
static void mps2_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
while (mps2_uart_read8(port, UARTn_STATE) & UARTn_STATE_TX_FULL)
cpu_relax();
mps2_uart_write8(port, ch, UARTn_DATA);
}
static void mps2_uart_console_write(struct console *co, const char *s, unsigned int cnt)
{
struct mps2_uart_port *mps_port = idr_find(&ports_idr, co->index);
struct uart_port *port = &mps_port->port;
uart_console_write(port, s, cnt, mps2_uart_console_putchar);
}
static int mps2_uart_console_setup(struct console *co, char *options)
{
struct mps2_uart_port *mps_port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= MPS2_MAX_PORTS)
return -ENODEV;
mps_port = idr_find(&ports_idr, co->index);
if (!mps_port)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&mps_port->port, co, baud, parity, bits, flow);
}
static struct uart_driver mps2_uart_driver;
static struct console mps2_uart_console = {
.name = SERIAL_NAME,
.device = uart_console_device,
.write = mps2_uart_console_write,
.setup = mps2_uart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &mps2_uart_driver,
};
#define MPS2_SERIAL_CONSOLE (&mps2_uart_console)
static void mps2_early_putchar(struct uart_port *port, unsigned char ch)
{
while (readb(port->membase + UARTn_STATE) & UARTn_STATE_TX_FULL)
cpu_relax();
writeb(ch, port->membase + UARTn_DATA);
}
static void mps2_early_write(struct console *con, const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, mps2_early_putchar);
}
static int __init mps2_early_console_setup(struct earlycon_device *device,
const char *opt)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = mps2_early_write;
return 0;
}
OF_EARLYCON_DECLARE(mps2, "arm,mps2-uart", mps2_early_console_setup);
#else
#define MPS2_SERIAL_CONSOLE NULL
#endif
static struct uart_driver mps2_uart_driver = {
.driver_name = DRIVER_NAME,
.dev_name = SERIAL_NAME,
.nr = MPS2_MAX_PORTS,
.cons = MPS2_SERIAL_CONSOLE,
};
static int mps2_of_get_port(struct platform_device *pdev,
struct mps2_uart_port *mps_port)
{
struct device_node *np = pdev->dev.of_node;
int id;
if (!np)
return -ENODEV;
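/*
 * Honour a "serial<n>" alias when one exists; otherwise allocate the
 * next free port index cyclically.
 */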
id = of_alias_get_id(np, "serial");
if (id < 0)
id = idr_alloc_cyclic(&ports_idr, (void *)mps_port, 0, MPS2_MAX_PORTS, GFP_KERNEL);
else
id = idr_alloc(&ports_idr, (void *)mps_port, id, MPS2_MAX_PORTS, GFP_KERNEL);
if (id < 0)
return id;
/* Only the combined IRQ is present */
if (platform_irq_count(pdev) == 1)
mps_port->flags |= UART_PORT_COMBINED_IRQ;
mps_port->port.line = id;
return 0;
}
static int mps2_init_port(struct platform_device *pdev,
struct mps2_uart_port *mps_port)
{
struct resource *res;
int ret;
mps_port->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mps_port->port.membase))
return PTR_ERR(mps_port->port.membase);
mps_port->port.mapbase = res->start;
mps_port->port.mapsize = resource_size(res);
mps_port->port.iotype = UPIO_MEM;
mps_port->port.flags = UPF_BOOT_AUTOCONF;
mps_port->port.fifosize = 1;
mps_port->port.ops = &mps2_uart_pops;
mps_port->port.dev = &pdev->dev;
mps_port->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(mps_port->clk))
return PTR_ERR(mps_port->clk);
ret = clk_prepare_enable(mps_port->clk);
if (ret)
return ret;
mps_port->port.uartclk = clk_get_rate(mps_port->clk);
clk_disable_unprepare(mps_port->clk);
if (mps_port->flags & UART_PORT_COMBINED_IRQ) {
mps_port->port.irq = platform_get_irq(pdev, 0);
} else {
mps_port->rx_irq = platform_get_irq(pdev, 0);
mps_port->tx_irq = platform_get_irq(pdev, 1);
mps_port->port.irq = platform_get_irq(pdev, 2);
}
return ret;
}
static int mps2_serial_probe(struct platform_device *pdev)
{
struct mps2_uart_port *mps_port;
int ret;
mps_port = devm_kzalloc(&pdev->dev, sizeof(struct mps2_uart_port), GFP_KERNEL);
if (!mps_port)
return -ENOMEM;
ret = mps2_of_get_port(pdev, mps_port);
if (ret)
return ret;
ret = mps2_init_port(pdev, mps_port);
if (ret)
return ret;
ret = uart_add_one_port(&mps2_uart_driver, &mps_port->port);
if (ret)
return ret;
platform_set_drvdata(pdev, mps_port);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id mps2_match[] = {
{ .compatible = "arm,mps2-uart", },
{},
};
#endif
static struct platform_driver mps2_serial_driver = {
.probe = mps2_serial_probe,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(mps2_match),
.suppress_bind_attrs = true,
},
};
static int __init mps2_uart_init(void)
{
int ret;
ret = uart_register_driver(&mps2_uart_driver);
if (ret)
return ret;
ret = platform_driver_register(&mps2_serial_driver);
if (ret)
uart_unregister_driver(&mps2_uart_driver);
return ret;
}
arch_initcall(mps2_uart_init);
| linux-master | drivers/tty/serial/mps2-uart.c |
// SPDX-License-Identifier: GPL-2.0
/* sunhv.c: Serial driver for SUN4V hypervisor console.
*
* Copyright (C) 2006, 2007 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/major.h>
#include <linux/circ_buf.h>
#include <linux/serial.h>
#include <linux/sysrq.h>
#include <linux/console.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/hypervisor.h>
#include <asm/spitfire.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <linux/serial_core.h>
#include <linux/sunserialcore.h>
#define CON_BREAK ((long)-1)
#define CON_HUP ((long)-2)
#define IGNORE_BREAK 0x1
#define IGNORE_ALL 0x2
static char *con_write_page;
static char *con_read_page;
static int hung_up = 0;
static void transmit_chars_putchar(struct uart_port *port, struct circ_buf *xmit)
{
while (!uart_circ_empty(xmit)) {
long status = sun4v_con_putchar(xmit->buf[xmit->tail]);
if (status != HV_EOK)
break;
uart_xmit_advance(port, 1);
}
}
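/* The bulk-write variant hands the hypervisor a physically contiguous
 * run of the circular buffer; sun4v_con_write() may accept fewer bytes
 * than requested, so advance the tail by the reported 'sent' count and
 * loop until the buffer is empty or the hypervisor pushes back.
 */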
static void transmit_chars_write(struct uart_port *port, struct circ_buf *xmit)
{
while (!uart_circ_empty(xmit)) {
unsigned long ra = __pa(xmit->buf + xmit->tail);
unsigned long len, status, sent;
len = CIRC_CNT_TO_END(xmit->head, xmit->tail,
UART_XMIT_SIZE);
status = sun4v_con_write(ra, len, &sent);
if (status != HV_EOK)
break;
uart_xmit_advance(port, sent);
}
}
static int receive_chars_getchar(struct uart_port *port)
{
int saw_console_brk = 0;
int limit = 10000;
while (limit-- > 0) {
long status;
long c = sun4v_con_getchar(&status);
if (status == HV_EWOULDBLOCK)
break;
if (c == CON_BREAK) {
if (uart_handle_break(port))
continue;
saw_console_brk = 1;
c = 0;
}
if (c == CON_HUP) {
hung_up = 1;
uart_handle_dcd_change(port, false);
} else if (hung_up) {
hung_up = 0;
uart_handle_dcd_change(port, true);
}
if (port->state == NULL) {
uart_handle_sysrq_char(port, c);
continue;
}
port->icount.rx++;
if (uart_handle_sysrq_char(port, c))
continue;
tty_insert_flip_char(&port->state->port, c, TTY_NORMAL);
}
return saw_console_brk;
}
static int receive_chars_read(struct uart_port *port)
{
static int saw_console_brk;
int limit = 10000;
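/* Bound the work done per call so a flood of console input
 * cannot keep us looping in interrupt context indefinitely.
 */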
while (limit-- > 0) {
unsigned long ra = __pa(con_read_page);
unsigned long bytes_read, i;
long stat = sun4v_con_read(ra, PAGE_SIZE, &bytes_read);
if (stat != HV_EOK) {
bytes_read = 0;
if (stat == CON_BREAK) {
if (saw_console_brk)
sun_do_break();
if (uart_handle_break(port))
continue;
saw_console_brk = 1;
*con_read_page = 0;
bytes_read = 1;
} else if (stat == CON_HUP) {
hung_up = 1;
uart_handle_dcd_change(port, false);
continue;
} else {
/* HV_EWOULDBLOCK, etc. */
break;
}
}
if (hung_up) {
hung_up = 0;
uart_handle_dcd_change(port, true);
}
if (port->sysrq != 0 && *con_read_page) {
for (i = 0; i < bytes_read; i++)
uart_handle_sysrq_char(port, con_read_page[i]);
saw_console_brk = 0;
}
if (port->state == NULL)
continue;
port->icount.rx += bytes_read;
tty_insert_flip_string(&port->state->port, con_read_page,
bytes_read);
}
return saw_console_brk;
}
struct sunhv_ops {
void (*transmit_chars)(struct uart_port *port, struct circ_buf *xmit);
int (*receive_chars)(struct uart_port *port);
};
static const struct sunhv_ops bychar_ops = {
.transmit_chars = transmit_chars_putchar,
.receive_chars = receive_chars_getchar,
};
static const struct sunhv_ops bywrite_ops = {
.transmit_chars = transmit_chars_write,
.receive_chars = receive_chars_read,
};
static const struct sunhv_ops *sunhv_ops = &bychar_ops;
static struct tty_port *receive_chars(struct uart_port *port)
{
struct tty_port *tport = NULL;
if (port->state != NULL) /* NULL for an unopened serial console */
tport = &port->state->port;
if (sunhv_ops->receive_chars(port))
sun_do_break();
return tport;
}
static void transmit_chars(struct uart_port *port)
{
struct circ_buf *xmit;
if (!port->state)
return;
xmit = &port->state->xmit;
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
sunhv_ops->transmit_chars(port, xmit);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
static irqreturn_t sunhv_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
struct tty_port *tport;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
tport = receive_chars(port);
transmit_chars(port);
spin_unlock_irqrestore(&port->lock, flags);
if (tport)
tty_flip_buffer_push(tport);
return IRQ_HANDLED;
}
/* port->lock is not held. */
static unsigned int sunhv_tx_empty(struct uart_port *port)
{
/* Transmitter is always empty for us. If the circ buffer
* is non-empty or there is an x_char pending, our caller
* will do the right thing and ignore what we return here.
*/
return TIOCSER_TEMT;
}
/* port->lock held by caller. */
static void sunhv_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
return;
}
/* port->lock is held by caller and interrupts are disabled. */
static unsigned int sunhv_get_mctrl(struct uart_port *port)
{
return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
}
/* port->lock held by caller. */
static void sunhv_stop_tx(struct uart_port *port)
{
return;
}
/* port->lock held by caller. */
static void sunhv_start_tx(struct uart_port *port)
{
transmit_chars(port);
}
/* port->lock is not held. */
static void sunhv_send_xchar(struct uart_port *port, char ch)
{
unsigned long flags;
int limit = 10000;
if (ch == __DISABLED_CHAR)
return;
spin_lock_irqsave(&port->lock, flags);
while (limit-- > 0) {
long status = sun4v_con_putchar(ch);
if (status == HV_EOK)
break;
udelay(1);
}
spin_unlock_irqrestore(&port->lock, flags);
}
/* port->lock held by caller. */
static void sunhv_stop_rx(struct uart_port *port)
{
}
/* port->lock is not held. */
static void sunhv_break_ctl(struct uart_port *port, int break_state)
{
if (break_state) {
unsigned long flags;
int limit = 10000;
spin_lock_irqsave(&port->lock, flags);
while (limit-- > 0) {
long status = sun4v_con_putchar(CON_BREAK);
if (status == HV_EOK)
break;
udelay(1);
}
spin_unlock_irqrestore(&port->lock, flags);
}
}
/* port->lock is not held. */
static int sunhv_startup(struct uart_port *port)
{
return 0;
}
/* port->lock is not held. */
static void sunhv_shutdown(struct uart_port *port)
{
}
/* port->lock is not held. */
static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
unsigned int quot = uart_get_divisor(port, baud);
unsigned int iflag, cflag;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
iflag = termios->c_iflag;
cflag = termios->c_cflag;
port->ignore_status_mask = 0;
if (iflag & IGNBRK)
port->ignore_status_mask |= IGNORE_BREAK;
if ((cflag & CREAD) == 0)
port->ignore_status_mask |= IGNORE_ALL;
/* XXX */
uart_update_timeout(port, cflag,
(port->uartclk / (16 * quot)));
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *sunhv_type(struct uart_port *port)
{
return "SUN4V HCONS";
}
static void sunhv_release_port(struct uart_port *port)
{
}
static int sunhv_request_port(struct uart_port *port)
{
return 0;
}
static void sunhv_config_port(struct uart_port *port, int flags)
{
}
static int sunhv_verify_port(struct uart_port *port, struct serial_struct *ser)
{
return -EINVAL;
}
static const struct uart_ops sunhv_pops = {
.tx_empty = sunhv_tx_empty,
.set_mctrl = sunhv_set_mctrl,
.get_mctrl = sunhv_get_mctrl,
.stop_tx = sunhv_stop_tx,
.start_tx = sunhv_start_tx,
.send_xchar = sunhv_send_xchar,
.stop_rx = sunhv_stop_rx,
.break_ctl = sunhv_break_ctl,
.startup = sunhv_startup,
.shutdown = sunhv_shutdown,
.set_termios = sunhv_set_termios,
.type = sunhv_type,
.release_port = sunhv_release_port,
.request_port = sunhv_request_port,
.config_port = sunhv_config_port,
.verify_port = sunhv_verify_port,
};
static struct uart_driver sunhv_reg = {
.owner = THIS_MODULE,
.driver_name = "sunhv",
.dev_name = "ttyHV",
.major = TTY_MAJOR,
};
static struct uart_port *sunhv_port;
void sunhv_migrate_hvcons_irq(int cpu)
{
/* Migrate hvcons irq to param cpu */
irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
}
/* Copy 's' into the con_write_page, decoding "\n" into
* "\r\n" along the way. We have to return two lengths
* because the caller needs to know how much to advance
* 's' and also how many bytes to output via con_write_page.
*/
static int fill_con_write_page(const char *s, unsigned int n,
unsigned long *page_bytes)
{
const char *orig_s = s;
char *p = con_write_page;
int left = PAGE_SIZE;
while (n--) {
if (*s == '\n') {
if (left < 2)
break;
*p++ = '\r';
left--;
} else if (left < 1)
break;
*p++ = *s++;
left--;
}
*page_bytes = p - con_write_page;
return s - orig_s;
}
static void sunhv_console_write_paged(struct console *con, const char *s, unsigned n)
{
struct uart_port *port = sunhv_port;
unsigned long flags;
int locked = 1;
if (port->sysrq || oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
while (n > 0) {
unsigned long ra = __pa(con_write_page);
unsigned long page_bytes;
unsigned int cpy = fill_con_write_page(s, n,
&page_bytes);
n -= cpy;
s += cpy;
while (page_bytes > 0) {
unsigned long written;
int limit = 1000000;
while (limit--) {
unsigned long stat;
stat = sun4v_con_write(ra, page_bytes,
&written);
if (stat == HV_EOK)
break;
udelay(1);
}
if (limit < 0)
break;
page_bytes -= written;
ra += written;
}
}
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
}
static inline void sunhv_console_putchar(struct uart_port *port, char c)
{
int limit = 1000000;
while (limit-- > 0) {
long status = sun4v_con_putchar(c);
if (status == HV_EOK)
break;
udelay(1);
}
}
static void sunhv_console_write_bychar(struct console *con, const char *s, unsigned n)
{
struct uart_port *port = sunhv_port;
unsigned long flags;
int i, locked = 1;
if (port->sysrq || oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
for (i = 0; i < n; i++) {
if (*s == '\n')
sunhv_console_putchar(port, '\r');
sunhv_console_putchar(port, *s++);
}
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
}
static struct console sunhv_console = {
.name = "ttyHV",
.write = sunhv_console_write_bychar,
.device = uart_console_device,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &sunhv_reg,
};
static int hv_probe(struct platform_device *op)
{
struct uart_port *port;
unsigned long minor;
int err;
if (op->archdata.irqs[0] == 0xffffffff)
return -ENODEV;
port = kzalloc(sizeof(struct uart_port), GFP_KERNEL);
if (unlikely(!port))
return -ENOMEM;
minor = 1;
if (sun4v_hvapi_register(HV_GRP_CORE, 1, &minor) == 0 &&
minor >= 1) {
err = -ENOMEM;
con_write_page = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!con_write_page)
goto out_free_port;
con_read_page = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!con_read_page)
goto out_free_con_write_page;
sunhv_console.write = sunhv_console_write_paged;
sunhv_ops = &bywrite_ops;
}
sunhv_port = port;
port->has_sysrq = 1;
port->line = 0;
port->ops = &sunhv_pops;
port->type = PORT_SUNHV;
port->uartclk = (29491200 / 16); /* arbitrary */
port->membase = (unsigned char __iomem *) __pa(port);
port->irq = op->archdata.irqs[0];
port->dev = &op->dev;
err = sunserial_register_minors(&sunhv_reg, 1);
if (err)
goto out_free_con_read_page;
sunserial_console_match(&sunhv_console, op->dev.of_node,
&sunhv_reg, port->line, false);
err = uart_add_one_port(&sunhv_reg, port);
if (err)
goto out_unregister_driver;
err = request_irq(port->irq, sunhv_interrupt, 0, "hvcons", port);
if (err)
goto out_remove_port;
platform_set_drvdata(op, port);
return 0;
out_remove_port:
uart_remove_one_port(&sunhv_reg, port);
out_unregister_driver:
sunserial_unregister_minors(&sunhv_reg, 1);
out_free_con_read_page:
kfree(con_read_page);
out_free_con_write_page:
kfree(con_write_page);
out_free_port:
kfree(port);
sunhv_port = NULL;
return err;
}
static int hv_remove(struct platform_device *dev)
{
struct uart_port *port = platform_get_drvdata(dev);
free_irq(port->irq, port);
uart_remove_one_port(&sunhv_reg, port);
sunserial_unregister_minors(&sunhv_reg, 1);
kfree(con_read_page);
kfree(con_write_page);
kfree(port);
sunhv_port = NULL;
return 0;
}
static const struct of_device_id hv_match[] = {
{
.name = "console",
.compatible = "qcn",
},
{
.name = "console",
.compatible = "SUNW,sun4v-console",
},
{},
};
static struct platform_driver hv_driver = {
.driver = {
.name = "hv",
.of_match_table = hv_match,
},
.probe = hv_probe,
.remove = hv_remove,
};
static int __init sunhv_init(void)
{
if (tlb_type != hypervisor)
return -ENODEV;
return platform_driver_register(&hv_driver);
}
device_initcall(sunhv_init);
#if 0 /* ...def MODULE ; never supported as such */
MODULE_AUTHOR("David S. Miller");
MODULE_DESCRIPTION("SUN4V Hypervisor console driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
#endif
| linux-master | drivers/tty/serial/sunhv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Atheros AR933X SoC built-in UART driver
*
* Copyright (C) 2011 Gabor Juhos <[email protected]>
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/div64.h>
#include <asm/mach-ath79/ar933x_uart.h>
#include "serial_mctrl_gpio.h"
#define DRIVER_NAME "ar933x-uart"
#define AR933X_UART_MAX_SCALE 0xff
#define AR933X_UART_MAX_STEP 0xffff
#define AR933X_UART_MIN_BAUD 300
#define AR933X_UART_MAX_BAUD 3000000
#define AR933X_DUMMY_STATUS_RD 0x01
static struct uart_driver ar933x_uart_driver;
struct ar933x_uart_port {
struct uart_port port;
unsigned int ier; /* shadow Interrupt Enable Register */
unsigned int min_baud;
unsigned int max_baud;
struct clk *clk;
struct mctrl_gpios *gpios;
struct gpio_desc *rts_gpiod;
};
static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
int offset)
{
return readl(up->port.membase + offset);
}
static inline void ar933x_uart_write(struct ar933x_uart_port *up,
int offset, unsigned int value)
{
writel(value, up->port.membase + offset);
}
static inline void ar933x_uart_rmw(struct ar933x_uart_port *up,
unsigned int offset,
unsigned int mask,
unsigned int val)
{
unsigned int t;
t = ar933x_uart_read(up, offset);
t &= ~mask;
t |= val;
ar933x_uart_write(up, offset, t);
}
static inline void ar933x_uart_rmw_set(struct ar933x_uart_port *up,
unsigned int offset,
unsigned int val)
{
ar933x_uart_rmw(up, offset, 0, val);
}
static inline void ar933x_uart_rmw_clear(struct ar933x_uart_port *up,
unsigned int offset,
unsigned int val)
{
ar933x_uart_rmw(up, offset, val, 0);
}
static inline void ar933x_uart_start_tx_interrupt(struct ar933x_uart_port *up)
{
up->ier |= AR933X_UART_INT_TX_EMPTY;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
}
static inline void ar933x_uart_stop_tx_interrupt(struct ar933x_uart_port *up)
{
up->ier &= ~AR933X_UART_INT_TX_EMPTY;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
}
static inline void ar933x_uart_start_rx_interrupt(struct ar933x_uart_port *up)
{
up->ier |= AR933X_UART_INT_RX_VALID;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
}
static inline void ar933x_uart_stop_rx_interrupt(struct ar933x_uart_port *up)
{
up->ier &= ~AR933X_UART_INT_RX_VALID;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
}
static inline void ar933x_uart_putc(struct ar933x_uart_port *up, int ch)
{
unsigned int rdata;
rdata = ch & AR933X_UART_DATA_TX_RX_MASK;
rdata |= AR933X_UART_DATA_TX_CSR;
ar933x_uart_write(up, AR933X_UART_DATA_REG, rdata);
}
static unsigned int ar933x_uart_tx_empty(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
unsigned long flags;
unsigned int rdata;
spin_lock_irqsave(&up->port.lock, flags);
rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
spin_unlock_irqrestore(&up->port.lock, flags);
return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT;
}
static unsigned int ar933x_uart_get_mctrl(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
int ret = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
mctrl_gpio_get(up->gpios, &ret);
return ret;
}
static void ar933x_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
mctrl_gpio_set(up->gpios, mctrl);
}
static void ar933x_uart_start_tx(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
ar933x_uart_start_tx_interrupt(up);
}
static void ar933x_uart_wait_tx_complete(struct ar933x_uart_port *up)
{
unsigned int status;
unsigned int timeout = 60000;
/* Wait up to 60ms for the character(s) to be sent. */
do {
status = ar933x_uart_read(up, AR933X_UART_CS_REG);
if (--timeout == 0)
break;
udelay(1);
} while (status & AR933X_UART_CS_TX_BUSY);
if (timeout == 0)
dev_err(up->port.dev, "waiting for TX timed out\n");
}
static void ar933x_uart_rx_flush(struct ar933x_uart_port *up)
{
unsigned int status;
/* clear RX_VALID interrupt */
ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_RX_VALID);
/* remove characters from the RX FIFO */
do {
ar933x_uart_write(up, AR933X_UART_DATA_REG, AR933X_UART_DATA_RX_CSR);
status = ar933x_uart_read(up, AR933X_UART_DATA_REG);
} while (status & AR933X_UART_DATA_RX_CSR);
}
static void ar933x_uart_stop_tx(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
ar933x_uart_stop_tx_interrupt(up);
}
static void ar933x_uart_stop_rx(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
ar933x_uart_stop_rx_interrupt(up);
}
static void ar933x_uart_break_ctl(struct uart_port *port, int break_state)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
else
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
spin_unlock_irqrestore(&up->port.lock, flags);
}
/*
* baudrate = (clk / (scale + 1)) * (step * (1 / 2^17))
*/
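/*
 * For example (illustrative numbers only): with a 25 MHz reference
 * clock, scale = 0 and step = 604 give 25e6 * 604 / 2^17 ~= 115204,
 * i.e. 115200 baud within about 0.003%.
 */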
static unsigned long ar933x_uart_get_baud(unsigned int clk,
unsigned int scale,
unsigned int step)
{
u64 t;
u32 div;
div = (2 << 16) * (scale + 1);
t = clk;
t *= step;
t += (div / 2);
do_div(t, div);
return t;
}
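/*
 * Find the scale/step pair whose resulting rate is closest to the
 * requested baud rate: for each scale, derive the ideal step from the
 * formula above and keep the candidate with the smallest error.
 */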
static void ar933x_uart_get_scale_step(unsigned int clk,
unsigned int baud,
unsigned int *scale,
unsigned int *step)
{
unsigned int tscale;
long min_diff;
*scale = 0;
*step = 0;
min_diff = baud;
for (tscale = 0; tscale < AR933X_UART_MAX_SCALE; tscale++) {
u64 tstep;
int diff;
tstep = baud * (tscale + 1);
tstep *= (2 << 16);
do_div(tstep, clk);
if (tstep > AR933X_UART_MAX_STEP)
break;
diff = abs(ar933x_uart_get_baud(clk, tscale, tstep) - baud);
if (diff < min_diff) {
min_diff = diff;
*scale = tscale;
*step = tstep;
}
}
}
static void ar933x_uart_set_termios(struct uart_port *port,
struct ktermios *new,
const struct ktermios *old)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
unsigned int cs;
unsigned long flags;
unsigned int baud, scale, step;
/* Only CS8 is supported */
new->c_cflag &= ~CSIZE;
new->c_cflag |= CS8;
/* Only one stop bit is supported */
new->c_cflag &= ~CSTOPB;
cs = 0;
if (new->c_cflag & PARENB) {
if (!(new->c_cflag & PARODD))
cs |= AR933X_UART_CS_PARITY_EVEN;
else
cs |= AR933X_UART_CS_PARITY_ODD;
} else {
cs |= AR933X_UART_CS_PARITY_NONE;
}
/* Mark/space parity is not supported */
new->c_cflag &= ~CMSPAR;
baud = uart_get_baud_rate(port, new, old, up->min_baud, up->max_baud);
ar933x_uart_get_scale_step(port->uartclk, baud, &scale, &step);
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
spin_lock_irqsave(&up->port.lock, flags);
/* disable the UART */
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S);
/* Update the per-port timeout. */
uart_update_timeout(port, new->c_cflag, baud);
up->port.ignore_status_mask = 0;
/* ignore all characters if CREAD is not set */
if ((new->c_cflag & CREAD) == 0)
up->port.ignore_status_mask |= AR933X_DUMMY_STATUS_RD;
ar933x_uart_write(up, AR933X_UART_CLOCK_REG,
scale << AR933X_UART_CLOCK_SCALE_S | step);
/* setup configuration register */
ar933x_uart_rmw(up, AR933X_UART_CS_REG, AR933X_UART_CS_PARITY_M, cs);
/* enable host interrupt */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_HOST_INT_EN);
/* enable RX and TX ready override */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
/* reenable the UART */
ar933x_uart_rmw(up, AR933X_UART_CS_REG,
AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S);
spin_unlock_irqrestore(&up->port.lock, flags);
if (tty_termios_baud_rate(new))
tty_termios_encode_baud_rate(new, baud, baud);
}
static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
{
struct tty_port *port = &up->port.state->port;
int max_count = 256;
do {
unsigned int rdata;
unsigned char ch;
rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
if ((rdata & AR933X_UART_DATA_RX_CSR) == 0)
break;
/* remove the character from the FIFO */
ar933x_uart_write(up, AR933X_UART_DATA_REG,
AR933X_UART_DATA_RX_CSR);
up->port.icount.rx++;
ch = rdata & AR933X_UART_DATA_TX_RX_MASK;
if (uart_handle_sysrq_char(&up->port, ch))
continue;
if ((up->port.ignore_status_mask & AR933X_DUMMY_STATUS_RD) == 0)
tty_insert_flip_char(port, ch, TTY_NORMAL);
} while (max_count-- > 0);
tty_flip_buffer_push(port);
}
static void ar933x_uart_tx_chars(struct ar933x_uart_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
struct serial_rs485 *rs485conf = &up->port.rs485;
int count;
bool half_duplex_send = false;
if (uart_tx_stopped(&up->port))
return;
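/*
* RS485 half-duplex: before transmitting, mute the receiver and
* drive RTS to claim the bus; both are undone below once the FIFO
* has fully drained.
*/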
if ((rs485conf->flags & SER_RS485_ENABLED) &&
(up->port.x_char || !uart_circ_empty(xmit))) {
ar933x_uart_stop_rx_interrupt(up);
gpiod_set_value(up->rts_gpiod, !!(rs485conf->flags & SER_RS485_RTS_ON_SEND));
half_duplex_send = true;
}
count = up->port.fifosize;
do {
unsigned int rdata;
rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
if ((rdata & AR933X_UART_DATA_TX_CSR) == 0)
break;
if (up->port.x_char) {
ar933x_uart_putc(up, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
continue;
}
if (uart_circ_empty(xmit))
break;
ar933x_uart_putc(up, xmit->buf[xmit->tail]);
uart_xmit_advance(&up->port, 1);
} while (--count > 0);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
if (!uart_circ_empty(xmit)) {
ar933x_uart_start_tx_interrupt(up);
} else if (half_duplex_send) {
ar933x_uart_wait_tx_complete(up);
ar933x_uart_rx_flush(up);
ar933x_uart_start_rx_interrupt(up);
gpiod_set_value(up->rts_gpiod, !!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND));
}
}
static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
{
struct ar933x_uart_port *up = dev_id;
unsigned int status;
status = ar933x_uart_read(up, AR933X_UART_CS_REG);
if ((status & AR933X_UART_CS_HOST_INT) == 0)
return IRQ_NONE;
spin_lock(&up->port.lock);
status = ar933x_uart_read(up, AR933X_UART_INT_REG);
status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
if (status & AR933X_UART_INT_RX_VALID) {
ar933x_uart_write(up, AR933X_UART_INT_REG,
AR933X_UART_INT_RX_VALID);
ar933x_uart_rx_chars(up);
}
if (status & AR933X_UART_INT_TX_EMPTY) {
ar933x_uart_write(up, AR933X_UART_INT_REG,
AR933X_UART_INT_TX_EMPTY);
ar933x_uart_stop_tx_interrupt(up);
ar933x_uart_tx_chars(up);
}
spin_unlock(&up->port.lock);
return IRQ_HANDLED;
}
static int ar933x_uart_startup(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
unsigned long flags;
int ret;
ret = request_irq(up->port.irq, ar933x_uart_interrupt,
up->port.irqflags, dev_name(up->port.dev), up);
if (ret)
return ret;
spin_lock_irqsave(&up->port.lock, flags);
/* Enable HOST interrupts */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_HOST_INT_EN);
/* enable RX and TX ready override */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
/* Enable RX interrupts */
ar933x_uart_start_rx_interrupt(up);
spin_unlock_irqrestore(&up->port.lock, flags);
return 0;
}
static void ar933x_uart_shutdown(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
/* Disable all interrupts */
up->ier = 0;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
/* Disable break condition */
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
free_irq(up->port.irq, up);
}
static const char *ar933x_uart_type(struct uart_port *port)
{
return (port->type == PORT_AR933X) ? "AR933X UART" : NULL;
}
static void ar933x_uart_release_port(struct uart_port *port)
{
/* Nothing to release ... */
}
static int ar933x_uart_request_port(struct uart_port *port)
{
/* UARTs always present */
return 0;
}
static void ar933x_uart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_AR933X;
}
static int ar933x_uart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
if (ser->type != PORT_UNKNOWN &&
ser->type != PORT_AR933X)
return -EINVAL;
if (ser->irq < 0 || ser->irq >= NR_IRQS)
return -EINVAL;
if (ser->baud_base < up->min_baud ||
ser->baud_base > up->max_baud)
return -EINVAL;
return 0;
}
static const struct uart_ops ar933x_uart_ops = {
.tx_empty = ar933x_uart_tx_empty,
.set_mctrl = ar933x_uart_set_mctrl,
.get_mctrl = ar933x_uart_get_mctrl,
.stop_tx = ar933x_uart_stop_tx,
.start_tx = ar933x_uart_start_tx,
.stop_rx = ar933x_uart_stop_rx,
.break_ctl = ar933x_uart_break_ctl,
.startup = ar933x_uart_startup,
.shutdown = ar933x_uart_shutdown,
.set_termios = ar933x_uart_set_termios,
.type = ar933x_uart_type,
.release_port = ar933x_uart_release_port,
.request_port = ar933x_uart_request_port,
.config_port = ar933x_uart_config_port,
.verify_port = ar933x_uart_verify_port,
};
static int ar933x_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
if (port->rs485.flags & SER_RS485_ENABLED)
gpiod_set_value(up->rts_gpiod,
!!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND));
return 0;
}
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
static struct ar933x_uart_port *
ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
static void ar933x_uart_wait_xmitr(struct ar933x_uart_port *up)
{
unsigned int status;
unsigned int timeout = 60000;
/* Wait up to 60ms for the character(s) to be sent. */
do {
status = ar933x_uart_read(up, AR933X_UART_DATA_REG);
if (--timeout == 0)
break;
udelay(1);
} while ((status & AR933X_UART_DATA_TX_CSR) == 0);
}
static void ar933x_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
ar933x_uart_wait_xmitr(up);
ar933x_uart_putc(up, ch);
}
static void ar933x_uart_console_write(struct console *co, const char *s,
unsigned int count)
{
struct ar933x_uart_port *up = ar933x_console_ports[co->index];
unsigned long flags;
unsigned int int_en;
int locked = 1;
local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock(&up->port.lock);
else
spin_lock(&up->port.lock);
/*
* First save the IER then disable the interrupts
*/
int_en = ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, 0);
uart_console_write(&up->port, s, count, ar933x_uart_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
ar933x_uart_wait_xmitr(up);
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, int_en);
ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
}
static int ar933x_uart_console_setup(struct console *co, char *options)
{
struct ar933x_uart_port *up;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= CONFIG_SERIAL_AR933X_NR_UARTS)
return -EINVAL;
up = ar933x_console_ports[co->index];
if (!up)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
static struct console ar933x_uart_console = {
.name = "ttyATH",
.write = ar933x_uart_console_write,
.device = uart_console_device,
.setup = ar933x_uart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &ar933x_uart_driver,
};
#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
static struct uart_driver ar933x_uart_driver = {
.owner = THIS_MODULE,
.driver_name = DRIVER_NAME,
.dev_name = "ttyATH",
.nr = CONFIG_SERIAL_AR933X_NR_UARTS,
.cons = NULL, /* filled in at runtime */
};
static const struct serial_rs485 ar933x_no_rs485 = {};
static const struct serial_rs485 ar933x_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
};
static int ar933x_uart_probe(struct platform_device *pdev)
{
struct ar933x_uart_port *up;
struct uart_port *port;
struct resource *mem_res;
struct device_node *np;
unsigned int baud;
int id;
int ret;
int irq;
np = pdev->dev.of_node;
if (IS_ENABLED(CONFIG_OF) && np) {
id = of_alias_get_id(np, "serial");
if (id < 0) {
dev_err(&pdev->dev, "unable to get alias id, err=%d\n",
id);
return id;
}
} else {
id = pdev->id;
if (id == -1)
id = 0;
}
if (id >= CONFIG_SERIAL_AR933X_NR_UARTS)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
up = devm_kzalloc(&pdev->dev, sizeof(struct ar933x_uart_port),
GFP_KERNEL);
if (!up)
return -ENOMEM;
up->clk = devm_clk_get(&pdev->dev, "uart");
if (IS_ERR(up->clk)) {
dev_err(&pdev->dev, "unable to get UART clock\n");
return PTR_ERR(up->clk);
}
port = &up->port;
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
ret = clk_prepare_enable(up->clk);
if (ret)
return ret;
port->uartclk = clk_get_rate(up->clk);
if (!port->uartclk) {
ret = -EINVAL;
goto err_disable_clk;
}
port->mapbase = mem_res->start;
port->line = id;
port->irq = irq;
port->dev = &pdev->dev;
port->type = PORT_AR933X;
port->iotype = UPIO_MEM32;
port->regshift = 2;
port->fifosize = AR933X_UART_FIFO_SIZE;
port->ops = &ar933x_uart_ops;
port->rs485_config = ar933x_config_rs485;
port->rs485_supported = ar933x_rs485_supported;
baud = ar933x_uart_get_baud(port->uartclk, AR933X_UART_MAX_SCALE, 1);
up->min_baud = max_t(unsigned int, baud, AR933X_UART_MIN_BAUD);
baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
ret = uart_get_rs485_mode(port);
if (ret)
goto err_disable_clk;
up->gpios = mctrl_gpio_init(port, 0);
if (IS_ERR(up->gpios) && PTR_ERR(up->gpios) != -ENOSYS) {
ret = PTR_ERR(up->gpios);
goto err_disable_clk;
}
up->rts_gpiod = mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS);
if (!up->rts_gpiod) {
port->rs485_supported = ar933x_no_rs485;
if (port->rs485.flags & SER_RS485_ENABLED) {
dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n");
port->rs485.flags &= ~SER_RS485_ENABLED;
}
}
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
ar933x_console_ports[up->port.line] = up;
#endif
ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
if (ret)
goto err_disable_clk;
platform_set_drvdata(pdev, up);
return 0;
err_disable_clk:
clk_disable_unprepare(up->clk);
return ret;
}
static int ar933x_uart_remove(struct platform_device *pdev)
{
struct ar933x_uart_port *up;
up = platform_get_drvdata(pdev);
if (up) {
uart_remove_one_port(&ar933x_uart_driver, &up->port);
clk_disable_unprepare(up->clk);
}
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id ar933x_uart_of_ids[] = {
{ .compatible = "qca,ar9330-uart" },
{},
};
MODULE_DEVICE_TABLE(of, ar933x_uart_of_ids);
#endif
static struct platform_driver ar933x_uart_platform_driver = {
.probe = ar933x_uart_probe,
.remove = ar933x_uart_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ar933x_uart_of_ids),
},
};
static int __init ar933x_uart_init(void)
{
int ret;
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
ar933x_uart_driver.cons = &ar933x_uart_console;
#endif
ret = uart_register_driver(&ar933x_uart_driver);
if (ret)
goto err_out;
ret = platform_driver_register(&ar933x_uart_platform_driver);
if (ret)
goto err_unregister_uart_driver;
return 0;
err_unregister_uart_driver:
uart_unregister_driver(&ar933x_uart_driver);
err_out:
return ret;
}
static void __exit ar933x_uart_exit(void)
{
platform_driver_unregister(&ar933x_uart_platform_driver);
uart_unregister_driver(&ar933x_uart_driver);
}
module_init(ar933x_uart_init);
module_exit(ar933x_uart_exit);
MODULE_DESCRIPTION("Atheros AR933X UART driver");
MODULE_AUTHOR("Gabor Juhos <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/tty/serial/ar933x_uart.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zs.c: Serial port driver for IOASIC DECstations.
*
* Derived from drivers/sbus/char/sunserial.c by Paul Mackerras.
* Derived from drivers/macintosh/macserial.c by Harald Koerfgen.
*
* DECstation changes
* Copyright (C) 1998-2000 Harald Koerfgen
* Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
*
* For the rest of the code the original Copyright applies:
* Copyright (C) 1996 Paul Mackerras ([email protected])
* Copyright (C) 1995 David S. Miller ([email protected])
*
*
* Note: for IOASIC systems the wiring is as follows:
*
* mouse/keyboard:
* DIN-7 MJ-4 signal SCC
* 2 1 TxD <- A.TxD
* 3 4 RxD -> A.RxD
*
* EIA-232/EIA-423:
* DB-25 MMJ-6 signal SCC
* 2 2 TxD <- B.TxD
* 3 5 RxD -> B.RxD
* 4 RTS <- ~A.RTS
* 5 CTS -> ~B.CTS
* 6 6 DSR -> ~A.SYNC
* 8 CD -> ~B.DCD
* 12 DSRS(DCE) -> ~A.CTS (*)
* 15 TxC -> B.TxC
* 17 RxC -> B.RxC
* 20 1 DTR <- ~A.DTR
* 22 RI -> ~A.DCD
* 23 DSRS(DTE) <- ~B.RTS
*
* (*) EIA-232 defines the signal at this pin to be SCD, while DSRS(DCE)
* is shared with DSRS(DTE) at pin 23.
*
* As you can immediately notice the wiring of the RTS, DTR and DSR signals
* is a bit odd. This makes the handling of port B unnecessarily
* complicated and prevents the use of some automatic modes of operation.
*/
#include <linux/bug.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irqflags.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <asm/dec/interrupts.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/system.h>
#include "zs.h"
MODULE_AUTHOR("Maciej W. Rozycki <[email protected]>");
MODULE_DESCRIPTION("DECstation Z85C30 serial driver");
MODULE_LICENSE("GPL");
static char zs_name[] __initdata = "DECstation Z85C30 serial driver version ";
static char zs_version[] __initdata = "0.10";
/*
* It would be nice to dynamically allocate everything that
* depends on ZS_NUM_SCCS, so we could support any number of
* Z85C30s, but for now...
*/
#define ZS_NUM_SCCS 2 /* Max # of ZS chips supported. */
#define ZS_NUM_CHAN 2 /* 2 channels per chip. */
#define ZS_CHAN_A 0 /* Index of the channel A. */
#define ZS_CHAN_B 1 /* Index of the channel B. */
#define ZS_CHAN_IO_SIZE 8 /* IOMEM space size. */
#define ZS_CHAN_IO_STRIDE 4 /* Register alignment. */
#define ZS_CHAN_IO_OFFSET 1 /* The SCC resides on the high byte
of the 16-bit IOBUS. */
#define ZS_CLOCK 7372800 /* Z85C30 PCLK input clock rate. */
#define to_zport(uport) container_of(uport, struct zs_port, port)
struct zs_parms {
resource_size_t scc[ZS_NUM_SCCS];
int irq[ZS_NUM_SCCS];
};
static struct zs_scc zs_sccs[ZS_NUM_SCCS];
static u8 zs_init_regs[ZS_NUM_REGS] __initdata = {
0, /* write 0 */
PAR_SPEC, /* write 1 */
0, /* write 2 */
0, /* write 3 */
X16CLK | SB1, /* write 4 */
0, /* write 5 */
0, 0, 0, /* write 6, 7, 8 */
MIE | DLC | NV, /* write 9 */
NRZ, /* write 10 */
TCBR | RCBR, /* write 11 */
0, 0, /* BRG time constant, write 12 + 13 */
BRSRC | BRENABL, /* write 14 */
0, /* write 15 */
};
/*
* Debugging.
*/
#undef ZS_DEBUG_REGS
/*
* Reading and writing Z85C30 registers.
*/
static void recovery_delay(void)
{
udelay(2);
}
static u8 read_zsreg(struct zs_port *zport, int reg)
{
void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET;
u8 retval;
if (reg != 0) {
writeb(reg & 0xf, control);
fast_iob();
recovery_delay();
}
retval = readb(control);
recovery_delay();
return retval;
}
static void write_zsreg(struct zs_port *zport, int reg, u8 value)
{
void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET;
if (reg != 0) {
writeb(reg & 0xf, control);
fast_iob();
recovery_delay();
}
writeb(value, control);
fast_iob();
recovery_delay();
}
static u8 read_zsdata(struct zs_port *zport)
{
void __iomem *data = zport->port.membase +
ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET;
u8 retval;
retval = readb(data);
recovery_delay();
return retval;
}
static void write_zsdata(struct zs_port *zport, u8 value)
{
void __iomem *data = zport->port.membase +
ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET;
writeb(value, data);
fast_iob();
recovery_delay();
}
#ifdef ZS_DEBUG_REGS
void zs_dump(void)
{
struct zs_port *zport;
int i, j;
for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) {
zport = &zs_sccs[i / ZS_NUM_CHAN].zport[i % ZS_NUM_CHAN];
if (!zport->scc)
continue;
for (j = 0; j < 16; j++)
printk("W%-2d = 0x%02x\t", j, zport->regs[j]);
printk("\n");
for (j = 0; j < 16; j++)
printk("R%-2d = 0x%02x\t", j, read_zsreg(zport, j));
printk("\n\n");
}
}
#endif
static void zs_spin_lock_cond_irq(spinlock_t *lock, int irq)
{
if (irq)
spin_lock_irq(lock);
else
spin_lock(lock);
}
static void zs_spin_unlock_cond_irq(spinlock_t *lock, int irq)
{
if (irq)
spin_unlock_irq(lock);
else
spin_unlock(lock);
}
static int zs_receive_drain(struct zs_port *zport)
{
int loops = 10000;
while ((read_zsreg(zport, R0) & Rx_CH_AV) && --loops)
read_zsdata(zport);
return loops;
}
static int zs_transmit_drain(struct zs_port *zport, int irq)
{
struct zs_scc *scc = zport->scc;
int loops = 10000;
while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && --loops) {
zs_spin_unlock_cond_irq(&scc->zlock, irq);
udelay(2);
zs_spin_lock_cond_irq(&scc->zlock, irq);
}
return loops;
}
static int zs_line_drain(struct zs_port *zport, int irq)
{
struct zs_scc *scc = zport->scc;
int loops = 10000;
while (!(read_zsreg(zport, R1) & ALL_SNT) && --loops) {
zs_spin_unlock_cond_irq(&scc->zlock, irq);
udelay(2);
zs_spin_lock_cond_irq(&scc->zlock, irq);
}
return loops;
}
static void load_zsregs(struct zs_port *zport, u8 *regs, int irq)
{
/* Let the current transmission finish. */
zs_line_drain(zport, irq);
/* Load 'em up. */
write_zsreg(zport, R3, regs[3] & ~RxENABLE);
write_zsreg(zport, R5, regs[5] & ~TxENAB);
write_zsreg(zport, R4, regs[4]);
write_zsreg(zport, R9, regs[9]);
write_zsreg(zport, R1, regs[1]);
write_zsreg(zport, R2, regs[2]);
write_zsreg(zport, R10, regs[10]);
write_zsreg(zport, R14, regs[14] & ~BRENABL);
write_zsreg(zport, R11, regs[11]);
write_zsreg(zport, R12, regs[12]);
write_zsreg(zport, R13, regs[13]);
write_zsreg(zport, R14, regs[14]);
write_zsreg(zport, R15, regs[15]);
if (regs[3] & RxENABLE)
write_zsreg(zport, R3, regs[3]);
if (regs[5] & TxENAB)
write_zsreg(zport, R5, regs[5]);
}
/*
* Status handling routines.
*/
/*
* zs_tx_empty() -- get the transmitter empty status
*
* Purpose: Lets the user call ioctl() to find out when the UART is
* physically empty. On bus types like RS485, the transmitter must
* release the bus after transmitting. This must be done when the
* transmit shift register is empty, not when the transmit holding
* register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
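/*
* For illustration, a user-space RS485 driver would reach this
* through the TIOCSERGETLSR ioctl; a minimal sketch (hypothetical
* fd, error handling omitted):
*
*	unsigned int lsr;
*
*	if (!ioctl(fd, TIOCSERGETLSR, &lsr) && (lsr & TIOCSER_TEMT))
*		release_the_bus();	/* hypothetical helper */
*/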
static unsigned int zs_tx_empty(struct uart_port *uport)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
unsigned long flags;
u8 status;
spin_lock_irqsave(&scc->zlock, flags);
status = read_zsreg(zport, R1);
spin_unlock_irqrestore(&scc->zlock, flags);
return status & ALL_SNT ? TIOCSER_TEMT : 0;
}
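/*
* Modem status is scattered across both channels (see the wiring
* table at the top of this file): CTS and DCD are sensed on the B
* side, while RI and DSR arrive on the A-side DCD and SYNC inputs.
*/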
static unsigned int zs_raw_get_ab_mctrl(struct zs_port *zport_a,
struct zs_port *zport_b)
{
u8 status_a, status_b;
unsigned int mctrl;
status_a = read_zsreg(zport_a, R0);
status_b = read_zsreg(zport_b, R0);
mctrl = ((status_b & CTS) ? TIOCM_CTS : 0) |
((status_b & DCD) ? TIOCM_CAR : 0) |
((status_a & DCD) ? TIOCM_RNG : 0) |
((status_a & SYNC_HUNT) ? TIOCM_DSR : 0);
return mctrl;
}
static unsigned int zs_raw_get_mctrl(struct zs_port *zport)
{
struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A];
return zport != zport_a ? zs_raw_get_ab_mctrl(zport_a, zport) : 0;
}
static unsigned int zs_raw_xor_mctrl(struct zs_port *zport)
{
struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A];
unsigned int mmask, mctrl, delta;
u8 mask_a, mask_b;
if (zport == zport_a)
return 0;
mask_a = zport_a->regs[15];
mask_b = zport->regs[15];
mmask = ((mask_b & CTSIE) ? TIOCM_CTS : 0) |
((mask_b & DCDIE) ? TIOCM_CAR : 0) |
((mask_a & DCDIE) ? TIOCM_RNG : 0) |
((mask_a & SYNCIE) ? TIOCM_DSR : 0);
mctrl = zport->mctrl;
if (mmask) {
mctrl &= ~mmask;
mctrl |= zs_raw_get_ab_mctrl(zport_a, zport) & mmask;
}
delta = mctrl ^ zport->mctrl;
if (delta)
zport->mctrl = mctrl;
return delta;
}
static unsigned int zs_get_mctrl(struct uart_port *uport)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
unsigned int mctrl;
spin_lock(&scc->zlock);
mctrl = zs_raw_get_mctrl(zport);
spin_unlock(&scc->zlock);
return mctrl;
}
static void zs_set_mctrl(struct uart_port *uport, unsigned int mctrl)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
u8 oldloop, newloop;
spin_lock(&scc->zlock);
if (zport != zport_a) {
if (mctrl & TIOCM_DTR)
zport_a->regs[5] |= DTR;
else
zport_a->regs[5] &= ~DTR;
if (mctrl & TIOCM_RTS)
zport_a->regs[5] |= RTS;
else
zport_a->regs[5] &= ~RTS;
write_zsreg(zport_a, R5, zport_a->regs[5]);
}
/* Rarely modified, so don't poke at hardware unless necessary. */
oldloop = zport->regs[14];
newloop = oldloop;
if (mctrl & TIOCM_LOOP)
newloop |= LOOPBAK;
else
newloop &= ~LOOPBAK;
if (newloop != oldloop) {
zport->regs[14] = newloop;
write_zsreg(zport, R14, zport->regs[14]);
}
spin_unlock(&scc->zlock);
}
static void zs_raw_stop_tx(struct zs_port *zport)
{
write_zsreg(zport, R0, RES_Tx_P);
zport->tx_stopped = 1;
}
static void zs_stop_tx(struct uart_port *uport)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
spin_lock(&scc->zlock);
zs_raw_stop_tx(zport);
spin_unlock(&scc->zlock);
}
static void zs_raw_transmit_chars(struct zs_port *);
static void zs_start_tx(struct uart_port *uport)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
spin_lock(&scc->zlock);
if (zport->tx_stopped) {
zs_transmit_drain(zport, 0);
zport->tx_stopped = 0;
zs_raw_transmit_chars(zport);
}
spin_unlock(&scc->zlock);
}
static void zs_stop_rx(struct uart_port *uport)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
spin_lock(&scc->zlock);
zport->regs[15] &= ~BRKIE;
zport->regs[1] &= ~(RxINT_MASK | TxINT_ENAB);
zport->regs[1] |= RxINT_DISAB;
if (zport != zport_a) {
/* A-side DCD tracks RI and SYNC tracks DSR. */
zport_a->regs[15] &= ~(DCDIE | SYNCIE);
write_zsreg(zport_a, R15, zport_a->regs[15]);
if (!(zport_a->regs[15] & BRKIE)) {
zport_a->regs[1] &= ~EXT_INT_ENAB;
write_zsreg(zport_a, R1, zport_a->regs[1]);
}
/* This-side DCD tracks DCD and CTS tracks CTS. */
zport->regs[15] &= ~(DCDIE | CTSIE);
zport->regs[1] &= ~EXT_INT_ENAB;
} else {
/* DCD tracks RI and SYNC tracks DSR for the B side. */
if (!(zport->regs[15] & (DCDIE | SYNCIE)))
zport->regs[1] &= ~EXT_INT_ENAB;
}
write_zsreg(zport, R15, zport->regs[15]);
write_zsreg(zport, R1, zport->regs[1]);
spin_unlock(&scc->zlock);
}
static void zs_enable_ms(struct uart_port *uport)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
if (zport == zport_a)
return;
spin_lock(&scc->zlock);
/* Clear Ext interrupts if not being handled already. */
if (!(zport_a->regs[1] & EXT_INT_ENAB))
write_zsreg(zport_a, R0, RES_EXT_INT);
/* A-side DCD tracks RI and SYNC tracks DSR. */
zport_a->regs[1] |= EXT_INT_ENAB;
zport_a->regs[15] |= DCDIE | SYNCIE;
/* This-side DCD tracks DCD and CTS tracks CTS. */
zport->regs[15] |= DCDIE | CTSIE;
zs_raw_xor_mctrl(zport);
write_zsreg(zport_a, R1, zport_a->regs[1]);
write_zsreg(zport_a, R15, zport_a->regs[15]);
write_zsreg(zport, R15, zport->regs[15]);
spin_unlock(&scc->zlock);
}
static void zs_break_ctl(struct uart_port *uport, int break_state)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
unsigned long flags;
spin_lock_irqsave(&scc->zlock, flags);
if (break_state == -1)
zport->regs[5] |= SND_BRK;
else
zport->regs[5] &= ~SND_BRK;
write_zsreg(zport, R5, zport->regs[5]);
spin_unlock_irqrestore(&scc->zlock, flags);
}
/*
* Interrupt handling routines.
*/
#define Rx_BRK 0x0100 /* BREAK event software flag. */
#define Rx_SYS 0x0200 /* SysRq event software flag. */
static void zs_receive_chars(struct zs_port *zport)
{
struct uart_port *uport = &zport->port;
struct zs_scc *scc = zport->scc;
struct uart_icount *icount;
unsigned int avail, status;
int count;
u8 ch, flag;
for (count = 16; count; count--) {
spin_lock(&scc->zlock);
avail = read_zsreg(zport, R0) & Rx_CH_AV;
spin_unlock(&scc->zlock);
if (!avail)
break;
spin_lock(&scc->zlock);
status = read_zsreg(zport, R1) & (Rx_OVR | FRM_ERR | PAR_ERR);
ch = read_zsdata(zport);
spin_unlock(&scc->zlock);
flag = TTY_NORMAL;
icount = &uport->icount;
icount->rx++;
/* Handle the null char received when BREAK is removed. */
if (!ch)
status |= zport->tty_break;
if (unlikely(status &
(Rx_OVR | FRM_ERR | PAR_ERR | Rx_SYS | Rx_BRK))) {
zport->tty_break = 0;
/* Reset the error indication. */
if (status & (Rx_OVR | FRM_ERR | PAR_ERR)) {
spin_lock(&scc->zlock);
write_zsreg(zport, R0, ERR_RES);
spin_unlock(&scc->zlock);
}
if (status & (Rx_SYS | Rx_BRK)) {
icount->brk++;
/* SysRq discards the null char. */
if (status & Rx_SYS)
continue;
} else if (status & FRM_ERR)
icount->frame++;
else if (status & PAR_ERR)
icount->parity++;
if (status & Rx_OVR)
icount->overrun++;
status &= uport->read_status_mask;
if (status & Rx_BRK)
flag = TTY_BREAK;
else if (status & FRM_ERR)
flag = TTY_FRAME;
else if (status & PAR_ERR)
flag = TTY_PARITY;
}
if (uart_handle_sysrq_char(uport, ch))
continue;
uart_insert_char(uport, status, Rx_OVR, ch, flag);
}
tty_flip_buffer_push(&uport->state->port);
}
static void zs_raw_transmit_chars(struct zs_port *zport)
{
struct circ_buf *xmit = &zport->port.state->xmit;
/* XON/XOFF chars. */
if (zport->port.x_char) {
write_zsdata(zport, zport->port.x_char);
zport->port.icount.tx++;
zport->port.x_char = 0;
return;
}
/* If nothing to do or stopped or hardware stopped. */
if (uart_circ_empty(xmit) || uart_tx_stopped(&zport->port)) {
zs_raw_stop_tx(zport);
return;
}
/* Send char. */
write_zsdata(zport, xmit->buf[xmit->tail]);
uart_xmit_advance(&zport->port, 1);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&zport->port);
/* Are we done? */
if (uart_circ_empty(xmit))
zs_raw_stop_tx(zport);
}
static void zs_transmit_chars(struct zs_port *zport)
{
struct zs_scc *scc = zport->scc;
spin_lock(&scc->zlock);
zs_raw_transmit_chars(zport);
spin_unlock(&scc->zlock);
}
static void zs_status_handle(struct zs_port *zport, struct zs_port *zport_a)
{
struct uart_port *uport = &zport->port;
struct zs_scc *scc = zport->scc;
unsigned int delta;
u8 status, brk;
spin_lock(&scc->zlock);
/* Get status from Read Register 0. */
status = read_zsreg(zport, R0);
if (zport->regs[15] & BRKIE) {
brk = status & BRK_ABRT;
if (brk && !zport->brk) {
spin_unlock(&scc->zlock);
if (uart_handle_break(uport))
zport->tty_break = Rx_SYS;
else
zport->tty_break = Rx_BRK;
spin_lock(&scc->zlock);
}
zport->brk = brk;
}
if (zport != zport_a) {
delta = zs_raw_xor_mctrl(zport);
spin_unlock(&scc->zlock);
if (delta & TIOCM_CTS)
uart_handle_cts_change(uport,
zport->mctrl & TIOCM_CTS);
if (delta & TIOCM_CAR)
uart_handle_dcd_change(uport,
zport->mctrl & TIOCM_CAR);
if (delta & TIOCM_RNG)
uport->icount.rng++;
if (delta & TIOCM_DSR)
uport->icount.dsr++;
if (delta)
wake_up_interruptible(&uport->state->port.delta_msr_wait);
spin_lock(&scc->zlock);
}
/* Clear the status condition... */
write_zsreg(zport, R0, RES_EXT_INT);
spin_unlock(&scc->zlock);
}
/*
* This is the Z85C30 driver's generic interrupt routine.
*/
static irqreturn_t zs_interrupt(int irq, void *dev_id)
{
struct zs_scc *scc = dev_id;
struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
struct zs_port *zport_b = &scc->zport[ZS_CHAN_B];
irqreturn_t status = IRQ_NONE;
u8 zs_intreg;
int count;
/*
* NOTE: Read register 3 holds the interrupt status for both
* channels of each chip, but it must be read from the A
* channel; the value is only valid when read from there.
* Yes... broken hardware...
*/
for (count = 16; count; count--) {
spin_lock(&scc->zlock);
zs_intreg = read_zsreg(zport_a, R3);
spin_unlock(&scc->zlock);
if (!zs_intreg)
break;
/*
* We do not like losing characters, so we prioritise
* interrupt sources a little bit differently from how
* the SCC would, were it allowed to.
*/
if (zs_intreg & CHBRxIP)
zs_receive_chars(zport_b);
if (zs_intreg & CHARxIP)
zs_receive_chars(zport_a);
if (zs_intreg & CHBEXT)
zs_status_handle(zport_b, zport_a);
if (zs_intreg & CHAEXT)
zs_status_handle(zport_a, zport_a);
if (zs_intreg & CHBTxIP)
zs_transmit_chars(zport_b);
if (zs_intreg & CHATxIP)
zs_transmit_chars(zport_a);
status = IRQ_HANDLED;
}
return status;
}
/*
* Finally, routines used to initialize the serial port.
*/
static int zs_startup(struct uart_port *uport)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
unsigned long flags;
int irq_guard;
int ret;
irq_guard = atomic_add_return(1, &scc->irq_guard);
if (irq_guard == 1) {
ret = request_irq(zport->port.irq, zs_interrupt,
IRQF_SHARED, "scc", scc);
if (ret) {
atomic_add(-1, &scc->irq_guard);
printk(KERN_ERR "zs: can't get irq %d\n",
zport->port.irq);
return ret;
}
}
spin_lock_irqsave(&scc->zlock, flags);
/* Clear the receive FIFO. */
zs_receive_drain(zport);
/* Clear the interrupt registers. */
write_zsreg(zport, R0, ERR_RES);
write_zsreg(zport, R0, RES_Tx_P);
/* But Ext only if not being handled already. */
if (!(zport->regs[1] & EXT_INT_ENAB))
write_zsreg(zport, R0, RES_EXT_INT);
/* Finally, enable sequencing and interrupts. */
zport->regs[1] &= ~RxINT_MASK;
zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB;
zport->regs[3] |= RxENABLE;
zport->regs[15] |= BRKIE;
write_zsreg(zport, R1, zport->regs[1]);
write_zsreg(zport, R3, zport->regs[3]);
write_zsreg(zport, R5, zport->regs[5]);
write_zsreg(zport, R15, zport->regs[15]);
/* Record the current state of RR0. */
zport->mctrl = zs_raw_get_mctrl(zport);
zport->brk = read_zsreg(zport, R0) & BRK_ABRT;
zport->tx_stopped = 1;
spin_unlock_irqrestore(&scc->zlock, flags);
return 0;
}
static void zs_shutdown(struct uart_port *uport)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
unsigned long flags;
int irq_guard;
spin_lock_irqsave(&scc->zlock, flags);
zport->regs[3] &= ~RxENABLE;
write_zsreg(zport, R5, zport->regs[5]);
write_zsreg(zport, R3, zport->regs[3]);
spin_unlock_irqrestore(&scc->zlock, flags);
irq_guard = atomic_add_return(-1, &scc->irq_guard);
if (!irq_guard)
free_irq(zport->port.irq, scc);
}
static void zs_reset(struct zs_port *zport)
{
struct zs_scc *scc = zport->scc;
int irq;
unsigned long flags;
spin_lock_irqsave(&scc->zlock, flags);
irq = !irqs_disabled_flags(flags);
if (!scc->initialised) {
/* Reset the pointer first, just in case... */
read_zsreg(zport, R0);
/* And let the current transmission finish. */
zs_line_drain(zport, irq);
write_zsreg(zport, R9, FHWRES);
udelay(10);
write_zsreg(zport, R9, 0);
scc->initialised = 1;
}
load_zsregs(zport, zport->regs, irq);
spin_unlock_irqrestore(&scc->zlock, flags);
}
static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
const struct ktermios *old_termios)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
int irq;
unsigned int baud, brg;
unsigned long flags;
spin_lock_irqsave(&scc->zlock, flags);
irq = !irqs_disabled_flags(flags);
/* Byte size. */
zport->regs[3] &= ~RxNBITS_MASK;
zport->regs[5] &= ~TxNBITS_MASK;
switch (termios->c_cflag & CSIZE) {
case CS5:
zport->regs[3] |= Rx5;
zport->regs[5] |= Tx5;
break;
case CS6:
zport->regs[3] |= Rx6;
zport->regs[5] |= Tx6;
break;
case CS7:
zport->regs[3] |= Rx7;
zport->regs[5] |= Tx7;
break;
case CS8:
default:
zport->regs[3] |= Rx8;
zport->regs[5] |= Tx8;
break;
}
/* Parity and stop bits. */
zport->regs[4] &= ~(XCLK_MASK | SB_MASK | PAR_ENA | PAR_EVEN);
if (termios->c_cflag & CSTOPB)
zport->regs[4] |= SB2;
else
zport->regs[4] |= SB1;
if (termios->c_cflag & PARENB)
zport->regs[4] |= PAR_ENA;
if (!(termios->c_cflag & PARODD))
zport->regs[4] |= PAR_EVEN;
switch (zport->clk_mode) {
case 64:
zport->regs[4] |= X64CLK;
break;
case 32:
zport->regs[4] |= X32CLK;
break;
case 16:
zport->regs[4] |= X16CLK;
break;
case 1:
zport->regs[4] |= X1CLK;
break;
default:
BUG();
}
baud = uart_get_baud_rate(uport, termios, old_termios, 0,
uport->uartclk / zport->clk_mode / 4);
brg = ZS_BPS_TO_BRG(baud, uport->uartclk / zport->clk_mode);
zport->regs[12] = brg & 0xff;
zport->regs[13] = (brg >> 8) & 0xff;
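/*
* This is the standard Zilog BRG formula, time constant =
* clk / (2 * baud) - 2, with clk already divided down by the clock
* mode above; the 16-bit result is split across WR12 and WR13.
*/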
uart_update_timeout(uport, termios->c_cflag, baud);
uport->read_status_mask = Rx_OVR;
if (termios->c_iflag & INPCK)
uport->read_status_mask |= FRM_ERR | PAR_ERR;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
uport->read_status_mask |= Rx_BRK;
uport->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
uport->ignore_status_mask |= FRM_ERR | PAR_ERR;
if (termios->c_iflag & IGNBRK) {
uport->ignore_status_mask |= Rx_BRK;
if (termios->c_iflag & IGNPAR)
uport->ignore_status_mask |= Rx_OVR;
}
if (termios->c_cflag & CREAD)
zport->regs[3] |= RxENABLE;
else
zport->regs[3] &= ~RxENABLE;
if (zport != zport_a) {
if (!(termios->c_cflag & CLOCAL))
zport->regs[15] |= DCDIE;
else
zport->regs[15] &= ~DCDIE;
if (termios->c_cflag & CRTSCTS)
zport->regs[15] |= CTSIE;
else
zport->regs[15] &= ~CTSIE;
zs_raw_xor_mctrl(zport);
}
/* Load up the new values. */
load_zsregs(zport, zport->regs, irq);
spin_unlock_irqrestore(&scc->zlock, flags);
}
/*
* Hack alert!
* Required solely so that the initial PROM-based console
* works undisturbed in parallel with this one.
*/
static void zs_pm(struct uart_port *uport, unsigned int state,
unsigned int oldstate)
{
struct zs_port *zport = to_zport(uport);
if (state < 3)
zport->regs[5] |= TxENAB;
else
zport->regs[5] &= ~TxENAB;
write_zsreg(zport, R5, zport->regs[5]);
}
static const char *zs_type(struct uart_port *uport)
{
return "Z85C30 SCC";
}
static void zs_release_port(struct uart_port *uport)
{
iounmap(uport->membase);
uport->membase = NULL;
release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE);
}
static int zs_map_port(struct uart_port *uport)
{
if (!uport->membase)
uport->membase = ioremap(uport->mapbase,
ZS_CHAN_IO_SIZE);
if (!uport->membase) {
printk(KERN_ERR "zs: Cannot map MMIO\n");
return -ENOMEM;
}
return 0;
}
static int zs_request_port(struct uart_port *uport)
{
int ret;
if (!request_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE, "scc")) {
printk(KERN_ERR "zs: Unable to reserve MMIO resource\n");
return -EBUSY;
}
ret = zs_map_port(uport);
if (ret) {
release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE);
return ret;
}
return 0;
}
static void zs_config_port(struct uart_port *uport, int flags)
{
struct zs_port *zport = to_zport(uport);
if (flags & UART_CONFIG_TYPE) {
if (zs_request_port(uport))
return;
uport->type = PORT_ZS;
zs_reset(zport);
}
}
static int zs_verify_port(struct uart_port *uport, struct serial_struct *ser)
{
struct zs_port *zport = to_zport(uport);
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_ZS)
ret = -EINVAL;
if (ser->irq != uport->irq)
ret = -EINVAL;
if (ser->baud_base != uport->uartclk / zport->clk_mode / 4)
ret = -EINVAL;
return ret;
}
static const struct uart_ops zs_ops = {
.tx_empty = zs_tx_empty,
.set_mctrl = zs_set_mctrl,
.get_mctrl = zs_get_mctrl,
.stop_tx = zs_stop_tx,
.start_tx = zs_start_tx,
.stop_rx = zs_stop_rx,
.enable_ms = zs_enable_ms,
.break_ctl = zs_break_ctl,
.startup = zs_startup,
.shutdown = zs_shutdown,
.set_termios = zs_set_termios,
.pm = zs_pm,
.type = zs_type,
.release_port = zs_release_port,
.request_port = zs_request_port,
.config_port = zs_config_port,
.verify_port = zs_verify_port,
};
/*
* Initialize Z85C30 port structures.
*/
static int __init zs_probe_sccs(void)
{
static int probed;
struct zs_parms zs_parms;
int chip, side, irq;
int n_chips = 0;
int i;
if (probed)
return 0;
irq = dec_interrupt[DEC_IRQ_SCC0];
if (irq >= 0) {
zs_parms.scc[n_chips] = IOASIC_SCC0;
zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC0];
n_chips++;
}
irq = dec_interrupt[DEC_IRQ_SCC1];
if (irq >= 0) {
zs_parms.scc[n_chips] = IOASIC_SCC1;
zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC1];
n_chips++;
}
if (!n_chips)
return -ENXIO;
probed = 1;
for (chip = 0; chip < n_chips; chip++) {
spin_lock_init(&zs_sccs[chip].zlock);
for (side = 0; side < ZS_NUM_CHAN; side++) {
struct zs_port *zport = &zs_sccs[chip].zport[side];
struct uart_port *uport = &zport->port;
zport->scc = &zs_sccs[chip];
zport->clk_mode = 16;
uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ZS_CONSOLE);
uport->irq = zs_parms.irq[chip];
uport->uartclk = ZS_CLOCK;
uport->fifosize = 1;
uport->iotype = UPIO_MEM;
uport->flags = UPF_BOOT_AUTOCONF;
uport->ops = &zs_ops;
uport->line = chip * ZS_NUM_CHAN + side;
uport->mapbase = dec_kn_slot_base +
zs_parms.scc[chip] +
(side ^ ZS_CHAN_B) * ZS_CHAN_IO_SIZE;
for (i = 0; i < ZS_NUM_REGS; i++)
zport->regs[i] = zs_init_regs[i];
}
}
return 0;
}
#ifdef CONFIG_SERIAL_ZS_CONSOLE
static void zs_console_putchar(struct uart_port *uport, unsigned char ch)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
int irq;
unsigned long flags;
spin_lock_irqsave(&scc->zlock, flags);
irq = !irqs_disabled_flags(flags);
if (zs_transmit_drain(zport, irq))
write_zsdata(zport, ch);
spin_unlock_irqrestore(&scc->zlock, flags);
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*/
static void zs_console_write(struct console *co, const char *s,
unsigned int count)
{
int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN;
struct zs_port *zport = &zs_sccs[chip].zport[side];
struct zs_scc *scc = zport->scc;
unsigned long flags;
u8 txint, txenb;
int irq;
/* Disable transmit interrupts and enable the transmitter. */
spin_lock_irqsave(&scc->zlock, flags);
txint = zport->regs[1];
txenb = zport->regs[5];
if (txint & TxINT_ENAB) {
zport->regs[1] = txint & ~TxINT_ENAB;
write_zsreg(zport, R1, zport->regs[1]);
}
if (!(txenb & TxENAB)) {
zport->regs[5] = txenb | TxENAB;
write_zsreg(zport, R5, zport->regs[5]);
}
spin_unlock_irqrestore(&scc->zlock, flags);
uart_console_write(&zport->port, s, count, zs_console_putchar);
/* Restore transmit interrupts and the transmitter enable. */
spin_lock_irqsave(&scc->zlock, flags);
irq = !irqs_disabled_flags(flags);
zs_line_drain(zport, irq);
if (!(txenb & TxENAB)) {
zport->regs[5] &= ~TxENAB;
write_zsreg(zport, R5, zport->regs[5]);
}
if (txint & TxINT_ENAB) {
zport->regs[1] |= TxINT_ENAB;
write_zsreg(zport, R1, zport->regs[1]);
/* Resume any transmission as the TxIP bit won't be set. */
if (!zport->tx_stopped)
zs_raw_transmit_chars(zport);
}
spin_unlock_irqrestore(&scc->zlock, flags);
}
/*
* Setup serial console baud/bits/parity. We do two things here:
* - construct a cflag setting for the first uart_open()
* - initialise the serial port
* Return non-zero if we didn't find a serial port.
*/
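/*
* The options string comes straight from the command line: e.g. a
* (hypothetical) console=ttyS2,9600n8 arrives here as "9600n8" and
* is decoded by uart_parse_options() below.
*/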
static int __init zs_console_setup(struct console *co, char *options)
{
int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN;
struct zs_port *zport = &zs_sccs[chip].zport[side];
struct uart_port *uport = &zport->port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
int ret;
ret = zs_map_port(uport);
if (ret)
return ret;
zs_reset(zport);
zs_pm(uport, 0, -1);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(uport, co, baud, parity, bits, flow);
}
static struct uart_driver zs_reg;
static struct console zs_console = {
.name = "ttyS",
.write = zs_console_write,
.device = uart_console_device,
.setup = zs_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &zs_reg,
};
/*
* Register console.
*/
static int __init zs_serial_console_init(void)
{
int ret;
ret = zs_probe_sccs();
if (ret)
return ret;
register_console(&zs_console);
return 0;
}
console_initcall(zs_serial_console_init);
#define SERIAL_ZS_CONSOLE &zs_console
#else
#define SERIAL_ZS_CONSOLE NULL
#endif /* CONFIG_SERIAL_ZS_CONSOLE */
static struct uart_driver zs_reg = {
.owner = THIS_MODULE,
.driver_name = "serial",
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
.nr = ZS_NUM_SCCS * ZS_NUM_CHAN,
.cons = SERIAL_ZS_CONSOLE,
};
/* zs_init initialises the driver. */
static int __init zs_init(void)
{
int i, ret;
pr_info("%s%s\n", zs_name, zs_version);
/* Find out how many Z85C30 SCCs we have. */
ret = zs_probe_sccs();
if (ret)
return ret;
ret = uart_register_driver(&zs_reg);
if (ret)
return ret;
for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) {
struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN];
struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN];
struct uart_port *uport = &zport->port;
if (zport->scc)
uart_add_one_port(&zs_reg, uport);
}
return 0;
}
static void __exit zs_exit(void)
{
int i;
for (i = ZS_NUM_SCCS * ZS_NUM_CHAN - 1; i >= 0; i--) {
struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN];
struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN];
struct uart_port *uport = &zport->port;
if (zport->scc)
uart_remove_one_port(&zs_reg, uport);
}
uart_unregister_driver(&zs_reg);
}
module_init(zs_init);
module_exit(zs_exit);
| linux-master | drivers/tty/serial/zs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Based on meson_uart.c, by AMLOGIC, INC.
*
* Copyright (C) 2014 Carlo Caione <[email protected]>
*/
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
/* Register offsets */
#define AML_UART_WFIFO 0x00
#define AML_UART_RFIFO 0x04
#define AML_UART_CONTROL 0x08
#define AML_UART_STATUS 0x0c
#define AML_UART_MISC 0x10
#define AML_UART_REG5 0x14
/* AML_UART_CONTROL bits */
#define AML_UART_TX_EN BIT(12)
#define AML_UART_RX_EN BIT(13)
#define AML_UART_TWO_WIRE_EN BIT(15)
#define AML_UART_STOP_BIT_LEN_MASK (0x03 << 16)
#define AML_UART_STOP_BIT_1SB (0x00 << 16)
#define AML_UART_STOP_BIT_2SB (0x01 << 16)
#define AML_UART_PARITY_TYPE BIT(18)
#define AML_UART_PARITY_EN BIT(19)
#define AML_UART_TX_RST BIT(22)
#define AML_UART_RX_RST BIT(23)
#define AML_UART_CLEAR_ERR BIT(24)
#define AML_UART_RX_INT_EN BIT(27)
#define AML_UART_TX_INT_EN BIT(28)
#define AML_UART_DATA_LEN_MASK (0x03 << 20)
#define AML_UART_DATA_LEN_8BIT (0x00 << 20)
#define AML_UART_DATA_LEN_7BIT (0x01 << 20)
#define AML_UART_DATA_LEN_6BIT (0x02 << 20)
#define AML_UART_DATA_LEN_5BIT (0x03 << 20)
/* AML_UART_STATUS bits */
#define AML_UART_PARITY_ERR BIT(16)
#define AML_UART_FRAME_ERR BIT(17)
#define AML_UART_TX_FIFO_WERR BIT(18)
#define AML_UART_RX_EMPTY BIT(20)
#define AML_UART_TX_FULL BIT(21)
#define AML_UART_TX_EMPTY BIT(22)
#define AML_UART_XMIT_BUSY BIT(25)
#define AML_UART_ERR (AML_UART_PARITY_ERR | \
AML_UART_FRAME_ERR | \
AML_UART_TX_FIFO_WERR)
/* AML_UART_MISC bits */
#define AML_UART_XMIT_IRQ(c) (((c) & 0xff) << 8)
#define AML_UART_RECV_IRQ(c) ((c) & 0xff)
/* AML_UART_REG5 bits */
#define AML_UART_BAUD_MASK 0x7fffff
#define AML_UART_BAUD_USE BIT(23)
#define AML_UART_BAUD_XTAL BIT(24)
#define AML_UART_BAUD_XTAL_DIV2 BIT(27)
#define AML_UART_PORT_NUM 12
#define AML_UART_PORT_OFFSET 6
#define AML_UART_POLL_USEC 5
#define AML_UART_TIMEOUT_USEC 10000
static struct uart_driver meson_uart_driver_ttyAML;
static struct uart_driver meson_uart_driver_ttyS;
static struct uart_port *meson_ports[AML_UART_PORT_NUM];
struct meson_uart_data {
struct uart_driver *uart_driver;
bool has_xtal_div2;
};
static void meson_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
static unsigned int meson_uart_get_mctrl(struct uart_port *port)
{
return TIOCM_CTS;
}
static unsigned int meson_uart_tx_empty(struct uart_port *port)
{
u32 val;
val = readl(port->membase + AML_UART_STATUS);
val &= (AML_UART_TX_EMPTY | AML_UART_XMIT_BUSY);
return (val == AML_UART_TX_EMPTY) ? TIOCSER_TEMT : 0;
}
static void meson_uart_stop_tx(struct uart_port *port)
{
u32 val;
val = readl(port->membase + AML_UART_CONTROL);
val &= ~AML_UART_TX_INT_EN;
writel(val, port->membase + AML_UART_CONTROL);
}
static void meson_uart_stop_rx(struct uart_port *port)
{
u32 val;
val = readl(port->membase + AML_UART_CONTROL);
val &= ~AML_UART_RX_EN;
writel(val, port->membase + AML_UART_CONTROL);
}
static void meson_uart_shutdown(struct uart_port *port)
{
unsigned long flags;
u32 val;
free_irq(port->irq, port);
spin_lock_irqsave(&port->lock, flags);
val = readl(port->membase + AML_UART_CONTROL);
val &= ~AML_UART_RX_EN;
val &= ~(AML_UART_RX_INT_EN | AML_UART_TX_INT_EN);
writel(val, port->membase + AML_UART_CONTROL);
spin_unlock_irqrestore(&port->lock, flags);
}
static void meson_uart_start_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
unsigned int ch;
u32 val;
if (uart_tx_stopped(port)) {
meson_uart_stop_tx(port);
return;
}
while (!(readl(port->membase + AML_UART_STATUS) & AML_UART_TX_FULL)) {
if (port->x_char) {
writel(port->x_char, port->membase + AML_UART_WFIFO);
port->icount.tx++;
port->x_char = 0;
continue;
}
if (uart_circ_empty(xmit))
break;
ch = xmit->buf[xmit->tail];
writel(ch, port->membase + AML_UART_WFIFO);
uart_xmit_advance(port, 1);
}
if (!uart_circ_empty(xmit)) {
val = readl(port->membase + AML_UART_CONTROL);
val |= AML_UART_TX_INT_EN;
writel(val, port->membase + AML_UART_CONTROL);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
static void meson_receive_chars(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
char flag;
u32 ostatus, status, ch, mode;
do {
flag = TTY_NORMAL;
port->icount.rx++;
ostatus = status = readl(port->membase + AML_UART_STATUS);
if (status & AML_UART_ERR) {
if (status & AML_UART_TX_FIFO_WERR)
port->icount.overrun++;
else if (status & AML_UART_FRAME_ERR)
port->icount.frame++;
else if (status & AML_UART_PARITY_ERR)
port->icount.parity++;
mode = readl(port->membase + AML_UART_CONTROL);
mode |= AML_UART_CLEAR_ERR;
writel(mode, port->membase + AML_UART_CONTROL);
/* The CLEAR_ERR bit doesn't clear to 0 automatically */
mode &= ~AML_UART_CLEAR_ERR;
writel(mode, port->membase + AML_UART_CONTROL);
status &= port->read_status_mask;
if (status & AML_UART_FRAME_ERR)
flag = TTY_FRAME;
else if (status & AML_UART_PARITY_ERR)
flag = TTY_PARITY;
}
ch = readl(port->membase + AML_UART_RFIFO);
ch &= 0xff;
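/*
* There is no dedicated break status bit; a framing error on an
* all-zero character is taken to be a BREAK condition.
*/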
if ((ostatus & AML_UART_FRAME_ERR) && (ch == 0)) {
port->icount.brk++;
flag = TTY_BREAK;
if (uart_handle_break(port))
continue;
}
if (uart_handle_sysrq_char(port, ch))
continue;
if ((status & port->ignore_status_mask) == 0)
tty_insert_flip_char(tport, ch, flag);
if (status & AML_UART_TX_FIFO_WERR)
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
} while (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY));
tty_flip_buffer_push(tport);
}
static irqreturn_t meson_uart_interrupt(int irq, void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
spin_lock(&port->lock);
if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY))
meson_receive_chars(port);
if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_TX_FULL)) {
if (readl(port->membase + AML_UART_CONTROL) & AML_UART_TX_INT_EN)
meson_uart_start_tx(port);
}
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
static const char *meson_uart_type(struct uart_port *port)
{
return (port->type == PORT_MESON) ? "meson_uart" : NULL;
}
/*
* This function is called only from probe() using a temporary io mapping
* in order to perform a reset before setting up the device. Since the
* temporarily mapped region was successfully requested, there can be no
* console on this port at this time. Hence it is not necessary for this
* function to acquire the port->lock. (Since there is no console on this
* port at this time, the port->lock is not initialized yet.)
*/
static void meson_uart_reset(struct uart_port *port)
{
u32 val;
val = readl(port->membase + AML_UART_CONTROL);
val |= (AML_UART_RX_RST | AML_UART_TX_RST | AML_UART_CLEAR_ERR);
writel(val, port->membase + AML_UART_CONTROL);
val &= ~(AML_UART_RX_RST | AML_UART_TX_RST | AML_UART_CLEAR_ERR);
writel(val, port->membase + AML_UART_CONTROL);
}
static int meson_uart_startup(struct uart_port *port)
{
unsigned long flags;
u32 val;
int ret = 0;
spin_lock_irqsave(&port->lock, flags);
val = readl(port->membase + AML_UART_CONTROL);
val |= AML_UART_CLEAR_ERR;
writel(val, port->membase + AML_UART_CONTROL);
val &= ~AML_UART_CLEAR_ERR;
writel(val, port->membase + AML_UART_CONTROL);
val |= (AML_UART_RX_EN | AML_UART_TX_EN);
writel(val, port->membase + AML_UART_CONTROL);
val |= (AML_UART_RX_INT_EN | AML_UART_TX_INT_EN);
writel(val, port->membase + AML_UART_CONTROL);
val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
writel(val, port->membase + AML_UART_MISC);
spin_unlock_irqrestore(&port->lock, flags);
ret = request_irq(port->irq, meson_uart_interrupt, 0,
port->name, port);
return ret;
}
static void meson_uart_change_speed(struct uart_port *port, unsigned long baud)
{
const struct meson_uart_data *private_data = port->private_data;
u32 val = 0;
while (!meson_uart_tx_empty(port))
cpu_relax();
if (port->uartclk == 24000000) {
unsigned int xtal_div = 3;
if (private_data && private_data->has_xtal_div2) {
xtal_div = 2;
val |= AML_UART_BAUD_XTAL_DIV2;
}
val |= DIV_ROUND_CLOSEST(port->uartclk / xtal_div, baud) - 1;
val |= AML_UART_BAUD_XTAL;
} else {
val = DIV_ROUND_CLOSEST(port->uartclk / 4, baud) - 1;
}
val |= AML_UART_BAUD_USE;
writel(val, port->membase + AML_UART_REG5);
}
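/*
* Worked example (hypothetical rates): a 24 MHz xtal with the /3
* divider gives an 8 MHz reference, so for 115200 baud the register
* value is DIV_ROUND_CLOSEST(8000000, 115200) - 1 = 68, for an
* actual rate of 8000000 / 69 ~= 115942 (about 0.6% fast).
*/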
static void meson_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
unsigned int cflags, iflags, baud;
unsigned long flags;
u32 val;
spin_lock_irqsave(&port->lock, flags);
cflags = termios->c_cflag;
iflags = termios->c_iflag;
val = readl(port->membase + AML_UART_CONTROL);
val &= ~AML_UART_DATA_LEN_MASK;
switch (cflags & CSIZE) {
case CS8:
val |= AML_UART_DATA_LEN_8BIT;
break;
case CS7:
val |= AML_UART_DATA_LEN_7BIT;
break;
case CS6:
val |= AML_UART_DATA_LEN_6BIT;
break;
case CS5:
val |= AML_UART_DATA_LEN_5BIT;
break;
}
if (cflags & PARENB)
val |= AML_UART_PARITY_EN;
else
val &= ~AML_UART_PARITY_EN;
if (cflags & PARODD)
val |= AML_UART_PARITY_TYPE;
else
val &= ~AML_UART_PARITY_TYPE;
val &= ~AML_UART_STOP_BIT_LEN_MASK;
if (cflags & CSTOPB)
val |= AML_UART_STOP_BIT_2SB;
else
val |= AML_UART_STOP_BIT_1SB;
if (cflags & CRTSCTS)
val &= ~AML_UART_TWO_WIRE_EN;
else
val |= AML_UART_TWO_WIRE_EN;
writel(val, port->membase + AML_UART_CONTROL);
baud = uart_get_baud_rate(port, termios, old, 50, 4000000);
meson_uart_change_speed(port, baud);
port->read_status_mask = AML_UART_TX_FIFO_WERR;
if (iflags & INPCK)
port->read_status_mask |= AML_UART_PARITY_ERR |
AML_UART_FRAME_ERR;
port->ignore_status_mask = 0;
if (iflags & IGNPAR)
port->ignore_status_mask |= AML_UART_PARITY_ERR |
AML_UART_FRAME_ERR;
uart_update_timeout(port, termios->c_cflag, baud);
spin_unlock_irqrestore(&port->lock, flags);
}
static int meson_uart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
int ret = 0;
if (port->type != PORT_MESON)
ret = -EINVAL;
if (port->irq != ser->irq)
ret = -EINVAL;
if (ser->baud_base < 9600)
ret = -EINVAL;
return ret;
}
static void meson_uart_release_port(struct uart_port *port)
{
devm_iounmap(port->dev, port->membase);
port->membase = NULL;
devm_release_mem_region(port->dev, port->mapbase, port->mapsize);
}
static int meson_uart_request_port(struct uart_port *port)
{
if (!devm_request_mem_region(port->dev, port->mapbase, port->mapsize,
dev_name(port->dev))) {
dev_err(port->dev, "Memory region busy\n");
return -EBUSY;
}
port->membase = devm_ioremap(port->dev, port->mapbase,
port->mapsize);
if (!port->membase)
return -ENOMEM;
return 0;
}
static void meson_uart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_MESON;
meson_uart_request_port(port);
}
}
#ifdef CONFIG_CONSOLE_POLL
/*
* Console polling routines for writing and reading from the uart while
* in an interrupt or debug context (i.e. kgdb).
*/
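/*
* These hooks are what e.g. kgdboc relies on; a typical
* (hypothetical) setup boots with kgdboc=ttyAML0,115200 on the
* kernel command line.
*/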
static int meson_uart_poll_get_char(struct uart_port *port)
{
u32 c;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)
c = NO_POLL_CHAR;
else
c = readl(port->membase + AML_UART_RFIFO);
spin_unlock_irqrestore(&port->lock, flags);
return c;
}
static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c)
{
unsigned long flags;
u32 reg;
int ret;
spin_lock_irqsave(&port->lock, flags);
/* Wait until FIFO is empty or timeout */
ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
reg & AML_UART_TX_EMPTY,
AML_UART_POLL_USEC,
AML_UART_TIMEOUT_USEC);
if (ret == -ETIMEDOUT) {
dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
goto out;
}
/* Write the character */
writel(c, port->membase + AML_UART_WFIFO);
/* Wait until FIFO is empty or timeout */
ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
reg & AML_UART_TX_EMPTY,
AML_UART_POLL_USEC,
AML_UART_TIMEOUT_USEC);
if (ret == -ETIMEDOUT)
dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
out:
spin_unlock_irqrestore(&port->lock, flags);
}
#endif /* CONFIG_CONSOLE_POLL */
static const struct uart_ops meson_uart_ops = {
.set_mctrl = meson_uart_set_mctrl,
.get_mctrl = meson_uart_get_mctrl,
.tx_empty = meson_uart_tx_empty,
.start_tx = meson_uart_start_tx,
.stop_tx = meson_uart_stop_tx,
.stop_rx = meson_uart_stop_rx,
.startup = meson_uart_startup,
.shutdown = meson_uart_shutdown,
.set_termios = meson_uart_set_termios,
.type = meson_uart_type,
.config_port = meson_uart_config_port,
.request_port = meson_uart_request_port,
.release_port = meson_uart_release_port,
.verify_port = meson_uart_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = meson_uart_poll_get_char,
.poll_put_char = meson_uart_poll_put_char,
#endif
};
#ifdef CONFIG_SERIAL_MESON_CONSOLE
static void meson_uart_enable_tx_engine(struct uart_port *port)
{
u32 val;
val = readl(port->membase + AML_UART_CONTROL);
val |= AML_UART_TX_EN;
writel(val, port->membase + AML_UART_CONTROL);
}
static void meson_console_putchar(struct uart_port *port, unsigned char ch)
{
if (!port->membase)
return;
while (readl(port->membase + AML_UART_STATUS) & AML_UART_TX_FULL)
cpu_relax();
writel(ch, port->membase + AML_UART_WFIFO);
}
static void meson_serial_port_write(struct uart_port *port, const char *s,
u_int count)
{
unsigned long flags;
int locked;
u32 val, tmp;
local_irq_save(flags);
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
locked = spin_trylock(&port->lock);
} else {
spin_lock(&port->lock);
locked = 1;
}
val = readl(port->membase + AML_UART_CONTROL);
tmp = val & ~(AML_UART_TX_INT_EN | AML_UART_RX_INT_EN);
writel(tmp, port->membase + AML_UART_CONTROL);
uart_console_write(port, s, count, meson_console_putchar);
writel(val, port->membase + AML_UART_CONTROL);
if (locked)
spin_unlock(&port->lock);
local_irq_restore(flags);
}
static void meson_serial_console_write(struct console *co, const char *s,
u_int count)
{
struct uart_port *port;
port = meson_ports[co->index];
if (!port)
return;
meson_serial_port_write(port, s, count);
}
static int meson_serial_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= AML_UART_PORT_NUM)
return -EINVAL;
port = meson_ports[co->index];
if (!port || !port->membase)
return -ENODEV;
meson_uart_enable_tx_engine(port);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
#define MESON_SERIAL_CONSOLE(_devname) \
static struct console meson_serial_console_##_devname = { \
.name = __stringify(_devname), \
.write = meson_serial_console_write, \
.device = uart_console_device, \
.setup = meson_serial_console_setup, \
.flags = CON_PRINTBUFFER, \
.index = -1, \
.data = &meson_uart_driver_##_devname, \
}
MESON_SERIAL_CONSOLE(ttyAML);
MESON_SERIAL_CONSOLE(ttyS);
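/* Two console flavours: legacy SoCs use ttyAML, newer ones (A1/S4) use ttyS. */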
static void meson_serial_early_console_write(struct console *co,
const char *s,
u_int count)
{
struct earlycon_device *dev = co->data;
meson_serial_port_write(&dev->port, s, count);
}
static int __init
meson_serial_early_console_setup(struct earlycon_device *device, const char *opt)
{
if (!device->port.membase)
return -ENODEV;
meson_uart_enable_tx_engine(&device->port);
device->con->write = meson_serial_early_console_write;
return 0;
}
OF_EARLYCON_DECLARE(meson, "amlogic,meson-ao-uart",
meson_serial_early_console_setup);
#define MESON_SERIAL_CONSOLE_PTR(_devname) (&meson_serial_console_##_devname)
#else
#define MESON_SERIAL_CONSOLE_PTR(_devname) (NULL)
#endif
#define MESON_UART_DRIVER(_devname) \
static struct uart_driver meson_uart_driver_##_devname = { \
.owner = THIS_MODULE, \
.driver_name = "meson_uart", \
.dev_name = __stringify(_devname), \
.nr = AML_UART_PORT_NUM, \
.cons = MESON_SERIAL_CONSOLE_PTR(_devname), \
}
MESON_UART_DRIVER(ttyAML);
MESON_UART_DRIVER(ttyS);
static int meson_uart_probe_clocks(struct platform_device *pdev,
struct uart_port *port)
{
struct clk *clk_xtal = NULL;
struct clk *clk_pclk = NULL;
struct clk *clk_baud = NULL;
clk_pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
if (IS_ERR(clk_pclk))
return PTR_ERR(clk_pclk);
clk_xtal = devm_clk_get_enabled(&pdev->dev, "xtal");
if (IS_ERR(clk_xtal))
return PTR_ERR(clk_xtal);
clk_baud = devm_clk_get_enabled(&pdev->dev, "baud");
if (IS_ERR(clk_baud))
return PTR_ERR(clk_baud);
port->uartclk = clk_get_rate(clk_baud);
return 0;
}
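/*
 * Pick the uart_driver for this SoC: the match data may select the
 * ttyS driver; otherwise fall back to the legacy ttyAML one.
 */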
static struct uart_driver *meson_uart_current(const struct meson_uart_data *pd)
{
return (pd && pd->uart_driver) ?
pd->uart_driver : &meson_uart_driver_ttyAML;
}
static int meson_uart_probe(struct platform_device *pdev)
{
const struct meson_uart_data *priv_data;
struct uart_driver *uart_driver;
struct resource *res_mem;
struct uart_port *port;
u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
int ret = 0;
int irq;
if (pdev->dev.of_node)
pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
if (pdev->id < 0) {
int id;
for (id = AML_UART_PORT_OFFSET; id < AML_UART_PORT_NUM; id++) {
if (!meson_ports[id]) {
pdev->id = id;
break;
}
}
}
if (pdev->id < 0 || pdev->id >= AML_UART_PORT_NUM)
return -EINVAL;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_mem)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
if (meson_ports[pdev->id]) {
return dev_err_probe(&pdev->dev, -EBUSY,
"port %d already allocated\n", pdev->id);
}
port = devm_kzalloc(&pdev->dev, sizeof(struct uart_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
ret = meson_uart_probe_clocks(pdev, port);
if (ret)
return ret;
priv_data = device_get_match_data(&pdev->dev);
uart_driver = meson_uart_current(priv_data);
if (!uart_driver->state) {
ret = uart_register_driver(uart_driver);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"can't register uart driver\n");
}
port->iotype = UPIO_MEM;
port->mapbase = res_mem->start;
port->mapsize = resource_size(res_mem);
port->irq = irq;
port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
port->dev = &pdev->dev;
port->line = pdev->id;
port->type = PORT_MESON;
port->x_char = 0;
port->ops = &meson_uart_ops;
port->fifosize = fifosize;
port->private_data = (void *)priv_data;
meson_ports[pdev->id] = port;
platform_set_drvdata(pdev, port);
/* reset port before registering (and possibly registering console) */
if (meson_uart_request_port(port) >= 0) {
meson_uart_reset(port);
meson_uart_release_port(port);
}
ret = uart_add_one_port(uart_driver, port);
if (ret)
meson_ports[pdev->id] = NULL;
return ret;
}
static int meson_uart_remove(struct platform_device *pdev)
{
struct uart_driver *uart_driver;
struct uart_port *port;
port = platform_get_drvdata(pdev);
uart_driver = meson_uart_current(port->private_data);
uart_remove_one_port(uart_driver, port);
meson_ports[pdev->id] = NULL;
for (int id = 0; id < AML_UART_PORT_NUM; id++)
if (meson_ports[id])
return 0;
/* No more available uart ports, unregister uart driver */
uart_unregister_driver(uart_driver);
return 0;
}
static struct meson_uart_data meson_g12a_uart_data = {
.has_xtal_div2 = true,
};
static struct meson_uart_data meson_a1_uart_data = {
.uart_driver = &meson_uart_driver_ttyS,
.has_xtal_div2 = false,
};
static struct meson_uart_data meson_s4_uart_data = {
.uart_driver = &meson_uart_driver_ttyS,
.has_xtal_div2 = true,
};
static const struct of_device_id meson_uart_dt_match[] = {
{ .compatible = "amlogic,meson6-uart" },
{ .compatible = "amlogic,meson8-uart" },
{ .compatible = "amlogic,meson8b-uart" },
{ .compatible = "amlogic,meson-gx-uart" },
{
.compatible = "amlogic,meson-g12a-uart",
.data = (void *)&meson_g12a_uart_data,
},
{
.compatible = "amlogic,meson-s4-uart",
.data = (void *)&meson_s4_uart_data,
},
{
.compatible = "amlogic,meson-a1-uart",
.data = (void *)&meson_a1_uart_data,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_uart_dt_match);
static struct platform_driver meson_uart_platform_driver = {
.probe = meson_uart_probe,
.remove = meson_uart_remove,
.driver = {
.name = "meson_uart",
.of_match_table = meson_uart_dt_match,
},
};
module_platform_driver(meson_uart_platform_driver);
MODULE_AUTHOR("Carlo Caione <[email protected]>");
MODULE_DESCRIPTION("Amlogic Meson serial port driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/tty/serial/meson_uart.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
#define __DISABLE_TRACE_MMIO__
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/soc/qcom/geni-se.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <dt-bindings/interconnect/qcom,icc.h>
/* UART specific GENI registers */
#define SE_UART_LOOPBACK_CFG 0x22c
#define SE_UART_IO_MACRO_CTRL 0x240
#define SE_UART_TX_TRANS_CFG 0x25c
#define SE_UART_TX_WORD_LEN 0x268
#define SE_UART_TX_STOP_BIT_LEN 0x26c
#define SE_UART_TX_TRANS_LEN 0x270
#define SE_UART_RX_TRANS_CFG 0x280
#define SE_UART_RX_WORD_LEN 0x28c
#define SE_UART_RX_STALE_CNT 0x294
#define SE_UART_TX_PARITY_CFG 0x2a4
#define SE_UART_RX_PARITY_CFG 0x2a8
#define SE_UART_MANUAL_RFR 0x2ac
/* SE_UART_TRANS_CFG */
#define UART_TX_PAR_EN BIT(0)
#define UART_CTS_MASK BIT(1)
/* SE_UART_TX_STOP_BIT_LEN */
#define TX_STOP_BIT_LEN_1 0
#define TX_STOP_BIT_LEN_2 2
/* SE_UART_RX_TRANS_CFG */
#define UART_RX_PAR_EN BIT(3)
/* SE_UART_RX_WORD_LEN */
#define RX_WORD_LEN_MASK GENMASK(9, 0)
/* SE_UART_RX_STALE_CNT */
#define RX_STALE_CNT GENMASK(23, 0)
/* SE_UART_TX_PARITY_CFG/RX_PARITY_CFG */
#define PAR_CALC_EN BIT(0)
#define PAR_EVEN 0x00
#define PAR_ODD 0x01
#define PAR_SPACE 0x10
/* SE_UART_MANUAL_RFR register fields */
#define UART_MANUAL_RFR_EN BIT(31)
#define UART_RFR_NOT_READY BIT(1)
#define UART_RFR_READY BIT(0)
/* UART M_CMD OP codes */
#define UART_START_TX 0x1
/* UART S_CMD OP codes */
#define UART_START_READ 0x1
#define UART_PARAM 0x1
#define UART_PARAM_RFR_OPEN BIT(7)
#define UART_OVERSAMPLING 32
#define STALE_TIMEOUT 16
#define DEFAULT_BITS_PER_CHAR 10
#define GENI_UART_CONS_PORTS 1
#define GENI_UART_PORTS 3
#define DEF_FIFO_DEPTH_WORDS 16
#define DEF_TX_WM 2
#define DEF_FIFO_WIDTH_BITS 32
#define UART_RX_WM 2
/* SE_UART_LOOPBACK_CFG */
#define RX_TX_SORTED BIT(0)
#define CTS_RTS_SORTED BIT(1)
#define RX_TX_CTS_RTS_SORTED (RX_TX_SORTED | CTS_RTS_SORTED)
/* UART pin swap value */
#define DEFAULT_IO_MACRO_IO0_IO1_MASK GENMASK(3, 0)
#define IO_MACRO_IO0_SEL 0x3
#define DEFAULT_IO_MACRO_IO2_IO3_MASK GENMASK(15, 4)
#define IO_MACRO_IO2_IO3_SWAP 0x4640
/* We always configure 4 bytes per FIFO word */
#define BYTES_PER_FIFO_WORD 4U
#define DMA_RX_BUF_SIZE 2048
struct qcom_geni_device_data {
bool console;
enum geni_se_xfer_mode mode;
};
struct qcom_geni_private_data {
/* NOTE: earlycon port will have NULL here */
struct uart_driver *drv;
u32 poll_cached_bytes;
unsigned int poll_cached_bytes_cnt;
u32 write_cached_bytes;
unsigned int write_cached_bytes_cnt;
};
struct qcom_geni_serial_port {
struct uart_port uport;
struct geni_se se;
const char *name;
u32 tx_fifo_depth;
u32 tx_fifo_width;
u32 rx_fifo_depth;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
bool setup;
unsigned int baud;
unsigned long clk_rate;
void *rx_buf;
u32 loopback;
bool brk;
unsigned int tx_remaining;
int wakeup_irq;
bool rx_tx_swap;
bool cts_rts_swap;
struct qcom_geni_private_data private_data;
const struct qcom_geni_device_data *dev_data;
};
static const struct uart_ops qcom_geni_console_pops;
static const struct uart_ops qcom_geni_uart_pops;
static struct uart_driver qcom_geni_console_driver;
static struct uart_driver qcom_geni_uart_driver;
static inline struct qcom_geni_serial_port *to_dev_port(struct uart_port *uport)
{
return container_of(uport, struct qcom_geni_serial_port, uport);
}
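/*
 * Ports are statically allocated: one console port plus GENI_UART_PORTS
 * high-speed ports, looked up by the "serial" (or "hsuart") alias.
 */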
static struct qcom_geni_serial_port qcom_geni_uart_ports[GENI_UART_PORTS] = {
[0] = {
.uport = {
.iotype = UPIO_MEM,
.ops = &qcom_geni_uart_pops,
.flags = UPF_BOOT_AUTOCONF,
.line = 0,
},
},
[1] = {
.uport = {
.iotype = UPIO_MEM,
.ops = &qcom_geni_uart_pops,
.flags = UPF_BOOT_AUTOCONF,
.line = 1,
},
},
[2] = {
.uport = {
.iotype = UPIO_MEM,
.ops = &qcom_geni_uart_pops,
.flags = UPF_BOOT_AUTOCONF,
.line = 2,
},
},
};
static struct qcom_geni_serial_port qcom_geni_console_port = {
.uport = {
.iotype = UPIO_MEM,
.ops = &qcom_geni_console_pops,
.flags = UPF_BOOT_AUTOCONF,
.line = 0,
},
};
static int qcom_geni_serial_request_port(struct uart_port *uport)
{
struct platform_device *pdev = to_platform_device(uport->dev);
struct qcom_geni_serial_port *port = to_dev_port(uport);
uport->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(uport->membase))
return PTR_ERR(uport->membase);
port->se.base = uport->membase;
return 0;
}
static void qcom_geni_serial_config_port(struct uart_port *uport, int cfg_flags)
{
if (cfg_flags & UART_CONFIG_TYPE) {
uport->type = PORT_MSM;
qcom_geni_serial_request_port(uport);
}
}
static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
{
unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
u32 geni_ios;
if (uart_console(uport)) {
mctrl |= TIOCM_CTS;
} else {
geni_ios = readl(uport->membase + SE_GENI_IOS);
if (!(geni_ios & IO2_DATA_IN))
mctrl |= TIOCM_CTS;
}
return mctrl;
}
static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
unsigned int mctrl)
{
u32 uart_manual_rfr = 0;
struct qcom_geni_serial_port *port = to_dev_port(uport);
if (uart_console(uport))
return;
if (mctrl & TIOCM_LOOP)
port->loopback = RX_TX_CTS_RTS_SORTED;
if (!(mctrl & TIOCM_RTS) && !uport->suspended)
uart_manual_rfr = UART_MANUAL_RFR_EN | UART_RFR_NOT_READY;
writel(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
}
static const char *qcom_geni_serial_get_type(struct uart_port *uport)
{
return "MSM";
}
static struct qcom_geni_serial_port *get_port_from_line(int line, bool console)
{
struct qcom_geni_serial_port *port;
int nr_ports = console ? GENI_UART_CONS_PORTS : GENI_UART_PORTS;
if (line < 0 || line >= nr_ports)
return ERR_PTR(-ENXIO);
port = console ? &qcom_geni_console_port : &qcom_geni_uart_ports[line];
return port;
}
static bool qcom_geni_serial_main_active(struct uart_port *uport)
{
return readl(uport->membase + SE_GENI_STATUS) & M_GENI_CMD_ACTIVE;
}
static bool qcom_geni_serial_secondary_active(struct uart_port *uport)
{
return readl(uport->membase + SE_GENI_STATUS) & S_GENI_CMD_ACTIVE;
}
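/*
 * Busy-wait for a status bit to reach the requested state. Once the
 * full driver is up, the timeout is scaled to a FIFO's worth of data
 * at the current baud rate; the earlycon path uses a fixed 20 ms.
 */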
static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
int offset, int field, bool set)
{
u32 reg;
struct qcom_geni_serial_port *port;
unsigned int baud;
unsigned int fifo_bits;
unsigned long timeout_us = 20000;
struct qcom_geni_private_data *private_data = uport->private_data;
if (private_data->drv) {
port = to_dev_port(uport);
baud = port->baud;
if (!baud)
baud = 115200;
fifo_bits = port->tx_fifo_depth * port->tx_fifo_width;
/*
* Size the poll timeout to drain a full FIFO's worth of bytes at
* the current baud rate, plus a little slack.
*/
timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500;
}
/*
* Open-code the poll loop instead of using readl_poll_timeout_atomic()
* since ktime is not ready at early console time.
*/
timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10;
while (timeout_us) {
reg = readl(uport->membase + offset);
if ((bool)(reg & field) == set)
return true;
udelay(10);
timeout_us -= 10;
}
return false;
}
static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
{
u32 m_cmd;
writel(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
m_cmd = UART_START_TX << M_OPCODE_SHFT;
writel(m_cmd, uport->membase + SE_GENI_M_CMD0);
}
static void qcom_geni_serial_poll_tx_done(struct uart_port *uport)
{
int done;
u32 irq_clear = M_CMD_DONE_EN;
done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_DONE_EN, true);
if (!done) {
writel(M_GENI_CMD_ABORT, uport->membase +
SE_GENI_M_CMD_CTRL_REG);
irq_clear |= M_CMD_ABORT_EN;
qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
}
writel(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
static void qcom_geni_serial_abort_rx(struct uart_port *uport)
{
u32 irq_clear = S_CMD_DONE_EN | S_CMD_ABORT_EN;
writel(S_GENI_CMD_ABORT, uport->membase + SE_GENI_S_CMD_CTRL_REG);
qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
S_GENI_CMD_ABORT, false);
writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
writel(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
}
#ifdef CONFIG_CONSOLE_POLL
static int qcom_geni_serial_get_char(struct uart_port *uport)
{
struct qcom_geni_private_data *private_data = uport->private_data;
u32 status;
u32 word_cnt;
int ret;
if (!private_data->poll_cached_bytes_cnt) {
status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
writel(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
writel(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
word_cnt = status & RX_FIFO_WC_MSK;
if (!word_cnt)
return NO_POLL_CHAR;
if (word_cnt == 1 && (status & RX_LAST))
/*
* NOTE: If RX_LAST_BYTE_VALID is 0 it needs to be
* treated as if it was BYTES_PER_FIFO_WORD.
*/
private_data->poll_cached_bytes_cnt =
(status & RX_LAST_BYTE_VALID_MSK) >>
RX_LAST_BYTE_VALID_SHFT;
if (private_data->poll_cached_bytes_cnt == 0)
private_data->poll_cached_bytes_cnt = BYTES_PER_FIFO_WORD;
private_data->poll_cached_bytes =
readl(uport->membase + SE_GENI_RX_FIFOn);
}
private_data->poll_cached_bytes_cnt--;
ret = private_data->poll_cached_bytes & 0xff;
private_data->poll_cached_bytes >>= 8;
return ret;
}
static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
unsigned char c)
{
writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
qcom_geni_serial_setup_tx(uport, 1);
WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_TX_FIFO_WATERMARK_EN, true));
writel(c, uport->membase + SE_GENI_TX_FIFOn);
writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
qcom_geni_serial_poll_tx_done(uport);
}
#endif
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
static void qcom_geni_serial_wr_char(struct uart_port *uport, unsigned char ch)
{
struct qcom_geni_private_data *private_data = uport->private_data;
private_data->write_cached_bytes =
(private_data->write_cached_bytes >> 8) | (ch << 24);
private_data->write_cached_bytes_cnt++;
if (private_data->write_cached_bytes_cnt == BYTES_PER_FIFO_WORD) {
writel(private_data->write_cached_bytes,
uport->membase + SE_GENI_TX_FIFOn);
private_data->write_cached_bytes_cnt = 0;
}
}
static void
__qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
unsigned int count)
{
struct qcom_geni_private_data *private_data = uport->private_data;
int i;
u32 bytes_to_send = count;
for (i = 0; i < count; i++) {
/*
* uart_console_write() adds a carriage return for each newline.
* Account for additional bytes to be written.
*/
if (s[i] == '\n')
bytes_to_send++;
}
writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
qcom_geni_serial_setup_tx(uport, bytes_to_send);
for (i = 0; i < count; ) {
size_t chars_to_write = 0;
size_t avail = DEF_FIFO_DEPTH_WORDS - DEF_TX_WM;
/*
* If the WM bit was never set, the TX state machine is not in a
* valid state; break out and cancel/abort any existing command.
* Unfortunately the data currently being written is lost.
*/
if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_TX_FIFO_WATERMARK_EN, true))
break;
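/*
 * Write at most half of the free FIFO space: uart_console_write()
 * may expand each LF to CR+LF, doubling the byte count.
 */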
chars_to_write = min_t(size_t, count - i, avail / 2);
uart_console_write(uport, s + i, chars_to_write,
qcom_geni_serial_wr_char);
writel(M_TX_FIFO_WATERMARK_EN, uport->membase +
SE_GENI_M_IRQ_CLEAR);
i += chars_to_write;
}
if (private_data->write_cached_bytes_cnt) {
private_data->write_cached_bytes >>= BITS_PER_BYTE *
(BYTES_PER_FIFO_WORD - private_data->write_cached_bytes_cnt);
writel(private_data->write_cached_bytes,
uport->membase + SE_GENI_TX_FIFOn);
private_data->write_cached_bytes_cnt = 0;
}
qcom_geni_serial_poll_tx_done(uport);
}
static void qcom_geni_serial_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *uport;
struct qcom_geni_serial_port *port;
bool locked = true;
unsigned long flags;
u32 geni_status;
u32 irq_en;
WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
port = get_port_from_line(co->index, true);
if (IS_ERR(port))
return;
uport = &port->uport;
if (oops_in_progress)
locked = spin_trylock_irqsave(&uport->lock, flags);
else
spin_lock_irqsave(&uport->lock, flags);
geni_status = readl(uport->membase + SE_GENI_STATUS);
/* Cancel the current write to log the fault */
if (!locked) {
geni_se_cancel_m_cmd(&port->se);
if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_CANCEL_EN, true)) {
geni_se_abort_m_cmd(&port->se);
qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
writel(M_CMD_ABORT_EN, uport->membase +
SE_GENI_M_IRQ_CLEAR);
}
writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
} else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) {
/*
* It seems we can't interrupt existing transfers if all data
* has been sent, in which case we need to look for done first.
*/
qcom_geni_serial_poll_tx_done(uport);
if (!uart_circ_empty(&uport->state->xmit)) {
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
writel(irq_en | M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_EN);
}
}
__qcom_geni_serial_console_write(uport, s, count);
if (port->tx_remaining)
qcom_geni_serial_setup_tx(uport, port->tx_remaining);
if (locked)
spin_unlock_irqrestore(&uport->lock, flags);
}
static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
{
u32 i;
unsigned char buf[sizeof(u32)];
struct tty_port *tport;
struct qcom_geni_serial_port *port = to_dev_port(uport);
tport = &uport->state->port;
for (i = 0; i < bytes; ) {
int c;
int chunk = min_t(int, bytes - i, BYTES_PER_FIFO_WORD);
ioread32_rep(uport->membase + SE_GENI_RX_FIFOn, buf, 1);
i += chunk;
if (drop)
continue;
for (c = 0; c < chunk; c++) {
int sysrq;
uport->icount.rx++;
if (port->brk && buf[c] == 0) {
port->brk = false;
if (uart_handle_break(uport))
continue;
}
sysrq = uart_prepare_sysrq_char(uport, buf[c]);
if (!sysrq)
tty_insert_flip_char(tport, buf[c], TTY_NORMAL);
}
}
if (!drop)
tty_flip_buffer_push(tport);
}
#else
static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
{
}
#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */
static void handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
struct tty_port *tport = &uport->state->port;
int ret;
ret = tty_insert_flip_string(tport, port->rx_buf, bytes);
if (ret != bytes) {
dev_err(uport->dev, "%s:Unable to push data ret %d_bytes %d\n",
__func__, ret, bytes);
WARN_ON_ONCE(1);
}
uport->icount.rx += ret;
tty_flip_buffer_push(tport);
}
static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport)
{
return !readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
}
static void qcom_geni_serial_stop_tx_dma(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
bool done;
if (!qcom_geni_serial_main_active(uport))
return;
if (port->tx_dma_addr) {
geni_se_tx_dma_unprep(&port->se, port->tx_dma_addr,
port->tx_remaining);
port->tx_dma_addr = 0;
port->tx_remaining = 0;
}
geni_se_cancel_m_cmd(&port->se);
done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_CANCEL_EN, true);
if (!done) {
geni_se_abort_m_cmd(&port->se);
done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
if (!done)
dev_err_ratelimited(uport->dev, "M_CMD_ABORT_EN not set");
writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
static void qcom_geni_serial_start_tx_dma(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
struct circ_buf *xmit = &uport->state->xmit;
unsigned int xmit_size;
int ret;
if (port->tx_dma_addr)
return;
if (uart_circ_empty(xmit))
return;
xmit_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
qcom_geni_serial_setup_tx(uport, xmit_size);
ret = geni_se_tx_dma_prep(&port->se, &xmit->buf[xmit->tail],
xmit_size, &port->tx_dma_addr);
if (ret) {
dev_err(uport->dev, "unable to start TX SE DMA: %d\n", ret);
qcom_geni_serial_stop_tx_dma(uport);
return;
}
port->tx_remaining = xmit_size;
}
static void qcom_geni_serial_start_tx_fifo(struct uart_port *uport)
{
u32 irq_en;
if (qcom_geni_serial_main_active(uport) ||
!qcom_geni_serial_tx_empty(uport))
return;
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
static void qcom_geni_serial_stop_tx_fifo(struct uart_port *uport)
{
u32 irq_en;
struct qcom_geni_serial_port *port = to_dev_port(uport);
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
irq_en &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
writel(0, uport->membase + SE_GENI_TX_WATERMARK_REG);
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
/* stop_tx may be called multiple times. */
if (!qcom_geni_serial_main_active(uport))
return;
geni_se_cancel_m_cmd(&port->se);
if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_CANCEL_EN, true)) {
geni_se_abort_m_cmd(&port->se);
qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
static void qcom_geni_serial_handle_rx_fifo(struct uart_port *uport, bool drop)
{
u32 status;
u32 word_cnt;
u32 last_word_byte_cnt;
u32 last_word_partial;
u32 total_bytes;
status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
word_cnt = status & RX_FIFO_WC_MSK;
last_word_partial = status & RX_LAST;
last_word_byte_cnt = (status & RX_LAST_BYTE_VALID_MSK) >>
RX_LAST_BYTE_VALID_SHFT;
if (!word_cnt)
return;
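/*
 * Every FIFO word except possibly the last carries BYTES_PER_FIFO_WORD
 * bytes; the last word's byte count comes from RX_LAST_BYTE_VALID when
 * RX_LAST is set, otherwise it is a full word as well.
 */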
total_bytes = BYTES_PER_FIFO_WORD * (word_cnt - 1);
if (last_word_partial && last_word_byte_cnt)
total_bytes += last_word_byte_cnt;
else
total_bytes += BYTES_PER_FIFO_WORD;
handle_rx_console(uport, total_bytes, drop);
}
static void qcom_geni_serial_stop_rx_fifo(struct uart_port *uport)
{
u32 irq_en;
struct qcom_geni_serial_port *port = to_dev_port(uport);
u32 s_irq_status;
irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
if (!qcom_geni_serial_secondary_active(uport))
return;
geni_se_cancel_s_cmd(&port->se);
qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
S_CMD_CANCEL_EN, true);
/*
* If a timeout occurs, the secondary engine remains active and
* the abort sequence is executed.
*/
s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
/* Flush the Rx buffer */
if (s_irq_status & S_RX_FIFO_LAST_EN)
qcom_geni_serial_handle_rx_fifo(uport, true);
writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
if (qcom_geni_serial_secondary_active(uport))
qcom_geni_serial_abort_rx(uport);
}
static void qcom_geni_serial_start_rx_fifo(struct uart_port *uport)
{
u32 irq_en;
struct qcom_geni_serial_port *port = to_dev_port(uport);
if (qcom_geni_serial_secondary_active(uport))
qcom_geni_serial_stop_rx_fifo(uport);
geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
static void qcom_geni_serial_stop_rx_dma(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
if (!qcom_geni_serial_secondary_active(uport))
return;
geni_se_cancel_s_cmd(&port->se);
qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
S_CMD_CANCEL_EN, true);
if (qcom_geni_serial_secondary_active(uport))
qcom_geni_serial_abort_rx(uport);
if (port->rx_dma_addr) {
geni_se_rx_dma_unprep(&port->se, port->rx_dma_addr,
DMA_RX_BUF_SIZE);
port->rx_dma_addr = 0;
}
}
static void qcom_geni_serial_start_rx_dma(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
int ret;
if (qcom_geni_serial_secondary_active(uport))
qcom_geni_serial_stop_rx_dma(uport);
geni_se_setup_s_cmd(&port->se, UART_START_READ, UART_PARAM_RFR_OPEN);
ret = geni_se_rx_dma_prep(&port->se, port->rx_buf,
DMA_RX_BUF_SIZE,
&port->rx_dma_addr);
if (ret) {
dev_err(uport->dev, "unable to start RX SE DMA: %d\n", ret);
qcom_geni_serial_stop_rx_dma(uport);
}
}
static void qcom_geni_serial_handle_rx_dma(struct uart_port *uport, bool drop)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
u32 rx_in;
int ret;
if (!qcom_geni_serial_secondary_active(uport))
return;
if (!port->rx_dma_addr)
return;
geni_se_rx_dma_unprep(&port->se, port->rx_dma_addr, DMA_RX_BUF_SIZE);
port->rx_dma_addr = 0;
rx_in = readl(uport->membase + SE_DMA_RX_LEN_IN);
if (!rx_in) {
dev_warn(uport->dev, "serial engine reports 0 RX bytes in!\n");
return;
}
if (!drop)
handle_rx_uart(uport, rx_in, drop);
ret = geni_se_rx_dma_prep(&port->se, port->rx_buf,
DMA_RX_BUF_SIZE,
&port->rx_dma_addr);
if (ret) {
dev_err(uport->dev, "unable to start RX SE DMA: %d\n", ret);
qcom_geni_serial_stop_rx_dma(uport);
}
}
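/*
 * Thin wrappers that dispatch through uart_ops so common code paths
 * work for both the FIFO (console) and DMA (high-speed) variants.
 */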
static void qcom_geni_serial_start_rx(struct uart_port *uport)
{
uport->ops->start_rx(uport);
}
static void qcom_geni_serial_stop_rx(struct uart_port *uport)
{
uport->ops->stop_rx(uport);
}
static void qcom_geni_serial_stop_tx(struct uart_port *uport)
{
uport->ops->stop_tx(uport);
}
static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport,
unsigned int remaining)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
struct circ_buf *xmit = &uport->state->xmit;
unsigned int tx_bytes;
u8 buf[BYTES_PER_FIFO_WORD];
while (remaining) {
memset(buf, 0, sizeof(buf));
tx_bytes = min(remaining, BYTES_PER_FIFO_WORD);
memcpy(buf, &xmit->buf[xmit->tail], tx_bytes);
uart_xmit_advance(uport, tx_bytes);
iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
remaining -= tx_bytes;
port->tx_remaining -= tx_bytes;
}
}
static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport,
bool done, bool active)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
struct circ_buf *xmit = &uport->state->xmit;
size_t avail;
size_t pending;
u32 status;
u32 irq_en;
unsigned int chunk;
status = readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
/* Complete the current tx command before taking newly added data */
if (active)
pending = port->tx_remaining;
else
pending = uart_circ_chars_pending(xmit);
/* All data has been transmitted and acknowledged as received */
if (!pending && !status && done) {
qcom_geni_serial_stop_tx_fifo(uport);
goto out_write_wakeup;
}
avail = port->tx_fifo_depth - (status & TX_FIFO_WC);
avail *= BYTES_PER_FIFO_WORD;
chunk = min(avail, pending);
if (!chunk)
goto out_write_wakeup;
if (!port->tx_remaining) {
qcom_geni_serial_setup_tx(uport, pending);
port->tx_remaining = pending;
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
if (!(irq_en & M_TX_FIFO_WATERMARK_EN))
writel(irq_en | M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_EN);
}
qcom_geni_serial_send_chunk_fifo(uport, chunk);
/*
* The tx fifo watermark is level triggered and latched. Though we had
* cleared it in qcom_geni_serial_isr it will have already reasserted
* so we must clear it again here after our writes.
*/
writel(M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_CLEAR);
out_write_wakeup:
if (!port->tx_remaining) {
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
if (irq_en & M_TX_FIFO_WATERMARK_EN)
writel(irq_en & ~M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_EN);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(uport);
}
static void qcom_geni_serial_handle_tx_dma(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
struct circ_buf *xmit = &uport->state->xmit;
uart_xmit_advance(uport, port->tx_remaining);
geni_se_tx_dma_unprep(&port->se, port->tx_dma_addr, port->tx_remaining);
port->tx_dma_addr = 0;
port->tx_remaining = 0;
if (!uart_circ_empty(xmit))
qcom_geni_serial_start_tx_dma(uport);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(uport);
}
static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
{
u32 m_irq_en;
u32 m_irq_status;
u32 s_irq_status;
u32 geni_status;
u32 dma;
u32 dma_tx_status;
u32 dma_rx_status;
struct uart_port *uport = dev;
bool drop_rx = false;
struct tty_port *tport = &uport->state->port;
struct qcom_geni_serial_port *port = to_dev_port(uport);
if (uport->suspended)
return IRQ_NONE;
spin_lock(&uport->lock);
m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
dma_tx_status = readl(uport->membase + SE_DMA_TX_IRQ_STAT);
dma_rx_status = readl(uport->membase + SE_DMA_RX_IRQ_STAT);
geni_status = readl(uport->membase + SE_GENI_STATUS);
dma = readl(uport->membase + SE_GENI_DMA_MODE_EN);
m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
writel(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
writel(dma_tx_status, uport->membase + SE_DMA_TX_IRQ_CLR);
writel(dma_rx_status, uport->membase + SE_DMA_RX_IRQ_CLR);
if (WARN_ON(m_irq_status & M_ILLEGAL_CMD_EN))
goto out_unlock;
if (s_irq_status & S_RX_FIFO_WR_ERR_EN) {
uport->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
}
if (s_irq_status & (S_GP_IRQ_0_EN | S_GP_IRQ_1_EN)) {
if (s_irq_status & S_GP_IRQ_0_EN)
uport->icount.parity++;
drop_rx = true;
} else if (s_irq_status & (S_GP_IRQ_2_EN | S_GP_IRQ_3_EN)) {
uport->icount.brk++;
port->brk = true;
}
if (dma) {
if (dma_tx_status & TX_DMA_DONE)
qcom_geni_serial_handle_tx_dma(uport);
if (dma_rx_status) {
if (dma_rx_status & RX_RESET_DONE)
goto out_unlock;
if (dma_rx_status & RX_DMA_PARITY_ERR) {
uport->icount.parity++;
drop_rx = true;
}
if (dma_rx_status & RX_DMA_BREAK)
uport->icount.brk++;
if (dma_rx_status & (RX_DMA_DONE | RX_EOT))
qcom_geni_serial_handle_rx_dma(uport, drop_rx);
}
} else {
if (m_irq_status & m_irq_en &
(M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
qcom_geni_serial_handle_tx_fifo(uport,
m_irq_status & M_CMD_DONE_EN,
geni_status & M_GENI_CMD_ACTIVE);
if (s_irq_status & (S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN))
qcom_geni_serial_handle_rx_fifo(uport, drop_rx);
}
out_unlock:
uart_unlock_and_check_sysrq(uport);
return IRQ_HANDLED;
}
static int setup_fifos(struct qcom_geni_serial_port *port)
{
struct uart_port *uport;
u32 old_rx_fifo_depth = port->rx_fifo_depth;
uport = &port->uport;
port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se);
port->tx_fifo_width = geni_se_get_tx_fifo_width(&port->se);
port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se);
uport->fifosize =
(port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
if (port->rx_buf && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) {
/*
* Use krealloc rather than krealloc_array because rx_buf is
* accessed as 1 byte entries as well as 4 byte entries so it's
* not necessarily an array.
*/
port->rx_buf = devm_krealloc(uport->dev, port->rx_buf,
port->rx_fifo_depth * sizeof(u32),
GFP_KERNEL);
if (!port->rx_buf)
return -ENOMEM;
}
return 0;
}
static void qcom_geni_serial_shutdown(struct uart_port *uport)
{
disable_irq(uport->irq);
if (uart_console(uport))
return;
qcom_geni_serial_stop_tx(uport);
qcom_geni_serial_stop_rx(uport);
}
static int qcom_geni_serial_port_setup(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
u32 proto;
u32 pin_swap;
int ret;
proto = geni_se_read_proto(&port->se);
if (proto != GENI_SE_UART) {
dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto);
return -ENXIO;
}
qcom_geni_serial_stop_rx(uport);
ret = setup_fifos(port);
if (ret)
return ret;
writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
pin_swap = readl(uport->membase + SE_UART_IO_MACRO_CTRL);
if (port->rx_tx_swap) {
pin_swap &= ~DEFAULT_IO_MACRO_IO2_IO3_MASK;
pin_swap |= IO_MACRO_IO2_IO3_SWAP;
}
if (port->cts_rts_swap) {
pin_swap &= ~DEFAULT_IO_MACRO_IO0_IO1_MASK;
pin_swap |= IO_MACRO_IO0_SEL;
}
/* Configure this register if RX-TX, CTS-RTS pins are swapped */
if (port->rx_tx_swap || port->cts_rts_swap)
writel(pin_swap, uport->membase + SE_UART_IO_MACRO_CTRL);
/*
* Unconditionally cancel on the main sequencer to reset it;
* otherwise we could end up losing data.
*/
if (uart_console(uport))
qcom_geni_serial_poll_tx_done(uport);
geni_se_config_packing(&port->se, BITS_PER_BYTE, BYTES_PER_FIFO_WORD,
false, true, true);
geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
geni_se_select_mode(&port->se, port->dev_data->mode);
qcom_geni_serial_start_rx(uport);
port->setup = true;
return 0;
}
static int qcom_geni_serial_startup(struct uart_port *uport)
{
int ret;
struct qcom_geni_serial_port *port = to_dev_port(uport);
if (!port->setup) {
ret = qcom_geni_serial_port_setup(uport);
if (ret)
return ret;
}
enable_irq(uport->irq);
return 0;
}
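/*
 * Search for a source clock rate that an integer divider can bring
 * within percent_tol of desired_clk: walk candidate dividers, let
 * clk_round_rate() snap to a supported rate, and accept the first
 * result inside the tolerance window.
 */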
static unsigned long find_clk_rate_in_tol(struct clk *clk, unsigned int desired_clk,
unsigned int *clk_div, unsigned int percent_tol)
{
unsigned long freq;
unsigned long div, maxdiv;
u64 mult;
unsigned long offset, abs_tol, achieved;
abs_tol = div_u64((u64)desired_clk * percent_tol, 100);
maxdiv = CLK_DIV_MSK >> CLK_DIV_SHFT;
div = 1;
while (div <= maxdiv) {
mult = (u64)div * desired_clk;
if (mult != (unsigned long)mult)
break;
offset = div * abs_tol;
freq = clk_round_rate(clk, mult - offset);
/* Can only get lower if we're done */
if (freq < mult - offset)
break;
/*
* Re-calculate div in case rounding skipped rates but we
* ended up at a good one, then check for a match.
*/
div = DIV_ROUND_CLOSEST(freq, desired_clk);
achieved = DIV_ROUND_CLOSEST(freq, div);
if (achieved <= desired_clk + abs_tol &&
achieved >= desired_clk - abs_tol) {
*clk_div = div;
return freq;
}
div = DIV_ROUND_UP(freq, desired_clk);
}
return 0;
}
static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
unsigned int sampling_rate, unsigned int *clk_div)
{
unsigned long ser_clk;
unsigned long desired_clk;
desired_clk = baud * sampling_rate;
if (!desired_clk)
return 0;
/*
* try to find a clock rate within 2% tolerance, then within 5%
*/
ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 2);
if (!ser_clk)
ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 5);
return ser_clk;
}
static void qcom_geni_serial_set_termios(struct uart_port *uport,
struct ktermios *termios,
const struct ktermios *old)
{
unsigned int baud;
u32 bits_per_char;
u32 tx_trans_cfg;
u32 tx_parity_cfg;
u32 rx_trans_cfg;
u32 rx_parity_cfg;
u32 stop_bit_len;
unsigned int clk_div;
u32 ser_clk_cfg;
struct qcom_geni_serial_port *port = to_dev_port(uport);
unsigned long clk_rate;
u32 ver, sampling_rate;
unsigned int avg_bw_core;
qcom_geni_serial_stop_rx(uport);
/* baud rate */
baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
port->baud = baud;
sampling_rate = UART_OVERSAMPLING;
/* Sampling rate is halved for IP versions >= 2.5 */
ver = geni_se_get_qup_hw_version(&port->se);
if (ver >= QUP_SE_VERSION_2_5)
sampling_rate /= 2;
clk_rate = get_clk_div_rate(port->se.clk, baud,
sampling_rate, &clk_div);
if (!clk_rate) {
dev_err(port->se.dev,
"Couldn't find suitable clock rate for %u\n",
baud * sampling_rate);
goto out_restart_rx;
}
dev_dbg(port->se.dev, "desired_rate = %u, clk_rate = %lu, clk_div = %u\n",
baud * sampling_rate, clk_rate, clk_div);
uport->uartclk = clk_rate;
port->clk_rate = clk_rate;
dev_pm_opp_set_rate(uport->dev, clk_rate);
ser_clk_cfg = SER_CLK_EN;
ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
/*
* Bump up BW vote on CPU and CORE path as driver supports FIFO mode
* only.
*/
avg_bw_core = (baud > 115200) ? Bps_to_icc(CORE_2X_50_MHZ)
: GENI_DEFAULT_BW;
port->se.icc_paths[GENI_TO_CORE].avg_bw = avg_bw_core;
port->se.icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(baud);
geni_icc_set_bw(&port->se);
/* parity */
tx_trans_cfg = readl(uport->membase + SE_UART_TX_TRANS_CFG);
tx_parity_cfg = readl(uport->membase + SE_UART_TX_PARITY_CFG);
rx_trans_cfg = readl(uport->membase + SE_UART_RX_TRANS_CFG);
rx_parity_cfg = readl(uport->membase + SE_UART_RX_PARITY_CFG);
if (termios->c_cflag & PARENB) {
tx_trans_cfg |= UART_TX_PAR_EN;
rx_trans_cfg |= UART_RX_PAR_EN;
tx_parity_cfg |= PAR_CALC_EN;
rx_parity_cfg |= PAR_CALC_EN;
if (termios->c_cflag & PARODD) {
tx_parity_cfg |= PAR_ODD;
rx_parity_cfg |= PAR_ODD;
} else if (termios->c_cflag & CMSPAR) {
tx_parity_cfg |= PAR_SPACE;
rx_parity_cfg |= PAR_SPACE;
} else {
tx_parity_cfg |= PAR_EVEN;
rx_parity_cfg |= PAR_EVEN;
}
} else {
tx_trans_cfg &= ~UART_TX_PAR_EN;
rx_trans_cfg &= ~UART_RX_PAR_EN;
tx_parity_cfg &= ~PAR_CALC_EN;
rx_parity_cfg &= ~PAR_CALC_EN;
}
/* bits per char */
bits_per_char = tty_get_char_size(termios->c_cflag);
/* stop bits */
if (termios->c_cflag & CSTOPB)
stop_bit_len = TX_STOP_BIT_LEN_2;
else
stop_bit_len = TX_STOP_BIT_LEN_1;
/* flow control, clear the CTS_MASK bit if using flow control. */
if (termios->c_cflag & CRTSCTS)
tx_trans_cfg &= ~UART_CTS_MASK;
else
tx_trans_cfg |= UART_CTS_MASK;
if (baud)
uart_update_timeout(uport, termios->c_cflag, baud);
if (!uart_console(uport))
writel(port->loopback,
uport->membase + SE_UART_LOOPBACK_CFG);
writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
writel(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
writel(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
out_restart_rx:
qcom_geni_serial_start_rx(uport);
}
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
static int qcom_geni_console_setup(struct console *co, char *options)
{
struct uart_port *uport;
struct qcom_geni_serial_port *port;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
int ret;
if (co->index >= GENI_UART_CONS_PORTS || co->index < 0)
return -ENXIO;
port = get_port_from_line(co->index, true);
if (IS_ERR(port)) {
pr_err("Invalid line %d\n", co->index);
return PTR_ERR(port);
}
uport = &port->uport;
if (unlikely(!uport->membase))
return -ENXIO;
if (!port->setup) {
ret = qcom_geni_serial_port_setup(uport);
if (ret)
return ret;
}
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(uport, co, baud, parity, bits, flow);
}
static void qcom_geni_serial_earlycon_write(struct console *con,
const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
__qcom_geni_serial_console_write(&dev->port, s, n);
}
#ifdef CONFIG_CONSOLE_POLL
static int qcom_geni_serial_earlycon_read(struct console *con,
char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
struct uart_port *uport = &dev->port;
int num_read = 0;
int ch;
while (num_read < n) {
ch = qcom_geni_serial_get_char(uport);
if (ch == NO_POLL_CHAR)
break;
s[num_read++] = ch;
}
return num_read;
}
static void __init qcom_geni_serial_enable_early_read(struct geni_se *se,
struct console *con)
{
geni_se_setup_s_cmd(se, UART_START_READ, 0);
con->read = qcom_geni_serial_earlycon_read;
}
#else
static inline void qcom_geni_serial_enable_early_read(struct geni_se *se,
struct console *con) { }
#endif
static struct qcom_geni_private_data earlycon_private_data;
static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
const char *opt)
{
struct uart_port *uport = &dev->port;
u32 tx_trans_cfg;
u32 tx_parity_cfg = 0; /* Disable Tx Parity */
u32 rx_trans_cfg = 0;
u32 rx_parity_cfg = 0; /* Disable Rx Parity */
u32 stop_bit_len = 0; /* Default stop bit length - 1 bit */
u32 bits_per_char;
struct geni_se se;
if (!uport->membase)
return -EINVAL;
uport->private_data = &earlycon_private_data;
memset(&se, 0, sizeof(se));
se.base = uport->membase;
if (geni_se_read_proto(&se) != GENI_SE_UART)
return -ENXIO;
/*
* Ignore flow control and use 8 bits per character (n = 8).
*/
tx_trans_cfg = UART_CTS_MASK;
bits_per_char = BITS_PER_BYTE;
/*
* Unconditionally cancel on the main sequencer to reset it;
* otherwise we could end up losing data.
*/
qcom_geni_serial_poll_tx_done(uport);
qcom_geni_serial_abort_rx(uport);
geni_se_config_packing(&se, BITS_PER_BYTE, BYTES_PER_FIFO_WORD,
false, true, true);
geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2);
geni_se_select_mode(&se, GENI_SE_FIFO);
writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
dev->con->write = qcom_geni_serial_earlycon_write;
dev->con->setup = NULL;
qcom_geni_serial_enable_early_read(&se, dev->con);
return 0;
}
OF_EARLYCON_DECLARE(qcom_geni, "qcom,geni-debug-uart",
qcom_geni_serial_earlycon_setup);
static int __init console_register(struct uart_driver *drv)
{
return uart_register_driver(drv);
}
static void console_unregister(struct uart_driver *drv)
{
uart_unregister_driver(drv);
}
static struct console cons_ops = {
.name = "ttyMSM",
.write = qcom_geni_serial_console_write,
.device = uart_console_device,
.setup = qcom_geni_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &qcom_geni_console_driver,
};
static struct uart_driver qcom_geni_console_driver = {
.owner = THIS_MODULE,
.driver_name = "qcom_geni_console",
.dev_name = "ttyMSM",
.nr = GENI_UART_CONS_PORTS,
.cons = &cons_ops,
};
#else
static int console_register(struct uart_driver *drv)
{
return 0;
}
static void console_unregister(struct uart_driver *drv)
{
}
#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */
static struct uart_driver qcom_geni_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "qcom_geni_uart",
.dev_name = "ttyHS",
.nr = GENI_UART_PORTS,
};
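/*
 * Power-state hook: switch the serial engine resources and interconnect
 * votes on or off as the core moves the port between ON and OFF.
 */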
static void qcom_geni_serial_pm(struct uart_port *uport,
unsigned int new_state, unsigned int old_state)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
/* If we've never been called, treat it as off */
if (old_state == UART_PM_STATE_UNDEFINED)
old_state = UART_PM_STATE_OFF;
if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF) {
geni_icc_enable(&port->se);
if (port->clk_rate)
dev_pm_opp_set_rate(uport->dev, port->clk_rate);
geni_se_resources_on(&port->se);
} else if (new_state == UART_PM_STATE_OFF &&
old_state == UART_PM_STATE_ON) {
geni_se_resources_off(&port->se);
dev_pm_opp_set_rate(uport->dev, 0);
geni_icc_disable(&port->se);
}
}
static const struct uart_ops qcom_geni_console_pops = {
.tx_empty = qcom_geni_serial_tx_empty,
.stop_tx = qcom_geni_serial_stop_tx_fifo,
.start_tx = qcom_geni_serial_start_tx_fifo,
.stop_rx = qcom_geni_serial_stop_rx_fifo,
.start_rx = qcom_geni_serial_start_rx_fifo,
.set_termios = qcom_geni_serial_set_termios,
.startup = qcom_geni_serial_startup,
.request_port = qcom_geni_serial_request_port,
.config_port = qcom_geni_serial_config_port,
.shutdown = qcom_geni_serial_shutdown,
.type = qcom_geni_serial_get_type,
.set_mctrl = qcom_geni_serial_set_mctrl,
.get_mctrl = qcom_geni_serial_get_mctrl,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = qcom_geni_serial_get_char,
.poll_put_char = qcom_geni_serial_poll_put_char,
.poll_init = qcom_geni_serial_port_setup,
#endif
.pm = qcom_geni_serial_pm,
};
static const struct uart_ops qcom_geni_uart_pops = {
.tx_empty = qcom_geni_serial_tx_empty,
.stop_tx = qcom_geni_serial_stop_tx_dma,
.start_tx = qcom_geni_serial_start_tx_dma,
.start_rx = qcom_geni_serial_start_rx_dma,
.stop_rx = qcom_geni_serial_stop_rx_dma,
.set_termios = qcom_geni_serial_set_termios,
.startup = qcom_geni_serial_startup,
.request_port = qcom_geni_serial_request_port,
.config_port = qcom_geni_serial_config_port,
.shutdown = qcom_geni_serial_shutdown,
.type = qcom_geni_serial_get_type,
.set_mctrl = qcom_geni_serial_set_mctrl,
.get_mctrl = qcom_geni_serial_get_mctrl,
.pm = qcom_geni_serial_pm,
};
static int qcom_geni_serial_probe(struct platform_device *pdev)
{
int ret = 0;
int line;
struct qcom_geni_serial_port *port;
struct uart_port *uport;
struct resource *res;
int irq;
struct uart_driver *drv;
const struct qcom_geni_device_data *data;
data = of_device_get_match_data(&pdev->dev);
if (!data)
return -EINVAL;
if (data->console) {
drv = &qcom_geni_console_driver;
line = of_alias_get_id(pdev->dev.of_node, "serial");
} else {
drv = &qcom_geni_uart_driver;
line = of_alias_get_id(pdev->dev.of_node, "serial");
if (line == -ENODEV) /* compat with non-standard aliases */
line = of_alias_get_id(pdev->dev.of_node, "hsuart");
}
port = get_port_from_line(line, data->console);
if (IS_ERR(port)) {
dev_err(&pdev->dev, "Invalid line %d\n", line);
return PTR_ERR(port);
}
uport = &port->uport;
/* Don't allow 2 drivers to access the same port */
if (uport->private_data)
return -ENODEV;
uport->dev = &pdev->dev;
port->dev_data = data;
port->se.dev = &pdev->dev;
port->se.wrapper = dev_get_drvdata(pdev->dev.parent);
port->se.clk = devm_clk_get(&pdev->dev, "se");
if (IS_ERR(port->se.clk)) {
ret = PTR_ERR(port->se.clk);
dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
return ret;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
uport->mapbase = res->start;
port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
if (!data->console) {
port->rx_buf = devm_kzalloc(uport->dev,
DMA_RX_BUF_SIZE, GFP_KERNEL);
if (!port->rx_buf)
return -ENOMEM;
}
ret = geni_icc_get(&port->se, NULL);
if (ret)
return ret;
port->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
port->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
/* Set BW for register access */
ret = geni_icc_set_bw(&port->se);
if (ret)
return ret;
port->name = devm_kasprintf(uport->dev, GFP_KERNEL,
"qcom_geni_serial_%s%d",
uart_console(uport) ? "console" : "uart", uport->line);
if (!port->name)
return -ENOMEM;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
uport->irq = irq;
uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_QCOM_GENI_CONSOLE);
if (!data->console)
port->wakeup_irq = platform_get_irq_optional(pdev, 1);
if (of_property_read_bool(pdev->dev.of_node, "rx-tx-swap"))
port->rx_tx_swap = true;
if (of_property_read_bool(pdev->dev.of_node, "cts-rts-swap"))
port->cts_rts_swap = true;
ret = devm_pm_opp_set_clkname(&pdev->dev, "se");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(&pdev->dev);
if (ret && ret != -ENODEV) {
dev_err(&pdev->dev, "invalid OPP table in device tree\n");
return ret;
}
port->private_data.drv = drv;
uport->private_data = &port->private_data;
platform_set_drvdata(pdev, port);
irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
IRQF_TRIGGER_HIGH, port->name, uport);
if (ret) {
dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
return ret;
}
ret = uart_add_one_port(drv, uport);
if (ret)
return ret;
if (port->wakeup_irq > 0) {
device_init_wakeup(&pdev->dev, true);
ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
port->wakeup_irq);
if (ret) {
device_init_wakeup(&pdev->dev, false);
uart_remove_one_port(drv, uport);
return ret;
}
}
return 0;
}
static int qcom_geni_serial_remove(struct platform_device *pdev)
{
struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
struct uart_driver *drv = port->private_data.drv;
dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
uart_remove_one_port(drv, &port->uport);
return 0;
}
static int qcom_geni_serial_sys_suspend(struct device *dev)
{
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
struct qcom_geni_private_data *private_data = uport->private_data;
/*
* This is done so we can hit the lowest possible state in suspend
* even with no_console_suspend
*/
if (uart_console(uport)) {
geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ACTIVE_ONLY);
geni_icc_set_bw(&port->se);
}
return uart_suspend_port(private_data->drv, uport);
}
static int qcom_geni_serial_sys_resume(struct device *dev)
{
int ret;
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
struct qcom_geni_private_data *private_data = uport->private_data;
ret = uart_resume_port(private_data->drv, uport);
if (uart_console(uport)) {
geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ALWAYS);
geni_icc_set_bw(&port->se);
}
return ret;
}
static int qcom_geni_serial_sys_hib_resume(struct device *dev)
{
int ret = 0;
struct uart_port *uport;
struct qcom_geni_private_data *private_data;
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
uport = &port->uport;
private_data = uport->private_data;
if (uart_console(uport)) {
geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ALWAYS);
geni_icc_set_bw(&port->se);
ret = uart_resume_port(private_data->drv, uport);
/*
* For the hibernation use case, clients of the console UART do
* not call port setup during restore, so do the console port
* setup here.
*/
qcom_geni_serial_port_setup(uport);
} else {
/*
* Peripheral register settings are lost during hibernation.
* Clear the setup flag so that port setup happens again in the
* next session; HS-UART clients close and reopen the port
* across hibernation.
*/
port->setup = false;
}
return ret;
}
static const struct qcom_geni_device_data qcom_geni_console_data = {
.console = true,
.mode = GENI_SE_FIFO,
};
static const struct qcom_geni_device_data qcom_geni_uart_data = {
.console = false,
.mode = GENI_SE_DMA,
};
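/*
 * Hibernation restore/thaw gets a dedicated resume path because the
 * serial engine loses its register state across hibernation.
 */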
static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
.suspend = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
.resume = pm_sleep_ptr(qcom_geni_serial_sys_resume),
.freeze = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
.poweroff = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
.restore = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
.thaw = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
};
static const struct of_device_id qcom_geni_serial_match_table[] = {
{
.compatible = "qcom,geni-debug-uart",
.data = &qcom_geni_console_data,
},
{
.compatible = "qcom,geni-uart",
.data = &qcom_geni_uart_data,
},
{}
};
MODULE_DEVICE_TABLE(of, qcom_geni_serial_match_table);
static struct platform_driver qcom_geni_serial_platform_driver = {
.remove = qcom_geni_serial_remove,
.probe = qcom_geni_serial_probe,
.driver = {
.name = "qcom_geni_serial",
.of_match_table = qcom_geni_serial_match_table,
.pm = &qcom_geni_serial_pm_ops,
},
};
static int __init qcom_geni_serial_init(void)
{
int ret;
ret = console_register(&qcom_geni_console_driver);
if (ret)
return ret;
ret = uart_register_driver(&qcom_geni_uart_driver);
if (ret) {
console_unregister(&qcom_geni_console_driver);
return ret;
}
ret = platform_driver_register(&qcom_geni_serial_platform_driver);
if (ret) {
console_unregister(&qcom_geni_console_driver);
uart_unregister_driver(&qcom_geni_uart_driver);
}
return ret;
}
module_init(qcom_geni_serial_init);
static void __exit qcom_geni_serial_exit(void)
{
platform_driver_unregister(&qcom_geni_serial_platform_driver);
console_unregister(&qcom_geni_console_driver);
uart_unregister_driver(&qcom_geni_uart_driver);
}
module_exit(qcom_geni_serial_exit);
MODULE_DESCRIPTION("Serial driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/tty/serial/qcom_geni_serial.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 ARM Ltd.
* Author: Marc Zyngier <[email protected]>
*
* Adapted for ARM and earlycon:
* Copyright (C) 2014 Linaro Ltd.
* Author: Rob Herring <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/serial_core.h>
#include <asm/semihost.h>
static void smh_write(struct console *con, const char *s, unsigned n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, smh_putc);
}
static int
__init early_smh_setup(struct earlycon_device *device, const char *opt)
{
device->con->write = smh_write;
return 0;
}
EARLYCON_DECLARE(smh, early_smh_setup);
| linux-master | drivers/tty/serial/earlycon-semihost.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Cadence UART driver (found in Xilinx Zynq)
*
* Copyright (c) 2011 - 2014 Xilinx, Inc.
*
* This driver was originally submitted by Xilinx under Zynq branding. That
* still shows in the naming of this file, the Kconfig symbols and some
* symbols in the code.
*/
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#define CDNS_UART_TTY_NAME "ttyPS"
#define CDNS_UART_NAME "xuartps"
#define CDNS_UART_MAJOR 0 /* use dynamic node allocation */
#define CDNS_UART_MINOR 0 /* works best with devtmpfs */
#define CDNS_UART_NR_PORTS 16
#define CDNS_UART_FIFO_SIZE 64 /* FIFO size */
#define CDNS_UART_REGISTER_SPACE 0x1000
#define TX_TIMEOUT 500000
/* Rx Trigger level */
static int rx_trigger_level = 56;
module_param(rx_trigger_level, uint, 0444);
MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes");
/* Rx Timeout */
static int rx_timeout = 10;
module_param(rx_timeout, uint, 0444);
MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
/* Register offsets for the UART. */
#define CDNS_UART_CR 0x00 /* Control Register */
#define CDNS_UART_MR 0x04 /* Mode Register */
#define CDNS_UART_IER 0x08 /* Interrupt Enable */
#define CDNS_UART_IDR 0x0C /* Interrupt Disable */
#define CDNS_UART_IMR 0x10 /* Interrupt Mask */
#define CDNS_UART_ISR 0x14 /* Interrupt Status */
#define CDNS_UART_BAUDGEN 0x18 /* Baud Rate Generator */
#define CDNS_UART_RXTOUT 0x1C /* RX Timeout */
#define CDNS_UART_RXWM 0x20 /* RX FIFO Trigger Level */
#define CDNS_UART_MODEMCR 0x24 /* Modem Control */
#define CDNS_UART_MODEMSR 0x28 /* Modem Status */
#define CDNS_UART_SR 0x2C /* Channel Status */
#define CDNS_UART_FIFO 0x30 /* FIFO */
#define CDNS_UART_BAUDDIV 0x34 /* Baud Rate Divider */
#define CDNS_UART_FLOWDEL 0x38 /* Flow Delay */
#define CDNS_UART_IRRX_PWIDTH 0x3C /* IR Min Received Pulse Width */
#define CDNS_UART_IRTX_PWIDTH 0x40 /* IR Transmitted pulse Width */
#define CDNS_UART_TXWM 0x44 /* TX FIFO Trigger Level */
#define CDNS_UART_RXBS 0x48 /* RX FIFO byte status register */
/* Control Register Bit Definitions */
#define CDNS_UART_CR_STOPBRK 0x00000100 /* Stop TX break */
#define CDNS_UART_CR_STARTBRK 0x00000080 /* Set TX break */
#define CDNS_UART_CR_TX_DIS 0x00000020 /* TX disabled. */
#define CDNS_UART_CR_TX_EN 0x00000010 /* TX enabled */
#define CDNS_UART_CR_RX_DIS 0x00000008 /* RX disabled. */
#define CDNS_UART_CR_RX_EN 0x00000004 /* RX enabled */
#define CDNS_UART_CR_TXRST 0x00000002 /* TX logic reset */
#define CDNS_UART_CR_RXRST 0x00000001 /* RX logic reset */
#define CDNS_UART_CR_RST_TO 0x00000040 /* Restart Timeout Counter */
#define CDNS_UART_RXBS_PARITY 0x00000001 /* Parity error status */
#define CDNS_UART_RXBS_FRAMING 0x00000002 /* Framing error status */
#define CDNS_UART_RXBS_BRK		0x00000004 /* Break detected status */
/*
* Mode Register:
* The mode register (MR) defines the mode of transfer as well as the data
* format. If this register is modified during transmission or reception,
* data validity cannot be guaranteed.
*/
#define CDNS_UART_MR_CLKSEL		0x00000001 /* Pre-scaler selection */
#define CDNS_UART_MR_CHMODE_L_LOOP 0x00000200 /* Local loop back mode */
#define CDNS_UART_MR_CHMODE_NORM 0x00000000 /* Normal mode */
#define CDNS_UART_MR_CHMODE_MASK 0x00000300 /* Mask for mode bits */
#define CDNS_UART_MR_STOPMODE_2_BIT 0x00000080 /* 2 stop bits */
#define CDNS_UART_MR_STOPMODE_1_BIT 0x00000000 /* 1 stop bit */
#define CDNS_UART_MR_PARITY_NONE 0x00000020 /* No parity mode */
#define CDNS_UART_MR_PARITY_MARK 0x00000018 /* Mark parity mode */
#define CDNS_UART_MR_PARITY_SPACE 0x00000010 /* Space parity mode */
#define CDNS_UART_MR_PARITY_ODD 0x00000008 /* Odd parity mode */
#define CDNS_UART_MR_PARITY_EVEN 0x00000000 /* Even parity mode */
#define CDNS_UART_MR_CHARLEN_6_BIT 0x00000006 /* 6 bits data */
#define CDNS_UART_MR_CHARLEN_7_BIT 0x00000004 /* 7 bits data */
#define CDNS_UART_MR_CHARLEN_8_BIT 0x00000000 /* 8 bits data */
/*
* Interrupt Registers:
* Interrupt control logic uses the interrupt enable register (IER) and the
* interrupt disable register (IDR) to set the value of the bits in the
* interrupt mask register (IMR). The IMR determines whether to pass an
* interrupt to the interrupt status register (ISR).
 * Writing a 1 to IER enables an interrupt, writing a 1 to IDR disables an
 * interrupt. IMR is read-only, and IER and IDR are write-only; reading
 * either IER or IDR returns 0x00. ISR bits are cleared by writing them
 * back as ones (see the interrupt handler below).
* All four registers have the same bit definitions.
*/
#define CDNS_UART_IXR_TOUT 0x00000100 /* RX Timeout error interrupt */
#define CDNS_UART_IXR_PARITY 0x00000080 /* Parity error interrupt */
#define CDNS_UART_IXR_FRAMING 0x00000040 /* Framing error interrupt */
#define CDNS_UART_IXR_OVERRUN 0x00000020 /* Overrun error interrupt */
#define CDNS_UART_IXR_TXFULL 0x00000010 /* TX FIFO Full interrupt */
#define CDNS_UART_IXR_TXEMPTY 0x00000008 /* TX FIFO empty interrupt */
#define CDNS_UART_ISR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt */
#define CDNS_UART_IXR_RXTRIG 0x00000001 /* RX FIFO trigger interrupt */
#define CDNS_UART_IXR_RXFULL 0x00000004 /* RX FIFO full interrupt. */
#define CDNS_UART_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
#define CDNS_UART_IXR_RXMASK 0x000021e7 /* Valid RX bit mask */
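/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * following the IER/IDR/IMR semantics described above, a source is enabled
 * by writing its bit to IER and disabled by writing it to IDR; IMR can
 * only be read back to inspect the effective mask.
 */
static inline void cdns_uart_irq_mask_example(void __iomem *membase,
					      u32 enable, u32 disable)
{
	writel(enable, membase + CDNS_UART_IER);	/* sets bits in IMR */
	writel(disable, membase + CDNS_UART_IDR);	/* clears bits in IMR */
	/* readl(membase + CDNS_UART_IMR) now reports the resulting mask */
}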
/*
* Do not enable parity error interrupt for the following
* reason: When parity error interrupt is enabled, each Rx
* parity error always results in 2 events. The first one
* being parity error interrupt and the second one with a
* proper Rx interrupt with the incoming data. Disabling
* parity error interrupt ensures better handling of parity
* error events. With this change, for a parity error case, we
* get a Rx interrupt with parity error set in ISR register
* and we still handle parity errors in the desired way.
*/
#define CDNS_UART_RX_IRQS (CDNS_UART_IXR_FRAMING | \
CDNS_UART_IXR_OVERRUN | \
CDNS_UART_IXR_RXTRIG | \
CDNS_UART_IXR_TOUT)
/* Goes in read_status_mask for break detection as the HW doesn't do it */
#define CDNS_UART_IXR_BRK 0x00002000
#define CDNS_UART_RXBS_SUPPORT BIT(1)
/*
* Modem Control register:
* The read/write Modem Control register controls the interface with the modem
* or data set, or a peripheral device emulating a modem.
*/
#define CDNS_UART_MODEMCR_FCM 0x00000020 /* Automatic flow control mode */
#define CDNS_UART_MODEMCR_RTS 0x00000002 /* Request to send output control */
#define CDNS_UART_MODEMCR_DTR 0x00000001 /* Data Terminal Ready */
/*
* Modem Status register:
* The read/write Modem Status register reports the interface with the modem
* or data set, or a peripheral device emulating a modem.
*/
#define CDNS_UART_MODEMSR_DCD BIT(7) /* Data Carrier Detect */
#define CDNS_UART_MODEMSR_RI	BIT(6)	/* Ring Indicator */
#define CDNS_UART_MODEMSR_DSR BIT(5) /* Data Set Ready */
#define CDNS_UART_MODEMSR_CTS BIT(4) /* Clear To Send */
/*
* Channel Status Register:
* The channel status register (CSR) is provided to enable the control logic
* to monitor the status of bits in the channel interrupt status register,
* even if these are masked out by the interrupt mask register.
*/
#define CDNS_UART_SR_RXEMPTY 0x00000002 /* RX FIFO empty */
#define CDNS_UART_SR_TXEMPTY 0x00000008 /* TX FIFO empty */
#define CDNS_UART_SR_TXFULL 0x00000010 /* TX FIFO full */
#define CDNS_UART_SR_RXTRIG 0x00000001 /* Rx Trigger */
#define CDNS_UART_SR_TACTIVE 0x00000800 /* TX state machine active */
/* baud dividers min/max values */
#define CDNS_UART_BDIV_MIN 4
#define CDNS_UART_BDIV_MAX 255
#define CDNS_UART_CD_MAX 65535
#define UART_AUTOSUSPEND_TIMEOUT 3000
/**
* struct cdns_uart - device data
* @port: Pointer to the UART port
* @uartclk: Reference clock
* @pclk: APB clock
* @cdns_uart_driver: Pointer to UART driver
* @baud: Current baud rate
* @clk_rate_change_nb: Notifier block for clock changes
* @quirks: Flags for RXBS support.
* @cts_override: Modem control state override
*/
struct cdns_uart {
struct uart_port *port;
struct clk *uartclk;
struct clk *pclk;
struct uart_driver *cdns_uart_driver;
unsigned int baud;
struct notifier_block clk_rate_change_nb;
u32 quirks;
bool cts_override;
};
struct cdns_platform_data {
u32 quirks;
};
#define to_cdns_uart(_nb) container_of(_nb, struct cdns_uart, \
clk_rate_change_nb)
/**
* cdns_uart_handle_rx - Handle the received bytes along with Rx errors.
* @dev_id: Id of the UART port
* @isrstatus: The interrupt status register value as read
* Return: None
*/
static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
{
struct uart_port *port = (struct uart_port *)dev_id;
struct cdns_uart *cdns_uart = port->private_data;
unsigned int data;
unsigned int rxbs_status = 0;
unsigned int status_mask;
unsigned int framerrprocessed = 0;
char status = TTY_NORMAL;
bool is_rxbs_support;
is_rxbs_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT;
while ((readl(port->membase + CDNS_UART_SR) &
CDNS_UART_SR_RXEMPTY) != CDNS_UART_SR_RXEMPTY) {
if (is_rxbs_support)
rxbs_status = readl(port->membase + CDNS_UART_RXBS);
data = readl(port->membase + CDNS_UART_FIFO);
port->icount.rx++;
/*
* There is no hardware break detection in Zynq, so we interpret
* framing error with all-zeros data as a break sequence.
* Most of the time, there's another non-zero byte at the
* end of the sequence.
*/
if (!is_rxbs_support && (isrstatus & CDNS_UART_IXR_FRAMING)) {
if (!data) {
port->read_status_mask |= CDNS_UART_IXR_BRK;
framerrprocessed = 1;
continue;
}
}
if (is_rxbs_support && (rxbs_status & CDNS_UART_RXBS_BRK)) {
port->icount.brk++;
status = TTY_BREAK;
if (uart_handle_break(port))
continue;
}
isrstatus &= port->read_status_mask;
isrstatus &= ~port->ignore_status_mask;
status_mask = port->read_status_mask;
status_mask &= ~port->ignore_status_mask;
if (data &&
(port->read_status_mask & CDNS_UART_IXR_BRK)) {
port->read_status_mask &= ~CDNS_UART_IXR_BRK;
port->icount.brk++;
if (uart_handle_break(port))
continue;
}
if (uart_handle_sysrq_char(port, data))
continue;
if (is_rxbs_support) {
if ((rxbs_status & CDNS_UART_RXBS_PARITY)
&& (status_mask & CDNS_UART_IXR_PARITY)) {
port->icount.parity++;
status = TTY_PARITY;
}
			if ((rxbs_status & CDNS_UART_RXBS_FRAMING)
			    && (status_mask & CDNS_UART_IXR_FRAMING)) {
port->icount.frame++;
status = TTY_FRAME;
}
} else {
if (isrstatus & CDNS_UART_IXR_PARITY) {
port->icount.parity++;
status = TTY_PARITY;
}
if ((isrstatus & CDNS_UART_IXR_FRAMING) &&
!framerrprocessed) {
port->icount.frame++;
status = TTY_FRAME;
}
}
if (isrstatus & CDNS_UART_IXR_OVERRUN) {
port->icount.overrun++;
tty_insert_flip_char(&port->state->port, 0,
TTY_OVERRUN);
}
tty_insert_flip_char(&port->state->port, data, status);
isrstatus = 0;
}
tty_flip_buffer_push(&port->state->port);
}
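/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * break heuristic used above, distilled. On IP versions without the RXBS
 * register, a framing error together with an all-zeros data byte is taken
 * as the start of a break sequence.
 */
static inline bool cdns_uart_rx_looks_like_break(unsigned int isrstatus,
						 unsigned int data)
{
	return (isrstatus & CDNS_UART_IXR_FRAMING) && !data;
}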
/**
* cdns_uart_handle_tx - Handle the bytes to be Txed.
* @dev_id: Id of the UART port
* Return: None
*/
static void cdns_uart_handle_tx(void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
struct circ_buf *xmit = &port->state->xmit;
unsigned int numbytes;
if (uart_circ_empty(xmit)) {
writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR);
return;
}
numbytes = port->fifosize;
while (numbytes && !uart_circ_empty(xmit) &&
!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) {
writel(xmit->buf[xmit->tail], port->membase + CDNS_UART_FIFO);
uart_xmit_advance(port, 1);
numbytes--;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
/**
* cdns_uart_isr - Interrupt handler
* @irq: Irq number
* @dev_id: Id of the port
*
 * Return: IRQ_HANDLED
*/
static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
unsigned int isrstatus;
spin_lock(&port->lock);
/* Read the interrupt status register to determine which
* interrupt(s) is/are active and clear them.
*/
isrstatus = readl(port->membase + CDNS_UART_ISR);
writel(isrstatus, port->membase + CDNS_UART_ISR);
if (isrstatus & CDNS_UART_IXR_TXEMPTY) {
cdns_uart_handle_tx(dev_id);
isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
}
isrstatus &= port->read_status_mask;
isrstatus &= ~port->ignore_status_mask;
/*
	 * Skip RX processing if RX is disabled: RXEMPTY will never be set
	 * because read bytes are not removed from the FIFO.
*/
if (isrstatus & CDNS_UART_IXR_RXMASK &&
!(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
cdns_uart_handle_rx(dev_id, isrstatus);
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
/**
* cdns_uart_calc_baud_divs - Calculate baud rate divisors
* @clk: UART module input clock
* @baud: Desired baud rate
* @rbdiv: BDIV value (return value)
* @rcd: CD value (return value)
 * @div8: Value for the clk_sel bit in the MR register (return value)
 * Return: the requested baud when it can be achieved within tolerance,
 * otherwise the closest achievable baud, or zero if no valid divisors
 * are found.
*
 * The baud rate is obtained as
 *	baud rate = clk / (CD * (BDIV + 1))
 * where
 *	input_clk = (UART user defined clock or APB clock),
 *		depending on UCLKEN in the MR reg
 *	clk = input_clk or input_clk / 8,
 *		depending on CLKS in the MR reg
 *	CD and BDIV come from the baud rate generator register and the
 *	baud rate divider register, respectively.
*/
static unsigned int cdns_uart_calc_baud_divs(unsigned int clk,
unsigned int baud, u32 *rbdiv, u32 *rcd, int *div8)
{
u32 cd, bdiv;
unsigned int calc_baud;
unsigned int bestbaud = 0;
unsigned int bauderror;
unsigned int besterror = ~0;
if (baud < clk / ((CDNS_UART_BDIV_MAX + 1) * CDNS_UART_CD_MAX)) {
*div8 = 1;
clk /= 8;
} else {
*div8 = 0;
}
for (bdiv = CDNS_UART_BDIV_MIN; bdiv <= CDNS_UART_BDIV_MAX; bdiv++) {
cd = DIV_ROUND_CLOSEST(clk, baud * (bdiv + 1));
if (cd < 1 || cd > CDNS_UART_CD_MAX)
continue;
calc_baud = clk / (cd * (bdiv + 1));
if (baud > calc_baud)
bauderror = baud - calc_baud;
else
bauderror = calc_baud - baud;
if (besterror > bauderror) {
*rbdiv = bdiv;
*rcd = cd;
bestbaud = calc_baud;
besterror = bauderror;
}
}
/* use the values when percent error is acceptable */
if (((besterror * 100) / baud) < 3)
bestbaud = baud;
return bestbaud;
}
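/*
 * Usage sketch (illustrative, not part of the driver): with a 100 MHz
 * reference clock and a requested rate of 115200, the search above settles
 * on bdiv = 6, cd = 124, since 100000000 / (124 * (6 + 1)) = 115207 bps is
 * within the 3% error bound; the requested 115200 is therefore returned
 * and div8 stays 0.
 */
static inline unsigned int cdns_uart_calc_baud_divs_example(void)
{
	u32 cd, bdiv;
	int div8;

	return cdns_uart_calc_baud_divs(100000000, 115200, &bdiv, &cd, &div8);
}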
/**
* cdns_uart_set_baud_rate - Calculate and set the baud rate
* @port: Handle to the uart port structure
* @baud: Baud rate to set
 * Return: the requested baud when it can be achieved within tolerance,
 * otherwise the closest achievable baud, or zero if no valid divisors
 * are found.
*/
static unsigned int cdns_uart_set_baud_rate(struct uart_port *port,
unsigned int baud)
{
unsigned int calc_baud;
u32 cd = 0, bdiv = 0;
u32 mreg;
int div8;
struct cdns_uart *cdns_uart = port->private_data;
calc_baud = cdns_uart_calc_baud_divs(port->uartclk, baud, &bdiv, &cd,
&div8);
/* Write new divisors to hardware */
mreg = readl(port->membase + CDNS_UART_MR);
if (div8)
mreg |= CDNS_UART_MR_CLKSEL;
else
mreg &= ~CDNS_UART_MR_CLKSEL;
writel(mreg, port->membase + CDNS_UART_MR);
writel(cd, port->membase + CDNS_UART_BAUDGEN);
writel(bdiv, port->membase + CDNS_UART_BAUDDIV);
cdns_uart->baud = baud;
return calc_baud;
}
#ifdef CONFIG_COMMON_CLK
/**
* cdns_uart_clk_notifier_cb - Clock notifier callback
* @nb: Notifier block
* @event: Notify event
* @data: Notifier data
* Return: NOTIFY_OK or NOTIFY_DONE on success, NOTIFY_BAD on error.
*/
static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
u32 ctrl_reg;
struct uart_port *port;
int locked = 0;
struct clk_notifier_data *ndata = data;
struct cdns_uart *cdns_uart = to_cdns_uart(nb);
unsigned long flags;
port = cdns_uart->port;
if (port->suspended)
return NOTIFY_OK;
switch (event) {
case PRE_RATE_CHANGE:
{
u32 bdiv, cd;
int div8;
/*
* Find out if current baud-rate can be achieved with new clock
* frequency.
*/
if (!cdns_uart_calc_baud_divs(ndata->new_rate, cdns_uart->baud,
&bdiv, &cd, &div8)) {
dev_warn(port->dev, "clock rate change rejected\n");
return NOTIFY_BAD;
}
spin_lock_irqsave(&cdns_uart->port->lock, flags);
/* Disable the TX and RX to set baud rate */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
return NOTIFY_OK;
}
case POST_RATE_CHANGE:
/*
* Set clk dividers to generate correct baud with new clock
* frequency.
*/
spin_lock_irqsave(&cdns_uart->port->lock, flags);
locked = 1;
port->uartclk = ndata->new_rate;
cdns_uart->baud = cdns_uart_set_baud_rate(cdns_uart->port,
cdns_uart->baud);
fallthrough;
case ABORT_RATE_CHANGE:
if (!locked)
spin_lock_irqsave(&cdns_uart->port->lock, flags);
/* Set TX/RX Reset */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
while (readl(port->membase + CDNS_UART_CR) &
(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
cpu_relax();
/*
* Clear the RX disable and TX disable bits and then set the TX
* enable bit and RX enable bit to enable the transmitter and
* receiver.
*/
writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
#endif
/**
* cdns_uart_start_tx - Start transmitting bytes
* @port: Handle to the uart port structure
*/
static void cdns_uart_start_tx(struct uart_port *port)
{
unsigned int status;
if (uart_tx_stopped(port))
return;
/*
* Set the TX enable bit and clear the TX disable bit to enable the
* transmitter.
*/
status = readl(port->membase + CDNS_UART_CR);
status &= ~CDNS_UART_CR_TX_DIS;
status |= CDNS_UART_CR_TX_EN;
writel(status, port->membase + CDNS_UART_CR);
if (uart_circ_empty(&port->state->xmit))
return;
writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_ISR);
cdns_uart_handle_tx(port);
/* Enable the TX Empty interrupt */
writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IER);
}
/**
* cdns_uart_stop_tx - Stop TX
* @port: Handle to the uart port structure
*/
static void cdns_uart_stop_tx(struct uart_port *port)
{
unsigned int regval;
regval = readl(port->membase + CDNS_UART_CR);
regval |= CDNS_UART_CR_TX_DIS;
/* Disable the transmitter */
writel(regval, port->membase + CDNS_UART_CR);
}
/**
* cdns_uart_stop_rx - Stop RX
* @port: Handle to the uart port structure
*/
static void cdns_uart_stop_rx(struct uart_port *port)
{
unsigned int regval;
/* Disable RX IRQs */
writel(CDNS_UART_RX_IRQS, port->membase + CDNS_UART_IDR);
/* Disable the receiver */
regval = readl(port->membase + CDNS_UART_CR);
regval |= CDNS_UART_CR_RX_DIS;
writel(regval, port->membase + CDNS_UART_CR);
}
/**
* cdns_uart_tx_empty - Check whether TX is empty
* @port: Handle to the uart port structure
*
* Return: TIOCSER_TEMT on success, 0 otherwise
*/
static unsigned int cdns_uart_tx_empty(struct uart_port *port)
{
unsigned int status;
status = readl(port->membase + CDNS_UART_SR) &
(CDNS_UART_SR_TXEMPTY | CDNS_UART_SR_TACTIVE);
return (status == CDNS_UART_SR_TXEMPTY) ? TIOCSER_TEMT : 0;
}
/**
* cdns_uart_break_ctl - Based on the input ctl we have to start or stop
* transmitting char breaks
* @port: Handle to the uart port structure
* @ctl: Value based on which start or stop decision is taken
*/
static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
{
unsigned int status;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
status = readl(port->membase + CDNS_UART_CR);
	if (ctl == -1) {
		writel(CDNS_UART_CR_STARTBRK | status,
		       port->membase + CDNS_UART_CR);
	} else {
		if ((status & CDNS_UART_CR_STOPBRK) == 0)
			writel(CDNS_UART_CR_STOPBRK | status,
			       port->membase + CDNS_UART_CR);
	}
spin_unlock_irqrestore(&port->lock, flags);
}
/**
* cdns_uart_set_termios - termios operations, handling data length, parity,
* stop bits, flow control, baud rate
* @port: Handle to the uart port structure
* @termios: Handle to the input termios structure
* @old: Values of the previously saved termios structure
*/
static void cdns_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
u32 cval = 0;
unsigned int baud, minbaud, maxbaud;
unsigned long flags;
unsigned int ctrl_reg, mode_reg;
spin_lock_irqsave(&port->lock, flags);
/* Disable the TX and RX to set baud rate */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
/*
	 * Min baud rate = 6 bps and max baud rate is 10 Mbps for a 100 MHz
	 * clock. Min and max baud are calculated here based on port->uartclk;
	 * this way we get a valid baud and can safely call set_baud().
*/
minbaud = port->uartclk /
((CDNS_UART_BDIV_MAX + 1) * CDNS_UART_CD_MAX * 8);
maxbaud = port->uartclk / (CDNS_UART_BDIV_MIN + 1);
baud = uart_get_baud_rate(port, termios, old, minbaud, maxbaud);
baud = cdns_uart_set_baud_rate(port, baud);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
/* Update the per-port timeout. */
uart_update_timeout(port, termios->c_cflag, baud);
/* Set TX/RX Reset */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
while (readl(port->membase + CDNS_UART_CR) &
(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
cpu_relax();
/*
* Clear the RX disable and TX disable bits and then set the TX enable
* bit and RX enable bit to enable the transmitter and receiver.
*/
ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
port->ignore_status_mask = 0;
if (termios->c_iflag & INPCK)
port->read_status_mask |= CDNS_UART_IXR_PARITY |
CDNS_UART_IXR_FRAMING;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= CDNS_UART_IXR_PARITY |
CDNS_UART_IXR_FRAMING | CDNS_UART_IXR_OVERRUN;
/* ignore all characters if CREAD is not set */
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= CDNS_UART_IXR_RXTRIG |
CDNS_UART_IXR_TOUT | CDNS_UART_IXR_PARITY |
CDNS_UART_IXR_FRAMING | CDNS_UART_IXR_OVERRUN;
mode_reg = readl(port->membase + CDNS_UART_MR);
/* Handling Data Size */
switch (termios->c_cflag & CSIZE) {
case CS6:
cval |= CDNS_UART_MR_CHARLEN_6_BIT;
break;
case CS7:
cval |= CDNS_UART_MR_CHARLEN_7_BIT;
break;
default:
case CS8:
cval |= CDNS_UART_MR_CHARLEN_8_BIT;
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= CS8;
break;
}
/* Handling Parity and Stop Bits length */
if (termios->c_cflag & CSTOPB)
cval |= CDNS_UART_MR_STOPMODE_2_BIT; /* 2 STOP bits */
else
cval |= CDNS_UART_MR_STOPMODE_1_BIT; /* 1 STOP bit */
if (termios->c_cflag & PARENB) {
/* Mark or Space parity */
if (termios->c_cflag & CMSPAR) {
if (termios->c_cflag & PARODD)
cval |= CDNS_UART_MR_PARITY_MARK;
else
cval |= CDNS_UART_MR_PARITY_SPACE;
} else {
if (termios->c_cflag & PARODD)
cval |= CDNS_UART_MR_PARITY_ODD;
else
cval |= CDNS_UART_MR_PARITY_EVEN;
}
} else {
cval |= CDNS_UART_MR_PARITY_NONE;
}
cval |= mode_reg & 1;
writel(cval, port->membase + CDNS_UART_MR);
cval = readl(port->membase + CDNS_UART_MODEMCR);
if (termios->c_cflag & CRTSCTS)
cval |= CDNS_UART_MODEMCR_FCM;
else
cval &= ~CDNS_UART_MODEMCR_FCM;
writel(cval, port->membase + CDNS_UART_MODEMCR);
spin_unlock_irqrestore(&port->lock, flags);
}
/**
* cdns_uart_startup - Called when an application opens a cdns_uart port
* @port: Handle to the uart port structure
*
* Return: 0 on success, negative errno otherwise
*/
static int cdns_uart_startup(struct uart_port *port)
{
struct cdns_uart *cdns_uart = port->private_data;
bool is_brk_support;
int ret;
unsigned long flags;
unsigned int status = 0;
is_brk_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT;
spin_lock_irqsave(&port->lock, flags);
/* Disable the TX and RX */
writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
port->membase + CDNS_UART_CR);
/* Set the Control Register with TX/RX Enable, TX/RX Reset,
* no break chars.
*/
writel(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST,
port->membase + CDNS_UART_CR);
while (readl(port->membase + CDNS_UART_CR) &
(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
cpu_relax();
/*
* Clear the RX disable bit and then set the RX enable bit to enable
* the receiver.
*/
status = readl(port->membase + CDNS_UART_CR);
status &= ~CDNS_UART_CR_RX_DIS;
status |= CDNS_UART_CR_RX_EN;
writel(status, port->membase + CDNS_UART_CR);
/* Set the Mode Register with normal mode,8 data bits,1 stop bit,
* no parity.
*/
writel(CDNS_UART_MR_CHMODE_NORM | CDNS_UART_MR_STOPMODE_1_BIT
| CDNS_UART_MR_PARITY_NONE | CDNS_UART_MR_CHARLEN_8_BIT,
port->membase + CDNS_UART_MR);
/*
* Set the RX FIFO Trigger level to use most of the FIFO, but it
* can be tuned with a module parameter
*/
writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
/*
	 * The receive timeout is enabled by default; it can be tuned
	 * with a module parameter.
*/
writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
/* Clear out any pending interrupts before enabling them */
writel(readl(port->membase + CDNS_UART_ISR),
port->membase + CDNS_UART_ISR);
spin_unlock_irqrestore(&port->lock, flags);
ret = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME, port);
if (ret) {
dev_err(port->dev, "request_irq '%d' failed with %d\n",
port->irq, ret);
return ret;
}
/* Set the Interrupt Registers with desired interrupts */
if (is_brk_support)
writel(CDNS_UART_RX_IRQS | CDNS_UART_IXR_BRK,
port->membase + CDNS_UART_IER);
else
writel(CDNS_UART_RX_IRQS, port->membase + CDNS_UART_IER);
return 0;
}
/**
* cdns_uart_shutdown - Called when an application closes a cdns_uart port
* @port: Handle to the uart port structure
*/
static void cdns_uart_shutdown(struct uart_port *port)
{
int status;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* Disable interrupts */
status = readl(port->membase + CDNS_UART_IMR);
writel(status, port->membase + CDNS_UART_IDR);
writel(0xffffffff, port->membase + CDNS_UART_ISR);
/* Disable the TX and RX */
writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
port->membase + CDNS_UART_CR);
spin_unlock_irqrestore(&port->lock, flags);
free_irq(port->irq, port);
}
/**
* cdns_uart_type - Set UART type to cdns_uart port
* @port: Handle to the uart port structure
*
* Return: string on success, NULL otherwise
*/
static const char *cdns_uart_type(struct uart_port *port)
{
return port->type == PORT_XUARTPS ? CDNS_UART_NAME : NULL;
}
/**
* cdns_uart_verify_port - Verify the port params
* @port: Handle to the uart port structure
* @ser: Handle to the structure whose members are compared
*
* Return: 0 on success, negative errno otherwise.
*/
static int cdns_uart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
if (ser->type != PORT_UNKNOWN && ser->type != PORT_XUARTPS)
return -EINVAL;
if (port->irq != ser->irq)
return -EINVAL;
if (ser->io_type != UPIO_MEM)
return -EINVAL;
if (port->iobase != ser->port)
return -EINVAL;
if (ser->hub6 != 0)
return -EINVAL;
return 0;
}
/**
* cdns_uart_request_port - Claim the memory region attached to cdns_uart port,
* called when the driver adds a cdns_uart port via
* uart_add_one_port()
* @port: Handle to the uart port structure
*
* Return: 0 on success, negative errno otherwise.
*/
static int cdns_uart_request_port(struct uart_port *port)
{
if (!request_mem_region(port->mapbase, CDNS_UART_REGISTER_SPACE,
CDNS_UART_NAME)) {
return -ENOMEM;
}
port->membase = ioremap(port->mapbase, CDNS_UART_REGISTER_SPACE);
if (!port->membase) {
dev_err(port->dev, "Unable to map registers\n");
release_mem_region(port->mapbase, CDNS_UART_REGISTER_SPACE);
return -ENOMEM;
}
return 0;
}
/**
* cdns_uart_release_port - Release UART port
* @port: Handle to the uart port structure
*
* Release the memory region attached to a cdns_uart port. Called when the
* driver removes a cdns_uart port via uart_remove_one_port().
*/
static void cdns_uart_release_port(struct uart_port *port)
{
release_mem_region(port->mapbase, CDNS_UART_REGISTER_SPACE);
iounmap(port->membase);
port->membase = NULL;
}
/**
* cdns_uart_config_port - Configure UART port
* @port: Handle to the uart port structure
* @flags: If any
*/
static void cdns_uart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE && cdns_uart_request_port(port) == 0)
port->type = PORT_XUARTPS;
}
/**
* cdns_uart_get_mctrl - Get the modem control state
* @port: Handle to the uart port structure
*
* Return: the modem control state
*/
static unsigned int cdns_uart_get_mctrl(struct uart_port *port)
{
u32 val;
unsigned int mctrl = 0;
struct cdns_uart *cdns_uart_data = port->private_data;
if (cdns_uart_data->cts_override)
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
val = readl(port->membase + CDNS_UART_MODEMSR);
if (val & CDNS_UART_MODEMSR_CTS)
mctrl |= TIOCM_CTS;
if (val & CDNS_UART_MODEMSR_DSR)
mctrl |= TIOCM_DSR;
if (val & CDNS_UART_MODEMSR_RI)
mctrl |= TIOCM_RNG;
if (val & CDNS_UART_MODEMSR_DCD)
mctrl |= TIOCM_CAR;
return mctrl;
}
static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
u32 val;
u32 mode_reg;
struct cdns_uart *cdns_uart_data = port->private_data;
if (cdns_uart_data->cts_override)
return;
val = readl(port->membase + CDNS_UART_MODEMCR);
mode_reg = readl(port->membase + CDNS_UART_MR);
val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR);
mode_reg &= ~CDNS_UART_MR_CHMODE_MASK;
if (mctrl & TIOCM_RTS)
val |= CDNS_UART_MODEMCR_RTS;
if (mctrl & TIOCM_DTR)
val |= CDNS_UART_MODEMCR_DTR;
if (mctrl & TIOCM_LOOP)
mode_reg |= CDNS_UART_MR_CHMODE_L_LOOP;
else
mode_reg |= CDNS_UART_MR_CHMODE_NORM;
writel(val, port->membase + CDNS_UART_MODEMCR);
writel(mode_reg, port->membase + CDNS_UART_MR);
}
#ifdef CONFIG_CONSOLE_POLL
static int cdns_uart_poll_get_char(struct uart_port *port)
{
int c;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* Check if FIFO is empty */
if (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)
c = NO_POLL_CHAR;
else /* Read a character */
c = (unsigned char) readl(port->membase + CDNS_UART_FIFO);
spin_unlock_irqrestore(&port->lock, flags);
return c;
}
static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* Wait until FIFO is empty */
while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
cpu_relax();
/* Write a character */
writel(c, port->membase + CDNS_UART_FIFO);
	/* Wait until the character has been sent */
while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
cpu_relax();
spin_unlock_irqrestore(&port->lock, flags);
}
#endif
static void cdns_uart_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
switch (state) {
case UART_PM_STATE_OFF:
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
break;
default:
pm_runtime_get_sync(port->dev);
break;
}
}
static const struct uart_ops cdns_uart_ops = {
.set_mctrl = cdns_uart_set_mctrl,
.get_mctrl = cdns_uart_get_mctrl,
.start_tx = cdns_uart_start_tx,
.stop_tx = cdns_uart_stop_tx,
.stop_rx = cdns_uart_stop_rx,
.tx_empty = cdns_uart_tx_empty,
.break_ctl = cdns_uart_break_ctl,
.set_termios = cdns_uart_set_termios,
.startup = cdns_uart_startup,
.shutdown = cdns_uart_shutdown,
.pm = cdns_uart_pm,
.type = cdns_uart_type,
.verify_port = cdns_uart_verify_port,
.request_port = cdns_uart_request_port,
.release_port = cdns_uart_release_port,
.config_port = cdns_uart_config_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = cdns_uart_poll_get_char,
.poll_put_char = cdns_uart_poll_put_char,
#endif
};
static struct uart_driver cdns_uart_uart_driver;
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
/**
* cdns_uart_console_putchar - write the character to the FIFO buffer
* @port: Handle to the uart port structure
* @ch: Character to be written
*/
static void cdns_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
unsigned int ctrl_reg;
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(1000);
while (1) {
ctrl_reg = readl(port->membase + CDNS_UART_CR);
if (!(ctrl_reg & CDNS_UART_CR_TX_DIS))
break;
if (time_after(jiffies, timeout)) {
dev_warn(port->dev,
"timeout waiting for Enable\n");
return;
}
cpu_relax();
}
timeout = jiffies + msecs_to_jiffies(1000);
while (1) {
ctrl_reg = readl(port->membase + CDNS_UART_SR);
if (!(ctrl_reg & CDNS_UART_SR_TXFULL))
break;
if (time_after(jiffies, timeout)) {
dev_warn(port->dev,
"timeout waiting for TX fifo\n");
return;
}
cpu_relax();
}
writel(ch, port->membase + CDNS_UART_FIFO);
}
static void cdns_early_write(struct console *con, const char *s,
unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, cdns_uart_console_putchar);
}
static int __init cdns_early_console_setup(struct earlycon_device *device,
const char *opt)
{
struct uart_port *port = &device->port;
if (!port->membase)
return -ENODEV;
/* initialise control register */
writel(CDNS_UART_CR_TX_EN|CDNS_UART_CR_TXRST|CDNS_UART_CR_RXRST,
port->membase + CDNS_UART_CR);
/* only set baud if specified on command line - otherwise
* assume it has been initialized by a boot loader.
*/
if (port->uartclk && device->baud) {
u32 cd = 0, bdiv = 0;
u32 mr;
int div8;
cdns_uart_calc_baud_divs(port->uartclk, device->baud,
&bdiv, &cd, &div8);
mr = CDNS_UART_MR_PARITY_NONE;
if (div8)
mr |= CDNS_UART_MR_CLKSEL;
writel(mr, port->membase + CDNS_UART_MR);
writel(cd, port->membase + CDNS_UART_BAUDGEN);
writel(bdiv, port->membase + CDNS_UART_BAUDDIV);
}
device->con->write = cdns_early_write;
return 0;
}
OF_EARLYCON_DECLARE(cdns, "xlnx,xuartps", cdns_early_console_setup);
OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p8", cdns_early_console_setup);
OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p12", cdns_early_console_setup);
OF_EARLYCON_DECLARE(cdns, "xlnx,zynqmp-uart", cdns_early_console_setup);
/* Static pointer to console port */
static struct uart_port *console_port;
/**
* cdns_uart_console_write - perform write operation
* @co: Console handle
* @s: Pointer to character array
* @count: No of characters
*/
static void cdns_uart_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port = console_port;
unsigned long flags;
unsigned int imr, ctrl;
int locked = 1;
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
/* save and disable interrupt */
imr = readl(port->membase + CDNS_UART_IMR);
writel(imr, port->membase + CDNS_UART_IDR);
/*
* Make sure that the tx part is enabled. Set the TX enable bit and
* clear the TX disable bit to enable the transmitter.
*/
ctrl = readl(port->membase + CDNS_UART_CR);
ctrl &= ~CDNS_UART_CR_TX_DIS;
ctrl |= CDNS_UART_CR_TX_EN;
writel(ctrl, port->membase + CDNS_UART_CR);
uart_console_write(port, s, count, cdns_uart_console_putchar);
while (cdns_uart_tx_empty(port) != TIOCSER_TEMT)
cpu_relax();
/* restore interrupt state */
writel(imr, port->membase + CDNS_UART_IER);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
}
/**
* cdns_uart_console_setup - Initialize the uart to default config
* @co: Console handle
* @options: Initial settings of uart
*
* Return: 0 on success, negative errno otherwise.
*/
static int cdns_uart_console_setup(struct console *co, char *options)
{
struct uart_port *port = console_port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
unsigned long time_out;
if (!port->membase) {
pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n",
co->index);
return -ENODEV;
}
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
/* Wait for tx_empty before setting up the console */
time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);
while (time_before(jiffies, time_out) &&
cdns_uart_tx_empty(port) != TIOCSER_TEMT)
cpu_relax();
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct console cdns_uart_console = {
.name = CDNS_UART_TTY_NAME,
.write = cdns_uart_console_write,
.device = uart_console_device,
.setup = cdns_uart_console_setup,
.flags = CON_PRINTBUFFER,
	.index	= -1, /* Specified on the cmdline (e.g. console=ttyPS) */
.data = &cdns_uart_uart_driver,
};
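/*
 * Usage sketch (illustrative): this console is selected at boot with e.g.
 * "console=ttyPS0,115200" on the kernel command line; the index is filled
 * in at probe time from the serial alias.
 */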
#endif /* CONFIG_SERIAL_XILINX_PS_UART_CONSOLE */
#ifdef CONFIG_PM_SLEEP
/**
* cdns_uart_suspend - suspend event
* @device: Pointer to the device structure
*
* Return: 0
*/
static int cdns_uart_suspend(struct device *device)
{
struct uart_port *port = dev_get_drvdata(device);
struct cdns_uart *cdns_uart = port->private_data;
int may_wake;
may_wake = device_may_wakeup(device);
if (console_suspend_enabled && uart_console(port) && may_wake) {
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
		/* Empty the receive FIFO before making changes */
while (!(readl(port->membase + CDNS_UART_SR) &
CDNS_UART_SR_RXEMPTY))
readl(port->membase + CDNS_UART_FIFO);
/* set RX trigger level to 1 */
writel(1, port->membase + CDNS_UART_RXWM);
		/* disable RX timeout interrupts */
writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IDR);
spin_unlock_irqrestore(&port->lock, flags);
}
/*
* Call the API provided in serial_core.c file which handles
* the suspend.
*/
return uart_suspend_port(cdns_uart->cdns_uart_driver, port);
}
/**
* cdns_uart_resume - Resume after a previous suspend
* @device: Pointer to the device structure
*
* Return: 0
*/
static int cdns_uart_resume(struct device *device)
{
struct uart_port *port = dev_get_drvdata(device);
struct cdns_uart *cdns_uart = port->private_data;
unsigned long flags;
u32 ctrl_reg;
int may_wake;
int ret;
may_wake = device_may_wakeup(device);
if (console_suspend_enabled && uart_console(port) && !may_wake) {
ret = clk_enable(cdns_uart->pclk);
if (ret)
return ret;
ret = clk_enable(cdns_uart->uartclk);
if (ret) {
clk_disable(cdns_uart->pclk);
return ret;
}
spin_lock_irqsave(&port->lock, flags);
/* Set TX/RX Reset */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
while (readl(port->membase + CDNS_UART_CR) &
(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
cpu_relax();
/* restore rx timeout value */
writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
/* Enable Tx/Rx */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
clk_disable(cdns_uart->uartclk);
clk_disable(cdns_uart->pclk);
spin_unlock_irqrestore(&port->lock, flags);
} else {
spin_lock_irqsave(&port->lock, flags);
/* restore original rx trigger level */
writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
/* enable RX timeout interrupt */
writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IER);
spin_unlock_irqrestore(&port->lock, flags);
}
return uart_resume_port(cdns_uart->cdns_uart_driver, port);
}
#endif /* CONFIG_PM_SLEEP */
static int __maybe_unused cdns_runtime_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
struct cdns_uart *cdns_uart = port->private_data;
clk_disable(cdns_uart->uartclk);
clk_disable(cdns_uart->pclk);
return 0;
}
static int __maybe_unused cdns_runtime_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
struct cdns_uart *cdns_uart = port->private_data;
int ret;
ret = clk_enable(cdns_uart->pclk);
if (ret)
return ret;
ret = clk_enable(cdns_uart->uartclk);
if (ret) {
clk_disable(cdns_uart->pclk);
return ret;
}
return 0;
}
static const struct dev_pm_ops cdns_uart_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(cdns_uart_suspend, cdns_uart_resume)
SET_RUNTIME_PM_OPS(cdns_runtime_suspend,
cdns_runtime_resume, NULL)
};
static const struct cdns_platform_data zynqmp_uart_def = {
.quirks = CDNS_UART_RXBS_SUPPORT, };
/* Match table for of_platform binding */
static const struct of_device_id cdns_uart_of_match[] = {
{ .compatible = "xlnx,xuartps", },
{ .compatible = "cdns,uart-r1p8", },
{ .compatible = "cdns,uart-r1p12", .data = &zynqmp_uart_def },
{ .compatible = "xlnx,zynqmp-uart", .data = &zynqmp_uart_def },
{}
};
MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
/* Temporary variable for storing number of instances */
static int instances;
/**
* cdns_uart_probe - Platform driver probe
* @pdev: Pointer to the platform device structure
*
* Return: 0 on success, negative errno otherwise
*/
static int cdns_uart_probe(struct platform_device *pdev)
{
int rc, id, irq;
struct uart_port *port;
struct resource *res;
struct cdns_uart *cdns_uart_data;
const struct of_device_id *match;
cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
GFP_KERNEL);
if (!cdns_uart_data)
return -ENOMEM;
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
/* Look for a serialN alias */
id = of_alias_get_id(pdev->dev.of_node, "serial");
if (id < 0)
id = 0;
if (id >= CDNS_UART_NR_PORTS) {
dev_err(&pdev->dev, "Cannot get uart_port structure\n");
return -ENODEV;
}
if (!cdns_uart_uart_driver.state) {
cdns_uart_uart_driver.owner = THIS_MODULE;
cdns_uart_uart_driver.driver_name = CDNS_UART_NAME;
cdns_uart_uart_driver.dev_name = CDNS_UART_TTY_NAME;
cdns_uart_uart_driver.major = CDNS_UART_MAJOR;
cdns_uart_uart_driver.minor = CDNS_UART_MINOR;
cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
cdns_uart_uart_driver.cons = &cdns_uart_console;
#endif
rc = uart_register_driver(&cdns_uart_uart_driver);
if (rc < 0) {
dev_err(&pdev->dev, "Failed to register driver\n");
return rc;
}
}
cdns_uart_data->cdns_uart_driver = &cdns_uart_uart_driver;
match = of_match_node(cdns_uart_of_match, pdev->dev.of_node);
if (match && match->data) {
const struct cdns_platform_data *data = match->data;
cdns_uart_data->quirks = data->quirks;
}
cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "pclk");
if (PTR_ERR(cdns_uart_data->pclk) == -EPROBE_DEFER) {
rc = PTR_ERR(cdns_uart_data->pclk);
goto err_out_unregister_driver;
}
if (IS_ERR(cdns_uart_data->pclk)) {
cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "aper_clk");
if (IS_ERR(cdns_uart_data->pclk)) {
rc = PTR_ERR(cdns_uart_data->pclk);
goto err_out_unregister_driver;
}
dev_err(&pdev->dev, "clock name 'aper_clk' is deprecated.\n");
}
cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "uart_clk");
if (PTR_ERR(cdns_uart_data->uartclk) == -EPROBE_DEFER) {
rc = PTR_ERR(cdns_uart_data->uartclk);
goto err_out_unregister_driver;
}
if (IS_ERR(cdns_uart_data->uartclk)) {
cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(cdns_uart_data->uartclk)) {
rc = PTR_ERR(cdns_uart_data->uartclk);
goto err_out_unregister_driver;
}
dev_err(&pdev->dev, "clock name 'ref_clk' is deprecated.\n");
}
rc = clk_prepare_enable(cdns_uart_data->pclk);
if (rc) {
dev_err(&pdev->dev, "Unable to enable pclk clock.\n");
goto err_out_unregister_driver;
}
rc = clk_prepare_enable(cdns_uart_data->uartclk);
if (rc) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
goto err_out_clk_dis_pclk;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
rc = -ENODEV;
goto err_out_clk_disable;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
rc = irq;
goto err_out_clk_disable;
}
#ifdef CONFIG_COMMON_CLK
cdns_uart_data->clk_rate_change_nb.notifier_call =
cdns_uart_clk_notifier_cb;
if (clk_notifier_register(cdns_uart_data->uartclk,
&cdns_uart_data->clk_rate_change_nb))
dev_warn(&pdev->dev, "Unable to register clock notifier.\n");
#endif
/* At this point, we've got an empty uart_port struct, initialize it */
spin_lock_init(&port->lock);
port->type = PORT_UNKNOWN;
port->iotype = UPIO_MEM32;
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &cdns_uart_ops;
port->fifosize = CDNS_UART_FIFO_SIZE;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE);
port->line = id;
/*
* Register the port.
* This function also registers this device with the tty layer
* and triggers invocation of the config_port() entry point.
*/
port->mapbase = res->start;
port->irq = irq;
port->dev = &pdev->dev;
port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
port->private_data = cdns_uart_data;
port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
cdns_uart_data->port = port;
platform_set_drvdata(pdev, port);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
device_init_wakeup(port->dev, true);
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
/*
	 * If a console hasn't been chosen yet, try to assign this port,
	 * because a port must be assigned before the console setup function
	 * runs. If register_console() does not pick this port up, the
	 * console_port pointer is cleared again below.
*/
if (!console_port) {
cdns_uart_console.index = id;
console_port = port;
}
#endif
rc = uart_add_one_port(&cdns_uart_uart_driver, port);
if (rc) {
dev_err(&pdev->dev,
"uart_add_one_port() failed; err=%i\n", rc);
goto err_out_pm_disable;
}
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
	/* If this port was not picked as the console, undo the assignment */
if (console_port == port &&
!console_is_registered(cdns_uart_uart_driver.cons)) {
console_port = NULL;
cdns_uart_console.index = -1;
}
#endif
cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node,
"cts-override");
instances++;
return 0;
err_out_pm_disable:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
#ifdef CONFIG_COMMON_CLK
clk_notifier_unregister(cdns_uart_data->uartclk,
&cdns_uart_data->clk_rate_change_nb);
#endif
err_out_clk_disable:
clk_disable_unprepare(cdns_uart_data->uartclk);
err_out_clk_dis_pclk:
clk_disable_unprepare(cdns_uart_data->pclk);
err_out_unregister_driver:
if (!instances)
uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
return rc;
}
/**
* cdns_uart_remove - called when the platform driver is unregistered
* @pdev: Pointer to the platform device structure
*
* Return: 0 on success, negative errno otherwise
*/
static int cdns_uart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct cdns_uart *cdns_uart_data = port->private_data;
/* Remove the cdns_uart port from the serial core */
#ifdef CONFIG_COMMON_CLK
clk_notifier_unregister(cdns_uart_data->uartclk,
&cdns_uart_data->clk_rate_change_nb);
#endif
uart_remove_one_port(cdns_uart_data->cdns_uart_driver, port);
port->mapbase = 0;
clk_disable_unprepare(cdns_uart_data->uartclk);
clk_disable_unprepare(cdns_uart_data->pclk);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
if (console_port == port)
console_port = NULL;
#endif
if (!--instances)
uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
return 0;
}
static struct platform_driver cdns_uart_platform_driver = {
.probe = cdns_uart_probe,
.remove = cdns_uart_remove,
.driver = {
.name = CDNS_UART_NAME,
.of_match_table = cdns_uart_of_match,
.pm = &cdns_uart_dev_pm_ops,
.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_XILINX_PS_UART),
},
};
static int __init cdns_uart_init(void)
{
/* Register the platform driver */
return platform_driver_register(&cdns_uart_platform_driver);
}
static void __exit cdns_uart_exit(void)
{
/* Unregister the platform driver */
platform_driver_unregister(&cdns_uart_platform_driver);
}
arch_initcall(cdns_uart_init);
module_exit(cdns_uart_exit);
MODULE_DESCRIPTION("Driver for Cadence UART");
MODULE_AUTHOR("Xilinx Inc.");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/xilinx_uartps.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* altera_jtaguart.c -- Altera JTAG UART driver
*
* Based on mcf.c -- Freescale ColdFire UART driver
*
* (C) Copyright 2003-2007, Greg Ungerer <[email protected]>
* (C) Copyright 2008, Thomas Chou <[email protected]>
* (C) Copyright 2010, Tobias Klauser <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/of.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/altera_jtaguart.h>
#define DRV_NAME "altera_jtaguart"
/*
* Altera JTAG UART register definitions according to the Altera JTAG UART
* datasheet: https://www.altera.com/literature/hb/nios2/n2cpu_nii51009.pdf
*/
#define ALTERA_JTAGUART_SIZE 8
#define ALTERA_JTAGUART_DATA_REG 0
#define ALTERA_JTAGUART_DATA_DATA_MSK 0x000000FF
#define ALTERA_JTAGUART_DATA_RVALID_MSK 0x00008000
#define ALTERA_JTAGUART_DATA_RAVAIL_MSK 0xFFFF0000
#define ALTERA_JTAGUART_DATA_RAVAIL_OFF 16
#define ALTERA_JTAGUART_CONTROL_REG 4
#define ALTERA_JTAGUART_CONTROL_RE_MSK 0x00000001
#define ALTERA_JTAGUART_CONTROL_WE_MSK 0x00000002
#define ALTERA_JTAGUART_CONTROL_RI_MSK 0x00000100
#define ALTERA_JTAGUART_CONTROL_RI_OFF 8
#define ALTERA_JTAGUART_CONTROL_WI_MSK 0x00000200
#define ALTERA_JTAGUART_CONTROL_AC_MSK 0x00000400
#define ALTERA_JTAGUART_CONTROL_WSPACE_MSK 0xFFFF0000
static unsigned int altera_jtaguart_tx_space(struct uart_port *port, u32 *ctlp)
{
u32 ctl = readl(port->membase + ALTERA_JTAGUART_CONTROL_REG);
if (ctlp)
*ctlp = ctl;
return FIELD_GET(ALTERA_JTAGUART_CONTROL_WSPACE_MSK, ctl);
}
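/*
 * Illustrative example (not part of the driver): FIELD_GET() extracts the
 * free-entry count from the upper 16 bits of the control word, so a raw
 * control value of 0x00400000 reports 0x40 (64) writable FIFO entries.
 */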
static unsigned int altera_jtaguart_tx_empty(struct uart_port *port)
{
return altera_jtaguart_tx_space(port, NULL) ? TIOCSER_TEMT : 0;
}
static unsigned int altera_jtaguart_get_mctrl(struct uart_port *port)
{
return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}
static void altera_jtaguart_set_mctrl(struct uart_port *port, unsigned int sigs)
{
}
static void altera_jtaguart_start_tx(struct uart_port *port)
{
port->read_status_mask |= ALTERA_JTAGUART_CONTROL_WE_MSK;
writel(port->read_status_mask,
port->membase + ALTERA_JTAGUART_CONTROL_REG);
}
static void altera_jtaguart_stop_tx(struct uart_port *port)
{
port->read_status_mask &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
writel(port->read_status_mask,
port->membase + ALTERA_JTAGUART_CONTROL_REG);
}
static void altera_jtaguart_stop_rx(struct uart_port *port)
{
port->read_status_mask &= ~ALTERA_JTAGUART_CONTROL_RE_MSK;
writel(port->read_status_mask,
port->membase + ALTERA_JTAGUART_CONTROL_REG);
}
static void altera_jtaguart_break_ctl(struct uart_port *port, int break_state)
{
}
static void altera_jtaguart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
/* Just copy the old termios settings back */
if (old)
tty_termios_copy_hw(termios, old);
}
static void altera_jtaguart_rx_chars(struct uart_port *port)
{
u32 status;
u8 ch;
while ((status = readl(port->membase + ALTERA_JTAGUART_DATA_REG)) &
ALTERA_JTAGUART_DATA_RVALID_MSK) {
ch = status & ALTERA_JTAGUART_DATA_DATA_MSK;
port->icount.rx++;
if (uart_handle_sysrq_char(port, ch))
continue;
uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
}
tty_flip_buffer_push(&port->state->port);
}
static void altera_jtaguart_tx_chars(struct uart_port *port)
{
unsigned int count;
u8 ch;
count = altera_jtaguart_tx_space(port, NULL);
uart_port_tx_limited(port, ch, count,
true,
writel(ch, port->membase + ALTERA_JTAGUART_DATA_REG),
({}));
}
static irqreturn_t altera_jtaguart_interrupt(int irq, void *data)
{
struct uart_port *port = data;
unsigned int isr;
isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
ALTERA_JTAGUART_CONTROL_RI_OFF) & port->read_status_mask;
spin_lock(&port->lock);
if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
altera_jtaguart_rx_chars(port);
if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
altera_jtaguart_tx_chars(port);
spin_unlock(&port->lock);
return IRQ_RETVAL(isr);
}
static void altera_jtaguart_config_port(struct uart_port *port, int flags)
{
port->type = PORT_ALTERA_JTAGUART;
/* Clear mask, so no surprise interrupts. */
writel(0, port->membase + ALTERA_JTAGUART_CONTROL_REG);
}
static int altera_jtaguart_startup(struct uart_port *port)
{
unsigned long flags;
int ret;
ret = request_irq(port->irq, altera_jtaguart_interrupt, 0,
DRV_NAME, port);
if (ret) {
pr_err(DRV_NAME ": unable to attach Altera JTAG UART %d "
"interrupt vector=%d\n", port->line, port->irq);
return ret;
}
spin_lock_irqsave(&port->lock, flags);
/* Enable RX interrupts now */
port->read_status_mask = ALTERA_JTAGUART_CONTROL_RE_MSK;
writel(port->read_status_mask,
port->membase + ALTERA_JTAGUART_CONTROL_REG);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
static void altera_jtaguart_shutdown(struct uart_port *port)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* Disable all interrupts now */
port->read_status_mask = 0;
writel(port->read_status_mask,
port->membase + ALTERA_JTAGUART_CONTROL_REG);
spin_unlock_irqrestore(&port->lock, flags);
free_irq(port->irq, port);
}
static const char *altera_jtaguart_type(struct uart_port *port)
{
return (port->type == PORT_ALTERA_JTAGUART) ? "Altera JTAG UART" : NULL;
}
static int altera_jtaguart_request_port(struct uart_port *port)
{
/* UARTs always present */
return 0;
}
static void altera_jtaguart_release_port(struct uart_port *port)
{
/* Nothing to release... */
}
static int altera_jtaguart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
if (ser->type != PORT_UNKNOWN && ser->type != PORT_ALTERA_JTAGUART)
return -EINVAL;
return 0;
}
/*
* Define the basic serial functions we support.
*/
static const struct uart_ops altera_jtaguart_ops = {
.tx_empty = altera_jtaguart_tx_empty,
.get_mctrl = altera_jtaguart_get_mctrl,
.set_mctrl = altera_jtaguart_set_mctrl,
.start_tx = altera_jtaguart_start_tx,
.stop_tx = altera_jtaguart_stop_tx,
.stop_rx = altera_jtaguart_stop_rx,
.break_ctl = altera_jtaguart_break_ctl,
.startup = altera_jtaguart_startup,
.shutdown = altera_jtaguart_shutdown,
.set_termios = altera_jtaguart_set_termios,
.type = altera_jtaguart_type,
.request_port = altera_jtaguart_request_port,
.release_port = altera_jtaguart_release_port,
.config_port = altera_jtaguart_config_port,
.verify_port = altera_jtaguart_verify_port,
};
#define ALTERA_JTAGUART_MAXPORTS 1
static struct uart_port altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS];
#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE)
#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c)
{
unsigned long flags;
u32 status;
spin_lock_irqsave(&port->lock, flags);
while (!altera_jtaguart_tx_space(port, &status)) {
spin_unlock_irqrestore(&port->lock, flags);
if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
return; /* no connection activity */
}
cpu_relax();
spin_lock_irqsave(&port->lock, flags);
}
writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
spin_unlock_irqrestore(&port->lock, flags);
}
#else
static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
while (!altera_jtaguart_tx_space(port, NULL)) {
spin_unlock_irqrestore(&port->lock, flags);
cpu_relax();
spin_lock_irqsave(&port->lock, flags);
}
writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
spin_unlock_irqrestore(&port->lock, flags);
}
#endif
static void altera_jtaguart_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port = &altera_jtaguart_ports[co->index];
uart_console_write(port, s, count, altera_jtaguart_console_putc);
}
static int __init altera_jtaguart_console_setup(struct console *co,
char *options)
{
struct uart_port *port;
if (co->index < 0 || co->index >= ALTERA_JTAGUART_MAXPORTS)
return -EINVAL;
port = &altera_jtaguart_ports[co->index];
if (port->membase == NULL)
return -ENODEV;
return 0;
}
static struct uart_driver altera_jtaguart_driver;
static struct console altera_jtaguart_console = {
.name = "ttyJ",
.write = altera_jtaguart_console_write,
.device = uart_console_device,
.setup = altera_jtaguart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &altera_jtaguart_driver,
};
static int __init altera_jtaguart_console_init(void)
{
register_console(&altera_jtaguart_console);
return 0;
}
console_initcall(altera_jtaguart_console_init);
#define ALTERA_JTAGUART_CONSOLE (&altera_jtaguart_console)
static void altera_jtaguart_earlycon_write(struct console *co, const char *s,
unsigned int count)
{
struct earlycon_device *dev = co->data;
uart_console_write(&dev->port, s, count, altera_jtaguart_console_putc);
}
static int __init altera_jtaguart_earlycon_setup(struct earlycon_device *dev,
const char *options)
{
if (!dev->port.membase)
return -ENODEV;
dev->con->write = altera_jtaguart_earlycon_write;
return 0;
}
OF_EARLYCON_DECLARE(juart, "altr,juart-1.0", altera_jtaguart_earlycon_setup);
#else
#define ALTERA_JTAGUART_CONSOLE NULL
#endif /* CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE */
static struct uart_driver altera_jtaguart_driver = {
.owner = THIS_MODULE,
.driver_name = "altera_jtaguart",
.dev_name = "ttyJ",
.major = ALTERA_JTAGUART_MAJOR,
.minor = ALTERA_JTAGUART_MINOR,
.nr = ALTERA_JTAGUART_MAXPORTS,
.cons = ALTERA_JTAGUART_CONSOLE,
};
static int altera_jtaguart_probe(struct platform_device *pdev)
{
struct altera_jtaguart_platform_uart *platp =
dev_get_platdata(&pdev->dev);
struct uart_port *port;
struct resource *res_mem;
int i = pdev->id;
int irq;
/* -1 emphasizes that the platform must have one port, no .N suffix */
if (i == -1)
i = 0;
if (i >= ALTERA_JTAGUART_MAXPORTS)
return -EINVAL;
port = &altera_jtaguart_ports[i];
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res_mem)
port->mapbase = res_mem->start;
else if (platp)
port->mapbase = platp->mapbase;
else
return -ENODEV;
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0 && irq != -ENXIO)
return irq;
if (irq > 0)
port->irq = irq;
else if (platp)
port->irq = platp->irq;
else
return -ENODEV;
port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE);
if (!port->membase)
return -ENOMEM;
port->line = i;
port->type = PORT_ALTERA_JTAGUART;
	port->iotype = UPIO_MEM;
port->ops = &altera_jtaguart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->dev = &pdev->dev;
uart_add_one_port(&altera_jtaguart_driver, port);
return 0;
}
static int altera_jtaguart_remove(struct platform_device *pdev)
{
struct uart_port *port;
int i = pdev->id;
if (i == -1)
i = 0;
port = &altera_jtaguart_ports[i];
uart_remove_one_port(&altera_jtaguart_driver, port);
iounmap(port->membase);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id altera_jtaguart_match[] = {
{ .compatible = "ALTR,juart-1.0", },
{ .compatible = "altr,juart-1.0", },
{},
};
MODULE_DEVICE_TABLE(of, altera_jtaguart_match);
#endif /* CONFIG_OF */
static struct platform_driver altera_jtaguart_platform_driver = {
.probe = altera_jtaguart_probe,
.remove = altera_jtaguart_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(altera_jtaguart_match),
},
};
static int __init altera_jtaguart_init(void)
{
int rc;
rc = uart_register_driver(&altera_jtaguart_driver);
if (rc)
return rc;
rc = platform_driver_register(&altera_jtaguart_platform_driver);
if (rc)
uart_unregister_driver(&altera_jtaguart_driver);
return rc;
}
static void __exit altera_jtaguart_exit(void)
{
platform_driver_unregister(&altera_jtaguart_platform_driver);
uart_unregister_driver(&altera_jtaguart_driver);
}
module_init(altera_jtaguart_init);
module_exit(altera_jtaguart_exit);
MODULE_DESCRIPTION("Altera JTAG UART driver");
MODULE_AUTHOR("Thomas Chou <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/tty/serial/altera_jtaguart.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SA11x0 serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include "serial_mctrl_gpio.h"
/* We've been assigned a range on the "Low-density serial ports" major */
#define SERIAL_SA1100_MAJOR 204
#define MINOR_START 5
#define NR_PORTS 3
#define SA1100_ISR_PASS_LIMIT 256
/*
* Convert from ignore_status_mask or read_status_mask to UTSR[01]
*/
#define SM_TO_UTSR0(x) ((x) & 0xff)
#define SM_TO_UTSR1(x) ((x) >> 8)
#define UTSR0_TO_SM(x) ((x))
#define UTSR1_TO_SM(x) ((x) << 8)
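/*
 * Both 8-bit status registers therefore share one 16-bit software
 * mask: UTSR0 bits occupy bits 0-7 and UTSR1 bits occupy bits 8-15,
 * so read_status_mask/ignore_status_mask can mix, for example,
 * UTSR0_TO_SM(UTSR0_TFS) and UTSR1_TO_SM(UTSR1_ROR) without collision.
 */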
#define UART_GET_UTCR0(sport) __raw_readl((sport)->port.membase + UTCR0)
#define UART_GET_UTCR1(sport) __raw_readl((sport)->port.membase + UTCR1)
#define UART_GET_UTCR2(sport) __raw_readl((sport)->port.membase + UTCR2)
#define UART_GET_UTCR3(sport) __raw_readl((sport)->port.membase + UTCR3)
#define UART_GET_UTSR0(sport) __raw_readl((sport)->port.membase + UTSR0)
#define UART_GET_UTSR1(sport) __raw_readl((sport)->port.membase + UTSR1)
#define UART_GET_CHAR(sport) __raw_readl((sport)->port.membase + UTDR)
#define UART_PUT_UTCR0(sport,v) __raw_writel((v),(sport)->port.membase + UTCR0)
#define UART_PUT_UTCR1(sport,v) __raw_writel((v),(sport)->port.membase + UTCR1)
#define UART_PUT_UTCR2(sport,v) __raw_writel((v),(sport)->port.membase + UTCR2)
#define UART_PUT_UTCR3(sport,v) __raw_writel((v),(sport)->port.membase + UTCR3)
#define UART_PUT_UTSR0(sport,v) __raw_writel((v),(sport)->port.membase + UTSR0)
#define UART_PUT_UTSR1(sport,v) __raw_writel((v),(sport)->port.membase + UTSR1)
#define UART_PUT_CHAR(sport,v) __raw_writel((v),(sport)->port.membase + UTDR)
/*
* This is the size of our serial port register set.
*/
#define UART_PORT_SIZE 0x24
/*
* This determines how often we check the modem status signals
* for any change. They generally aren't connected to an IRQ
* so we have to poll them. We also check immediately before
* filling the TX FIFO in case CTS has been dropped.
*/
#define MCTRL_TIMEOUT (250*HZ/1000)
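/* e.g. with HZ == 100 this is 25 jiffies: the lines are polled about every 250 ms */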
struct sa1100_port {
struct uart_port port;
struct timer_list timer;
unsigned int old_status;
struct mctrl_gpios *gpios;
};
/*
* Handle any change of modem status signal since we were last called.
*/
static void sa1100_mctrl_check(struct sa1100_port *sport)
{
unsigned int status, changed;
status = sport->port.ops->get_mctrl(&sport->port);
changed = status ^ sport->old_status;
if (changed == 0)
return;
sport->old_status = status;
if (changed & TIOCM_RI)
sport->port.icount.rng++;
if (changed & TIOCM_DSR)
sport->port.icount.dsr++;
if (changed & TIOCM_CAR)
uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
if (changed & TIOCM_CTS)
uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}
/*
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
static void sa1100_timeout(struct timer_list *t)
{
struct sa1100_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
spin_lock_irqsave(&sport->port.lock, flags);
sa1100_mctrl_check(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
}
}
/*
* interrupts disabled on entry
*/
static void sa1100_stop_tx(struct uart_port *port)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
u32 utcr3;
utcr3 = UART_GET_UTCR3(sport);
UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_TIE);
sport->port.read_status_mask &= ~UTSR0_TO_SM(UTSR0_TFS);
}
/*
* port locked and interrupts disabled
*/
static void sa1100_start_tx(struct uart_port *port)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
u32 utcr3;
utcr3 = UART_GET_UTCR3(sport);
sport->port.read_status_mask |= UTSR0_TO_SM(UTSR0_TFS);
UART_PUT_UTCR3(sport, utcr3 | UTCR3_TIE);
}
/*
* Interrupts enabled
*/
static void sa1100_stop_rx(struct uart_port *port)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
u32 utcr3;
utcr3 = UART_GET_UTCR3(sport);
UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_RIE);
}
/*
* Set the modem control timer to fire immediately.
*/
static void sa1100_enable_ms(struct uart_port *port)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
mod_timer(&sport->timer, jiffies);
mctrl_gpio_enable_ms(sport->gpios);
}
static void
sa1100_rx_chars(struct sa1100_port *sport)
{
unsigned int status;
u8 ch, flg;
status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
UTSR0_TO_SM(UART_GET_UTSR0(sport));
while (status & UTSR1_TO_SM(UTSR1_RNE)) {
ch = UART_GET_CHAR(sport);
sport->port.icount.rx++;
flg = TTY_NORMAL;
/*
* note that the error handling code is
* out of the main execution path
*/
if (status & UTSR1_TO_SM(UTSR1_PRE | UTSR1_FRE | UTSR1_ROR)) {
if (status & UTSR1_TO_SM(UTSR1_PRE))
sport->port.icount.parity++;
else if (status & UTSR1_TO_SM(UTSR1_FRE))
sport->port.icount.frame++;
if (status & UTSR1_TO_SM(UTSR1_ROR))
sport->port.icount.overrun++;
status &= sport->port.read_status_mask;
if (status & UTSR1_TO_SM(UTSR1_PRE))
flg = TTY_PARITY;
else if (status & UTSR1_TO_SM(UTSR1_FRE))
flg = TTY_FRAME;
sport->port.sysrq = 0;
}
if (uart_handle_sysrq_char(&sport->port, ch))
goto ignore_char;
uart_insert_char(&sport->port, status, UTSR1_TO_SM(UTSR1_ROR), ch, flg);
ignore_char:
status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
UTSR0_TO_SM(UART_GET_UTSR0(sport));
}
tty_flip_buffer_push(&sport->port.state->port);
}
static void sa1100_tx_chars(struct sa1100_port *sport)
{
u8 ch;
/*
* Check the modem control lines before
* transmitting anything.
*/
sa1100_mctrl_check(sport);
uart_port_tx(&sport->port, ch,
UART_GET_UTSR1(sport) & UTSR1_TNF,
UART_PUT_CHAR(sport, ch));
}
static irqreturn_t sa1100_int(int irq, void *dev_id)
{
struct sa1100_port *sport = dev_id;
unsigned int status, pass_counter = 0;
spin_lock(&sport->port.lock);
status = UART_GET_UTSR0(sport);
status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS;
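/*
 * Every UTSR0 event except TFS is always accepted (the ~UTSR0_TFS
 * term); TFS is honoured only while sa1100_start_tx() has put it
 * into read_status_mask.
 */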
do {
if (status & (UTSR0_RFS | UTSR0_RID)) {
/* Clear the receiver idle bit, if set */
if (status & UTSR0_RID)
UART_PUT_UTSR0(sport, UTSR0_RID);
sa1100_rx_chars(sport);
}
/* Clear the relevant break bits */
if (status & (UTSR0_RBB | UTSR0_REB))
UART_PUT_UTSR0(sport, status & (UTSR0_RBB | UTSR0_REB));
if (status & UTSR0_RBB)
sport->port.icount.brk++;
if (status & UTSR0_REB)
uart_handle_break(&sport->port);
if (status & UTSR0_TFS)
sa1100_tx_chars(sport);
if (pass_counter++ > SA1100_ISR_PASS_LIMIT)
break;
status = UART_GET_UTSR0(sport);
status &= SM_TO_UTSR0(sport->port.read_status_mask) |
~UTSR0_TFS;
} while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID));
spin_unlock(&sport->port.lock);
return IRQ_HANDLED;
}
/*
* Return TIOCSER_TEMT when transmitter is not busy.
*/
static unsigned int sa1100_tx_empty(struct uart_port *port)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
return UART_GET_UTSR1(sport) & UTSR1_TBY ? 0 : TIOCSER_TEMT;
}
static unsigned int sa1100_get_mctrl(struct uart_port *port)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
int ret = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
mctrl_gpio_get(sport->gpios, &ret);
return ret;
}
static void sa1100_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
mctrl_gpio_set(sport->gpios, mctrl);
}
/*
* Interrupts always disabled.
*/
static void sa1100_break_ctl(struct uart_port *port, int break_state)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
unsigned long flags;
unsigned int utcr3;
spin_lock_irqsave(&sport->port.lock, flags);
utcr3 = UART_GET_UTCR3(sport);
if (break_state == -1)
utcr3 |= UTCR3_BRK;
else
utcr3 &= ~UTCR3_BRK;
UART_PUT_UTCR3(sport, utcr3);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
static int sa1100_startup(struct uart_port *port)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
int retval;
/*
* Allocate the IRQ
*/
retval = request_irq(sport->port.irq, sa1100_int, 0,
"sa11x0-uart", sport);
if (retval)
return retval;
/*
* Finally, clear and enable interrupts
*/
UART_PUT_UTSR0(sport, -1);
UART_PUT_UTCR3(sport, UTCR3_RXE | UTCR3_TXE | UTCR3_RIE);
/*
* Enable modem status interrupts
*/
spin_lock_irq(&sport->port.lock);
sa1100_enable_ms(&sport->port);
spin_unlock_irq(&sport->port.lock);
return 0;
}
static void sa1100_shutdown(struct uart_port *port)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
/*
* Stop our timer.
*/
del_timer_sync(&sport->timer);
/*
* Free the interrupt
*/
free_irq(sport->port.irq, sport);
/*
* Disable all interrupts, port and break condition.
*/
UART_PUT_UTCR3(sport, 0);
}
static void
sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
unsigned long flags;
unsigned int utcr0, old_utcr3, baud, quot;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
/*
* We only support CS7 and CS8.
*/
while ((termios->c_cflag & CSIZE) != CS7 &&
(termios->c_cflag & CSIZE) != CS8) {
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= old_csize;
old_csize = CS8;
}
if ((termios->c_cflag & CSIZE) == CS8)
utcr0 = UTCR0_DSS;
else
utcr0 = 0;
if (termios->c_cflag & CSTOPB)
utcr0 |= UTCR0_SBS;
if (termios->c_cflag & PARENB) {
utcr0 |= UTCR0_PE;
if (!(termios->c_cflag & PARODD))
utcr0 |= UTCR0_OES;
}
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
del_timer_sync(&sport->timer);
spin_lock_irqsave(&sport->port.lock, flags);
sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
if (termios->c_iflag & INPCK)
sport->port.read_status_mask |=
UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
if (termios->c_iflag & (BRKINT | PARMRK))
sport->port.read_status_mask |=
UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);
/*
* Characters to ignore
*/
sport->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
sport->port.ignore_status_mask |=
UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
if (termios->c_iflag & IGNBRK) {
sport->port.ignore_status_mask |=
UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
sport->port.ignore_status_mask |=
UTSR1_TO_SM(UTSR1_ROR);
}
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
/*
* disable interrupts and drain transmitter
*/
old_utcr3 = UART_GET_UTCR3(sport);
UART_PUT_UTCR3(sport, old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE));
while (UART_GET_UTSR1(sport) & UTSR1_TBY)
barrier();
/* then, disable everything */
UART_PUT_UTCR3(sport, 0);
/* set the parity, stop bits and data size */
UART_PUT_UTCR0(sport, utcr0);
/* set the baud rate */
quot -= 1;
UART_PUT_UTCR1(sport, ((quot & 0xf00) >> 8));
UART_PUT_UTCR2(sport, (quot & 0xff));
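/*
 * Worked example (a sketch assuming the standard 3.6864 MHz uartclk):
 * at 9600 baud uart_get_divisor() returns 3686400 / (16 * 9600) = 24,
 * so quot - 1 = 23 = 0x017 is split as UTCR1 = 0x0 (bits 11:8) and
 * UTCR2 = 0x17 (bits 7:0).
 */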
UART_PUT_UTSR0(sport, -1);
UART_PUT_UTCR3(sport, old_utcr3);
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
sa1100_enable_ms(&sport->port);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
static const char *sa1100_type(struct uart_port *port)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
return sport->port.type == PORT_SA1100 ? "SA1100" : NULL;
}
/*
* Release the memory region(s) being used by 'port'.
*/
static void sa1100_release_port(struct uart_port *port)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
release_mem_region(sport->port.mapbase, UART_PORT_SIZE);
}
/*
* Request the memory region(s) being used by 'port'.
*/
static int sa1100_request_port(struct uart_port *port)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
return request_mem_region(sport->port.mapbase, UART_PORT_SIZE,
"sa11x0-uart") != NULL ? 0 : -EBUSY;
}
/*
* Configure/autoconfigure the port.
*/
static void sa1100_config_port(struct uart_port *port, int flags)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
if (flags & UART_CONFIG_TYPE &&
sa1100_request_port(&sport->port) == 0)
sport->port.type = PORT_SA1100;
}
/*
* Verify the new serial_struct (for TIOCSSERIAL).
* The only change we allow are to the flags and type, and
* even then only between PORT_SA1100 and PORT_UNKNOWN
*/
static int
sa1100_verify_port(struct uart_port *port, struct serial_struct *ser)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_SA1100)
ret = -EINVAL;
if (sport->port.irq != ser->irq)
ret = -EINVAL;
if (ser->io_type != SERIAL_IO_MEM)
ret = -EINVAL;
if (sport->port.uartclk / 16 != ser->baud_base)
ret = -EINVAL;
if ((void *)sport->port.mapbase != ser->iomem_base)
ret = -EINVAL;
if (sport->port.iobase != ser->port)
ret = -EINVAL;
if (ser->hub6 != 0)
ret = -EINVAL;
return ret;
}
static struct uart_ops sa1100_pops = {
.tx_empty = sa1100_tx_empty,
.set_mctrl = sa1100_set_mctrl,
.get_mctrl = sa1100_get_mctrl,
.stop_tx = sa1100_stop_tx,
.start_tx = sa1100_start_tx,
.stop_rx = sa1100_stop_rx,
.enable_ms = sa1100_enable_ms,
.break_ctl = sa1100_break_ctl,
.startup = sa1100_startup,
.shutdown = sa1100_shutdown,
.set_termios = sa1100_set_termios,
.type = sa1100_type,
.release_port = sa1100_release_port,
.request_port = sa1100_request_port,
.config_port = sa1100_config_port,
.verify_port = sa1100_verify_port,
};
static struct sa1100_port sa1100_ports[NR_PORTS];
/*
* Setup the SA1100 serial ports. Note that we don't include the IrDA
* port here since we have our own SIR/FIR driver (see drivers/net/irda)
*
* Note also that we support "console=ttySAx" where "x" is either 0 or 1.
* Which serial port this ends up being depends on the machine you're
* running this kernel on. I'm not convinced that this is a good idea,
* but that's the way it traditionally works.
*
* Note that NanoEngine UART3 becomes UART2, and UART2 is no longer
* used here.
*/
static void __init sa1100_init_ports(void)
{
static int first = 1;
int i;
if (!first)
return;
first = 0;
for (i = 0; i < NR_PORTS; i++) {
sa1100_ports[i].port.uartclk = 3686400;
sa1100_ports[i].port.ops = &sa1100_pops;
sa1100_ports[i].port.fifosize = 8;
sa1100_ports[i].port.line = i;
sa1100_ports[i].port.iotype = UPIO_MEM;
timer_setup(&sa1100_ports[i].timer, sa1100_timeout, 0);
}
/*
* make transmit lines outputs, so that when the port
* is closed, the output is in the MARK state.
*/
PPDR |= PPC_TXD1 | PPC_TXD3;
PPSR |= PPC_TXD1 | PPC_TXD3;
}
void sa1100_register_uart_fns(struct sa1100_port_fns *fns)
{
if (fns->get_mctrl)
sa1100_pops.get_mctrl = fns->get_mctrl;
if (fns->set_mctrl)
sa1100_pops.set_mctrl = fns->set_mctrl;
sa1100_pops.pm = fns->pm;
/*
* FIXME: fns->set_wake is unused - this should be called from
* the suspend() callback if device_may_wakeup(dev)) is set.
*/
}
void __init sa1100_register_uart(int idx, int port)
{
if (idx >= NR_PORTS) {
printk(KERN_ERR "%s: bad index number %d\n", __func__, idx);
return;
}
switch (port) {
case 1:
sa1100_ports[idx].port.membase = (void __iomem *)&Ser1UTCR0;
sa1100_ports[idx].port.mapbase = _Ser1UTCR0;
sa1100_ports[idx].port.irq = IRQ_Ser1UART;
sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF;
break;
case 2:
sa1100_ports[idx].port.membase = (void __iomem *)&Ser2UTCR0;
sa1100_ports[idx].port.mapbase = _Ser2UTCR0;
sa1100_ports[idx].port.irq = IRQ_Ser2ICP;
sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF;
break;
case 3:
sa1100_ports[idx].port.membase = (void __iomem *)&Ser3UTCR0;
sa1100_ports[idx].port.mapbase = _Ser3UTCR0;
sa1100_ports[idx].port.irq = IRQ_Ser3UART;
sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF;
break;
default:
printk(KERN_ERR "%s: bad port number %d\n", __func__, port);
}
}
#ifdef CONFIG_SERIAL_SA1100_CONSOLE
static void sa1100_console_putchar(struct uart_port *port, unsigned char ch)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
while (!(UART_GET_UTSR1(sport) & UTSR1_TNF))
barrier();
UART_PUT_CHAR(sport, ch);
}
/*
* Interrupts are disabled on entering
*/
static void
sa1100_console_write(struct console *co, const char *s, unsigned int count)
{
struct sa1100_port *sport = &sa1100_ports[co->index];
unsigned int old_utcr3, status;
/*
* First, save UTCR3 and then disable interrupts
*/
old_utcr3 = UART_GET_UTCR3(sport);
UART_PUT_UTCR3(sport, (old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE)) |
UTCR3_TXE);
uart_console_write(&sport->port, s, count, sa1100_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore UTCR3
*/
do {
status = UART_GET_UTSR1(sport);
} while (status & UTSR1_TBY);
UART_PUT_UTCR3(sport, old_utcr3);
}
/*
* If the port was already initialised (eg, by a boot loader),
* try to determine the current setup.
*/
static void __init
sa1100_console_get_options(struct sa1100_port *sport, int *baud,
int *parity, int *bits)
{
unsigned int utcr3;
utcr3 = UART_GET_UTCR3(sport) & (UTCR3_RXE | UTCR3_TXE);
if (utcr3 == (UTCR3_RXE | UTCR3_TXE)) {
/* ok, the port was enabled */
unsigned int utcr0, quot;
utcr0 = UART_GET_UTCR0(sport);
*parity = 'n';
if (utcr0 & UTCR0_PE) {
if (utcr0 & UTCR0_OES)
*parity = 'e';
else
*parity = 'o';
}
if (utcr0 & UTCR0_DSS)
*bits = 8;
else
*bits = 7;
quot = UART_GET_UTCR2(sport) | UART_GET_UTCR1(sport) << 8;
quot &= 0xfff;
*baud = sport->port.uartclk / (16 * (quot + 1));
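/* e.g. UTCR1 = 0x0, UTCR2 = 0x17 -> quot = 23 -> 3686400 / (16 * 24) = 9600 baud */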
}
}
static int __init
sa1100_console_setup(struct console *co, char *options)
{
struct sa1100_port *sport;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
* console support.
*/
if (co->index == -1 || co->index >= NR_PORTS)
co->index = 0;
sport = &sa1100_ports[co->index];
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
sa1100_console_get_options(sport, &baud, &parity, &bits);
return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}
static struct uart_driver sa1100_reg;
static struct console sa1100_console = {
.name = "ttySA",
.write = sa1100_console_write,
.device = uart_console_device,
.setup = sa1100_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &sa1100_reg,
};
static int __init sa1100_rs_console_init(void)
{
sa1100_init_ports();
register_console(&sa1100_console);
return 0;
}
console_initcall(sa1100_rs_console_init);
#define SA1100_CONSOLE &sa1100_console
#else
#define SA1100_CONSOLE NULL
#endif
static struct uart_driver sa1100_reg = {
.owner = THIS_MODULE,
.driver_name = "ttySA",
.dev_name = "ttySA",
.major = SERIAL_SA1100_MAJOR,
.minor = MINOR_START,
.nr = NR_PORTS,
.cons = SA1100_CONSOLE,
};
static int sa1100_serial_suspend(struct platform_device *dev, pm_message_t state)
{
struct sa1100_port *sport = platform_get_drvdata(dev);
if (sport)
uart_suspend_port(&sa1100_reg, &sport->port);
return 0;
}
static int sa1100_serial_resume(struct platform_device *dev)
{
struct sa1100_port *sport = platform_get_drvdata(dev);
if (sport)
uart_resume_port(&sa1100_reg, &sport->port);
return 0;
}
static int sa1100_serial_add_one_port(struct sa1100_port *sport, struct platform_device *dev)
{
sport->port.dev = &dev->dev;
sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SA1100_CONSOLE);
/*
 * mctrl_gpio_init() requires that the GPIO driver supports interrupts,
 * but we need to support GPIO drivers for hardware that has no such
 * interrupts. Use mctrl_gpio_init_noauto() instead.
 */
sport->gpios = mctrl_gpio_init_noauto(sport->port.dev, 0);
if (IS_ERR(sport->gpios)) {
int err = PTR_ERR(sport->gpios);
dev_err(sport->port.dev, "failed to get mctrl gpios: %d\n",
err);
if (err == -EPROBE_DEFER)
return err;
sport->gpios = NULL;
}
platform_set_drvdata(dev, sport);
return uart_add_one_port(&sa1100_reg, &sport->port);
}
static int sa1100_serial_probe(struct platform_device *dev)
{
struct resource *res;
int i;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
for (i = 0; i < NR_PORTS; i++)
if (sa1100_ports[i].port.mapbase == res->start)
break;
if (i == NR_PORTS)
return -ENODEV;
sa1100_serial_add_one_port(&sa1100_ports[i], dev);
return 0;
}
static int sa1100_serial_remove(struct platform_device *pdev)
{
struct sa1100_port *sport = platform_get_drvdata(pdev);
if (sport)
uart_remove_one_port(&sa1100_reg, &sport->port);
return 0;
}
static struct platform_driver sa11x0_serial_driver = {
.probe = sa1100_serial_probe,
.remove = sa1100_serial_remove,
.suspend = sa1100_serial_suspend,
.resume = sa1100_serial_resume,
.driver = {
.name = "sa11x0-uart",
},
};
static int __init sa1100_serial_init(void)
{
int ret;
printk(KERN_INFO "Serial: SA11x0 driver\n");
sa1100_init_ports();
ret = uart_register_driver(&sa1100_reg);
if (ret == 0) {
ret = platform_driver_register(&sa11x0_serial_driver);
if (ret)
uart_unregister_driver(&sa1100_reg);
}
return ret;
}
static void __exit sa1100_serial_exit(void)
{
platform_driver_unregister(&sa11x0_serial_driver);
uart_unregister_driver(&sa1100_reg);
}
module_init(sa1100_serial_init);
module_exit(sa1100_serial_exit);
MODULE_AUTHOR("Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("SA1100 generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_SA1100_MAJOR);
MODULE_ALIAS("platform:sa11x0-uart");
| linux-master | drivers/tty/serial/sa1100.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* NXP (Philips) SCC+++(SCN+++) serial driver
*
* Copyright (C) 2012 Alexander Shiyan <[email protected]>
*
* Based on sc26xx.c, by Thomas Bogendörfer ([email protected])
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/io.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/platform_data/serial-sccnxp.h>
#include <linux/regulator/consumer.h>
#define SCCNXP_NAME "uart-sccnxp"
#define SCCNXP_MAJOR 204
#define SCCNXP_MINOR 205
#define SCCNXP_MR_REG (0x00)
# define MR0_BAUD_NORMAL (0 << 0)
# define MR0_BAUD_EXT1 (1 << 0)
# define MR0_BAUD_EXT2 (5 << 0)
# define MR0_FIFO (1 << 3)
# define MR0_TXLVL (1 << 4)
# define MR1_BITS_5 (0 << 0)
# define MR1_BITS_6 (1 << 0)
# define MR1_BITS_7 (2 << 0)
# define MR1_BITS_8 (3 << 0)
# define MR1_PAR_EVN (0 << 2)
# define MR1_PAR_ODD (1 << 2)
# define MR1_PAR_NO (4 << 2)
# define MR2_STOP1 (7 << 0)
# define MR2_STOP2 (0xf << 0)
#define SCCNXP_SR_REG (0x01)
# define SR_RXRDY (1 << 0)
# define SR_FULL (1 << 1)
# define SR_TXRDY (1 << 2)
# define SR_TXEMT (1 << 3)
# define SR_OVR (1 << 4)
# define SR_PE (1 << 5)
# define SR_FE (1 << 6)
# define SR_BRK (1 << 7)
#define SCCNXP_CSR_REG (SCCNXP_SR_REG)
# define CSR_TIMER_MODE (0x0d)
#define SCCNXP_CR_REG (0x02)
# define CR_RX_ENABLE (1 << 0)
# define CR_RX_DISABLE (1 << 1)
# define CR_TX_ENABLE (1 << 2)
# define CR_TX_DISABLE (1 << 3)
# define CR_CMD_MRPTR1 (0x01 << 4)
# define CR_CMD_RX_RESET (0x02 << 4)
# define CR_CMD_TX_RESET (0x03 << 4)
# define CR_CMD_STATUS_RESET (0x04 << 4)
# define CR_CMD_BREAK_RESET (0x05 << 4)
# define CR_CMD_START_BREAK (0x06 << 4)
# define CR_CMD_STOP_BREAK (0x07 << 4)
# define CR_CMD_MRPTR0 (0x0b << 4)
#define SCCNXP_RHR_REG (0x03)
#define SCCNXP_THR_REG SCCNXP_RHR_REG
#define SCCNXP_IPCR_REG (0x04)
#define SCCNXP_ACR_REG SCCNXP_IPCR_REG
# define ACR_BAUD0 (0 << 7)
# define ACR_BAUD1 (1 << 7)
# define ACR_TIMER_MODE (6 << 4)
#define SCCNXP_ISR_REG (0x05)
#define SCCNXP_IMR_REG SCCNXP_ISR_REG
# define IMR_TXRDY (1 << 0)
# define IMR_RXRDY (1 << 1)
# define ISR_TXRDY(x) (1 << ((x * 4) + 0))
# define ISR_RXRDY(x) (1 << ((x * 4) + 1))
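/*
 * Each channel owns a 4-bit slice of IMR/ISR: channel 0 uses bits 0-3,
 * channel 1 bits 4-7, which is why sccnxp_enable_irq() and friends
 * shift the per-port mask by (port->line * 4).
 */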
#define SCCNXP_CTPU_REG (0x06)
#define SCCNXP_CTPL_REG (0x07)
#define SCCNXP_IPR_REG (0x0d)
#define SCCNXP_OPCR_REG SCCNXP_IPR_REG
#define SCCNXP_SOP_REG (0x0e)
#define SCCNXP_START_COUNTER_REG SCCNXP_SOP_REG
#define SCCNXP_ROP_REG (0x0f)
/* Route helpers */
#define MCTRL_MASK(sig) (0xf << (sig))
#define MCTRL_IBIT(cfg, sig) ((((cfg) >> (sig)) & 0xf) - LINE_IP0)
#define MCTRL_OBIT(cfg, sig) ((((cfg) >> (sig)) & 0xf) - LINE_OP0)
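/*
 * pdata.mctrl_cfg[line] packs one 4-bit field per modem-control signal
 * at bit offset 'sig', holding a LINE_IPx/LINE_OPx code from
 * <linux/platform_data/serial-sccnxp.h>. For example (a sketch of that
 * encoding), routing DTR to output pin OP2 stores the LINE_OP2 code in
 * the nibble at offset DTR_OP, and MCTRL_OBIT() recovers the raw pin
 * index 2 from it.
 */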
#define SCCNXP_HAVE_IO 0x00000001
#define SCCNXP_HAVE_MR0 0x00000002
struct sccnxp_chip {
const char *name;
unsigned int nr;
unsigned long freq_min;
unsigned long freq_std;
unsigned long freq_max;
unsigned int flags;
unsigned int fifosize;
/* Time between read/write cycles */
unsigned int trwd;
};
struct sccnxp_port {
struct uart_driver uart;
struct uart_port port[SCCNXP_MAX_UARTS];
bool opened[SCCNXP_MAX_UARTS];
int irq;
u8 imr;
struct sccnxp_chip *chip;
#ifdef CONFIG_SERIAL_SCCNXP_CONSOLE
struct console console;
#endif
spinlock_t lock;
bool poll;
struct timer_list timer;
struct sccnxp_pdata pdata;
struct regulator *regulator;
};
static const struct sccnxp_chip sc2681 = {
.name = "SC2681",
.nr = 2,
.freq_min = 1000000,
.freq_std = 3686400,
.freq_max = 4000000,
.flags = SCCNXP_HAVE_IO,
.fifosize = 3,
.trwd = 200,
};
static const struct sccnxp_chip sc2691 = {
.name = "SC2691",
.nr = 1,
.freq_min = 1000000,
.freq_std = 3686400,
.freq_max = 4000000,
.flags = 0,
.fifosize = 3,
.trwd = 150,
};
static const struct sccnxp_chip sc2692 = {
.name = "SC2692",
.nr = 2,
.freq_min = 1000000,
.freq_std = 3686400,
.freq_max = 4000000,
.flags = SCCNXP_HAVE_IO,
.fifosize = 3,
.trwd = 30,
};
static const struct sccnxp_chip sc2891 = {
.name = "SC2891",
.nr = 1,
.freq_min = 100000,
.freq_std = 3686400,
.freq_max = 8000000,
.flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0,
.fifosize = 16,
.trwd = 27,
};
static const struct sccnxp_chip sc2892 = {
.name = "SC2892",
.nr = 2,
.freq_min = 100000,
.freq_std = 3686400,
.freq_max = 8000000,
.flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0,
.fifosize = 16,
.trwd = 17,
};
static const struct sccnxp_chip sc28202 = {
.name = "SC28202",
.nr = 2,
.freq_min = 1000000,
.freq_std = 14745600,
.freq_max = 50000000,
.flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0,
.fifosize = 256,
.trwd = 10,
};
static const struct sccnxp_chip sc68681 = {
.name = "SC68681",
.nr = 2,
.freq_min = 1000000,
.freq_std = 3686400,
.freq_max = 4000000,
.flags = SCCNXP_HAVE_IO,
.fifosize = 3,
.trwd = 200,
};
static const struct sccnxp_chip sc68692 = {
.name = "SC68692",
.nr = 2,
.freq_min = 1000000,
.freq_std = 3686400,
.freq_max = 4000000,
.flags = SCCNXP_HAVE_IO,
.fifosize = 3,
.trwd = 200,
};
static u8 sccnxp_read(struct uart_port *port, u8 reg)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
u8 ret;
ret = readb(port->membase + (reg << port->regshift));
ndelay(s->chip->trwd);
return ret;
}
static void sccnxp_write(struct uart_port *port, u8 reg, u8 v)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
writeb(v, port->membase + (reg << port->regshift));
ndelay(s->chip->trwd);
}
static u8 sccnxp_port_read(struct uart_port *port, u8 reg)
{
return sccnxp_read(port, (port->line << 3) + reg);
}
static void sccnxp_port_write(struct uart_port *port, u8 reg, u8 v)
{
sccnxp_write(port, (port->line << 3) + reg, v);
}
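/*
 * Each UART channel occupies an eight-register window in the chip's
 * address space, hence the (line << 3) offset above; regshift
 * additionally spaces the registers for buses that attach the chip on
 * 16- or 32-bit boundaries.
 */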
static int sccnxp_update_best_err(int a, int b, int *besterr)
{
int err = abs(a - b);
if (*besterr > err) {
*besterr = err;
return 0;
}
return 1;
}
static const struct {
u8 csr;
u8 acr;
u8 mr0;
int baud;
} baud_std[] = {
{ 0, ACR_BAUD0, MR0_BAUD_NORMAL, 50, },
{ 0, ACR_BAUD1, MR0_BAUD_NORMAL, 75, },
{ 1, ACR_BAUD0, MR0_BAUD_NORMAL, 110, },
{ 2, ACR_BAUD0, MR0_BAUD_NORMAL, 134, },
{ 3, ACR_BAUD1, MR0_BAUD_NORMAL, 150, },
{ 3, ACR_BAUD0, MR0_BAUD_NORMAL, 200, },
{ 4, ACR_BAUD0, MR0_BAUD_NORMAL, 300, },
{ 0, ACR_BAUD1, MR0_BAUD_EXT1, 450, },
{ 1, ACR_BAUD0, MR0_BAUD_EXT2, 880, },
{ 3, ACR_BAUD1, MR0_BAUD_EXT1, 900, },
{ 5, ACR_BAUD0, MR0_BAUD_NORMAL, 600, },
{ 7, ACR_BAUD0, MR0_BAUD_NORMAL, 1050, },
{ 2, ACR_BAUD0, MR0_BAUD_EXT2, 1076, },
{ 6, ACR_BAUD0, MR0_BAUD_NORMAL, 1200, },
{ 10, ACR_BAUD1, MR0_BAUD_NORMAL, 1800, },
{ 7, ACR_BAUD1, MR0_BAUD_NORMAL, 2000, },
{ 8, ACR_BAUD0, MR0_BAUD_NORMAL, 2400, },
{ 5, ACR_BAUD1, MR0_BAUD_EXT1, 3600, },
{ 9, ACR_BAUD0, MR0_BAUD_NORMAL, 4800, },
{ 10, ACR_BAUD0, MR0_BAUD_NORMAL, 7200, },
{ 11, ACR_BAUD0, MR0_BAUD_NORMAL, 9600, },
{ 8, ACR_BAUD0, MR0_BAUD_EXT1, 14400, },
{ 12, ACR_BAUD1, MR0_BAUD_NORMAL, 19200, },
{ 9, ACR_BAUD0, MR0_BAUD_EXT1, 28800, },
{ 12, ACR_BAUD0, MR0_BAUD_NORMAL, 38400, },
{ 11, ACR_BAUD0, MR0_BAUD_EXT1, 57600, },
{ 12, ACR_BAUD1, MR0_BAUD_EXT1, 115200, },
{ 12, ACR_BAUD0, MR0_BAUD_EXT1, 230400, },
{ 0, 0, 0, 0 }
};
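/*
 * sccnxp_set_baud() below first tries the chip's 16-bit timer divisor,
 * then walks this table (skipping MR0-based entries on chips without
 * MR0), rescaling each nominal rate by uartclk / freq_std. Candidates
 * are ranked with sccnxp_update_best_err(), which returns 0 and
 * records the new error only when |requested - candidate| beats the
 * best seen so far, so the closest CSR/ACR/MR0 triple wins.
 */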
static int sccnxp_set_baud(struct uart_port *port, int baud)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
int div_std, tmp_baud, bestbaud = INT_MAX, besterr = INT_MAX;
struct sccnxp_chip *chip = s->chip;
u8 i, acr = 0, csr = 0, mr0 = 0;
/* Find divisor to load to the timer preset registers */
div_std = DIV_ROUND_CLOSEST(port->uartclk, 2 * 16 * baud);
if ((div_std >= 2) && (div_std <= 0xffff)) {
bestbaud = DIV_ROUND_CLOSEST(port->uartclk, 2 * 16 * div_std);
sccnxp_update_best_err(baud, bestbaud, &besterr);
csr = CSR_TIMER_MODE;
sccnxp_port_write(port, SCCNXP_CTPU_REG, div_std >> 8);
sccnxp_port_write(port, SCCNXP_CTPL_REG, div_std);
/* Issue start timer/counter command */
sccnxp_port_read(port, SCCNXP_START_COUNTER_REG);
}
/* Find best baud from table */
for (i = 0; baud_std[i].baud && besterr; i++) {
if (baud_std[i].mr0 && !(chip->flags & SCCNXP_HAVE_MR0))
continue;
div_std = DIV_ROUND_CLOSEST(chip->freq_std, baud_std[i].baud);
tmp_baud = DIV_ROUND_CLOSEST(port->uartclk, div_std);
if (!sccnxp_update_best_err(baud, tmp_baud, &besterr)) {
acr = baud_std[i].acr;
csr = baud_std[i].csr;
mr0 = baud_std[i].mr0;
bestbaud = tmp_baud;
}
}
if (chip->flags & SCCNXP_HAVE_MR0) {
/* Enable FIFO, set half level for TX */
mr0 |= MR0_FIFO | MR0_TXLVL;
/* Update MR0 */
sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_MRPTR0);
sccnxp_port_write(port, SCCNXP_MR_REG, mr0);
}
sccnxp_port_write(port, SCCNXP_ACR_REG, acr | ACR_TIMER_MODE);
sccnxp_port_write(port, SCCNXP_CSR_REG, (csr << 4) | csr);
if (baud != bestbaud)
dev_dbg(port->dev, "Baudrate desired: %i, calculated: %i\n",
baud, bestbaud);
return bestbaud;
}
static void sccnxp_enable_irq(struct uart_port *port, int mask)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
s->imr |= mask << (port->line * 4);
sccnxp_write(port, SCCNXP_IMR_REG, s->imr);
}
static void sccnxp_disable_irq(struct uart_port *port, int mask)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
s->imr &= ~(mask << (port->line * 4));
sccnxp_write(port, SCCNXP_IMR_REG, s->imr);
}
static void sccnxp_set_bit(struct uart_port *port, int sig, int state)
{
u8 bitmask;
struct sccnxp_port *s = dev_get_drvdata(port->dev);
if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(sig)) {
bitmask = 1 << MCTRL_OBIT(s->pdata.mctrl_cfg[port->line], sig);
if (state)
sccnxp_write(port, SCCNXP_SOP_REG, bitmask);
else
sccnxp_write(port, SCCNXP_ROP_REG, bitmask);
}
}
static void sccnxp_handle_rx(struct uart_port *port)
{
u8 sr, ch, flag;
for (;;) {
sr = sccnxp_port_read(port, SCCNXP_SR_REG);
if (!(sr & SR_RXRDY))
break;
sr &= SR_PE | SR_FE | SR_OVR | SR_BRK;
ch = sccnxp_port_read(port, SCCNXP_RHR_REG);
port->icount.rx++;
flag = TTY_NORMAL;
if (unlikely(sr)) {
if (sr & SR_BRK) {
port->icount.brk++;
sccnxp_port_write(port, SCCNXP_CR_REG,
CR_CMD_BREAK_RESET);
if (uart_handle_break(port))
continue;
} else if (sr & SR_PE)
port->icount.parity++;
else if (sr & SR_FE)
port->icount.frame++;
else if (sr & SR_OVR) {
port->icount.overrun++;
sccnxp_port_write(port, SCCNXP_CR_REG,
CR_CMD_STATUS_RESET);
}
sr &= port->read_status_mask;
if (sr & SR_BRK)
flag = TTY_BREAK;
else if (sr & SR_PE)
flag = TTY_PARITY;
else if (sr & SR_FE)
flag = TTY_FRAME;
else if (sr & SR_OVR)
flag = TTY_OVERRUN;
}
if (uart_handle_sysrq_char(port, ch))
continue;
if (sr & port->ignore_status_mask)
continue;
uart_insert_char(port, sr, SR_OVR, ch, flag);
}
tty_flip_buffer_push(&port->state->port);
}
static void sccnxp_handle_tx(struct uart_port *port)
{
u8 sr;
struct circ_buf *xmit = &port->state->xmit;
struct sccnxp_port *s = dev_get_drvdata(port->dev);
if (unlikely(port->x_char)) {
sccnxp_port_write(port, SCCNXP_THR_REG, port->x_char);
port->icount.tx++;
port->x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
/* Disable TX if FIFO is empty */
if (sccnxp_port_read(port, SCCNXP_SR_REG) & SR_TXEMT) {
sccnxp_disable_irq(port, IMR_TXRDY);
/* Set direction to input */
if (s->chip->flags & SCCNXP_HAVE_IO)
sccnxp_set_bit(port, DIR_OP, 0);
}
return;
}
while (!uart_circ_empty(xmit)) {
sr = sccnxp_port_read(port, SCCNXP_SR_REG);
if (!(sr & SR_TXRDY))
break;
sccnxp_port_write(port, SCCNXP_THR_REG, xmit->buf[xmit->tail]);
uart_xmit_advance(port, 1);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
static void sccnxp_handle_events(struct sccnxp_port *s)
{
int i;
u8 isr;
do {
isr = sccnxp_read(&s->port[0], SCCNXP_ISR_REG);
isr &= s->imr;
if (!isr)
break;
for (i = 0; i < s->uart.nr; i++) {
if (s->opened[i] && (isr & ISR_RXRDY(i)))
sccnxp_handle_rx(&s->port[i]);
if (s->opened[i] && (isr & ISR_TXRDY(i)))
sccnxp_handle_tx(&s->port[i]);
}
} while (1);
}
static void sccnxp_timer(struct timer_list *t)
{
struct sccnxp_port *s = from_timer(s, t, timer);
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
sccnxp_handle_events(s);
spin_unlock_irqrestore(&s->lock, flags);
mod_timer(&s->timer, jiffies + usecs_to_jiffies(s->pdata.poll_time_us));
}
static irqreturn_t sccnxp_ist(int irq, void *dev_id)
{
struct sccnxp_port *s = (struct sccnxp_port *)dev_id;
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
sccnxp_handle_events(s);
spin_unlock_irqrestore(&s->lock, flags);
return IRQ_HANDLED;
}
static void sccnxp_start_tx(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
/* Set direction to output */
if (s->chip->flags & SCCNXP_HAVE_IO)
sccnxp_set_bit(port, DIR_OP, 1);
sccnxp_enable_irq(port, IMR_TXRDY);
spin_unlock_irqrestore(&s->lock, flags);
}
static void sccnxp_stop_tx(struct uart_port *port)
{
/* Do nothing */
}
static void sccnxp_stop_rx(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_DISABLE);
spin_unlock_irqrestore(&s->lock, flags);
}
static unsigned int sccnxp_tx_empty(struct uart_port *port)
{
u8 val;
unsigned long flags;
struct sccnxp_port *s = dev_get_drvdata(port->dev);
spin_lock_irqsave(&s->lock, flags);
val = sccnxp_port_read(port, SCCNXP_SR_REG);
spin_unlock_irqrestore(&s->lock, flags);
return (val & SR_TXEMT) ? TIOCSER_TEMT : 0;
}
static void sccnxp_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
if (!(s->chip->flags & SCCNXP_HAVE_IO))
return;
spin_lock_irqsave(&s->lock, flags);
sccnxp_set_bit(port, DTR_OP, mctrl & TIOCM_DTR);
sccnxp_set_bit(port, RTS_OP, mctrl & TIOCM_RTS);
spin_unlock_irqrestore(&s->lock, flags);
}
static unsigned int sccnxp_get_mctrl(struct uart_port *port)
{
u8 bitmask, ipr;
unsigned long flags;
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
if (!(s->chip->flags & SCCNXP_HAVE_IO))
return mctrl;
spin_lock_irqsave(&s->lock, flags);
ipr = ~sccnxp_read(port, SCCNXP_IPCR_REG);
if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(DSR_IP)) {
bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line],
DSR_IP);
mctrl &= ~TIOCM_DSR;
mctrl |= (ipr & bitmask) ? TIOCM_DSR : 0;
}
if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(CTS_IP)) {
bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line],
CTS_IP);
mctrl &= ~TIOCM_CTS;
mctrl |= (ipr & bitmask) ? TIOCM_CTS : 0;
}
if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(DCD_IP)) {
bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line],
DCD_IP);
mctrl &= ~TIOCM_CAR;
mctrl |= (ipr & bitmask) ? TIOCM_CAR : 0;
}
if (s->pdata.mctrl_cfg[port->line] & MCTRL_MASK(RNG_IP)) {
bitmask = 1 << MCTRL_IBIT(s->pdata.mctrl_cfg[port->line],
RNG_IP);
mctrl &= ~TIOCM_RNG;
mctrl |= (ipr & bitmask) ? TIOCM_RNG : 0;
}
spin_unlock_irqrestore(&s->lock, flags);
return mctrl;
}
static void sccnxp_break_ctl(struct uart_port *port, int break_state)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
sccnxp_port_write(port, SCCNXP_CR_REG, break_state ?
CR_CMD_START_BREAK : CR_CMD_STOP_BREAK);
spin_unlock_irqrestore(&s->lock, flags);
}
static void sccnxp_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
u8 mr1, mr2;
int baud;
spin_lock_irqsave(&s->lock, flags);
/* Mask termios capabilities we don't support */
termios->c_cflag &= ~CMSPAR;
/* Disable RX & TX, reset break condition, status and FIFOs */
sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_RX_RESET |
CR_RX_DISABLE | CR_TX_DISABLE);
sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_TX_RESET);
sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_STATUS_RESET);
sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_BREAK_RESET);
/* Word size */
switch (termios->c_cflag & CSIZE) {
case CS5:
mr1 = MR1_BITS_5;
break;
case CS6:
mr1 = MR1_BITS_6;
break;
case CS7:
mr1 = MR1_BITS_7;
break;
case CS8:
default:
mr1 = MR1_BITS_8;
break;
}
/* Parity */
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & PARODD)
mr1 |= MR1_PAR_ODD;
} else
mr1 |= MR1_PAR_NO;
/* Stop bits */
mr2 = (termios->c_cflag & CSTOPB) ? MR2_STOP2 : MR2_STOP1;
/* Update desired format */
sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_MRPTR1);
sccnxp_port_write(port, SCCNXP_MR_REG, mr1);
sccnxp_port_write(port, SCCNXP_MR_REG, mr2);
/* Set read status mask */
port->read_status_mask = SR_OVR;
if (termios->c_iflag & INPCK)
port->read_status_mask |= SR_PE | SR_FE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= SR_BRK;
/* Set status ignore mask */
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNBRK)
port->ignore_status_mask |= SR_BRK;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= SR_PE;
if (!(termios->c_cflag & CREAD))
port->ignore_status_mask |= SR_PE | SR_OVR | SR_FE | SR_BRK;
/* Setup baudrate */
baud = uart_get_baud_rate(port, termios, old, 50,
(s->chip->flags & SCCNXP_HAVE_MR0) ?
230400 : 38400);
baud = sccnxp_set_baud(port, baud);
/* Update timeout according to new baud rate */
uart_update_timeout(port, termios->c_cflag, baud);
/* Report actual baudrate back to core */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
/* Enable RX & TX */
sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_ENABLE | CR_TX_ENABLE);
spin_unlock_irqrestore(&s->lock, flags);
}
static int sccnxp_startup(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
if (s->chip->flags & SCCNXP_HAVE_IO) {
/* Outputs are controlled manually */
sccnxp_write(port, SCCNXP_OPCR_REG, 0);
}
/* Reset break condition, status and FIFOs */
sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_RX_RESET);
sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_TX_RESET);
sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_STATUS_RESET);
sccnxp_port_write(port, SCCNXP_CR_REG, CR_CMD_BREAK_RESET);
/* Enable RX & TX */
sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_ENABLE | CR_TX_ENABLE);
/* Enable RX interrupt */
sccnxp_enable_irq(port, IMR_RXRDY);
s->opened[port->line] = 1;
spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
static void sccnxp_shutdown(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
s->opened[port->line] = 0;
/* Disable interrupts */
sccnxp_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
/* Disable TX & RX */
sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_DISABLE | CR_TX_DISABLE);
/* Leave direction to input */
if (s->chip->flags & SCCNXP_HAVE_IO)
sccnxp_set_bit(port, DIR_OP, 0);
spin_unlock_irqrestore(&s->lock, flags);
}
static const char *sccnxp_type(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
return (port->type == PORT_SC26XX) ? s->chip->name : NULL;
}
static void sccnxp_release_port(struct uart_port *port)
{
/* Do nothing */
}
static int sccnxp_request_port(struct uart_port *port)
{
/* Do nothing */
return 0;
}
static void sccnxp_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_SC26XX;
}
static int sccnxp_verify_port(struct uart_port *port, struct serial_struct *s)
{
if ((s->type == PORT_UNKNOWN) || (s->type == PORT_SC26XX))
return 0;
if (s->irq == port->irq)
return 0;
return -EINVAL;
}
static const struct uart_ops sccnxp_ops = {
.tx_empty = sccnxp_tx_empty,
.set_mctrl = sccnxp_set_mctrl,
.get_mctrl = sccnxp_get_mctrl,
.stop_tx = sccnxp_stop_tx,
.start_tx = sccnxp_start_tx,
.stop_rx = sccnxp_stop_rx,
.break_ctl = sccnxp_break_ctl,
.startup = sccnxp_startup,
.shutdown = sccnxp_shutdown,
.set_termios = sccnxp_set_termios,
.type = sccnxp_type,
.release_port = sccnxp_release_port,
.request_port = sccnxp_request_port,
.config_port = sccnxp_config_port,
.verify_port = sccnxp_verify_port,
};
#ifdef CONFIG_SERIAL_SCCNXP_CONSOLE
static void sccnxp_console_putchar(struct uart_port *port, unsigned char c)
{
int tries = 100000;
while (tries--) {
if (sccnxp_port_read(port, SCCNXP_SR_REG) & SR_TXRDY) {
sccnxp_port_write(port, SCCNXP_THR_REG, c);
break;
}
barrier();
}
}
static void sccnxp_console_write(struct console *co, const char *c, unsigned n)
{
struct sccnxp_port *s = (struct sccnxp_port *)co->data;
struct uart_port *port = &s->port[co->index];
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
uart_console_write(port, c, n, sccnxp_console_putchar);
spin_unlock_irqrestore(&s->lock, flags);
}
static int sccnxp_console_setup(struct console *co, char *options)
{
struct sccnxp_port *s = (struct sccnxp_port *)co->data;
struct uart_port *port = &s->port[(co->index > 0) ? co->index : 0];
int baud = 9600, bits = 8, parity = 'n', flow = 'n';
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
#endif
static const struct platform_device_id sccnxp_id_table[] = {
{ .name = "sc2681", .driver_data = (kernel_ulong_t)&sc2681, },
{ .name = "sc2691", .driver_data = (kernel_ulong_t)&sc2691, },
{ .name = "sc2692", .driver_data = (kernel_ulong_t)&sc2692, },
{ .name = "sc2891", .driver_data = (kernel_ulong_t)&sc2891, },
{ .name = "sc2892", .driver_data = (kernel_ulong_t)&sc2892, },
{ .name = "sc28202", .driver_data = (kernel_ulong_t)&sc28202, },
{ .name = "sc68681", .driver_data = (kernel_ulong_t)&sc68681, },
{ .name = "sc68692", .driver_data = (kernel_ulong_t)&sc68692, },
{ }
};
MODULE_DEVICE_TABLE(platform, sccnxp_id_table);
static int sccnxp_probe(struct platform_device *pdev)
{
struct sccnxp_pdata *pdata = dev_get_platdata(&pdev->dev);
struct resource *res;
int i, ret, uartclk;
struct sccnxp_port *s;
void __iomem *membase;
struct clk *clk;
membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(membase))
return PTR_ERR(membase);
s = devm_kzalloc(&pdev->dev, sizeof(struct sccnxp_port), GFP_KERNEL);
if (!s) {
dev_err(&pdev->dev, "Error allocating port structure\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, s);
spin_lock_init(&s->lock);
s->chip = (struct sccnxp_chip *)pdev->id_entry->driver_data;
s->regulator = devm_regulator_get(&pdev->dev, "vcc");
if (!IS_ERR(s->regulator)) {
ret = regulator_enable(s->regulator);
if (ret) {
dev_err(&pdev->dev,
"Failed to enable regulator: %i\n", ret);
return ret;
}
} else if (PTR_ERR(s->regulator) == -EPROBE_DEFER)
return -EPROBE_DEFER;
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
if (ret == -EPROBE_DEFER)
goto err_out;
uartclk = 0;
} else {
uartclk = clk_get_rate(clk);
}
if (!uartclk) {
dev_notice(&pdev->dev, "Using default clock frequency\n");
uartclk = s->chip->freq_std;
}
/* Check input frequency */
if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) {
dev_err(&pdev->dev, "Frequency out of bounds\n");
ret = -EINVAL;
goto err_out;
}
if (pdata)
memcpy(&s->pdata, pdata, sizeof(struct sccnxp_pdata));
if (s->pdata.poll_time_us) {
dev_info(&pdev->dev, "Using poll mode, resolution %u usecs\n",
s->pdata.poll_time_us);
s->poll = 1;
}
if (!s->poll) {
s->irq = platform_get_irq(pdev, 0);
if (s->irq < 0) {
ret = -ENXIO;
goto err_out;
}
}
s->uart.owner = THIS_MODULE;
s->uart.dev_name = "ttySC";
s->uart.major = SCCNXP_MAJOR;
s->uart.minor = SCCNXP_MINOR;
s->uart.nr = s->chip->nr;
#ifdef CONFIG_SERIAL_SCCNXP_CONSOLE
s->uart.cons = &s->console;
s->uart.cons->device = uart_console_device;
s->uart.cons->write = sccnxp_console_write;
s->uart.cons->setup = sccnxp_console_setup;
s->uart.cons->flags = CON_PRINTBUFFER;
s->uart.cons->index = -1;
s->uart.cons->data = s;
strcpy(s->uart.cons->name, "ttySC");
#endif
ret = uart_register_driver(&s->uart);
if (ret) {
dev_err(&pdev->dev, "Registering UART driver failed\n");
goto err_out;
}
for (i = 0; i < s->uart.nr; i++) {
s->port[i].line = i;
s->port[i].dev = &pdev->dev;
s->port[i].irq = s->irq;
s->port[i].type = PORT_SC26XX;
s->port[i].fifosize = s->chip->fifosize;
s->port[i].flags = UPF_SKIP_TEST | UPF_FIXED_TYPE;
s->port[i].iotype = UPIO_MEM;
s->port[i].mapbase = res->start;
s->port[i].membase = membase;
s->port[i].regshift = s->pdata.reg_shift;
s->port[i].uartclk = uartclk;
s->port[i].ops = &sccnxp_ops;
s->port[i].has_sysrq = IS_ENABLED(CONFIG_SERIAL_SCCNXP_CONSOLE);
uart_add_one_port(&s->uart, &s->port[i]);
/* Set direction to input */
if (s->chip->flags & SCCNXP_HAVE_IO)
sccnxp_set_bit(&s->port[i], DIR_OP, 0);
}
/* Disable interrupts */
s->imr = 0;
sccnxp_write(&s->port[0], SCCNXP_IMR_REG, 0);
if (!s->poll) {
ret = devm_request_threaded_irq(&pdev->dev, s->irq, NULL,
sccnxp_ist,
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
dev_name(&pdev->dev), s);
if (!ret)
return 0;
dev_err(&pdev->dev, "Unable to reguest IRQ %i\n", s->irq);
} else {
timer_setup(&s->timer, sccnxp_timer, 0);
mod_timer(&s->timer, jiffies +
usecs_to_jiffies(s->pdata.poll_time_us));
return 0;
}
uart_unregister_driver(&s->uart);
err_out:
if (!IS_ERR(s->regulator))
regulator_disable(s->regulator);
return ret;
}
static int sccnxp_remove(struct platform_device *pdev)
{
int i;
struct sccnxp_port *s = platform_get_drvdata(pdev);
if (!s->poll)
devm_free_irq(&pdev->dev, s->irq, s);
else
del_timer_sync(&s->timer);
for (i = 0; i < s->uart.nr; i++)
uart_remove_one_port(&s->uart, &s->port[i]);
uart_unregister_driver(&s->uart);
if (!IS_ERR(s->regulator))
return regulator_disable(s->regulator);
return 0;
}
static struct platform_driver sccnxp_uart_driver = {
.driver = {
.name = SCCNXP_NAME,
},
.probe = sccnxp_probe,
.remove = sccnxp_remove,
.id_table = sccnxp_id_table,
};
module_platform_driver(sccnxp_uart_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Shiyan <[email protected]>");
MODULE_DESCRIPTION("SCCNXP serial driver");
| linux-master | drivers/tty/serial/sccnxp.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* icom.c
*
* Copyright (C) 2001 IBM Corporation. All rights reserved.
*
* Serial device driver.
*
* Based on code from serial.c
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/termios.h>
#include <linux/fs.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/firmware.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
/* #define ICOM_TRACE to enable port trace capabilities */
#define ICOM_DRIVER_NAME "icom"
#define NR_PORTS 128
static const unsigned int icom_acfg_baud[] = {
300,
600,
900,
1200,
1800,
2400,
3600,
4800,
7200,
9600,
14400,
19200,
28800,
38400,
57600,
76800,
115200,
153600,
230400,
307200,
460800,
};
#define BAUD_TABLE_LIMIT (ARRAY_SIZE(icom_acfg_baud) - 1)
struct icom_regs {
u32 control; /* Adapter Control Register */
u32 interrupt; /* Adapter Interrupt Register */
u32 int_mask; /* Adapter Interrupt Mask Reg */
u32 int_pri; /* Adapter Interrupt Priority r */
u32 int_reg_b; /* Adapter non-masked Interrupt */
u32 resvd01;
u32 resvd02;
u32 resvd03;
u32 control_2; /* Adapter Control Register 2 */
u32 interrupt_2; /* Adapter Interrupt Register 2 */
u32 int_mask_2; /* Adapter Interrupt Mask 2 */
u32 int_pri_2; /* Adapter Interrupt Prior 2 */
u32 int_reg_2b; /* Adapter non-masked 2 */
};
struct func_dram {
u32 reserved[108]; /* 0-1B0 reserved by personality code */
u32 RcvStatusAddr; /* 1B0-1B3 Status Address for Next rcv */
u8 RcvStnAddr; /* 1B4 Receive Station Addr */
u8 IdleState; /* 1B5 Idle State */
u8 IdleMonitor; /* 1B6 Idle Monitor */
u8 FlagFillIdleTimer; /* 1B7 Flag Fill Idle Timer */
u32 XmitStatusAddr; /* 1B8-1BB Transmit Status Address */
u8 StartXmitCmd; /* 1BC Start Xmit Command */
u8 HDLCConfigReg; /* 1BD Reserved */
u8 CauseCode; /* 1BE Cause code for fatal error */
u8 xchar; /* 1BF High priority send */
u32 reserved3; /* 1C0-1C3 Reserved */
u8 PrevCmdReg; /* 1C4 Reserved */
u8 CmdReg; /* 1C5 Command Register */
u8 async_config2; /* 1C6 Async Config Byte 2 */
u8 async_config3; /* 1C7 Async Config Byte 3 */
u8 dce_resvd[20]; /* 1C8-1DB DCE Rsvd */
u8 dce_resvd21; /* 1DC DCE Rsvd (21st byte) */
u8 misc_flags; /* 1DD misc flags */
#define V2_HARDWARE 0x40
#define ICOM_HDW_ACTIVE 0x01
u8 call_length; /* 1DE Phone #/CFI buff ln */
u8 call_length2; /* 1DF Upper byte (unused) */
u32 call_addr; /* 1E0-1E3 Phn #/CFI buff addr */
u16 timer_value; /* 1E4-1E5 general timer value */
u8 timer_command; /* 1E6 general timer cmd */
u8 dce_command; /* 1E7 dce command reg */
u8 dce_cmd_status; /* 1E8 dce command stat */
u8 x21_r1_ioff; /* 1E9 dce ready counter */
u8 x21_r0_ioff; /* 1EA dce not ready ctr */
u8 x21_ralt_ioff; /* 1EB dce CNR counter */
u8 x21_r1_ion; /* 1EC dce ready I on ctr */
u8 rsvd_ier; /* 1ED Rsvd for IER (if needed) */
u8 ier; /* 1EE Interrupt Enable */
u8 isr; /* 1EF Input Signal Reg */
u8 osr; /* 1F0 Output Signal Reg */
u8 reset; /* 1F1 Reset/Reload Reg */
u8 disable; /* 1F2 Disable Reg */
u8 sync; /* 1F3 Sync Reg */
u8 error_stat; /* 1F4 Error Status */
u8 cable_id; /* 1F5 Cable ID */
u8 cs_length; /* 1F6 CS Load Length */
u8 mac_length; /* 1F7 Mac Load Length */
u32 cs_load_addr; /* 1F8-1FB Call Load PCI Addr */
u32 mac_load_addr; /* 1FC-1FF Mac Load PCI Addr */
};
/*
* adapter defines and structures
*/
#define ICOM_CONTROL_START_A 0x00000008
#define ICOM_CONTROL_STOP_A 0x00000004
#define ICOM_CONTROL_START_B 0x00000002
#define ICOM_CONTROL_STOP_B 0x00000001
#define ICOM_CONTROL_START_C 0x00000008
#define ICOM_CONTROL_STOP_C 0x00000004
#define ICOM_CONTROL_START_D 0x00000002
#define ICOM_CONTROL_STOP_D 0x00000001
#define ICOM_IRAM_OFFSET 0x1000
#define ICOM_IRAM_SIZE 0x0C00
#define ICOM_DCE_IRAM_OFFSET 0x0A00
#define ICOM_CABLE_ID_VALID 0x01
#define ICOM_CABLE_ID_MASK 0xF0
#define ICOM_DISABLE 0x80
#define CMD_XMIT_RCV_ENABLE 0xC0
#define CMD_XMIT_ENABLE 0x40
#define CMD_RCV_DISABLE 0x00
#define CMD_RCV_ENABLE 0x80
#define CMD_RESTART 0x01
#define CMD_HOLD_XMIT 0x02
#define CMD_SND_BREAK 0x04
#define RS232_CABLE 0x06
#define V24_CABLE 0x0E
#define V35_CABLE 0x0C
#define V36_CABLE 0x02
#define NO_CABLE 0x00
#define START_DOWNLOAD 0x80
#define ICOM_INT_MASK_PRC_A 0x00003FFF
#define ICOM_INT_MASK_PRC_B 0x3FFF0000
#define ICOM_INT_MASK_PRC_C 0x00003FFF
#define ICOM_INT_MASK_PRC_D 0x3FFF0000
#define INT_RCV_COMPLETED 0x1000
#define INT_XMIT_COMPLETED 0x2000
#define INT_IDLE_DETECT 0x0800
#define INT_RCV_DISABLED 0x0400
#define INT_XMIT_DISABLED 0x0200
#define INT_RCV_XMIT_SHUTDOWN 0x0100
#define INT_FATAL_ERROR 0x0080
#define INT_CABLE_PULL 0x0020
#define INT_SIGNAL_CHANGE 0x0010
#define HDLC_PPP_PURE_ASYNC 0x02
#define HDLC_FF_FILL 0x00
#define HDLC_HDW_FLOW 0x01
#define START_XMIT 0x80
#define ICOM_ACFG_DRIVE1 0x20
#define ICOM_ACFG_NO_PARITY 0x00
#define ICOM_ACFG_PARITY_ENAB 0x02
#define ICOM_ACFG_PARITY_ODD 0x01
#define ICOM_ACFG_8BPC 0x00
#define ICOM_ACFG_7BPC 0x04
#define ICOM_ACFG_6BPC 0x08
#define ICOM_ACFG_5BPC 0x0C
#define ICOM_ACFG_1STOP_BIT 0x00
#define ICOM_ACFG_2STOP_BIT 0x10
#define ICOM_DTR 0x80
#define ICOM_RTS 0x40
#define ICOM_RI 0x08
#define ICOM_DSR 0x80
#define ICOM_DCD 0x20
#define ICOM_CTS 0x40
#define NUM_XBUFFS 1
#define NUM_RBUFFS 2
#define RCV_BUFF_SZ 0x0200
#define XMIT_BUFF_SZ 0x1000
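/*
 * Per-port DMA layout (a sketch of what get_port_memory() sets up
 * below): one 4 KiB coherent allocation carries the single
 * XMIT_BUFF_SZ transmit buffer, another carries the NUM_RBUFFS receive
 * segments of RCV_BUFF_SZ bytes each.
 */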
struct statusArea {
/**********************************************/
/* Transmit Status Area */
/**********************************************/
struct xmit_status_area{
__le32 leNext; /* Next entry in Little Endian on Adapter */
__le32 leNextASD;
__le32 leBuffer; /* Buffer for entry in LE for Adapter */
__le16 leLengthASD;
__le16 leOffsetASD;
__le16 leLength; /* Length of data in segment */
__le16 flags;
#define SA_FLAGS_DONE 0x0080 /* Done with Segment */
#define SA_FLAGS_CONTINUED 0x8000 /* More Segments */
#define SA_FLAGS_IDLE 0x4000 /* Mark IDLE after frm */
#define SA_FLAGS_READY_TO_XMIT 0x0800
#define SA_FLAGS_STAT_MASK 0x007F
} xmit[NUM_XBUFFS];
/**********************************************/
/* Receive Status Area */
/**********************************************/
struct {
__le32 leNext; /* Next entry in Little Endian on Adapter */
__le32 leNextASD;
__le32 leBuffer; /* Buffer for entry in LE for Adapter */
__le16 WorkingLength; /* size of segment */
__le16 reserv01;
__le16 leLength; /* Length of data in segment */
__le16 flags;
#define SA_FL_RCV_DONE 0x0010 /* Data ready */
#define SA_FLAGS_OVERRUN 0x0040
#define SA_FLAGS_PARITY_ERROR 0x0080
#define SA_FLAGS_FRAME_ERROR 0x0001
#define SA_FLAGS_FRAME_TRUNC 0x0002
#define SA_FLAGS_BREAK_DET 0x0004 /* set conditionally by device driver, not hardware */
#define SA_FLAGS_RCV_MASK 0xFFE6
} rcv[NUM_RBUFFS];
};
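/*
 * The status area is shared with the adapter over DMA: every entry is
 * a little-endian descriptor -- leNext chains the ring, leBuffer
 * points at the data segment -- and the card signals completion by
 * setting SA_FLAGS_DONE (transmit) or SA_FL_RCV_DONE (receive) in
 * 'flags'.
 */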
struct icom_adapter;
#define ICOM_MAJOR 243
#define ICOM_MINOR_START 0
struct icom_port {
struct uart_port uart_port;
unsigned char cable_id;
unsigned char read_status_mask;
unsigned char ignore_status_mask;
void __iomem * int_reg;
struct icom_regs __iomem *global_reg;
struct func_dram __iomem *dram;
int port;
struct statusArea *statStg;
dma_addr_t statStg_pci;
__le32 *xmitRestart;
dma_addr_t xmitRestart_pci;
unsigned char *xmit_buf;
dma_addr_t xmit_buf_pci;
unsigned char *recv_buf;
dma_addr_t recv_buf_pci;
int next_rcv;
int status;
#define ICOM_PORT_ACTIVE 1 /* Port exists. */
#define ICOM_PORT_OFF 0 /* Port does not exist. */
struct icom_adapter *adapter;
};
struct icom_adapter {
void __iomem * base_addr;
unsigned long base_addr_pci;
struct pci_dev *pci_dev;
struct icom_port port_info[4];
int index;
int version;
#define ADAPTER_V1 0x0001
#define ADAPTER_V2 0x0002
u32 subsystem_id;
#define FOUR_PORT_MODEL 0x0252
#define V2_TWO_PORTS_RVX 0x021A
#define V2_ONE_PORT_RVX_ONE_PORT_IMBED_MDM 0x0251
int numb_ports;
struct list_head icom_adapter_entry;
struct kref kref;
};
/* prototype */
extern void iCom_sercons_init(void);
struct lookup_proc_table {
u32 __iomem *global_control_reg;
unsigned long processor_id;
};
struct lookup_int_table {
u32 __iomem *global_int_mask;
unsigned long processor_id;
};
static inline struct icom_port *to_icom_port(struct uart_port *port)
{
return container_of(port, struct icom_port, uart_port);
}
static const struct pci_device_id icom_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_IBM,
.device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = ADAPTER_V1,
},
{
.vendor = PCI_VENDOR_ID_IBM,
.device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
.subvendor = PCI_VENDOR_ID_IBM,
.subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX,
.driver_data = ADAPTER_V2,
},
{
.vendor = PCI_VENDOR_ID_IBM,
.device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
.subvendor = PCI_VENDOR_ID_IBM,
.subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM,
.driver_data = ADAPTER_V2,
},
{
.vendor = PCI_VENDOR_ID_IBM,
.device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
.subvendor = PCI_VENDOR_ID_IBM,
.subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL,
.driver_data = ADAPTER_V2,
},
{
.vendor = PCI_VENDOR_ID_IBM,
.device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
.subvendor = PCI_VENDOR_ID_IBM,
.subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE,
.driver_data = ADAPTER_V2,
},
{}
};
static struct lookup_proc_table start_proc[4] = {
{NULL, ICOM_CONTROL_START_A},
{NULL, ICOM_CONTROL_START_B},
{NULL, ICOM_CONTROL_START_C},
{NULL, ICOM_CONTROL_START_D}
};
static struct lookup_proc_table stop_proc[4] = {
{NULL, ICOM_CONTROL_STOP_A},
{NULL, ICOM_CONTROL_STOP_B},
{NULL, ICOM_CONTROL_STOP_C},
{NULL, ICOM_CONTROL_STOP_D}
};
static struct lookup_int_table int_mask_tbl[4] = {
{NULL, ICOM_INT_MASK_PRC_A},
{NULL, ICOM_INT_MASK_PRC_B},
{NULL, ICOM_INT_MASK_PRC_C},
{NULL, ICOM_INT_MASK_PRC_D},
};
MODULE_DEVICE_TABLE(pci, icom_pci_table);
static LIST_HEAD(icom_adapter_head);
/* spinlock for adapter initialization and changing adapter operations */
static DEFINE_SPINLOCK(icom_lock);
#ifdef ICOM_TRACE
static inline void trace(struct icom_port *icom_port, char *trace_pt,
unsigned long trace_data)
{
dev_info(&icom_port->adapter->pci_dev->dev, ":%d:%s - %lx\n",
icom_port->port, trace_pt, trace_data);
}
#else
static inline void trace(struct icom_port *icom_port, char *trace_pt, unsigned long trace_data) {}
#endif
static void icom_kref_release(struct kref *kref);
static void free_port_memory(struct icom_port *icom_port)
{
struct pci_dev *dev = icom_port->adapter->pci_dev;
trace(icom_port, "RET_PORT_MEM", 0);
if (icom_port->recv_buf) {
dma_free_coherent(&dev->dev, 4096, icom_port->recv_buf,
icom_port->recv_buf_pci);
icom_port->recv_buf = NULL;
}
if (icom_port->xmit_buf) {
dma_free_coherent(&dev->dev, 4096, icom_port->xmit_buf,
icom_port->xmit_buf_pci);
icom_port->xmit_buf = NULL;
}
if (icom_port->statStg) {
dma_free_coherent(&dev->dev, 4096, icom_port->statStg,
icom_port->statStg_pci);
icom_port->statStg = NULL;
}
if (icom_port->xmitRestart) {
dma_free_coherent(&dev->dev, 4096, icom_port->xmitRestart,
icom_port->xmitRestart_pci);
icom_port->xmitRestart = NULL;
}
}
static int get_port_memory(struct icom_port *icom_port)
{
int index;
unsigned long stgAddr;
unsigned long startStgAddr;
unsigned long offset;
struct pci_dev *dev = icom_port->adapter->pci_dev;
icom_port->xmit_buf =
dma_alloc_coherent(&dev->dev, 4096, &icom_port->xmit_buf_pci,
GFP_KERNEL);
if (!icom_port->xmit_buf) {
dev_err(&dev->dev, "Can not allocate Transmit buffer\n");
return -ENOMEM;
}
trace(icom_port, "GET_PORT_MEM",
(unsigned long) icom_port->xmit_buf);
icom_port->recv_buf =
dma_alloc_coherent(&dev->dev, 4096, &icom_port->recv_buf_pci,
GFP_KERNEL);
if (!icom_port->recv_buf) {
dev_err(&dev->dev, "Can not allocate Receive buffer\n");
free_port_memory(icom_port);
return -ENOMEM;
}
trace(icom_port, "GET_PORT_MEM",
(unsigned long) icom_port->recv_buf);
icom_port->statStg =
dma_alloc_coherent(&dev->dev, 4096, &icom_port->statStg_pci,
GFP_KERNEL);
if (!icom_port->statStg) {
dev_err(&dev->dev, "Can not allocate Status buffer\n");
free_port_memory(icom_port);
return -ENOMEM;
}
trace(icom_port, "GET_PORT_MEM",
(unsigned long) icom_port->statStg);
icom_port->xmitRestart =
dma_alloc_coherent(&dev->dev, 4096, &icom_port->xmitRestart_pci,
GFP_KERNEL);
if (!icom_port->xmitRestart) {
dev_err(&dev->dev,
"Can not allocate xmit Restart buffer\n");
free_port_memory(icom_port);
return -ENOMEM;
}
/*
* FODs: Frame Out Descriptor Queue, a FIFO queue of frames
* waiting to be transmitted.
*/
stgAddr = (unsigned long) icom_port->statStg;
for (index = 0; index < NUM_XBUFFS; index++) {
trace(icom_port, "FOD_ADDR", stgAddr);
stgAddr = stgAddr + sizeof(icom_port->statStg->xmit[0]);
if (index < (NUM_XBUFFS - 1)) {
memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
icom_port->statStg->xmit[index].leLengthASD =
cpu_to_le16(XMIT_BUFF_SZ);
trace(icom_port, "FOD_ADDR", stgAddr);
trace(icom_port, "FOD_XBUFF",
(unsigned long) icom_port->xmit_buf);
icom_port->statStg->xmit[index].leBuffer =
cpu_to_le32(icom_port->xmit_buf_pci);
} else if (index == (NUM_XBUFFS - 1)) {
memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
icom_port->statStg->xmit[index].leLengthASD =
cpu_to_le16(XMIT_BUFF_SZ);
trace(icom_port, "FOD_XBUFF",
(unsigned long) icom_port->xmit_buf);
icom_port->statStg->xmit[index].leBuffer =
cpu_to_le32(icom_port->xmit_buf_pci);
} else {
memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
}
}
/* FIDs: Frame In Descriptors, the receive descriptor ring */
startStgAddr = stgAddr;
/* fill in every entry, even if no buffer */
for (index = 0; index < NUM_RBUFFS; index++) {
trace(icom_port, "FID_ADDR", stgAddr);
stgAddr = stgAddr + sizeof(icom_port->statStg->rcv[0]);
icom_port->statStg->rcv[index].leLength = 0;
icom_port->statStg->rcv[index].WorkingLength =
cpu_to_le16(RCV_BUFF_SZ);
if (index < (NUM_RBUFFS - 1)) {
offset = stgAddr - (unsigned long) icom_port->statStg;
icom_port->statStg->rcv[index].leNext =
cpu_to_le32(icom_port->statStg_pci + offset);
trace(icom_port, "FID_RBUFF",
(unsigned long) icom_port->recv_buf);
icom_port->statStg->rcv[index].leBuffer =
cpu_to_le32(icom_port->recv_buf_pci);
} else if (index == (NUM_RBUFFS - 1)) {
offset = startStgAddr - (unsigned long) icom_port->statStg;
icom_port->statStg->rcv[index].leNext =
cpu_to_le32(icom_port->statStg_pci + offset);
trace(icom_port, "FID_RBUFF",
(unsigned long) icom_port->recv_buf + 2048);
icom_port->statStg->rcv[index].leBuffer =
cpu_to_le32(icom_port->recv_buf_pci + 2048);
} else {
icom_port->statStg->rcv[index].leNext = 0;
icom_port->statStg->rcv[index].leBuffer = 0;
}
}
return 0;
}
static void stop_processor(struct icom_port *icom_port)
{
unsigned long temp;
unsigned long flags;
int port;
spin_lock_irqsave(&icom_lock, flags);
port = icom_port->port;
if (port >= ARRAY_SIZE(stop_proc)) {
dev_err(&icom_port->adapter->pci_dev->dev,
"Invalid port assignment\n");
goto unlock;
}
if (port == 0 || port == 1)
stop_proc[port].global_control_reg = &icom_port->global_reg->control;
else
stop_proc[port].global_control_reg = &icom_port->global_reg->control_2;
temp = readl(stop_proc[port].global_control_reg);
temp = (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id;
writel(temp, stop_proc[port].global_control_reg);
/* write flush */
readl(stop_proc[port].global_control_reg);
unlock:
spin_unlock_irqrestore(&icom_lock, flags);
}
static void start_processor(struct icom_port *icom_port)
{
unsigned long temp;
unsigned long flags;
int port;
spin_lock_irqsave(&icom_lock, flags);
port = icom_port->port;
if (port >= ARRAY_SIZE(start_proc)) {
dev_err(&icom_port->adapter->pci_dev->dev,
"Invalid port assignment\n");
goto unlock;
}
if (port == 0 || port == 1)
start_proc[port].global_control_reg = &icom_port->global_reg->control;
else
start_proc[port].global_control_reg = &icom_port->global_reg->control_2;
temp = readl(start_proc[port].global_control_reg);
temp = (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id;
writel(temp, start_proc[port].global_control_reg);
/* write flush */
readl(start_proc[port].global_control_reg);
unlock:
spin_unlock_irqrestore(&icom_lock, flags);
}
static void load_code(struct icom_port *icom_port)
{
const struct firmware *fw;
char __iomem *iram_ptr;
int index;
int status = 0;
void __iomem *dram_ptr = icom_port->dram;
dma_addr_t temp_pci;
unsigned char *new_page = NULL;
unsigned char cable_id = NO_CABLE;
struct pci_dev *dev = icom_port->adapter->pci_dev;
/* Clear out any pending interrupts */
writew(0x3FFF, icom_port->int_reg);
trace(icom_port, "CLEAR_INTERRUPTS", 0);
/* Stop processor */
stop_processor(icom_port);
/* Zero out DRAM */
memset_io(dram_ptr, 0, 512);
/* Load Call Setup into Adapter */
if (request_firmware(&fw, "icom_call_setup.bin", &dev->dev) < 0) {
dev_err(&dev->dev,"Unable to load icom_call_setup.bin firmware image\n");
status = -1;
goto load_code_exit;
}
if (fw->size > ICOM_DCE_IRAM_OFFSET) {
dev_err(&dev->dev, "Invalid firmware image for icom_call_setup.bin found.\n");
release_firmware(fw);
status = -1;
goto load_code_exit;
}
iram_ptr = (char __iomem *)icom_port->dram + ICOM_IRAM_OFFSET;
for (index = 0; index < fw->size; index++)
writeb(fw->data[index], &iram_ptr[index]);
release_firmware(fw);
/* Load Resident DCE portion of Adapter */
if (request_firmware(&fw, "icom_res_dce.bin", &dev->dev) < 0) {
dev_err(&dev->dev,"Unable to load icom_res_dce.bin firmware image\n");
status = -1;
goto load_code_exit;
}
if (fw->size > ICOM_IRAM_SIZE) {
dev_err(&dev->dev, "Invalid firmware image for icom_res_dce.bin found.\n");
release_firmware(fw);
status = -1;
goto load_code_exit;
}
iram_ptr = (char __iomem *) icom_port->dram + ICOM_IRAM_OFFSET;
for (index = ICOM_DCE_IRAM_OFFSET; index < fw->size; index++)
writeb(fw->data[index], &iram_ptr[index]);
release_firmware(fw);
/* Set Hardware level */
if (icom_port->adapter->version == ADAPTER_V2)
writeb(V2_HARDWARE, &(icom_port->dram->misc_flags));
/* Start the processor in Adapter */
start_processor(icom_port);
writeb((HDLC_PPP_PURE_ASYNC | HDLC_FF_FILL),
&(icom_port->dram->HDLCConfigReg));
writeb(0x04, &(icom_port->dram->FlagFillIdleTimer)); /* 0.5 seconds */
writeb(0x00, &(icom_port->dram->CmdReg));
writeb(0x10, &(icom_port->dram->async_config3));
writeb((ICOM_ACFG_DRIVE1 | ICOM_ACFG_NO_PARITY | ICOM_ACFG_8BPC |
ICOM_ACFG_1STOP_BIT), &(icom_port->dram->async_config2));
/*
* Set up data in icom DRAM to indicate where personality
* code is located and its length.
*/
new_page = dma_alloc_coherent(&dev->dev, 4096, &temp_pci, GFP_KERNEL);
if (!new_page) {
dev_err(&dev->dev, "Can not allocate DMA buffer\n");
status = -1;
goto load_code_exit;
}
if (request_firmware(&fw, "icom_asc.bin", &dev->dev) < 0) {
dev_err(&dev->dev,"Unable to load icom_asc.bin firmware image\n");
status = -1;
goto load_code_exit;
}
if (fw->size > ICOM_DCE_IRAM_OFFSET) {
dev_err(&dev->dev, "Invalid firmware image for icom_asc.bin found.\n");
release_firmware(fw);
status = -1;
goto load_code_exit;
}
for (index = 0; index < fw->size; index++)
new_page[index] = fw->data[index];
writeb((char) ((fw->size + 16)/16), &icom_port->dram->mac_length);
writel(temp_pci, &icom_port->dram->mac_load_addr);
release_firmware(fw);
/*
* Setting the syncReg to 0x80 causes the adapter to start downloading
* the personality code into adapter instruction RAM. Once the code is
* loaded, it will begin executing and, based on the information
* provided above, will start DMAing data from shared memory to
* adapter DRAM.
*
* The wait loop below verifies that this write operation has been
* done and processed.
*/
writeb(START_DOWNLOAD, &icom_port->dram->sync);
/* Wait max 1 Sec for data download and processor to start */
for (index = 0; index < 10; index++) {
msleep(100);
if (readb(&icom_port->dram->misc_flags) & ICOM_HDW_ACTIVE)
break;
}
if (index == 10)
status = -1;
/*
* check Cable ID
*/
cable_id = readb(&icom_port->dram->cable_id);
if (cable_id & ICOM_CABLE_ID_VALID) {
/* Get cable ID into the lower 4 bits (standard form) */
cable_id = (cable_id & ICOM_CABLE_ID_MASK) >> 4;
icom_port->cable_id = cable_id;
} else {
dev_err(&dev->dev,"Invalid or no cable attached\n");
icom_port->cable_id = NO_CABLE;
}
load_code_exit:
if (status != 0) {
/* Clear out any pending interrupts */
writew(0x3FFF, icom_port->int_reg);
/* Turn off port */
writeb(ICOM_DISABLE, &(icom_port->dram->disable));
/* Stop processor */
stop_processor(icom_port);
dev_err(&icom_port->adapter->pci_dev->dev,"Port not operational\n");
}
if (new_page != NULL)
dma_free_coherent(&dev->dev, 4096, new_page, temp_pci);
}
static int startup(struct icom_port *icom_port)
{
unsigned long temp;
unsigned char cable_id, raw_cable_id;
unsigned long flags;
int port;
trace(icom_port, "STARTUP", 0);
if (!icom_port->dram) {
/* should NEVER be NULL */
dev_err(&icom_port->adapter->pci_dev->dev,
"Unusable Port, port configuration missing\n");
return -ENODEV;
}
/*
* check Cable ID
*/
raw_cable_id = readb(&icom_port->dram->cable_id);
trace(icom_port, "CABLE_ID", raw_cable_id);
/* Get cable ID into the lower 4 bits (standard form) */
cable_id = (raw_cable_id & ICOM_CABLE_ID_MASK) >> 4;
/* Check for valid Cable ID */
if (!(raw_cable_id & ICOM_CABLE_ID_VALID) ||
(cable_id != icom_port->cable_id)) {
/* reload adapter code, pick up any potential changes in cable id */
load_code(icom_port);
/* still no sign of cable, error out */
raw_cable_id = readb(&icom_port->dram->cable_id);
cable_id = (raw_cable_id & ICOM_CABLE_ID_MASK) >> 4;
if (!(raw_cable_id & ICOM_CABLE_ID_VALID) ||
(icom_port->cable_id == NO_CABLE))
return -EIO;
}
/*
* Finally, clear and enable interrupts
*/
spin_lock_irqsave(&icom_lock, flags);
port = icom_port->port;
if (port >= ARRAY_SIZE(int_mask_tbl)) {
dev_err(&icom_port->adapter->pci_dev->dev,
"Invalid port assignment\n");
goto unlock;
}
if (port == 0 || port == 1)
int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask;
else
int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask_2;
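/* Ports 0 and 2 own the low byte of their interrupt register;
 * ports 1 and 3 own bits 13:8.
 */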
if (port == 0 || port == 2)
writew(0x00FF, icom_port->int_reg);
else
writew(0x3F00, icom_port->int_reg);
temp = readl(int_mask_tbl[port].global_int_mask);
writel(temp & ~int_mask_tbl[port].processor_id, int_mask_tbl[port].global_int_mask);
/* write flush */
readl(int_mask_tbl[port].global_int_mask);
unlock:
spin_unlock_irqrestore(&icom_lock, flags);
return 0;
}
static void shutdown(struct icom_port *icom_port)
{
unsigned long temp;
unsigned char cmdReg;
unsigned long flags;
int port;
spin_lock_irqsave(&icom_lock, flags);
trace(icom_port, "SHUTDOWN", 0);
/*
* disable all interrupts
*/
port = icom_port->port;
if (port >= ARRAY_SIZE(int_mask_tbl)) {
dev_err(&icom_port->adapter->pci_dev->dev,
"Invalid port assignment\n");
goto unlock;
}
if (port == 0 || port == 1)
int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask;
else
int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask_2;
temp = readl(int_mask_tbl[port].global_int_mask);
writel(temp | int_mask_tbl[port].processor_id, int_mask_tbl[port].global_int_mask);
/* write flush */
readl(int_mask_tbl[port].global_int_mask);
unlock:
spin_unlock_irqrestore(&icom_lock, flags);
/*
* disable break condition
*/
cmdReg = readb(&icom_port->dram->CmdReg);
if (cmdReg & CMD_SND_BREAK) {
writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
}
}
static int icom_write(struct uart_port *port)
{
struct icom_port *icom_port = to_icom_port(port);
unsigned long data_count;
unsigned char cmdReg;
unsigned long offset;
int temp_tail = port->state->xmit.tail;
trace(icom_port, "WRITE", 0);
if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
SA_FLAGS_READY_TO_XMIT) {
trace(icom_port, "WRITE_FULL", 0);
return 0;
}
data_count = 0;
/* '<' (not '<=') keeps the copy from running one byte past xmit_buf */
while ((port->state->xmit.head != temp_tail) &&
(data_count < XMIT_BUFF_SZ)) {
icom_port->xmit_buf[data_count++] =
port->state->xmit.buf[temp_tail];
temp_tail++;
temp_tail &= (UART_XMIT_SIZE - 1);
}
if (data_count) {
icom_port->statStg->xmit[0].flags =
cpu_to_le16(SA_FLAGS_READY_TO_XMIT);
icom_port->statStg->xmit[0].leLength =
cpu_to_le16(data_count);
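/* Point the adapter's transmit restart word at FOD entry 0 so the
 * START_XMIT command below fetches this descriptor.
 */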
offset =
(unsigned long) &icom_port->statStg->xmit[0] -
(unsigned long) icom_port->statStg;
*icom_port->xmitRestart =
cpu_to_le32(icom_port->statStg_pci + offset);
cmdReg = readb(&icom_port->dram->CmdReg);
writeb(cmdReg | CMD_XMIT_RCV_ENABLE,
&icom_port->dram->CmdReg);
writeb(START_XMIT, &icom_port->dram->StartXmitCmd);
trace(icom_port, "WRITE_START", data_count);
/* write flush */
readb(&icom_port->dram->StartXmitCmd);
}
return data_count;
}
static inline void check_modem_status(struct icom_port *icom_port)
{
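/* NOTE: old_status is function-static and therefore shared across
 * all ports, so modem-status deltas can be misattributed on
 * multi-port adapters.
 */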
static char old_status = 0;
char delta_status;
unsigned char status;
spin_lock(&icom_port->uart_port.lock);
/* modem input register */
status = readb(&icom_port->dram->isr);
trace(icom_port, "CHECK_MODEM", status);
delta_status = status ^ old_status;
if (delta_status) {
if (delta_status & ICOM_RI)
icom_port->uart_port.icount.rng++;
if (delta_status & ICOM_DSR)
icom_port->uart_port.icount.dsr++;
if (delta_status & ICOM_DCD)
uart_handle_dcd_change(&icom_port->uart_port,
delta_status & ICOM_DCD);
if (delta_status & ICOM_CTS)
uart_handle_cts_change(&icom_port->uart_port,
delta_status & ICOM_CTS);
wake_up_interruptible(&icom_port->uart_port.state->
port.delta_msr_wait);
old_status = status;
}
spin_unlock(&icom_port->uart_port.lock);
}
static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
{
u16 count, i;
if (port_int_reg & (INT_XMIT_COMPLETED)) {
trace(icom_port, "XMIT_COMPLETE", 0);
/* clear buffer in use bit */
icom_port->statStg->xmit[0].flags &=
cpu_to_le16(~SA_FLAGS_READY_TO_XMIT);
count = le16_to_cpu(icom_port->statStg->xmit[0].leLength);
icom_port->uart_port.icount.tx += count;
for (i = 0; i < count &&
!uart_circ_empty(&icom_port->uart_port.state->xmit); i++) {
icom_port->uart_port.state->xmit.tail++;
icom_port->uart_port.state->xmit.tail &=
(UART_XMIT_SIZE - 1);
}
if (!icom_write(&icom_port->uart_port))
/* activate write queue */
uart_write_wakeup(&icom_port->uart_port);
} else
trace(icom_port, "XMIT_DISABLED", 0);
}
static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
{
short int count, rcv_buff;
struct tty_port *port = &icom_port->uart_port.state->port;
u16 status;
struct uart_icount *icount;
unsigned long offset;
unsigned char flag;
trace(icom_port, "RCV_COMPLETE", 0);
rcv_buff = icom_port->next_rcv;
status = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].flags);
while (status & SA_FL_RCV_DONE) {
int first = -1;
trace(icom_port, "FID_STATUS", status);
count = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].leLength);
trace(icom_port, "RCV_COUNT", count);
trace(icom_port, "REAL_COUNT", count);
offset = le32_to_cpu(icom_port->statStg->rcv[rcv_buff].leBuffer) -
icom_port->recv_buf_pci;
/* Block copy all but the last byte as this may have status */
if (count > 0) {
first = icom_port->recv_buf[offset];
tty_insert_flip_string(port, icom_port->recv_buf + offset, count - 1);
}
icount = &icom_port->uart_port.icount;
icount->rx += count;
/* Break detect logic */
if ((status & SA_FLAGS_FRAME_ERROR)
&& first == 0) {
status &= ~SA_FLAGS_FRAME_ERROR;
status |= SA_FLAGS_BREAK_DET;
trace(icom_port, "BREAK_DET", 0);
}
flag = TTY_NORMAL;
if (status &
(SA_FLAGS_BREAK_DET | SA_FLAGS_PARITY_ERROR |
SA_FLAGS_FRAME_ERROR | SA_FLAGS_OVERRUN)) {
if (status & SA_FLAGS_BREAK_DET)
icount->brk++;
if (status & SA_FLAGS_PARITY_ERROR)
icount->parity++;
if (status & SA_FLAGS_FRAME_ERROR)
icount->frame++;
if (status & SA_FLAGS_OVERRUN)
icount->overrun++;
/*
* Now check to see if character should be
* ignored, and mask off conditions which
* should be ignored.
*/
if (status & icom_port->ignore_status_mask) {
trace(icom_port, "IGNORE_CHAR", 0);
goto ignore_char;
}
status &= icom_port->read_status_mask;
if (status & SA_FLAGS_BREAK_DET) {
flag = TTY_BREAK;
} else if (status & SA_FLAGS_PARITY_ERROR) {
trace(icom_port, "PARITY_ERROR", 0);
flag = TTY_PARITY;
} else if (status & SA_FLAGS_FRAME_ERROR)
flag = TTY_FRAME;
}
tty_insert_flip_char(port, *(icom_port->recv_buf + offset + count - 1), flag);
if (status & SA_FLAGS_OVERRUN)
/*
* Overrun is special, since it's
* reported immediately, and doesn't
* affect the current character
*/
tty_insert_flip_char(port, 0, TTY_OVERRUN);
ignore_char:
icom_port->statStg->rcv[rcv_buff].flags = 0;
icom_port->statStg->rcv[rcv_buff].leLength = 0;
icom_port->statStg->rcv[rcv_buff].WorkingLength =
cpu_to_le16(RCV_BUFF_SZ);
rcv_buff++;
if (rcv_buff == NUM_RBUFFS)
rcv_buff = 0;
status = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].flags);
}
icom_port->next_rcv = rcv_buff;
tty_flip_buffer_push(port);
}
static void process_interrupt(u16 port_int_reg,
struct icom_port *icom_port)
{
spin_lock(&icom_port->uart_port.lock);
trace(icom_port, "INTERRUPT", port_int_reg);
if (port_int_reg & (INT_XMIT_COMPLETED | INT_XMIT_DISABLED))
xmit_interrupt(port_int_reg, icom_port);
if (port_int_reg & INT_RCV_COMPLETED)
recv_interrupt(port_int_reg, icom_port);
spin_unlock(&icom_port->uart_port.lock);
}
static irqreturn_t icom_interrupt(int irq, void *dev_id)
{
void __iomem * int_reg;
u32 adapter_interrupts;
u16 port_int_reg;
struct icom_adapter *icom_adapter;
struct icom_port *icom_port;
/* find icom_port for this interrupt */
icom_adapter = (struct icom_adapter *) dev_id;
if (icom_adapter->version == ADAPTER_V2) {
int_reg = icom_adapter->base_addr + 0x8024;
adapter_interrupts = readl(int_reg);
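/* Each 32-bit interrupt word covers a port pair: bits 13:0 belong
 * to the even-numbered port, bits 29:16 to the odd-numbered port.
 */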
if (adapter_interrupts & 0x00003FFF) {
/* port 2 interrupt, NOTE: for all ADAPTER_V2, port 2 will be active */
icom_port = &icom_adapter->port_info[2];
port_int_reg = (u16) adapter_interrupts;
process_interrupt(port_int_reg, icom_port);
check_modem_status(icom_port);
}
if (adapter_interrupts & 0x3FFF0000) {
/* port 3 interrupt */
icom_port = &icom_adapter->port_info[3];
if (icom_port->status == ICOM_PORT_ACTIVE) {
port_int_reg =
(u16) (adapter_interrupts >> 16);
process_interrupt(port_int_reg, icom_port);
check_modem_status(icom_port);
}
}
/* Clear out any pending interrupts */
writel(adapter_interrupts, int_reg);
int_reg = icom_adapter->base_addr + 0x8004;
} else {
int_reg = icom_adapter->base_addr + 0x4004;
}
adapter_interrupts = readl(int_reg);
if (adapter_interrupts & 0x00003FFF) {
/* port 0 interrupt, NOTE: for all adapters, port 0 will be active */
icom_port = &icom_adapter->port_info[0];
port_int_reg = (u16) adapter_interrupts;
process_interrupt(port_int_reg, icom_port);
check_modem_status(icom_port);
}
if (adapter_interrupts & 0x3FFF0000) {
/* port 1 interrupt */
icom_port = &icom_adapter->port_info[1];
if (icom_port->status == ICOM_PORT_ACTIVE) {
port_int_reg = (u16) (adapter_interrupts >> 16);
process_interrupt(port_int_reg, icom_port);
check_modem_status(icom_port);
}
}
/* Clear out any pending interrupts */
writel(adapter_interrupts, int_reg);
/* flush the write */
adapter_interrupts = readl(int_reg);
return IRQ_HANDLED;
}
/*
* ------------------------------------------------------------------
* Begin serial-core API
* ------------------------------------------------------------------
*/
static unsigned int icom_tx_empty(struct uart_port *port)
{
struct icom_port *icom_port = to_icom_port(port);
int ret;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
SA_FLAGS_READY_TO_XMIT)
ret = TIOCSER_TEMT;
else
ret = 0;
spin_unlock_irqrestore(&port->lock, flags);
return ret;
}
static void icom_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct icom_port *icom_port = to_icom_port(port);
unsigned char local_osr;
trace(icom_port, "SET_MODEM", 0);
local_osr = readb(&icom_port->dram->osr);
if (mctrl & TIOCM_RTS) {
trace(icom_port, "RAISE_RTS", 0);
local_osr |= ICOM_RTS;
} else {
trace(icom_port, "LOWER_RTS", 0);
local_osr &= ~ICOM_RTS;
}
if (mctrl & TIOCM_DTR) {
trace(icom_port, "RAISE_DTR", 0);
local_osr |= ICOM_DTR;
} else {
trace(icom_port, "LOWER_DTR", 0);
local_osr &= ~ICOM_DTR;
}
writeb(local_osr, &icom_port->dram->osr);
}
static unsigned int icom_get_mctrl(struct uart_port *port)
{
struct icom_port *icom_port = to_icom_port(port);
unsigned char status;
unsigned int result;
trace(icom_port, "GET_MODEM", 0);
status = readb(&icom_port->dram->isr);
result = ((status & ICOM_DCD) ? TIOCM_CAR : 0)
| ((status & ICOM_RI) ? TIOCM_RNG : 0)
| ((status & ICOM_DSR) ? TIOCM_DSR : 0)
| ((status & ICOM_CTS) ? TIOCM_CTS : 0);
return result;
}
static void icom_stop_tx(struct uart_port *port)
{
struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
trace(icom_port, "STOP", 0);
cmdReg = readb(&icom_port->dram->CmdReg);
writeb(cmdReg | CMD_HOLD_XMIT, &icom_port->dram->CmdReg);
}
static void icom_start_tx(struct uart_port *port)
{
struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
trace(icom_port, "START", 0);
cmdReg = readb(&icom_port->dram->CmdReg);
if ((cmdReg & CMD_HOLD_XMIT) == CMD_HOLD_XMIT)
writeb(cmdReg & ~CMD_HOLD_XMIT,
&icom_port->dram->CmdReg);
icom_write(port);
}
static void icom_send_xchar(struct uart_port *port, char ch)
{
struct icom_port *icom_port = to_icom_port(port);
unsigned char xdata;
int index;
unsigned long flags;
trace(icom_port, "SEND_XCHAR", ch);
/* wait .1 sec to send char */
for (index = 0; index < 10; index++) {
spin_lock_irqsave(&port->lock, flags);
xdata = readb(&icom_port->dram->xchar);
if (xdata == 0x00) {
trace(icom_port, "QUICK_WRITE", 0);
writeb(ch, &icom_port->dram->xchar);
/* flush write operation */
xdata = readb(&icom_port->dram->xchar);
spin_unlock_irqrestore(&port->lock, flags);
break;
}
spin_unlock_irqrestore(&port->lock, flags);
msleep(10);
}
}
static void icom_stop_rx(struct uart_port *port)
{
struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
cmdReg = readb(&icom_port->dram->CmdReg);
writeb(cmdReg & ~CMD_RCV_ENABLE, &icom_port->dram->CmdReg);
}
static void icom_break(struct uart_port *port, int break_state)
{
struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
trace(icom_port, "BREAK", 0);
cmdReg = readb(&icom_port->dram->CmdReg);
if (break_state == -1) {
writeb(cmdReg | CMD_SND_BREAK, &icom_port->dram->CmdReg);
} else {
writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
}
spin_unlock_irqrestore(&port->lock, flags);
}
static int icom_open(struct uart_port *port)
{
struct icom_port *icom_port = to_icom_port(port);
int retval;
kref_get(&icom_port->adapter->kref);
retval = startup(icom_port);
if (retval) {
kref_put(&icom_port->adapter->kref, icom_kref_release);
trace(icom_port, "STARTUP_ERROR", 0);
return retval;
}
return 0;
}
static void icom_close(struct uart_port *port)
{
struct icom_port *icom_port = to_icom_port(port);
unsigned char cmdReg;
trace(icom_port, "CLOSE", 0);
/* stop receiver */
cmdReg = readb(&icom_port->dram->CmdReg);
writeb(cmdReg & ~CMD_RCV_ENABLE, &icom_port->dram->CmdReg);
shutdown(icom_port);
kref_put(&icom_port->adapter->kref, icom_kref_release);
}
static void icom_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old_termios)
{
struct icom_port *icom_port = to_icom_port(port);
int baud;
unsigned cflag, iflag;
char new_config2;
char new_config3 = 0;
char tmp_byte;
int index;
int rcv_buff, xmit_buff;
unsigned long offset;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
trace(icom_port, "CHANGE_SPEED", 0);
cflag = termios->c_cflag;
iflag = termios->c_iflag;
new_config2 = ICOM_ACFG_DRIVE1;
/* byte size and parity */
switch (cflag & CSIZE) {
case CS5: /* 5 bits/char */
new_config2 |= ICOM_ACFG_5BPC;
break;
case CS6: /* 6 bits/char */
new_config2 |= ICOM_ACFG_6BPC;
break;
case CS7: /* 7 bits/char */
new_config2 |= ICOM_ACFG_7BPC;
break;
case CS8: /* 8 bits/char */
new_config2 |= ICOM_ACFG_8BPC;
break;
default:
break;
}
if (cflag & CSTOPB) {
/* 2 stop bits */
new_config2 |= ICOM_ACFG_2STOP_BIT;
}
if (cflag & PARENB) {
/* parity bit enabled */
new_config2 |= ICOM_ACFG_PARITY_ENAB;
trace(icom_port, "PARENB", 0);
}
if (cflag & PARODD) {
/* odd parity */
new_config2 |= ICOM_ACFG_PARITY_ODD;
trace(icom_port, "PARODD", 0);
}
/* Determine divisor based on baud rate */
baud = uart_get_baud_rate(port, termios, old_termios,
icom_acfg_baud[0],
icom_acfg_baud[BAUD_TABLE_LIMIT]);
if (!baud)
baud = 9600; /* B0 transition handled in rs_set_termios */
for (index = 0; index < BAUD_TABLE_LIMIT; index++) {
if (icom_acfg_baud[index] == baud) {
new_config3 = index;
break;
}
}
uart_update_timeout(port, cflag, baud);
/* CTS flow control flag and modem status interrupts */
tmp_byte = readb(&(icom_port->dram->HDLCConfigReg));
if (cflag & CRTSCTS)
tmp_byte |= HDLC_HDW_FLOW;
else
tmp_byte &= ~HDLC_HDW_FLOW;
writeb(tmp_byte, &(icom_port->dram->HDLCConfigReg));
/*
* Set up parity check flag
*/
icom_port->read_status_mask = SA_FLAGS_OVERRUN | SA_FL_RCV_DONE;
if (iflag & INPCK)
icom_port->read_status_mask |=
SA_FLAGS_FRAME_ERROR | SA_FLAGS_PARITY_ERROR;
if ((iflag & BRKINT) || (iflag & PARMRK))
icom_port->read_status_mask |= SA_FLAGS_BREAK_DET;
/*
* Characters to ignore
*/
icom_port->ignore_status_mask = 0;
if (iflag & IGNPAR)
icom_port->ignore_status_mask |=
SA_FLAGS_PARITY_ERROR | SA_FLAGS_FRAME_ERROR;
if (iflag & IGNBRK) {
icom_port->ignore_status_mask |= SA_FLAGS_BREAK_DET;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (iflag & IGNPAR)
icom_port->ignore_status_mask |= SA_FLAGS_OVERRUN;
}
/*
* !!! ignore all characters if CREAD is not set
*/
if ((cflag & CREAD) == 0)
icom_port->ignore_status_mask |= SA_FL_RCV_DONE;
/* Turn off Receiver to prepare for reset */
writeb(CMD_RCV_DISABLE, &icom_port->dram->CmdReg);
for (index = 0; index < 10; index++) {
if (readb(&icom_port->dram->PrevCmdReg) == 0x00) {
break;
}
}
/* clear all current buffers of data */
for (rcv_buff = 0; rcv_buff < NUM_RBUFFS; rcv_buff++) {
icom_port->statStg->rcv[rcv_buff].flags = 0;
icom_port->statStg->rcv[rcv_buff].leLength = 0;
icom_port->statStg->rcv[rcv_buff].WorkingLength =
cpu_to_le16(RCV_BUFF_SZ);
}
for (xmit_buff = 0; xmit_buff < NUM_XBUFFS; xmit_buff++) {
icom_port->statStg->xmit[xmit_buff].flags = 0;
}
/* activate changes and start xmit and receiver here */
/* Enable the receiver */
writeb(new_config3, &(icom_port->dram->async_config3));
writeb(new_config2, &(icom_port->dram->async_config2));
tmp_byte = readb(&(icom_port->dram->HDLCConfigReg));
tmp_byte |= HDLC_PPP_PURE_ASYNC | HDLC_FF_FILL;
writeb(tmp_byte, &(icom_port->dram->HDLCConfigReg));
writeb(0x04, &(icom_port->dram->FlagFillIdleTimer)); /* 0.5 seconds */
writeb(0xFF, &(icom_port->dram->ier)); /* enable modem signal interrupts */
/* reset processor */
writeb(CMD_RESTART, &icom_port->dram->CmdReg);
for (index = 0; index < 10; index++) {
if (readb(&icom_port->dram->CmdReg) == 0x00) {
break;
}
}
/* Enable Transmitter and Receiver */
offset =
(unsigned long) &icom_port->statStg->rcv[0] -
(unsigned long) icom_port->statStg;
writel(icom_port->statStg_pci + offset,
&icom_port->dram->RcvStatusAddr);
icom_port->next_rcv = 0;
*icom_port->xmitRestart = 0;
writel(icom_port->xmitRestart_pci,
&icom_port->dram->XmitStatusAddr);
trace(icom_port, "XR_ENAB", 0);
writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg);
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *icom_type(struct uart_port *port)
{
return "icom";
}
static void icom_config_port(struct uart_port *port, int flags)
{
port->type = PORT_ICOM;
}
static const struct uart_ops icom_ops = {
.tx_empty = icom_tx_empty,
.set_mctrl = icom_set_mctrl,
.get_mctrl = icom_get_mctrl,
.stop_tx = icom_stop_tx,
.start_tx = icom_start_tx,
.send_xchar = icom_send_xchar,
.stop_rx = icom_stop_rx,
.break_ctl = icom_break,
.startup = icom_open,
.shutdown = icom_close,
.set_termios = icom_set_termios,
.type = icom_type,
.config_port = icom_config_port,
};
#define ICOM_CONSOLE NULL
static struct uart_driver icom_uart_driver = {
.owner = THIS_MODULE,
.driver_name = ICOM_DRIVER_NAME,
.dev_name = "ttyA",
.major = ICOM_MAJOR,
.minor = ICOM_MINOR_START,
.nr = NR_PORTS,
.cons = ICOM_CONSOLE,
};
static int icom_init_ports(struct icom_adapter *icom_adapter)
{
u32 subsystem_id = icom_adapter->subsystem_id;
int i;
struct icom_port *icom_port;
if (icom_adapter->version == ADAPTER_V1) {
icom_adapter->numb_ports = 2;
for (i = 0; i < 2; i++) {
icom_port = &icom_adapter->port_info[i];
icom_port->port = i;
icom_port->status = ICOM_PORT_ACTIVE;
}
} else {
if (subsystem_id == PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL) {
icom_adapter->numb_ports = 4;
for (i = 0; i < 4; i++) {
icom_port = &icom_adapter->port_info[i];
icom_port->port = i;
icom_port->status = ICOM_PORT_ACTIVE;
}
} else {
icom_adapter->numb_ports = 4;
icom_adapter->port_info[0].port = 0;
icom_adapter->port_info[0].status = ICOM_PORT_ACTIVE;
icom_adapter->port_info[1].status = ICOM_PORT_OFF;
icom_adapter->port_info[2].port = 2;
icom_adapter->port_info[2].status = ICOM_PORT_ACTIVE;
icom_adapter->port_info[3].status = ICOM_PORT_OFF;
}
}
return 0;
}
static void icom_port_active(struct icom_port *icom_port, struct icom_adapter *icom_adapter, int port_num)
{
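/* The per-port interrupt registers within a pair are laid out
 * high-port-first, hence the "+ 2 - 2 * port" arithmetic: e.g. on V1
 * adapters port 0 reads at base + 0x4006 and port 1 at base + 0x4004.
 */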
if (icom_adapter->version == ADAPTER_V1) {
icom_port->global_reg = icom_adapter->base_addr + 0x4000;
icom_port->int_reg = icom_adapter->base_addr +
0x4004 + 2 - 2 * port_num;
} else {
icom_port->global_reg = icom_adapter->base_addr + 0x8000;
if (icom_port->port < 2)
icom_port->int_reg = icom_adapter->base_addr +
0x8004 + 2 - 2 * icom_port->port;
else
icom_port->int_reg = icom_adapter->base_addr +
0x8024 + 2 - 2 * (icom_port->port - 2);
}
}
static int icom_load_ports(struct icom_adapter *icom_adapter)
{
struct icom_port *icom_port;
int port_num;
for (port_num = 0; port_num < icom_adapter->numb_ports; port_num++) {
icom_port = &icom_adapter->port_info[port_num];
if (icom_port->status == ICOM_PORT_ACTIVE) {
icom_port_active(icom_port, icom_adapter, port_num);
icom_port->dram = icom_adapter->base_addr +
0x2000 * icom_port->port;
icom_port->adapter = icom_adapter;
/* get port memory */
if (get_port_memory(icom_port) != 0) {
dev_err(&icom_port->adapter->pci_dev->dev,
"Memory allocation for port FAILED\n");
}
}
}
return 0;
}
static int icom_alloc_adapter(struct icom_adapter
**icom_adapter_ref)
{
int adapter_count = 0;
struct icom_adapter *icom_adapter;
struct icom_adapter *cur_adapter_entry;
icom_adapter = kzalloc(sizeof(struct icom_adapter), GFP_KERNEL);
if (!icom_adapter) {
return -ENOMEM;
}
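/* The adapter list is kept sorted by index; find the first unused
 * index and insert the new adapter there.
 */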
list_for_each_entry(cur_adapter_entry, &icom_adapter_head,
icom_adapter_entry) {
if (cur_adapter_entry->index != adapter_count) {
break;
}
adapter_count++;
}
icom_adapter->index = adapter_count;
list_add_tail(&icom_adapter->icom_adapter_entry,
&cur_adapter_entry->icom_adapter_entry);
*icom_adapter_ref = icom_adapter;
return 0;
}
static void icom_free_adapter(struct icom_adapter *icom_adapter)
{
list_del(&icom_adapter->icom_adapter_entry);
kfree(icom_adapter);
}
static void icom_kref_release(struct kref *kref)
{
struct icom_adapter *icom_adapter = container_of(kref,
struct icom_adapter, kref);
struct icom_port *icom_port;
int index;
for (index = 0; index < icom_adapter->numb_ports; index++) {
icom_port = &icom_adapter->port_info[index];
if (icom_port->status == ICOM_PORT_ACTIVE) {
dev_info(&icom_adapter->pci_dev->dev,
"Device removed\n");
uart_remove_one_port(&icom_uart_driver,
&icom_port->uart_port);
/* be sure that DTR and RTS are dropped */
writeb(0x00, &icom_port->dram->osr);
/* Wait 0.1 Sec for simple Init to complete */
msleep(100);
/* Stop processor */
stop_processor(icom_port);
free_port_memory(icom_port);
}
}
free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
iounmap(icom_adapter->base_addr);
pci_release_regions(icom_adapter->pci_dev);
icom_free_adapter(icom_adapter);
}
static int icom_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
int index;
unsigned int command_reg;
int retval;
struct icom_adapter *icom_adapter;
struct icom_port *icom_port;
retval = pci_enable_device(dev);
if (retval) {
dev_err(&dev->dev, "Device enable FAILED\n");
return retval;
}
retval = pci_request_regions(dev, "icom");
if (retval) {
dev_err(&dev->dev, "pci_request_regions FAILED\n");
pci_disable_device(dev);
return retval;
}
pci_set_master(dev);
retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg);
if (retval) {
dev_err(&dev->dev, "PCI Config read FAILED\n");
goto probe_exit0;
}
pci_write_config_dword(dev, PCI_COMMAND,
command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
| PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
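/* Adapter-specific setup via vendor-defined PCI config offsets
 * 0x44/0x48 (device-dependent magic values).
 */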
if (ent->driver_data == ADAPTER_V1) {
pci_write_config_dword(dev, 0x44, 0x8300830A);
} else {
pci_write_config_dword(dev, 0x44, 0x42004200);
pci_write_config_dword(dev, 0x48, 0x42004200);
}
retval = icom_alloc_adapter(&icom_adapter);
if (retval) {
dev_err(&dev->dev, "icom_alloc_adapter FAILED\n");
retval = -EIO;
goto probe_exit0;
}
icom_adapter->base_addr_pci = pci_resource_start(dev, 0);
icom_adapter->pci_dev = dev;
icom_adapter->version = ent->driver_data;
icom_adapter->subsystem_id = ent->subdevice;
retval = icom_init_ports(icom_adapter);
if (retval) {
dev_err(&dev->dev, "Port configuration failed\n");
goto probe_exit1;
}
icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
if (!icom_adapter->base_addr) {
retval = -ENOMEM;
goto probe_exit1;
}
/* save off irq and request irq line */
retval = request_irq(dev->irq, icom_interrupt, IRQF_SHARED, ICOM_DRIVER_NAME, (void *)icom_adapter);
if (retval) {
goto probe_exit2;
}
retval = icom_load_ports(icom_adapter);
for (index = 0; index < icom_adapter->numb_ports; index++) {
icom_port = &icom_adapter->port_info[index];
if (icom_port->status == ICOM_PORT_ACTIVE) {
icom_port->uart_port.irq = icom_port->adapter->pci_dev->irq;
icom_port->uart_port.type = PORT_ICOM;
icom_port->uart_port.iotype = UPIO_MEM;
icom_port->uart_port.membase =
(unsigned char __iomem *)icom_adapter->base_addr_pci;
icom_port->uart_port.fifosize = 16;
icom_port->uart_port.ops = &icom_ops;
icom_port->uart_port.line =
icom_port->port + icom_adapter->index * 4;
if (uart_add_one_port (&icom_uart_driver, &icom_port->uart_port)) {
icom_port->status = ICOM_PORT_OFF;
dev_err(&dev->dev, "Device add failed\n");
} else
dev_info(&dev->dev, "Device added\n");
}
}
kref_init(&icom_adapter->kref);
return 0;
probe_exit2:
iounmap(icom_adapter->base_addr);
probe_exit1:
icom_free_adapter(icom_adapter);
probe_exit0:
pci_release_regions(dev);
pci_disable_device(dev);
return retval;
}
static void icom_remove(struct pci_dev *dev)
{
struct icom_adapter *icom_adapter;
list_for_each_entry(icom_adapter, &icom_adapter_head,
icom_adapter_entry) {
if (icom_adapter->pci_dev == dev) {
kref_put(&icom_adapter->kref, icom_kref_release);
return;
}
}
dev_err(&dev->dev, "Unable to find device to remove\n");
}
static struct pci_driver icom_pci_driver = {
.name = ICOM_DRIVER_NAME,
.id_table = icom_pci_table,
.probe = icom_probe,
.remove = icom_remove,
};
static int __init icom_init(void)
{
int ret;
ret = uart_register_driver(&icom_uart_driver);
if (ret)
return ret;
ret = pci_register_driver(&icom_pci_driver);
if (ret < 0)
uart_unregister_driver(&icom_uart_driver);
return ret;
}
static void __exit icom_exit(void)
{
pci_unregister_driver(&icom_pci_driver);
uart_unregister_driver(&icom_uart_driver);
}
module_init(icom_init);
module_exit(icom_exit);
MODULE_AUTHOR("Michael Anderson <[email protected]>");
MODULE_DESCRIPTION("IBM iSeries Serial IOA driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("icom_call_setup.bin");
MODULE_FIRMWARE("icom_res_dce.bin");
MODULE_FIRMWARE("icom_asc.bin");
| linux-master | drivers/tty/serial/icom.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MEN 16z135 High Speed UART
*
* Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
* Author: Johannes Thumshirn <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/tty_flip.h>
#include <linux/bitops.h>
#include <linux/mcb.h>
#define MEN_Z135_MAX_PORTS 12
#define MEN_Z135_BASECLK 29491200
#define MEN_Z135_FIFO_SIZE 1024
#define MEN_Z135_FIFO_WATERMARK 1020
#define MEN_Z135_STAT_REG 0x0
#define MEN_Z135_RX_RAM 0x4
#define MEN_Z135_TX_RAM 0x400
#define MEN_Z135_RX_CTRL 0x800
#define MEN_Z135_TX_CTRL 0x804
#define MEN_Z135_CONF_REG 0x808
#define MEN_Z135_UART_FREQ 0x80c
#define MEN_Z135_BAUD_REG 0x810
#define MEN_Z135_TIMEOUT 0x814
#define IRQ_ID(x) ((x) & 0x1f)
#define MEN_Z135_IER_RXCIEN BIT(0) /* RX Space IRQ */
#define MEN_Z135_IER_TXCIEN BIT(1) /* TX Space IRQ */
#define MEN_Z135_IER_RLSIEN BIT(2) /* Receiver Line Status IRQ */
#define MEN_Z135_IER_MSIEN BIT(3) /* Modem Status IRQ */
#define MEN_Z135_ALL_IRQS (MEN_Z135_IER_RXCIEN \
| MEN_Z135_IER_RLSIEN \
| MEN_Z135_IER_MSIEN \
| MEN_Z135_IER_TXCIEN)
#define MEN_Z135_MCR_DTR BIT(24)
#define MEN_Z135_MCR_RTS BIT(25)
#define MEN_Z135_MCR_OUT1 BIT(26)
#define MEN_Z135_MCR_OUT2 BIT(27)
#define MEN_Z135_MCR_LOOP BIT(28)
#define MEN_Z135_MCR_RCFC BIT(29)
#define MEN_Z135_MSR_DCTS BIT(0)
#define MEN_Z135_MSR_DDSR BIT(1)
#define MEN_Z135_MSR_DRI BIT(2)
#define MEN_Z135_MSR_DDCD BIT(3)
#define MEN_Z135_MSR_CTS BIT(4)
#define MEN_Z135_MSR_DSR BIT(5)
#define MEN_Z135_MSR_RI BIT(6)
#define MEN_Z135_MSR_DCD BIT(7)
#define MEN_Z135_LCR_SHIFT 8 /* LCR shift mask */
#define MEN_Z135_WL5 0 /* CS5 */
#define MEN_Z135_WL6 1 /* CS6 */
#define MEN_Z135_WL7 2 /* CS7 */
#define MEN_Z135_WL8 3 /* CS8 */
#define MEN_Z135_STB_SHIFT 2 /* Stopbits */
#define MEN_Z135_NSTB1 0
#define MEN_Z135_NSTB2 1
#define MEN_Z135_PEN_SHIFT 3 /* Parity enable */
#define MEN_Z135_PAR_DIS 0
#define MEN_Z135_PAR_ENA 1
#define MEN_Z135_PTY_SHIFT 4 /* Parity type */
#define MEN_Z135_PTY_ODD 0
#define MEN_Z135_PTY_EVN 1
#define MEN_Z135_LSR_DR BIT(0)
#define MEN_Z135_LSR_OE BIT(1)
#define MEN_Z135_LSR_PE BIT(2)
#define MEN_Z135_LSR_FE BIT(3)
#define MEN_Z135_LSR_BI BIT(4)
#define MEN_Z135_LSR_THEP BIT(5)
#define MEN_Z135_LSR_TEXP BIT(6)
#define MEN_Z135_LSR_RXFIFOERR BIT(7)
#define MEN_Z135_IRQ_ID_RLS BIT(0)
#define MEN_Z135_IRQ_ID_RDA BIT(1)
#define MEN_Z135_IRQ_ID_CTI BIT(2)
#define MEN_Z135_IRQ_ID_TSA BIT(3)
#define MEN_Z135_IRQ_ID_MST BIT(4)
#define LCR(x) (((x) >> MEN_Z135_LCR_SHIFT) & 0xff)
#define BYTES_TO_ALIGN(x) ((x) & 0x3)
static int line;
static int txlvl = 5;
module_param(txlvl, int, S_IRUGO);
MODULE_PARM_DESC(txlvl, "TX IRQ trigger level 0-7, default 5 (128 byte)");
static int rxlvl = 6;
module_param(rxlvl, int, S_IRUGO);
MODULE_PARM_DESC(rxlvl, "RX IRQ trigger level 0-7, default 6 (256 byte)");
static int align;
module_param(align, int, S_IRUGO);
MODULE_PARM_DESC(align, "Keep hardware FIFO write pointer aligned, default 0");
static uint rx_timeout;
module_param(rx_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rx_timeout, "RX timeout. "
"Timeout in seconds = (timeout_reg * baud_reg * 4) / freq_reg");
struct men_z135_port {
struct uart_port port;
struct mcb_device *mdev;
struct resource *mem;
unsigned char *rxbuf;
u32 stat_reg;
spinlock_t lock;
bool automode;
};
#define to_men_z135(port) container_of((port), struct men_z135_port, port)
/**
* men_z135_reg_set() - Set value in register
* @uart: The UART port
* @addr: Register address
* @val: value to set
*/
static inline void men_z135_reg_set(struct men_z135_port *uart,
u32 addr, u32 val)
{
struct uart_port *port = &uart->port;
unsigned long flags;
u32 reg;
spin_lock_irqsave(&uart->lock, flags);
reg = ioread32(port->membase + addr);
reg |= val;
iowrite32(reg, port->membase + addr);
spin_unlock_irqrestore(&uart->lock, flags);
}
/**
* men_z135_reg_clr() - Unset value in register
* @uart: The UART port
* @addr: Register address
* @val: value to clear
*/
static void men_z135_reg_clr(struct men_z135_port *uart,
u32 addr, u32 val)
{
struct uart_port *port = &uart->port;
unsigned long flags;
u32 reg;
spin_lock_irqsave(&uart->lock, flags);
reg = ioread32(port->membase + addr);
reg &= ~val;
iowrite32(reg, port->membase + addr);
spin_unlock_irqrestore(&uart->lock, flags);
}
/**
* men_z135_handle_modem_status() - Handle change of modem status
* @uart: The UART port
*
* Handle change of modem status register. This is done by reading the "delta"
* versions of DCD (Data Carrier Detect) and CTS (Clear To Send).
*/
static void men_z135_handle_modem_status(struct men_z135_port *uart)
{
u8 msr;
msr = (uart->stat_reg >> 8) & 0xff;
if (msr & MEN_Z135_MSR_DDCD)
uart_handle_dcd_change(&uart->port,
msr & MEN_Z135_MSR_DCD);
if (msr & MEN_Z135_MSR_DCTS)
uart_handle_cts_change(&uart->port,
msr & MEN_Z135_MSR_CTS);
}
static void men_z135_handle_lsr(struct men_z135_port *uart)
{
struct uart_port *port = &uart->port;
u8 lsr;
lsr = (uart->stat_reg >> 16) & 0xff;
if (lsr & MEN_Z135_LSR_OE)
port->icount.overrun++;
if (lsr & MEN_Z135_LSR_PE)
port->icount.parity++;
if (lsr & MEN_Z135_LSR_FE)
port->icount.frame++;
if (lsr & MEN_Z135_LSR_BI) {
port->icount.brk++;
uart_handle_break(port);
}
}
/**
* get_rx_fifo_content() - Get the number of bytes in RX FIFO
* @uart: The UART port
*
* Read RXC register from hardware and return current FIFO fill size.
*/
static u16 get_rx_fifo_content(struct men_z135_port *uart)
{
struct uart_port *port = &uart->port;
u32 stat_reg;
u16 rxc;
u8 rxc_lo;
u8 rxc_hi;
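/* The 10-bit RX fill level is split across the status register:
 * bits 31:24 hold the low eight bits, bits 7:6 the top two.
 */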
stat_reg = ioread32(port->membase + MEN_Z135_STAT_REG);
rxc_lo = stat_reg >> 24;
rxc_hi = (stat_reg & 0xC0) >> 6;
rxc = rxc_lo | (rxc_hi << 8);
return rxc;
}
/**
* men_z135_handle_rx() - RX tasklet routine
* @uart: Pointer to struct men_z135_port
*
* Copy from RX FIFO and acknowledge number of bytes copied.
*/
static void men_z135_handle_rx(struct men_z135_port *uart)
{
struct uart_port *port = &uart->port;
struct tty_port *tport = &port->state->port;
int copied;
u16 size;
int room;
size = get_rx_fifo_content(uart);
if (size == 0)
return;
/* Avoid accidentally accessing the TX FIFO instead of the RX FIFO.
* The RX RAM window spans 0x004-0x3FF and its last longword cannot
* be read, hence the 1020 byte watermark.
*/
if (size > MEN_Z135_FIFO_WATERMARK)
size = MEN_Z135_FIFO_WATERMARK;
room = tty_buffer_request_room(tport, size);
if (room != size)
dev_warn(&uart->mdev->dev,
"Not enough room in flip buffer, truncating to %d\n",
room);
if (room == 0)
return;
memcpy_fromio(uart->rxbuf, port->membase + MEN_Z135_RX_RAM, room);
/* Be sure to first copy all data and then acknowledge it */
mb();
iowrite32(room, port->membase + MEN_Z135_RX_CTRL);
copied = tty_insert_flip_string(tport, uart->rxbuf, room);
if (copied != room)
dev_warn(&uart->mdev->dev,
"Only copied %d instead of %d bytes\n",
copied, room);
port->icount.rx += copied;
tty_flip_buffer_push(tport);
}
/**
* men_z135_handle_tx() - TX tasklet routine
* @uart: Pointer to struct men_z135_port
*
*/
static void men_z135_handle_tx(struct men_z135_port *uart)
{
struct uart_port *port = &uart->port;
struct circ_buf *xmit = &port->state->xmit;
u32 txc;
u32 wptr;
int qlen;
int n;
int txfree;
int head;
int tail;
int s;
if (uart_circ_empty(xmit))
goto out;
if (uart_tx_stopped(port))
goto out;
if (port->x_char)
goto out;
/* calculate bytes to copy */
qlen = uart_circ_chars_pending(xmit);
if (qlen <= 0)
goto out;
wptr = ioread32(port->membase + MEN_Z135_TX_CTRL);
txc = (wptr >> 16) & 0x3ff;
wptr &= 0x3ff;
if (txc > MEN_Z135_FIFO_WATERMARK)
txc = MEN_Z135_FIFO_WATERMARK;
txfree = MEN_Z135_FIFO_WATERMARK - txc;
if (txfree <= 0) {
dev_err(&uart->mdev->dev,
"Not enough room in TX FIFO have %d, need %d\n",
txfree, qlen);
goto irq_en;
}
/* If the FIFO write pointer is not longword aligned, it's better to
* copy only 1-3 bytes to reach alignment and then the rest.
*/
if (align && qlen >= 3 && BYTES_TO_ALIGN(wptr))
n = 4 - BYTES_TO_ALIGN(wptr);
else if (qlen > txfree)
n = txfree;
else
n = qlen;
if (n <= 0)
goto irq_en;
head = xmit->head & (UART_XMIT_SIZE - 1);
tail = xmit->tail & (UART_XMIT_SIZE - 1);
s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail;
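/* Clamp the copy to the contiguous span before the circular buffer
 * wraps; any remainder is sent on the next TX-space interrupt.
 */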
n = min(n, s);
memcpy_toio(port->membase + MEN_Z135_TX_RAM, &xmit->buf[xmit->tail], n);
iowrite32(n & 0x3ff, port->membase + MEN_Z135_TX_CTRL);
uart_xmit_advance(port, n);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
irq_en:
if (!uart_circ_empty(xmit))
men_z135_reg_set(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_TXCIEN);
else
men_z135_reg_clr(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_TXCIEN);
out:
return;
}
/**
* men_z135_intr() - Handle legacy IRQs
* @irq: The IRQ number
* @data: Pointer to UART port
*
* Check IIR register to find the cause of the interrupt and handle it.
* It is possible that multiple interrupts reason bits are set and reading
* the IIR is a destructive read, so we always need to check for all possible
* interrupts and handle them.
*/
static irqreturn_t men_z135_intr(int irq, void *data)
{
struct men_z135_port *uart = (struct men_z135_port *)data;
struct uart_port *port = &uart->port;
bool handled = false;
int irq_id;
uart->stat_reg = ioread32(port->membase + MEN_Z135_STAT_REG);
irq_id = IRQ_ID(uart->stat_reg);
if (!irq_id)
goto out;
spin_lock(&port->lock);
/* It's safe to write to IIR[7:6] RXC[9:8] */
iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
if (irq_id & MEN_Z135_IRQ_ID_RLS) {
men_z135_handle_lsr(uart);
handled = true;
}
if (irq_id & (MEN_Z135_IRQ_ID_RDA | MEN_Z135_IRQ_ID_CTI)) {
if (irq_id & MEN_Z135_IRQ_ID_CTI)
dev_dbg(&uart->mdev->dev, "Character Timeout Indication\n");
men_z135_handle_rx(uart);
handled = true;
}
if (irq_id & MEN_Z135_IRQ_ID_TSA) {
men_z135_handle_tx(uart);
handled = true;
}
if (irq_id & MEN_Z135_IRQ_ID_MST) {
men_z135_handle_modem_status(uart);
handled = true;
}
spin_unlock(&port->lock);
out:
return IRQ_RETVAL(handled);
}
/**
* men_z135_request_irq() - Request IRQ for 16z135 core
* @uart: z135 private uart port structure
*
* Request an IRQ for 16z135 to use. First try using MSI, if it fails
* fall back to using legacy interrupts.
*/
static int men_z135_request_irq(struct men_z135_port *uart)
{
struct device *dev = &uart->mdev->dev;
struct uart_port *port = &uart->port;
int err = 0;
err = request_irq(port->irq, men_z135_intr, IRQF_SHARED,
"men_z135_intr", uart);
if (err)
dev_err(dev, "Error %d getting interrupt\n", err);
return err;
}
/**
* men_z135_tx_empty() - Handle tx_empty call
* @port: The UART port
*
* This function tests whether the TX FIFO and shifter for the port
* described by @port is empty.
*/
static unsigned int men_z135_tx_empty(struct uart_port *port)
{
u32 wptr;
u16 txc;
wptr = ioread32(port->membase + MEN_Z135_TX_CTRL);
txc = (wptr >> 16) & 0x3ff;
if (txc == 0)
return TIOCSER_TEMT;
else
return 0;
}
/**
* men_z135_set_mctrl() - Set modem control lines
* @port: The UART port
* @mctrl: The modem control lines
*
* This function sets the modem control lines for a port described by @port
* to the state described by @mctrl
*/
static void men_z135_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
u32 old;
u32 conf_reg;
conf_reg = old = ioread32(port->membase + MEN_Z135_CONF_REG);
if (mctrl & TIOCM_RTS)
conf_reg |= MEN_Z135_MCR_RTS;
else
conf_reg &= ~MEN_Z135_MCR_RTS;
if (mctrl & TIOCM_DTR)
conf_reg |= MEN_Z135_MCR_DTR;
else
conf_reg &= ~MEN_Z135_MCR_DTR;
if (mctrl & TIOCM_OUT1)
conf_reg |= MEN_Z135_MCR_OUT1;
else
conf_reg &= ~MEN_Z135_MCR_OUT1;
if (mctrl & TIOCM_OUT2)
conf_reg |= MEN_Z135_MCR_OUT2;
else
conf_reg &= ~MEN_Z135_MCR_OUT2;
if (mctrl & TIOCM_LOOP)
conf_reg |= MEN_Z135_MCR_LOOP;
else
conf_reg &= ~MEN_Z135_MCR_LOOP;
if (conf_reg != old)
iowrite32(conf_reg, port->membase + MEN_Z135_CONF_REG);
}
/**
* men_z135_get_mctrl() - Get modem control lines
* @port: The UART port
*
* Returns the current state of the modem control inputs.
*/
static unsigned int men_z135_get_mctrl(struct uart_port *port)
{
unsigned int mctrl = 0;
u8 msr;
msr = ioread8(port->membase + MEN_Z135_STAT_REG + 1);
if (msr & MEN_Z135_MSR_CTS)
mctrl |= TIOCM_CTS;
if (msr & MEN_Z135_MSR_DSR)
mctrl |= TIOCM_DSR;
if (msr & MEN_Z135_MSR_RI)
mctrl |= TIOCM_RI;
if (msr & MEN_Z135_MSR_DCD)
mctrl |= TIOCM_CAR;
return mctrl;
}
/**
* men_z135_stop_tx() - Stop transmitting characters
* @port: The UART port
*
* Stop transmitting characters. This might be due to the CTS line becoming
* inactive or the tty layer indicating we want to stop transmission due to
* an XOFF character.
*/
static void men_z135_stop_tx(struct uart_port *port)
{
struct men_z135_port *uart = to_men_z135(port);
men_z135_reg_clr(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_TXCIEN);
}
/**
* men_z135_disable_ms() - Disable Modem Status
* @port: The UART port
*
* Disable the Modem Status IRQ.
*/
static void men_z135_disable_ms(struct uart_port *port)
{
struct men_z135_port *uart = to_men_z135(port);
men_z135_reg_clr(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_MSIEN);
}
/**
* men_z135_start_tx() - Start transmitting characters
* @port: The UART port
*
* Start transmitting characters. This doesn't transmit anything itself,
* but kicks off the TX path by calling men_z135_handle_tx().
*/
static void men_z135_start_tx(struct uart_port *port)
{
struct men_z135_port *uart = to_men_z135(port);
if (uart->automode)
men_z135_disable_ms(port);
men_z135_handle_tx(uart);
}
/**
* men_z135_stop_rx() - Stop receiving characters
* @port: The UART port
*
* Stop receiving characters; the port is in the process of being closed.
*/
static void men_z135_stop_rx(struct uart_port *port)
{
struct men_z135_port *uart = to_men_z135(port);
men_z135_reg_clr(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_RXCIEN);
}
/**
* men_z135_enable_ms() - Enable Modem Status
* @port: the port
*
* Enable Modem Status IRQ.
*/
static void men_z135_enable_ms(struct uart_port *port)
{
struct men_z135_port *uart = to_men_z135(port);
men_z135_reg_set(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_MSIEN);
}
static int men_z135_startup(struct uart_port *port)
{
struct men_z135_port *uart = to_men_z135(port);
int err;
u32 conf_reg = 0;
err = men_z135_request_irq(uart);
if (err)
return -ENODEV;
conf_reg = ioread32(port->membase + MEN_Z135_CONF_REG);
/* Activate all but TX space available IRQ */
conf_reg |= MEN_Z135_ALL_IRQS & ~MEN_Z135_IER_TXCIEN;
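/* TX and RX trigger levels occupy bits 19:16 and 23:20 of the
 * config register; clear both nibbles before programming them.
 */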
conf_reg &= ~(0xff << 16);
conf_reg |= (txlvl << 16);
conf_reg |= (rxlvl << 20);
iowrite32(conf_reg, port->membase + MEN_Z135_CONF_REG);
if (rx_timeout)
iowrite32(rx_timeout, port->membase + MEN_Z135_TIMEOUT);
return 0;
}
static void men_z135_shutdown(struct uart_port *port)
{
struct men_z135_port *uart = to_men_z135(port);
u32 conf_reg = 0;
conf_reg |= MEN_Z135_ALL_IRQS;
men_z135_reg_clr(uart, MEN_Z135_CONF_REG, conf_reg);
free_irq(uart->port.irq, uart);
}
static void men_z135_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
struct men_z135_port *uart = to_men_z135(port);
unsigned int baud;
u32 conf_reg;
u32 bd_reg;
u32 uart_freq;
u8 lcr;
conf_reg = ioread32(port->membase + MEN_Z135_CONF_REG);
lcr = LCR(conf_reg);
/* byte size */
switch (termios->c_cflag & CSIZE) {
case CS5:
lcr |= MEN_Z135_WL5;
break;
case CS6:
lcr |= MEN_Z135_WL6;
break;
case CS7:
lcr |= MEN_Z135_WL7;
break;
case CS8:
lcr |= MEN_Z135_WL8;
break;
}
/* stop bits */
if (termios->c_cflag & CSTOPB)
lcr |= MEN_Z135_NSTB2 << MEN_Z135_STB_SHIFT;
/* parity */
if (termios->c_cflag & PARENB) {
lcr |= MEN_Z135_PAR_ENA << MEN_Z135_PEN_SHIFT;
if (termios->c_cflag & PARODD)
lcr |= MEN_Z135_PTY_ODD << MEN_Z135_PTY_SHIFT;
else
lcr |= MEN_Z135_PTY_EVN << MEN_Z135_PTY_SHIFT;
} else
lcr |= MEN_Z135_PAR_DIS << MEN_Z135_PEN_SHIFT;
conf_reg |= MEN_Z135_IER_MSIEN;
if (termios->c_cflag & CRTSCTS) {
conf_reg |= MEN_Z135_MCR_RCFC;
uart->automode = true;
termios->c_cflag &= ~CLOCAL;
} else {
conf_reg &= ~MEN_Z135_MCR_RCFC;
uart->automode = false;
}
termios->c_cflag &= ~CMSPAR; /* Mark/Space parity is not supported */
conf_reg |= lcr << MEN_Z135_LCR_SHIFT;
iowrite32(conf_reg, port->membase + MEN_Z135_CONF_REG);
uart_freq = ioread32(port->membase + MEN_Z135_UART_FREQ);
if (uart_freq == 0)
uart_freq = MEN_Z135_BASECLK;
baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);
spin_lock_irq(&port->lock);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
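/* The 16z135 baud generator divides the input clock by four times
 * the baud register value, so program uart_freq / (4 * baud).
 */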
bd_reg = uart_freq / (4 * baud);
iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);
uart_update_timeout(port, termios->c_cflag, baud);
spin_unlock_irq(&port->lock);
}
static const char *men_z135_type(struct uart_port *port)
{
return KBUILD_MODNAME;
}
static void men_z135_release_port(struct uart_port *port)
{
struct men_z135_port *uart = to_men_z135(port);
iounmap(port->membase);
port->membase = NULL;
mcb_release_mem(uart->mem);
}
static int men_z135_request_port(struct uart_port *port)
{
struct men_z135_port *uart = to_men_z135(port);
struct mcb_device *mdev = uart->mdev;
struct resource *mem;
mem = mcb_request_mem(uart->mdev, dev_name(&mdev->dev));
if (IS_ERR(mem))
return PTR_ERR(mem);
port->mapbase = mem->start;
uart->mem = mem;
port->membase = ioremap(mem->start, resource_size(mem));
if (port->membase == NULL) {
mcb_release_mem(mem);
return -ENOMEM;
}
return 0;
}
static void men_z135_config_port(struct uart_port *port, int type)
{
port->type = PORT_MEN_Z135;
men_z135_request_port(port);
}
static int men_z135_verify_port(struct uart_port *port,
struct serial_struct *serinfo)
{
return -EINVAL;
}
static const struct uart_ops men_z135_ops = {
.tx_empty = men_z135_tx_empty,
.set_mctrl = men_z135_set_mctrl,
.get_mctrl = men_z135_get_mctrl,
.stop_tx = men_z135_stop_tx,
.start_tx = men_z135_start_tx,
.stop_rx = men_z135_stop_rx,
.enable_ms = men_z135_enable_ms,
.startup = men_z135_startup,
.shutdown = men_z135_shutdown,
.set_termios = men_z135_set_termios,
.type = men_z135_type,
.release_port = men_z135_release_port,
.request_port = men_z135_request_port,
.config_port = men_z135_config_port,
.verify_port = men_z135_verify_port,
};
static struct uart_driver men_z135_driver = {
.owner = THIS_MODULE,
.driver_name = KBUILD_MODNAME,
.dev_name = "ttyHSU",
.major = 0,
.minor = 0,
.nr = MEN_Z135_MAX_PORTS,
};
/**
* men_z135_probe() - Probe a z135 instance
* @mdev: The MCB device
* @id: The MCB device ID
*
 * men_z135_probe() does the basic setup of hardware resources and registers
 * the new UART port with the tty layer.
*/
static int men_z135_probe(struct mcb_device *mdev,
const struct mcb_device_id *id)
{
struct men_z135_port *uart;
struct resource *mem;
struct device *dev;
int err;
dev = &mdev->dev;
uart = devm_kzalloc(dev, sizeof(struct men_z135_port), GFP_KERNEL);
if (!uart)
return -ENOMEM;
uart->rxbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
if (!uart->rxbuf)
return -ENOMEM;
mem = &mdev->mem;
mcb_set_drvdata(mdev, uart);
uart->port.uartclk = MEN_Z135_BASECLK * 16;
uart->port.fifosize = MEN_Z135_FIFO_SIZE;
uart->port.iotype = UPIO_MEM;
uart->port.ops = &men_z135_ops;
uart->port.irq = mcb_get_irq(mdev);
uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
uart->port.line = line++;
uart->port.dev = dev;
uart->port.type = PORT_MEN_Z135;
uart->port.mapbase = mem->start;
uart->port.membase = NULL;
uart->mdev = mdev;
spin_lock_init(&uart->lock);
err = uart_add_one_port(&men_z135_driver, &uart->port);
if (err)
goto err;
return 0;
err:
free_page((unsigned long) uart->rxbuf);
dev_err(dev, "Failed to add UART: %d\n", err);
return err;
}
/**
* men_z135_remove() - Remove a z135 instance from the system
*
* @mdev: The MCB device
*/
static void men_z135_remove(struct mcb_device *mdev)
{
struct men_z135_port *uart = mcb_get_drvdata(mdev);
line--;
uart_remove_one_port(&men_z135_driver, &uart->port);
free_page((unsigned long) uart->rxbuf);
}
static const struct mcb_device_id men_z135_ids[] = {
{ .device = 0x87 },
{ }
};
MODULE_DEVICE_TABLE(mcb, men_z135_ids);
static struct mcb_driver mcb_driver = {
.driver = {
.name = "z135-uart",
.owner = THIS_MODULE,
},
.probe = men_z135_probe,
.remove = men_z135_remove,
.id_table = men_z135_ids,
};
/**
* men_z135_init() - Driver Registration Routine
*
 * men_z135_init is the first routine called when the driver is loaded. All it
 * does is register the UART driver with the serial core and the driver itself
 * with the MEN Chameleon bus (MCB) subsystem.
*/
static int __init men_z135_init(void)
{
int err;
err = uart_register_driver(&men_z135_driver);
if (err) {
pr_err("Failed to register UART: %d\n", err);
return err;
}
err = mcb_register_driver(&mcb_driver);
if (err) {
pr_err("Failed to register MCB driver: %d\n", err);
uart_unregister_driver(&men_z135_driver);
return err;
}
return 0;
}
module_init(men_z135_init);
/**
* men_z135_exit() - Driver Exit Routine
*
* men_z135_exit is called just before the driver is removed from memory.
*/
static void __exit men_z135_exit(void)
{
mcb_unregister_driver(&mcb_driver);
uart_unregister_driver(&men_z135_driver);
}
module_exit(men_z135_exit);
MODULE_AUTHOR("Johannes Thumshirn <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MEN 16z135 High Speed UART");
MODULE_ALIAS("mcb:16z135");
MODULE_IMPORT_NS(MCB);
| linux-master | drivers/tty/serial/men_z135_uart.c |
// SPDX-License-Identifier: GPL-2.0
/*
* uartlite.c: Serial driver for Xilinx uartlite serial controller
*
* Copyright (C) 2006 Peter Korsgaard <[email protected]>
* Copyright (C) 2007 Secret Lab Technologies Ltd.
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/console.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#define ULITE_NAME "ttyUL"
#define ULITE_MAJOR 204
#define ULITE_MINOR 187
#define ULITE_NR_UARTS CONFIG_SERIAL_UARTLITE_NR_UARTS
/* ---------------------------------------------------------------------
* Register definitions
*
* For register details see datasheet:
* https://www.xilinx.com/support/documentation/ip_documentation/opb_uartlite.pdf
*/
#define ULITE_RX 0x00
#define ULITE_TX 0x04
#define ULITE_STATUS 0x08
#define ULITE_CONTROL 0x0c
#define ULITE_REGION 16
#define ULITE_STATUS_RXVALID 0x01
#define ULITE_STATUS_RXFULL 0x02
#define ULITE_STATUS_TXEMPTY 0x04
#define ULITE_STATUS_TXFULL 0x08
#define ULITE_STATUS_IE 0x10
#define ULITE_STATUS_OVERRUN 0x20
#define ULITE_STATUS_FRAME 0x40
#define ULITE_STATUS_PARITY 0x80
#define ULITE_CONTROL_RST_TX 0x01
#define ULITE_CONTROL_RST_RX 0x02
#define ULITE_CONTROL_IE 0x10
#define UART_AUTOSUSPEND_TIMEOUT 3000 /* ms */
/* Static pointer to console port */
#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
static struct uart_port *console_port;
#endif
/**
 * struct uartlite_data - Driver private data
 * @reg_ops: Functions to read/write registers
 * @clk: Our parent clock, if present
 * @baud: The baud rate configured when this device was synthesized
 * @cflags: The cflags for parity and data bits
 */
struct uartlite_data {
const struct uartlite_reg_ops *reg_ops;
struct clk *clk;
unsigned int baud;
tcflag_t cflags;
};
struct uartlite_reg_ops {
u32 (*in)(void __iomem *addr);
void (*out)(u32 val, void __iomem *addr);
};
static u32 uartlite_inbe32(void __iomem *addr)
{
return ioread32be(addr);
}
static void uartlite_outbe32(u32 val, void __iomem *addr)
{
iowrite32be(val, addr);
}
static const struct uartlite_reg_ops uartlite_be = {
.in = uartlite_inbe32,
.out = uartlite_outbe32,
};
static u32 uartlite_inle32(void __iomem *addr)
{
return ioread32(addr);
}
static void uartlite_outle32(u32 val, void __iomem *addr)
{
iowrite32(val, addr);
}
static const struct uartlite_reg_ops uartlite_le = {
.in = uartlite_inle32,
.out = uartlite_outle32,
};
static inline u32 uart_in32(u32 offset, struct uart_port *port)
{
struct uartlite_data *pdata = port->private_data;
return pdata->reg_ops->in(port->membase + offset);
}
static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
{
struct uartlite_data *pdata = port->private_data;
pdata->reg_ops->out(val, port->membase + offset);
}
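/*
 * Editor's sketch (not part of the driver): once pdata->reg_ops points at the
 * accessors matching the core's endianness, all register traffic goes through
 * uart_in32()/uart_out32(). A polled transmit built on those helpers would
 * look like this (mirroring ulite_put_poll_char() further down); the function
 * name is hypothetical.
 */
#if 0
static void ulite_example_poll_putchar(struct uart_port *port,
				       unsigned char ch)
{
	/* spin until the TX FIFO has room, then push one character */
	while (uart_in32(ULITE_STATUS, port) & ULITE_STATUS_TXFULL)
		cpu_relax();
	uart_out32(ch, ULITE_TX, port);
}
#endif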
static struct uart_port ulite_ports[ULITE_NR_UARTS];
static struct uart_driver ulite_uart_driver;
/* ---------------------------------------------------------------------
* Core UART driver operations
*/
static int ulite_receive(struct uart_port *port, int stat)
{
struct tty_port *tport = &port->state->port;
unsigned char ch = 0;
char flag = TTY_NORMAL;
if ((stat & (ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
| ULITE_STATUS_FRAME)) == 0)
return 0;
/* stats */
if (stat & ULITE_STATUS_RXVALID) {
port->icount.rx++;
ch = uart_in32(ULITE_RX, port);
if (stat & ULITE_STATUS_PARITY)
port->icount.parity++;
}
if (stat & ULITE_STATUS_OVERRUN)
port->icount.overrun++;
if (stat & ULITE_STATUS_FRAME)
port->icount.frame++;
	/* drop byte with parity error if IGNPAR specified */
if (stat & port->ignore_status_mask & ULITE_STATUS_PARITY)
stat &= ~ULITE_STATUS_RXVALID;
stat &= port->read_status_mask;
if (stat & ULITE_STATUS_PARITY)
flag = TTY_PARITY;
stat &= ~port->ignore_status_mask;
if (stat & ULITE_STATUS_RXVALID)
tty_insert_flip_char(tport, ch, flag);
if (stat & ULITE_STATUS_FRAME)
tty_insert_flip_char(tport, 0, TTY_FRAME);
if (stat & ULITE_STATUS_OVERRUN)
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
return 1;
}
static int ulite_transmit(struct uart_port *port, int stat)
{
struct circ_buf *xmit = &port->state->xmit;
if (stat & ULITE_STATUS_TXFULL)
return 0;
if (port->x_char) {
uart_out32(port->x_char, ULITE_TX, port);
port->x_char = 0;
port->icount.tx++;
return 1;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return 0;
uart_out32(xmit->buf[xmit->tail], ULITE_TX, port);
uart_xmit_advance(port, 1);
/* wake up */
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
return 1;
}
static irqreturn_t ulite_isr(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
int stat, busy, n = 0;
unsigned long flags;
do {
spin_lock_irqsave(&port->lock, flags);
stat = uart_in32(ULITE_STATUS, port);
busy = ulite_receive(port, stat);
busy |= ulite_transmit(port, stat);
spin_unlock_irqrestore(&port->lock, flags);
n++;
} while (busy);
/* work done? */
if (n > 1) {
tty_flip_buffer_push(&port->state->port);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
}
}
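/*
 * Note on the return value above: the loop always makes at least one pass, so
 * n > 1 means the first pass actually found RX or TX work and the interrupt
 * was ours (IRQ_HANDLED). Returning IRQ_NONE otherwise keeps the shared-IRQ
 * bookkeeping honest, since the line is requested with IRQF_SHARED in
 * ulite_startup().
 */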
static unsigned int ulite_tx_empty(struct uart_port *port)
{
unsigned long flags;
unsigned int ret;
spin_lock_irqsave(&port->lock, flags);
ret = uart_in32(ULITE_STATUS, port);
spin_unlock_irqrestore(&port->lock, flags);
return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
}
static unsigned int ulite_get_mctrl(struct uart_port *port)
{
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}
static void ulite_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
/* N/A */
}
static void ulite_stop_tx(struct uart_port *port)
{
/* N/A */
}
static void ulite_start_tx(struct uart_port *port)
{
ulite_transmit(port, uart_in32(ULITE_STATUS, port));
}
static void ulite_stop_rx(struct uart_port *port)
{
/* don't forward any more data (like !CREAD) */
port->ignore_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
}
static void ulite_break_ctl(struct uart_port *port, int ctl)
{
/* N/A */
}
static int ulite_startup(struct uart_port *port)
{
struct uartlite_data *pdata = port->private_data;
int ret;
ret = clk_enable(pdata->clk);
if (ret) {
dev_err(port->dev, "Failed to enable clock\n");
return ret;
}
ret = request_irq(port->irq, ulite_isr, IRQF_SHARED | IRQF_TRIGGER_RISING,
"uartlite", port);
if (ret)
return ret;
uart_out32(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
ULITE_CONTROL, port);
uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
return 0;
}
static void ulite_shutdown(struct uart_port *port)
{
struct uartlite_data *pdata = port->private_data;
uart_out32(0, ULITE_CONTROL, port);
uart_in32(ULITE_CONTROL, port); /* dummy */
free_irq(port->irq, port);
clk_disable(pdata->clk);
}
static void ulite_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
unsigned long flags;
struct uartlite_data *pdata = port->private_data;
/* Set termios to what the hardware supports */
termios->c_iflag &= ~BRKINT;
termios->c_cflag &= ~(CSTOPB | PARENB | PARODD | CSIZE);
termios->c_cflag |= pdata->cflags & (PARENB | PARODD | CSIZE);
tty_termios_encode_baud_rate(termios, pdata->baud, pdata->baud);
spin_lock_irqsave(&port->lock, flags);
port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
| ULITE_STATUS_TXFULL;
if (termios->c_iflag & INPCK)
port->read_status_mask |=
ULITE_STATUS_PARITY | ULITE_STATUS_FRAME;
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= ULITE_STATUS_PARITY
| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
/* ignore all characters if CREAD is not set */
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |=
ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
/* update timeout */
uart_update_timeout(port, termios->c_cflag, pdata->baud);
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *ulite_type(struct uart_port *port)
{
return port->type == PORT_UARTLITE ? "uartlite" : NULL;
}
static void ulite_release_port(struct uart_port *port)
{
release_mem_region(port->mapbase, ULITE_REGION);
iounmap(port->membase);
port->membase = NULL;
}
static int ulite_request_port(struct uart_port *port)
{
struct uartlite_data *pdata = port->private_data;
int ret;
pr_debug("ulite console: port=%p; port->mapbase=%llx\n",
port, (unsigned long long) port->mapbase);
if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) {
dev_err(port->dev, "Memory region busy\n");
return -EBUSY;
}
port->membase = ioremap(port->mapbase, ULITE_REGION);
if (!port->membase) {
dev_err(port->dev, "Unable to map registers\n");
release_mem_region(port->mapbase, ULITE_REGION);
return -EBUSY;
}
pdata->reg_ops = &uartlite_be;
ret = uart_in32(ULITE_CONTROL, port);
uart_out32(ULITE_CONTROL_RST_TX, ULITE_CONTROL, port);
ret = uart_in32(ULITE_STATUS, port);
	/* Endianness detection */
if ((ret & ULITE_STATUS_TXEMPTY) != ULITE_STATUS_TXEMPTY)
pdata->reg_ops = &uartlite_le;
return 0;
}
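/*
 * How the probe above detects endianness: after resetting the TX FIFO the
 * status register must report TXEMPTY. We first read it with the big-endian
 * accessors; if TXEMPTY does not appear, the IP is being accessed
 * little-endian and we switch to the LE accessors instead.
 */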
static void ulite_config_port(struct uart_port *port, int flags)
{
if (!ulite_request_port(port))
port->type = PORT_UARTLITE;
}
static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
{
/* we don't want the core code to modify any port params */
return -EINVAL;
}
static void ulite_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
int ret;
if (!state) {
ret = pm_runtime_get_sync(port->dev);
if (ret < 0)
dev_err(port->dev, "Failed to enable clocks\n");
} else {
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
}
}
#ifdef CONFIG_CONSOLE_POLL
static int ulite_get_poll_char(struct uart_port *port)
{
if (!(uart_in32(ULITE_STATUS, port) & ULITE_STATUS_RXVALID))
return NO_POLL_CHAR;
return uart_in32(ULITE_RX, port);
}
static void ulite_put_poll_char(struct uart_port *port, unsigned char ch)
{
while (uart_in32(ULITE_STATUS, port) & ULITE_STATUS_TXFULL)
cpu_relax();
/* write char to device */
uart_out32(ch, ULITE_TX, port);
}
#endif
static const struct uart_ops ulite_ops = {
.tx_empty = ulite_tx_empty,
.set_mctrl = ulite_set_mctrl,
.get_mctrl = ulite_get_mctrl,
.stop_tx = ulite_stop_tx,
.start_tx = ulite_start_tx,
.stop_rx = ulite_stop_rx,
.break_ctl = ulite_break_ctl,
.startup = ulite_startup,
.shutdown = ulite_shutdown,
.set_termios = ulite_set_termios,
.type = ulite_type,
.release_port = ulite_release_port,
.request_port = ulite_request_port,
.config_port = ulite_config_port,
.verify_port = ulite_verify_port,
.pm = ulite_pm,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = ulite_get_poll_char,
.poll_put_char = ulite_put_poll_char,
#endif
};
/* ---------------------------------------------------------------------
* Console driver operations
*/
#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
static void ulite_console_wait_tx(struct uart_port *port)
{
u8 val;
/*
* Spin waiting for TX fifo to have space available.
* When using the Microblaze Debug Module this can take up to 1s
*/
if (read_poll_timeout_atomic(uart_in32, val, !(val & ULITE_STATUS_TXFULL),
0, 1000000, false, ULITE_STATUS, port))
dev_warn(port->dev,
"timeout waiting for TX buffer empty\n");
}
static void ulite_console_putchar(struct uart_port *port, unsigned char ch)
{
ulite_console_wait_tx(port);
uart_out32(ch, ULITE_TX, port);
}
static void ulite_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port = console_port;
unsigned long flags;
unsigned int ier;
int locked = 1;
if (oops_in_progress) {
locked = spin_trylock_irqsave(&port->lock, flags);
} else
spin_lock_irqsave(&port->lock, flags);
/* save and disable interrupt */
ier = uart_in32(ULITE_STATUS, port) & ULITE_STATUS_IE;
uart_out32(0, ULITE_CONTROL, port);
uart_console_write(port, s, count, ulite_console_putchar);
ulite_console_wait_tx(port);
/* restore interrupt state */
if (ier)
uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
}
static int ulite_console_setup(struct console *co, char *options)
{
struct uart_port *port = NULL;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index >= 0 && co->index < ULITE_NR_UARTS)
port = ulite_ports + co->index;
/* Has the device been initialized yet? */
if (!port || !port->mapbase) {
pr_debug("console on ttyUL%i not present\n", co->index);
return -ENODEV;
}
console_port = port;
/* not initialized yet? */
if (!port->membase) {
if (ulite_request_port(port))
return -ENODEV;
}
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct console ulite_console = {
.name = ULITE_NAME,
.write = ulite_console_write,
.device = uart_console_device,
.setup = ulite_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1, /* Specified on the cmdline (e.g. console=ttyUL0 ) */
.data = &ulite_uart_driver,
};
static void early_uartlite_putc(struct uart_port *port, unsigned char c)
{
/*
* Limit how many times we'll spin waiting for TX FIFO status.
* This will prevent lockups if the base address is incorrectly
* set, or any other issue on the UARTLITE.
	 * This limit is fairly arbitrary; unless we are running at about
	 * 10 baud, we will never time out on a working UART.
*/
unsigned retries = 1000000;
while (--retries &&
(readl(port->membase + ULITE_STATUS) & ULITE_STATUS_TXFULL))
;
/* Only attempt the iowrite if we didn't timeout */
if (retries)
writel(c & 0xff, port->membase + ULITE_TX);
}
static void early_uartlite_write(struct console *console,
const char *s, unsigned n)
{
struct earlycon_device *device = console->data;
uart_console_write(&device->port, s, n, early_uartlite_putc);
}
static int __init early_uartlite_setup(struct earlycon_device *device,
const char *options)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = early_uartlite_write;
return 0;
}
EARLYCON_DECLARE(uartlite, early_uartlite_setup);
OF_EARLYCON_DECLARE(uartlite_b, "xlnx,opb-uartlite-1.00.b", early_uartlite_setup);
OF_EARLYCON_DECLARE(uartlite_a, "xlnx,xps-uartlite-1.00.a", early_uartlite_setup);
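/*
 * Editor's note: the EARLYCON_DECLARE() above lets an early console be
 * requested on the kernel command line before the platform device exists,
 * e.g. (with a hypothetical register base):
 *
 *	earlycon=uartlite,mmio32,0x40600000
 *
 * The OF_EARLYCON_DECLARE() variants instead match the chosen stdout-path
 * node by compatible string in the device tree.
 */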
#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
static struct uart_driver ulite_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "uartlite",
.dev_name = ULITE_NAME,
.major = ULITE_MAJOR,
.minor = ULITE_MINOR,
.nr = ULITE_NR_UARTS,
#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
.cons = &ulite_console,
#endif
};
/* ---------------------------------------------------------------------
* Port assignment functions (mapping devices to uart_port structures)
*/
/**
 * ulite_assign() - Register a uartlite device with the driver
*
* @dev: pointer to device structure
* @id: requested id number. Pass -1 for automatic port assignment
* @base: base address of uartlite registers
* @irq: irq number for uartlite
* @pdata: private data for uartlite
*
* Returns: 0 on success, <0 otherwise
*/
static int ulite_assign(struct device *dev, int id, phys_addr_t base, int irq,
struct uartlite_data *pdata)
{
struct uart_port *port;
int rc;
/* if id = -1; then scan for a free id and use that */
if (id < 0) {
for (id = 0; id < ULITE_NR_UARTS; id++)
if (ulite_ports[id].mapbase == 0)
break;
}
if (id < 0 || id >= ULITE_NR_UARTS) {
dev_err(dev, "%s%i too large\n", ULITE_NAME, id);
return -EINVAL;
}
if ((ulite_ports[id].mapbase) && (ulite_ports[id].mapbase != base)) {
dev_err(dev, "cannot assign to %s%i; it is already in use\n",
ULITE_NAME, id);
return -EBUSY;
}
port = &ulite_ports[id];
spin_lock_init(&port->lock);
port->fifosize = 16;
port->regshift = 2;
port->iotype = UPIO_MEM;
port->iobase = 1; /* mark port in use */
port->mapbase = base;
port->membase = NULL;
port->ops = &ulite_ops;
port->irq = irq;
port->flags = UPF_BOOT_AUTOCONF;
port->dev = dev;
port->type = PORT_UNKNOWN;
port->line = id;
port->private_data = pdata;
dev_set_drvdata(dev, port);
/* Register the port */
rc = uart_add_one_port(&ulite_uart_driver, port);
if (rc) {
dev_err(dev, "uart_add_one_port() failed; err=%i\n", rc);
port->mapbase = 0;
dev_set_drvdata(dev, NULL);
return rc;
}
return 0;
}
/**
 * ulite_release() - Release a previously registered uartlite device
*
* @dev: pointer to device structure
*/
static void ulite_release(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
if (port) {
uart_remove_one_port(&ulite_uart_driver, port);
dev_set_drvdata(dev, NULL);
port->mapbase = 0;
}
}
/**
* ulite_suspend - Stop the device.
*
* @dev: handle to the device structure.
* Return: 0 always.
*/
static int __maybe_unused ulite_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
if (port)
uart_suspend_port(&ulite_uart_driver, port);
return 0;
}
/**
* ulite_resume - Resume the device.
*
* @dev: handle to the device structure.
* Return: 0 on success, errno otherwise.
*/
static int __maybe_unused ulite_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
if (port)
uart_resume_port(&ulite_uart_driver, port);
return 0;
}
static int __maybe_unused ulite_runtime_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
struct uartlite_data *pdata = port->private_data;
clk_disable(pdata->clk);
return 0;
};
static int __maybe_unused ulite_runtime_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
struct uartlite_data *pdata = port->private_data;
int ret;
ret = clk_enable(pdata->clk);
if (ret) {
dev_err(dev, "Cannot enable clock.\n");
return ret;
}
return 0;
}
/* ---------------------------------------------------------------------
* Platform bus binding
*/
static const struct dev_pm_ops ulite_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ulite_suspend, ulite_resume)
SET_RUNTIME_PM_OPS(ulite_runtime_suspend,
ulite_runtime_resume, NULL)
};
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
static const struct of_device_id ulite_of_match[] = {
{ .compatible = "xlnx,opb-uartlite-1.00.b", },
{ .compatible = "xlnx,xps-uartlite-1.00.a", },
{}
};
MODULE_DEVICE_TABLE(of, ulite_of_match);
#endif /* CONFIG_OF */
static int ulite_probe(struct platform_device *pdev)
{
struct resource *res;
struct uartlite_data *pdata;
int irq, ret;
int id = pdev->id;
pdata = devm_kzalloc(&pdev->dev, sizeof(struct uartlite_data),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
if (IS_ENABLED(CONFIG_OF)) {
const char *prop;
struct device_node *np = pdev->dev.of_node;
u32 val = 0;
prop = "port-number";
ret = of_property_read_u32(np, prop, &id);
if (ret && ret != -EINVAL)
of_err:
return dev_err_probe(&pdev->dev, ret,
"could not read %s\n", prop);
prop = "current-speed";
ret = of_property_read_u32(np, prop, &pdata->baud);
if (ret)
goto of_err;
prop = "xlnx,use-parity";
ret = of_property_read_u32(np, prop, &val);
if (ret && ret != -EINVAL)
goto of_err;
if (val) {
prop = "xlnx,odd-parity";
ret = of_property_read_u32(np, prop, &val);
if (ret)
goto of_err;
if (val)
pdata->cflags |= PARODD;
pdata->cflags |= PARENB;
}
val = 8;
prop = "xlnx,data-bits";
ret = of_property_read_u32(np, prop, &val);
if (ret && ret != -EINVAL)
goto of_err;
switch (val) {
case 5:
pdata->cflags |= CS5;
break;
case 6:
pdata->cflags |= CS6;
break;
case 7:
pdata->cflags |= CS7;
break;
case 8:
pdata->cflags |= CS8;
break;
default:
return dev_err_probe(&pdev->dev, -EINVAL,
"bad data bits %d\n", val);
}
} else {
pdata->baud = 9600;
pdata->cflags = CS8;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
pdata->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
if (IS_ERR(pdata->clk)) {
if (PTR_ERR(pdata->clk) != -ENOENT)
return PTR_ERR(pdata->clk);
/*
		 * Clock framework support is optional; carry on
		 * anyway if we don't find a matching clock.
*/
pdata->clk = NULL;
}
ret = clk_prepare_enable(pdata->clk);
if (ret) {
dev_err(&pdev->dev, "Failed to prepare clock\n");
return ret;
}
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
if (!ulite_uart_driver.state) {
dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
ret = uart_register_driver(&ulite_uart_driver);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register driver\n");
clk_disable_unprepare(pdata->clk);
return ret;
}
}
ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return ret;
}
static int ulite_remove(struct platform_device *pdev)
{
struct uart_port *port = dev_get_drvdata(&pdev->dev);
struct uartlite_data *pdata = port->private_data;
clk_disable_unprepare(pdata->clk);
ulite_release(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:uartlite");
static struct platform_driver ulite_platform_driver = {
.probe = ulite_probe,
.remove = ulite_remove,
.driver = {
.name = "uartlite",
.of_match_table = of_match_ptr(ulite_of_match),
.pm = &ulite_pm_ops,
},
};
/* ---------------------------------------------------------------------
* Module setup/teardown
*/
static int __init ulite_init(void)
{
pr_debug("uartlite: calling platform_driver_register()\n");
return platform_driver_register(&ulite_platform_driver);
}
static void __exit ulite_exit(void)
{
platform_driver_unregister(&ulite_platform_driver);
if (ulite_uart_driver.state)
uart_unregister_driver(&ulite_uart_driver);
}
module_init(ulite_init);
module_exit(ulite_exit);
MODULE_AUTHOR("Peter Korsgaard <[email protected]>");
MODULE_DESCRIPTION("Xilinx uartlite serial driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/uartlite.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* RDA8810PL serial device driver
*
* Copyright RDA Microelectronics Company Limited
* Copyright (c) 2017 Andreas Färber
* Copyright (c) 2018 Manivannan Sadhasivam
*/
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#define RDA_UART_PORT_NUM 3
#define RDA_UART_DEV_NAME "ttyRDA"
#define RDA_UART_CTRL 0x00
#define RDA_UART_STATUS 0x04
#define RDA_UART_RXTX_BUFFER 0x08
#define RDA_UART_IRQ_MASK 0x0c
#define RDA_UART_IRQ_CAUSE 0x10
#define RDA_UART_IRQ_TRIGGERS 0x14
#define RDA_UART_CMD_SET 0x18
#define RDA_UART_CMD_CLR 0x1c
/* UART_CTRL Bits */
#define RDA_UART_ENABLE BIT(0)
#define RDA_UART_DBITS_8 BIT(1)
#define RDA_UART_TX_SBITS_2 BIT(2)
#define RDA_UART_PARITY_EN BIT(3)
#define RDA_UART_PARITY(x) (((x) & 0x3) << 4)
#define RDA_UART_PARITY_ODD RDA_UART_PARITY(0)
#define RDA_UART_PARITY_EVEN RDA_UART_PARITY(1)
#define RDA_UART_PARITY_SPACE RDA_UART_PARITY(2)
#define RDA_UART_PARITY_MARK RDA_UART_PARITY(3)
#define RDA_UART_DIV_MODE BIT(20)
#define RDA_UART_IRDA_EN BIT(21)
#define RDA_UART_DMA_EN BIT(22)
#define RDA_UART_FLOW_CNT_EN BIT(23)
#define RDA_UART_LOOP_BACK_EN BIT(24)
#define RDA_UART_RX_LOCK_ERR BIT(25)
#define RDA_UART_RX_BREAK_LEN(x) (((x) & 0xf) << 28)
/* UART_STATUS Bits */
#define RDA_UART_RX_FIFO(x) (((x) & 0x7f) << 0)
#define RDA_UART_RX_FIFO_MASK (0x7f << 0)
#define RDA_UART_TX_FIFO(x) (((x) & 0x1f) << 8)
#define RDA_UART_TX_FIFO_MASK (0x1f << 8)
#define RDA_UART_TX_ACTIVE BIT(14)
#define RDA_UART_RX_ACTIVE BIT(15)
#define RDA_UART_RX_OVERFLOW_ERR BIT(16)
#define RDA_UART_TX_OVERFLOW_ERR BIT(17)
#define RDA_UART_RX_PARITY_ERR BIT(18)
#define RDA_UART_RX_FRAMING_ERR BIT(19)
#define RDA_UART_RX_BREAK_INT BIT(20)
#define RDA_UART_DCTS BIT(24)
#define RDA_UART_CTS BIT(25)
#define RDA_UART_DTR BIT(28)
#define RDA_UART_CLK_ENABLED BIT(31)
/* UART_RXTX_BUFFER Bits */
#define RDA_UART_RX_DATA(x) (((x) & 0xff) << 0)
#define RDA_UART_TX_DATA(x) (((x) & 0xff) << 0)
/* UART_IRQ_MASK Bits */
#define RDA_UART_TX_MODEM_STATUS BIT(0)
#define RDA_UART_RX_DATA_AVAILABLE BIT(1)
#define RDA_UART_TX_DATA_NEEDED BIT(2)
#define RDA_UART_RX_TIMEOUT BIT(3)
#define RDA_UART_RX_LINE_ERR BIT(4)
#define RDA_UART_TX_DMA_DONE BIT(5)
#define RDA_UART_RX_DMA_DONE BIT(6)
#define RDA_UART_RX_DMA_TIMEOUT BIT(7)
#define RDA_UART_DTR_RISE BIT(8)
#define RDA_UART_DTR_FALL BIT(9)
/* UART_IRQ_CAUSE Bits */
#define RDA_UART_TX_MODEM_STATUS_U BIT(16)
#define RDA_UART_RX_DATA_AVAILABLE_U BIT(17)
#define RDA_UART_TX_DATA_NEEDED_U BIT(18)
#define RDA_UART_RX_TIMEOUT_U BIT(19)
#define RDA_UART_RX_LINE_ERR_U BIT(20)
#define RDA_UART_TX_DMA_DONE_U BIT(21)
#define RDA_UART_RX_DMA_DONE_U BIT(22)
#define RDA_UART_RX_DMA_TIMEOUT_U BIT(23)
#define RDA_UART_DTR_RISE_U BIT(24)
#define RDA_UART_DTR_FALL_U BIT(25)
/* UART_TRIGGERS Bits */
#define RDA_UART_RX_TRIGGER(x) (((x) & 0x1f) << 0)
#define RDA_UART_TX_TRIGGER(x) (((x) & 0xf) << 8)
#define RDA_UART_AFC_LEVEL(x) (((x) & 0x1f) << 16)
/* UART_CMD_SET Bits */
#define RDA_UART_RI BIT(0)
#define RDA_UART_DCD BIT(1)
#define RDA_UART_DSR BIT(2)
#define RDA_UART_TX_BREAK_CONTROL BIT(3)
#define RDA_UART_TX_FINISH_N_WAIT BIT(4)
#define RDA_UART_RTS BIT(5)
#define RDA_UART_RX_FIFO_RESET BIT(6)
#define RDA_UART_TX_FIFO_RESET BIT(7)
#define RDA_UART_TX_FIFO_SIZE 16
static struct uart_driver rda_uart_driver;
struct rda_uart_port {
struct uart_port port;
struct clk *clk;
};
#define to_rda_uart_port(port) container_of(port, struct rda_uart_port, port)
static struct rda_uart_port *rda_uart_ports[RDA_UART_PORT_NUM];
static inline void rda_uart_write(struct uart_port *port, u32 val,
unsigned int off)
{
writel(val, port->membase + off);
}
static inline u32 rda_uart_read(struct uart_port *port, unsigned int off)
{
return readl(port->membase + off);
}
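/*
 * Editor's sketch (not part of the driver): the RX/TX FIFO occupancy fields
 * live in bits [6:0] and [12:8] of RDA_UART_STATUS. Extracting the raw counts
 * would look like this; the function name is hypothetical, and the exact
 * meaning of each count (fill level vs. free space) follows the RDA8810PL
 * datasheet.
 */
#if 0
static void rda_example_fifo_levels(struct uart_port *port,
				    unsigned int *rx, unsigned int *tx)
{
	u32 status = rda_uart_read(port, RDA_UART_STATUS);

	*rx = status & RDA_UART_RX_FIFO_MASK;		/* bits [6:0] */
	*tx = (status & RDA_UART_TX_FIFO_MASK) >> 8;	/* bits [12:8] */
}
#endif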
static unsigned int rda_uart_tx_empty(struct uart_port *port)
{
unsigned long flags;
unsigned int ret;
u32 val;
spin_lock_irqsave(&port->lock, flags);
val = rda_uart_read(port, RDA_UART_STATUS);
ret = (val & RDA_UART_TX_FIFO_MASK) ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&port->lock, flags);
return ret;
}
static unsigned int rda_uart_get_mctrl(struct uart_port *port)
{
unsigned int mctrl = 0;
u32 cmd_set, status;
cmd_set = rda_uart_read(port, RDA_UART_CMD_SET);
status = rda_uart_read(port, RDA_UART_STATUS);
if (cmd_set & RDA_UART_RTS)
mctrl |= TIOCM_RTS;
if (!(status & RDA_UART_CTS))
mctrl |= TIOCM_CTS;
return mctrl;
}
static void rda_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
u32 val;
if (mctrl & TIOCM_RTS) {
val = rda_uart_read(port, RDA_UART_CMD_SET);
rda_uart_write(port, (val | RDA_UART_RTS), RDA_UART_CMD_SET);
} else {
		/* Clear RTS to stop receiving. */
val = rda_uart_read(port, RDA_UART_CMD_CLR);
rda_uart_write(port, (val | RDA_UART_RTS), RDA_UART_CMD_CLR);
}
val = rda_uart_read(port, RDA_UART_CTRL);
if (mctrl & TIOCM_LOOP)
val |= RDA_UART_LOOP_BACK_EN;
else
val &= ~RDA_UART_LOOP_BACK_EN;
rda_uart_write(port, val, RDA_UART_CTRL);
}
static void rda_uart_stop_tx(struct uart_port *port)
{
u32 val;
val = rda_uart_read(port, RDA_UART_IRQ_MASK);
val &= ~RDA_UART_TX_DATA_NEEDED;
rda_uart_write(port, val, RDA_UART_IRQ_MASK);
val = rda_uart_read(port, RDA_UART_CMD_SET);
val |= RDA_UART_TX_FIFO_RESET;
rda_uart_write(port, val, RDA_UART_CMD_SET);
}
static void rda_uart_stop_rx(struct uart_port *port)
{
u32 val;
val = rda_uart_read(port, RDA_UART_IRQ_MASK);
val &= ~(RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT);
rda_uart_write(port, val, RDA_UART_IRQ_MASK);
/* Read Rx buffer before reset to avoid Rx timeout interrupt */
val = rda_uart_read(port, RDA_UART_RXTX_BUFFER);
val = rda_uart_read(port, RDA_UART_CMD_SET);
val |= RDA_UART_RX_FIFO_RESET;
rda_uart_write(port, val, RDA_UART_CMD_SET);
}
static void rda_uart_start_tx(struct uart_port *port)
{
u32 val;
if (uart_tx_stopped(port)) {
rda_uart_stop_tx(port);
return;
}
val = rda_uart_read(port, RDA_UART_IRQ_MASK);
val |= RDA_UART_TX_DATA_NEEDED;
rda_uart_write(port, val, RDA_UART_IRQ_MASK);
}
static void rda_uart_change_baudrate(struct rda_uart_port *rda_port,
unsigned long baud)
{
clk_set_rate(rda_port->clk, baud * 8);
}
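/*
 * Editor's note: the helper above relies on the UART clock running at 8x the
 * bit rate, so reprogramming the clock is all a baud change requires. For
 * example, 115200 baud asks the clock framework for 115200 * 8 = 921600 Hz.
 */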
static void rda_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
struct rda_uart_port *rda_port = to_rda_uart_port(port);
unsigned long flags;
unsigned int ctrl, cmd_set, cmd_clr, triggers;
unsigned int baud;
u32 irq_mask;
spin_lock_irqsave(&port->lock, flags);
baud = uart_get_baud_rate(port, termios, old, 9600, port->uartclk / 4);
rda_uart_change_baudrate(rda_port, baud);
ctrl = rda_uart_read(port, RDA_UART_CTRL);
cmd_set = rda_uart_read(port, RDA_UART_CMD_SET);
cmd_clr = rda_uart_read(port, RDA_UART_CMD_CLR);
switch (termios->c_cflag & CSIZE) {
case CS5:
case CS6:
dev_warn(port->dev, "bit size not supported, using 7 bits\n");
fallthrough;
case CS7:
ctrl &= ~RDA_UART_DBITS_8;
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= CS7;
break;
default:
ctrl |= RDA_UART_DBITS_8;
break;
}
/* stop bits */
if (termios->c_cflag & CSTOPB)
ctrl |= RDA_UART_TX_SBITS_2;
else
ctrl &= ~RDA_UART_TX_SBITS_2;
/* parity check */
if (termios->c_cflag & PARENB) {
ctrl |= RDA_UART_PARITY_EN;
/* Mark or Space parity */
if (termios->c_cflag & CMSPAR) {
if (termios->c_cflag & PARODD)
ctrl |= RDA_UART_PARITY_MARK;
else
ctrl |= RDA_UART_PARITY_SPACE;
} else if (termios->c_cflag & PARODD) {
ctrl |= RDA_UART_PARITY_ODD;
} else {
ctrl |= RDA_UART_PARITY_EVEN;
}
} else {
ctrl &= ~RDA_UART_PARITY_EN;
}
/* Hardware handshake (RTS/CTS) */
if (termios->c_cflag & CRTSCTS) {
ctrl |= RDA_UART_FLOW_CNT_EN;
cmd_set |= RDA_UART_RTS;
} else {
ctrl &= ~RDA_UART_FLOW_CNT_EN;
cmd_clr |= RDA_UART_RTS;
}
ctrl |= RDA_UART_ENABLE;
ctrl &= ~RDA_UART_DMA_EN;
triggers = (RDA_UART_AFC_LEVEL(20) | RDA_UART_RX_TRIGGER(16));
irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK);
rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
rda_uart_write(port, triggers, RDA_UART_IRQ_TRIGGERS);
rda_uart_write(port, ctrl, RDA_UART_CTRL);
rda_uart_write(port, cmd_set, RDA_UART_CMD_SET);
rda_uart_write(port, cmd_clr, RDA_UART_CMD_CLR);
rda_uart_write(port, irq_mask, RDA_UART_IRQ_MASK);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
spin_unlock_irqrestore(&port->lock, flags);
}
static void rda_uart_send_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
unsigned int ch;
u32 val;
if (uart_tx_stopped(port))
return;
if (port->x_char) {
while (!(rda_uart_read(port, RDA_UART_STATUS) &
RDA_UART_TX_FIFO_MASK))
cpu_relax();
rda_uart_write(port, port->x_char, RDA_UART_RXTX_BUFFER);
port->icount.tx++;
port->x_char = 0;
}
while (rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK) {
if (uart_circ_empty(xmit))
break;
ch = xmit->buf[xmit->tail];
rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER);
uart_xmit_advance(port, 1);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (!uart_circ_empty(xmit)) {
/* Re-enable Tx FIFO interrupt */
val = rda_uart_read(port, RDA_UART_IRQ_MASK);
val |= RDA_UART_TX_DATA_NEEDED;
rda_uart_write(port, val, RDA_UART_IRQ_MASK);
}
}
static void rda_uart_receive_chars(struct uart_port *port)
{
u32 status, val;
status = rda_uart_read(port, RDA_UART_STATUS);
while ((status & RDA_UART_RX_FIFO_MASK)) {
char flag = TTY_NORMAL;
if (status & RDA_UART_RX_PARITY_ERR) {
port->icount.parity++;
flag = TTY_PARITY;
}
if (status & RDA_UART_RX_FRAMING_ERR) {
port->icount.frame++;
flag = TTY_FRAME;
}
if (status & RDA_UART_RX_OVERFLOW_ERR) {
port->icount.overrun++;
flag = TTY_OVERRUN;
}
val = rda_uart_read(port, RDA_UART_RXTX_BUFFER);
val &= 0xff;
port->icount.rx++;
tty_insert_flip_char(&port->state->port, val, flag);
status = rda_uart_read(port, RDA_UART_STATUS);
}
tty_flip_buffer_push(&port->state->port);
}
static irqreturn_t rda_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
unsigned long flags;
u32 val, irq_mask;
spin_lock_irqsave(&port->lock, flags);
/* Clear IRQ cause */
val = rda_uart_read(port, RDA_UART_IRQ_CAUSE);
rda_uart_write(port, val, RDA_UART_IRQ_CAUSE);
if (val & (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT))
rda_uart_receive_chars(port);
if (val & (RDA_UART_TX_DATA_NEEDED)) {
irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK);
irq_mask &= ~RDA_UART_TX_DATA_NEEDED;
rda_uart_write(port, irq_mask, RDA_UART_IRQ_MASK);
rda_uart_send_chars(port);
}
spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
}
static int rda_uart_startup(struct uart_port *port)
{
unsigned long flags;
int ret;
u32 val;
spin_lock_irqsave(&port->lock, flags);
rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
spin_unlock_irqrestore(&port->lock, flags);
ret = request_irq(port->irq, rda_interrupt, IRQF_NO_SUSPEND,
"rda-uart", port);
if (ret)
return ret;
spin_lock_irqsave(&port->lock, flags);
val = rda_uart_read(port, RDA_UART_CTRL);
val |= RDA_UART_ENABLE;
rda_uart_write(port, val, RDA_UART_CTRL);
/* enable rx interrupt */
val = rda_uart_read(port, RDA_UART_IRQ_MASK);
val |= (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT);
rda_uart_write(port, val, RDA_UART_IRQ_MASK);
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
static void rda_uart_shutdown(struct uart_port *port)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&port->lock, flags);
rda_uart_stop_tx(port);
rda_uart_stop_rx(port);
val = rda_uart_read(port, RDA_UART_CTRL);
val &= ~RDA_UART_ENABLE;
rda_uart_write(port, val, RDA_UART_CTRL);
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *rda_uart_type(struct uart_port *port)
{
return (port->type == PORT_RDA) ? "rda-uart" : NULL;
}
static int rda_uart_request_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
if (!devm_request_mem_region(port->dev, port->mapbase,
resource_size(res), dev_name(port->dev)))
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
port->membase = devm_ioremap(port->dev, port->mapbase,
resource_size(res));
if (!port->membase)
return -EBUSY;
}
return 0;
}
static void rda_uart_config_port(struct uart_port *port, int flags)
{
unsigned long irq_flags;
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_RDA;
rda_uart_request_port(port);
}
spin_lock_irqsave(&port->lock, irq_flags);
/* Clear mask, so no surprise interrupts. */
rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
/* Clear status register */
rda_uart_write(port, 0, RDA_UART_STATUS);
spin_unlock_irqrestore(&port->lock, irq_flags);
}
static void rda_uart_release_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return;
if (port->flags & UPF_IOREMAP) {
devm_release_mem_region(port->dev, port->mapbase,
resource_size(res));
devm_iounmap(port->dev, port->membase);
port->membase = NULL;
}
}
static int rda_uart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
if (port->type != PORT_RDA)
return -EINVAL;
if (port->irq != ser->irq)
return -EINVAL;
return 0;
}
static const struct uart_ops rda_uart_ops = {
.tx_empty = rda_uart_tx_empty,
.get_mctrl = rda_uart_get_mctrl,
.set_mctrl = rda_uart_set_mctrl,
.start_tx = rda_uart_start_tx,
.stop_tx = rda_uart_stop_tx,
.stop_rx = rda_uart_stop_rx,
.startup = rda_uart_startup,
.shutdown = rda_uart_shutdown,
.set_termios = rda_uart_set_termios,
.type = rda_uart_type,
.request_port = rda_uart_request_port,
.release_port = rda_uart_release_port,
.config_port = rda_uart_config_port,
.verify_port = rda_uart_verify_port,
};
#ifdef CONFIG_SERIAL_RDA_CONSOLE
static void rda_console_putchar(struct uart_port *port, unsigned char ch)
{
if (!port->membase)
return;
while (!(rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK))
cpu_relax();
rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER);
}
static void rda_uart_port_write(struct uart_port *port, const char *s,
u_int count)
{
u32 old_irq_mask;
unsigned long flags;
int locked;
local_irq_save(flags);
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
locked = spin_trylock(&port->lock);
} else {
spin_lock(&port->lock);
locked = 1;
}
old_irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK);
rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
uart_console_write(port, s, count, rda_console_putchar);
/* wait until all contents have been sent out */
while (!(rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK))
cpu_relax();
rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK);
if (locked)
spin_unlock(&port->lock);
local_irq_restore(flags);
}
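/*
 * Note on the locking above: this is the usual console-write dance. When
 * called re-entrantly from sysrq handling the port lock is already held, so
 * it is skipped; during an oops we only trylock, so a wedged lock cannot keep
 * the final messages from reaching the console; otherwise the lock is taken
 * normally.
 */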
static void rda_uart_console_write(struct console *co, const char *s,
u_int count)
{
struct rda_uart_port *rda_port;
rda_port = rda_uart_ports[co->index];
if (!rda_port)
return;
rda_uart_port_write(&rda_port->port, s, count);
}
static int rda_uart_console_setup(struct console *co, char *options)
{
struct rda_uart_port *rda_port;
int baud = 921600;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= RDA_UART_PORT_NUM)
return -EINVAL;
rda_port = rda_uart_ports[co->index];
if (!rda_port || !rda_port->port.membase)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&rda_port->port, co, baud, parity, bits, flow);
}
static struct console rda_uart_console = {
.name = RDA_UART_DEV_NAME,
.write = rda_uart_console_write,
.device = uart_console_device,
.setup = rda_uart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &rda_uart_driver,
};
static int __init rda_uart_console_init(void)
{
register_console(&rda_uart_console);
return 0;
}
console_initcall(rda_uart_console_init);
static void rda_uart_early_console_write(struct console *co,
const char *s,
u_int count)
{
struct earlycon_device *dev = co->data;
rda_uart_port_write(&dev->port, s, count);
}
static int __init
rda_uart_early_console_setup(struct earlycon_device *device, const char *opt)
{
if (!device->port.membase)
return -ENODEV;
device->con->write = rda_uart_early_console_write;
return 0;
}
OF_EARLYCON_DECLARE(rda, "rda,8810pl-uart",
rda_uart_early_console_setup);
#define RDA_UART_CONSOLE (&rda_uart_console)
#else
#define RDA_UART_CONSOLE NULL
#endif /* CONFIG_SERIAL_RDA_CONSOLE */
static struct uart_driver rda_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "rda-uart",
.dev_name = RDA_UART_DEV_NAME,
.nr = RDA_UART_PORT_NUM,
.cons = RDA_UART_CONSOLE,
};
static const struct of_device_id rda_uart_dt_matches[] = {
{ .compatible = "rda,8810pl-uart" },
{ }
};
MODULE_DEVICE_TABLE(of, rda_uart_dt_matches);
static int rda_uart_probe(struct platform_device *pdev)
{
struct resource *res_mem;
struct rda_uart_port *rda_port;
int ret, irq;
if (pdev->dev.of_node)
pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
if (pdev->id < 0 || pdev->id >= RDA_UART_PORT_NUM) {
dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
return -EINVAL;
}
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_mem) {
dev_err(&pdev->dev, "could not get mem\n");
return -ENODEV;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
if (rda_uart_ports[pdev->id]) {
dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
return -EBUSY;
}
rda_port = devm_kzalloc(&pdev->dev, sizeof(*rda_port), GFP_KERNEL);
if (!rda_port)
return -ENOMEM;
rda_port->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rda_port->clk)) {
dev_err(&pdev->dev, "could not get clk\n");
return PTR_ERR(rda_port->clk);
}
rda_port->port.dev = &pdev->dev;
rda_port->port.regshift = 0;
rda_port->port.line = pdev->id;
rda_port->port.type = PORT_RDA;
rda_port->port.iotype = UPIO_MEM;
rda_port->port.mapbase = res_mem->start;
rda_port->port.irq = irq;
rda_port->port.uartclk = clk_get_rate(rda_port->clk);
if (rda_port->port.uartclk == 0) {
dev_err(&pdev->dev, "clock rate is zero\n");
return -EINVAL;
}
rda_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP |
UPF_LOW_LATENCY;
rda_port->port.x_char = 0;
rda_port->port.fifosize = RDA_UART_TX_FIFO_SIZE;
rda_port->port.ops = &rda_uart_ops;
rda_uart_ports[pdev->id] = rda_port;
platform_set_drvdata(pdev, rda_port);
ret = uart_add_one_port(&rda_uart_driver, &rda_port->port);
if (ret)
rda_uart_ports[pdev->id] = NULL;
return ret;
}
static int rda_uart_remove(struct platform_device *pdev)
{
struct rda_uart_port *rda_port = platform_get_drvdata(pdev);
uart_remove_one_port(&rda_uart_driver, &rda_port->port);
rda_uart_ports[pdev->id] = NULL;
return 0;
}
static struct platform_driver rda_uart_platform_driver = {
.probe = rda_uart_probe,
.remove = rda_uart_remove,
.driver = {
.name = "rda-uart",
.of_match_table = rda_uart_dt_matches,
},
};
static int __init rda_uart_init(void)
{
int ret;
ret = uart_register_driver(&rda_uart_driver);
if (ret)
return ret;
ret = platform_driver_register(&rda_uart_platform_driver);
if (ret)
uart_unregister_driver(&rda_uart_driver);
return ret;
}
static void __exit rda_uart_exit(void)
{
platform_driver_unregister(&rda_uart_platform_driver);
uart_unregister_driver(&rda_uart_driver);
}
module_init(rda_uart_init);
module_exit(rda_uart_exit);
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_DESCRIPTION("RDA8810PL serial device driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/rda-uart.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Conexant Digicolor serial ports (USART)
*
* Author: Baruch Siach <[email protected]>
*
* Copyright (C) 2014 Paradox Innovation Ltd.
*/
#include <linux/module.h>
#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#define UA_ENABLE 0x00
#define UA_ENABLE_ENABLE BIT(0)
#define UA_CONTROL 0x01
#define UA_CONTROL_RX_ENABLE BIT(0)
#define UA_CONTROL_TX_ENABLE BIT(1)
#define UA_CONTROL_SOFT_RESET BIT(2)
#define UA_STATUS 0x02
#define UA_STATUS_PARITY_ERR BIT(0)
#define UA_STATUS_FRAME_ERR BIT(1)
#define UA_STATUS_OVERRUN_ERR BIT(2)
#define UA_STATUS_TX_READY BIT(6)
#define UA_CONFIG 0x03
#define UA_CONFIG_CHAR_LEN BIT(0)
#define UA_CONFIG_STOP_BITS BIT(1)
#define UA_CONFIG_PARITY BIT(2)
#define UA_CONFIG_ODD_PARITY BIT(4)
#define UA_EMI_REC 0x04
#define UA_HBAUD_LO 0x08
#define UA_HBAUD_HI 0x09
#define UA_STATUS_FIFO 0x0a
#define UA_STATUS_FIFO_RX_EMPTY BIT(2)
#define UA_STATUS_FIFO_RX_INT_ALMOST BIT(3)
#define UA_STATUS_FIFO_TX_FULL BIT(4)
#define UA_STATUS_FIFO_TX_INT_ALMOST BIT(7)
#define UA_CONFIG_FIFO 0x0b
#define UA_CONFIG_FIFO_RX_THRESH 7
#define UA_CONFIG_FIFO_RX_FIFO_MODE BIT(3)
#define UA_CONFIG_FIFO_TX_FIFO_MODE BIT(7)
#define UA_INTFLAG_CLEAR 0x1c
#define UA_INTFLAG_SET 0x1d
#define UA_INT_ENABLE 0x1e
#define UA_INT_STATUS 0x1f
#define UA_INT_TX BIT(0)
#define UA_INT_RX BIT(1)
#define DIGICOLOR_USART_NR 3
/*
 * We use the 16-byte hardware FIFO to buffer Rx traffic. An Rx interrupt is
 * only produced when the FIFO fills beyond a certain configurable threshold.
 * Unfortunately, there is no way to set this threshold below half the FIFO,
 * so we must periodically poll the FIFO status register to see whether any
 * Rx bytes are waiting.
*/
struct digicolor_port {
struct uart_port port;
struct delayed_work rx_poll_work;
};
static struct uart_port *digicolor_ports[DIGICOLOR_USART_NR];
static bool digicolor_uart_tx_full(struct uart_port *port)
{
return !!(readb_relaxed(port->membase + UA_STATUS_FIFO) &
UA_STATUS_FIFO_TX_FULL);
}
static bool digicolor_uart_rx_empty(struct uart_port *port)
{
return !!(readb_relaxed(port->membase + UA_STATUS_FIFO) &
UA_STATUS_FIFO_RX_EMPTY);
}
static void digicolor_uart_stop_tx(struct uart_port *port)
{
u8 int_enable = readb_relaxed(port->membase + UA_INT_ENABLE);
int_enable &= ~UA_INT_TX;
writeb_relaxed(int_enable, port->membase + UA_INT_ENABLE);
}
static void digicolor_uart_start_tx(struct uart_port *port)
{
u8 int_enable = readb_relaxed(port->membase + UA_INT_ENABLE);
int_enable |= UA_INT_TX;
writeb_relaxed(int_enable, port->membase + UA_INT_ENABLE);
}
static void digicolor_uart_stop_rx(struct uart_port *port)
{
u8 int_enable = readb_relaxed(port->membase + UA_INT_ENABLE);
int_enable &= ~UA_INT_RX;
writeb_relaxed(int_enable, port->membase + UA_INT_ENABLE);
}
static void digicolor_rx_poll(struct work_struct *work)
{
struct digicolor_port *dp =
container_of(to_delayed_work(work),
struct digicolor_port, rx_poll_work);
if (!digicolor_uart_rx_empty(&dp->port))
/* force RX interrupt */
writeb_relaxed(UA_INT_RX, dp->port.membase + UA_INTFLAG_SET);
schedule_delayed_work(&dp->rx_poll_work, msecs_to_jiffies(100));
}
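/*
 * Note on the poll above: rather than duplicating the RX draining logic, the
 * worker latches a fake RX interrupt through UA_INTFLAG_SET whenever the FIFO
 * is non-empty, so the normal interrupt path picks the bytes up. The work
 * then re-arms itself every 100 ms for as long as the port is open (it is
 * cancelled in digicolor_uart_shutdown()).
 */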
static void digicolor_uart_rx(struct uart_port *port)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
while (1) {
u8 status, ch, ch_flag;
if (digicolor_uart_rx_empty(port))
break;
ch = readb_relaxed(port->membase + UA_EMI_REC);
status = readb_relaxed(port->membase + UA_STATUS);
port->icount.rx++;
ch_flag = TTY_NORMAL;
if (status) {
if (status & UA_STATUS_PARITY_ERR)
port->icount.parity++;
else if (status & UA_STATUS_FRAME_ERR)
port->icount.frame++;
else if (status & UA_STATUS_OVERRUN_ERR)
port->icount.overrun++;
status &= port->read_status_mask;
if (status & UA_STATUS_PARITY_ERR)
ch_flag = TTY_PARITY;
else if (status & UA_STATUS_FRAME_ERR)
ch_flag = TTY_FRAME;
else if (status & UA_STATUS_OVERRUN_ERR)
ch_flag = TTY_OVERRUN;
}
if (status & port->ignore_status_mask)
continue;
uart_insert_char(port, status, UA_STATUS_OVERRUN_ERR, ch,
ch_flag);
}
spin_unlock_irqrestore(&port->lock, flags);
tty_flip_buffer_push(&port->state->port);
}
static void digicolor_uart_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
unsigned long flags;
if (digicolor_uart_tx_full(port))
return;
spin_lock_irqsave(&port->lock, flags);
if (port->x_char) {
writeb_relaxed(port->x_char, port->membase + UA_EMI_REC);
port->icount.tx++;
port->x_char = 0;
goto out;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
digicolor_uart_stop_tx(port);
goto out;
}
while (!uart_circ_empty(xmit)) {
writeb(xmit->buf[xmit->tail], port->membase + UA_EMI_REC);
uart_xmit_advance(port, 1);
if (digicolor_uart_tx_full(port))
break;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
out:
spin_unlock_irqrestore(&port->lock, flags);
}
static irqreturn_t digicolor_uart_int(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
u8 int_status = readb_relaxed(port->membase + UA_INT_STATUS);
writeb_relaxed(UA_INT_RX | UA_INT_TX,
port->membase + UA_INTFLAG_CLEAR);
if (int_status & UA_INT_RX)
digicolor_uart_rx(port);
if (int_status & UA_INT_TX)
digicolor_uart_tx(port);
return IRQ_HANDLED;
}
static unsigned int digicolor_uart_tx_empty(struct uart_port *port)
{
u8 status = readb_relaxed(port->membase + UA_STATUS);
return (status & UA_STATUS_TX_READY) ? TIOCSER_TEMT : 0;
}
static unsigned int digicolor_uart_get_mctrl(struct uart_port *port)
{
return TIOCM_CTS;
}
static void digicolor_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
static void digicolor_uart_break_ctl(struct uart_port *port, int state)
{
}
static int digicolor_uart_startup(struct uart_port *port)
{
struct digicolor_port *dp =
container_of(port, struct digicolor_port, port);
writeb_relaxed(UA_ENABLE_ENABLE, port->membase + UA_ENABLE);
writeb_relaxed(UA_CONTROL_SOFT_RESET, port->membase + UA_CONTROL);
writeb_relaxed(0, port->membase + UA_CONTROL);
writeb_relaxed(UA_CONFIG_FIFO_RX_FIFO_MODE
| UA_CONFIG_FIFO_TX_FIFO_MODE | UA_CONFIG_FIFO_RX_THRESH,
port->membase + UA_CONFIG_FIFO);
writeb_relaxed(UA_STATUS_FIFO_RX_INT_ALMOST,
port->membase + UA_STATUS_FIFO);
writeb_relaxed(UA_CONTROL_RX_ENABLE | UA_CONTROL_TX_ENABLE,
port->membase + UA_CONTROL);
writeb_relaxed(UA_INT_TX | UA_INT_RX,
port->membase + UA_INT_ENABLE);
schedule_delayed_work(&dp->rx_poll_work, msecs_to_jiffies(100));
return 0;
}
static void digicolor_uart_shutdown(struct uart_port *port)
{
struct digicolor_port *dp =
container_of(port, struct digicolor_port, port);
writeb_relaxed(0, port->membase + UA_ENABLE);
cancel_delayed_work_sync(&dp->rx_poll_work);
}
static void digicolor_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
unsigned int baud, divisor;
u8 config = 0;
unsigned long flags;
/* Mask termios capabilities we don't support */
termios->c_cflag &= ~CMSPAR;
termios->c_iflag &= ~(BRKINT | IGNBRK);
/* Limit baud rates so that we don't need the fractional divider */
baud = uart_get_baud_rate(port, termios, old,
port->uartclk / (0x10000*16),
port->uartclk / 256);
divisor = uart_get_divisor(port, baud) - 1;
switch (termios->c_cflag & CSIZE) {
case CS7:
break;
case CS8:
default:
config |= UA_CONFIG_CHAR_LEN;
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= CS8;
break;
}
if (termios->c_cflag & CSTOPB)
config |= UA_CONFIG_STOP_BITS;
if (termios->c_cflag & PARENB) {
config |= UA_CONFIG_PARITY;
if (termios->c_cflag & PARODD)
config |= UA_CONFIG_ODD_PARITY;
}
/* Set read status mask */
port->read_status_mask = UA_STATUS_OVERRUN_ERR;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UA_STATUS_PARITY_ERR
| UA_STATUS_FRAME_ERR;
/* Set status ignore mask */
port->ignore_status_mask = 0;
if (!(termios->c_cflag & CREAD))
port->ignore_status_mask |= UA_STATUS_OVERRUN_ERR
| UA_STATUS_PARITY_ERR | UA_STATUS_FRAME_ERR;
spin_lock_irqsave(&port->lock, flags);
uart_update_timeout(port, termios->c_cflag, baud);
writeb_relaxed(config, port->membase + UA_CONFIG);
writeb_relaxed(divisor & 0xff, port->membase + UA_HBAUD_LO);
writeb_relaxed(divisor >> 8, port->membase + UA_HBAUD_HI);
spin_unlock_irqrestore(&port->lock, flags);
}
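/*
 * Editor's note: a worked example of the divisor math above, assuming a
 * hypothetical 7.3728 MHz uartclk. uart_get_divisor() rounds
 * uartclk / (16 * baud), so 7372800 / (16 * 115200) = 4, and the hardware
 * takes divisor - 1 = 3, split across UA_HBAUD_LO/UA_HBAUD_HI.
 */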
static const char *digicolor_uart_type(struct uart_port *port)
{
return (port->type == PORT_DIGICOLOR) ? "DIGICOLOR USART" : NULL;
}
static void digicolor_uart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_DIGICOLOR;
}
static void digicolor_uart_release_port(struct uart_port *port)
{
}
static int digicolor_uart_request_port(struct uart_port *port)
{
return 0;
}
static const struct uart_ops digicolor_uart_ops = {
.tx_empty = digicolor_uart_tx_empty,
.set_mctrl = digicolor_uart_set_mctrl,
.get_mctrl = digicolor_uart_get_mctrl,
.stop_tx = digicolor_uart_stop_tx,
.start_tx = digicolor_uart_start_tx,
.stop_rx = digicolor_uart_stop_rx,
.break_ctl = digicolor_uart_break_ctl,
.startup = digicolor_uart_startup,
.shutdown = digicolor_uart_shutdown,
.set_termios = digicolor_uart_set_termios,
.type = digicolor_uart_type,
.config_port = digicolor_uart_config_port,
.release_port = digicolor_uart_release_port,
.request_port = digicolor_uart_request_port,
};
static void digicolor_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
while (digicolor_uart_tx_full(port))
cpu_relax();
writeb_relaxed(ch, port->membase + UA_EMI_REC);
}
static void digicolor_uart_console_write(struct console *co, const char *c,
unsigned n)
{
struct uart_port *port = digicolor_ports[co->index];
u8 status;
unsigned long flags;
int locked = 1;
if (oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
uart_console_write(port, c, n, digicolor_uart_console_putchar);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
/* Wait for transmitter to become empty */
do {
status = readb_relaxed(port->membase + UA_STATUS);
} while ((status & UA_STATUS_TX_READY) == 0);
}
static int digicolor_uart_console_setup(struct console *co, char *options)
{
int baud = 115200, bits = 8, parity = 'n', flow = 'n';
struct uart_port *port;
if (co->index < 0 || co->index >= DIGICOLOR_USART_NR)
return -EINVAL;
port = digicolor_ports[co->index];
if (!port)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct console digicolor_console = {
.name = "ttyS",
.device = uart_console_device,
.write = digicolor_uart_console_write,
.setup = digicolor_uart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
};
static struct uart_driver digicolor_uart = {
.driver_name = "digicolor-usart",
.dev_name = "ttyS",
.nr = DIGICOLOR_USART_NR,
};
static int digicolor_uart_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
int irq, ret, index;
struct digicolor_port *dp;
struct resource *res;
struct clk *uart_clk;
if (!np) {
dev_err(&pdev->dev, "Missing device tree node\n");
return -ENXIO;
}
index = of_alias_get_id(np, "serial");
if (index < 0 || index >= DIGICOLOR_USART_NR)
return -EINVAL;
dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
uart_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(uart_clk))
return PTR_ERR(uart_clk);
dp->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dp->port.membase))
return PTR_ERR(dp->port.membase);
dp->port.mapbase = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
dp->port.irq = irq;
dp->port.iotype = UPIO_MEM;
dp->port.uartclk = clk_get_rate(uart_clk);
dp->port.fifosize = 16;
dp->port.dev = &pdev->dev;
dp->port.ops = &digicolor_uart_ops;
dp->port.line = index;
dp->port.type = PORT_DIGICOLOR;
spin_lock_init(&dp->port.lock);
digicolor_ports[index] = &dp->port;
platform_set_drvdata(pdev, &dp->port);
INIT_DELAYED_WORK(&dp->rx_poll_work, digicolor_rx_poll);
ret = devm_request_irq(&pdev->dev, dp->port.irq, digicolor_uart_int, 0,
dev_name(&pdev->dev), &dp->port);
if (ret)
return ret;
return uart_add_one_port(&digicolor_uart, &dp->port);
}
static int digicolor_uart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
uart_remove_one_port(&digicolor_uart, port);
return 0;
}
static const struct of_device_id digicolor_uart_dt_ids[] = {
{ .compatible = "cnxt,cx92755-usart", },
{ }
};
MODULE_DEVICE_TABLE(of, digicolor_uart_dt_ids);
static struct platform_driver digicolor_uart_platform = {
.driver = {
.name = "digicolor-usart",
.of_match_table = of_match_ptr(digicolor_uart_dt_ids),
},
.probe = digicolor_uart_probe,
.remove = digicolor_uart_remove,
};
static int __init digicolor_uart_init(void)
{
int ret;
if (IS_ENABLED(CONFIG_SERIAL_CONEXANT_DIGICOLOR_CONSOLE)) {
digicolor_uart.cons = &digicolor_console;
digicolor_console.data = &digicolor_uart;
}
ret = uart_register_driver(&digicolor_uart);
if (ret)
return ret;
ret = platform_driver_register(&digicolor_uart_platform);
if (ret)
uart_unregister_driver(&digicolor_uart);
return ret;
}
module_init(digicolor_uart_init);
static void __exit digicolor_uart_exit(void)
{
platform_driver_unregister(&digicolor_uart_platform);
uart_unregister_driver(&digicolor_uart);
}
module_exit(digicolor_uart_exit);
MODULE_AUTHOR("Baruch Siach <[email protected]>");
MODULE_DESCRIPTION("Conexant Digicolor USART serial driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/digicolor-usart.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Motorola/Freescale IMX serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Author: Sascha Hauer <[email protected]>
* Copyright (C) 2004 Pengutronix
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/rational.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>
#include <linux/dma/imx-dma.h>
#include "serial_mctrl_gpio.h"
/* Register definitions */
#define URXD0 0x0 /* Receiver Register */
#define URTX0 0x40 /* Transmitter Register */
#define UCR1 0x80 /* Control Register 1 */
#define UCR2 0x84 /* Control Register 2 */
#define UCR3 0x88 /* Control Register 3 */
#define UCR4 0x8c /* Control Register 4 */
#define UFCR 0x90 /* FIFO Control Register */
#define USR1 0x94 /* Status Register 1 */
#define USR2 0x98 /* Status Register 2 */
#define UESC 0x9c /* Escape Character Register */
#define UTIM 0xa0 /* Escape Timer Register */
#define UBIR 0xa4 /* BRM Incremental Register */
#define UBMR 0xa8 /* BRM Modulator Register */
#define UBRC 0xac /* Baud Rate Count Register */
#define IMX21_ONEMS 0xb0 /* One Millisecond register */
#define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
/* UART Control Register Bit Fields.*/
#define URXD_DUMMY_READ (1<<16)
#define URXD_CHARRDY (1<<15)
#define URXD_ERR (1<<14)
#define URXD_OVRRUN (1<<13)
#define URXD_FRMERR (1<<12)
#define URXD_BRK (1<<11)
#define URXD_PRERR (1<<10)
#define URXD_RX_DATA (0xFF<<0)
#define UCR1_ADEN (1<<15) /* Auto detect interrupt */
#define UCR1_ADBR (1<<14) /* Auto detect baud rate */
#define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */
#define UCR1_IDEN (1<<12) /* Idle condition interrupt */
#define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
#define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */
#define UCR1_RXDMAEN (1<<8) /* Recv ready DMA enable */
#define UCR1_IREN (1<<7) /* Infrared interface enable */
#define UCR1_TXMPTYEN (1<<6) /* Transmitter empty interrupt enable */
#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
#define UCR1_SNDBRK (1<<4) /* Send break */
#define UCR1_TXDMAEN (1<<3) /* Transmitter ready DMA enable */
#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
#define UCR1_ATDMAEN (1<<2) /* Aging DMA Timer Enable */
#define UCR1_DOZE (1<<1) /* Doze */
#define UCR1_UARTEN (1<<0) /* UART enabled */
#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
#define UCR2_IRTS (1<<14) /* Ignore RTS pin */
#define UCR2_CTSC (1<<13) /* CTS pin control */
#define UCR2_CTS (1<<12) /* Clear to send */
#define UCR2_ESCEN (1<<11) /* Escape enable */
#define UCR2_PREN (1<<8) /* Parity enable */
#define UCR2_PROE (1<<7) /* Parity odd/even */
#define UCR2_STPB (1<<6) /* Stop */
#define UCR2_WS (1<<5) /* Word size */
#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
#define UCR2_ATEN (1<<3) /* Aging Timer Enable */
#define UCR2_TXEN (1<<2) /* Transmitter enabled */
#define UCR2_RXEN (1<<1) /* Receiver enabled */
#define UCR2_SRST (1<<0) /* SW reset */
#define UCR3_DTREN (1<<13) /* DTR interrupt enable */
#define UCR3_PARERREN (1<<12) /* Parity enable */
#define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */
#define UCR3_DSR (1<<10) /* Data set ready */
#define UCR3_DCD (1<<9) /* Data carrier detect */
#define UCR3_RI (1<<8) /* Ring indicator */
#define UCR3_ADNIMP (1<<7) /* Autobaud Detection Not Improved */
#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
#define UCR3_DTRDEN (1<<3) /* Data Terminal Ready Delta Enable. */
#define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */
#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
#define UCR3_BPEN (1<<0) /* Preset registers enable */
#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
#define UCR4_INVR (1<<9) /* Inverted infrared reception */
#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
#define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */
#define UCR4_IDDMAEN (1<<6) /* DMA IDLE Condition Detected */
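/* Note: UCR4 bit 6 is REF16 on i.MX1 but IDDMAEN on later SoCs */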
#define UCR4_IRSC (1<<5) /* IR special case */
#define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */
#define UCR4_BKEN (1<<2) /* Break condition interrupt enable */
#define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
#define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */
#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
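/*
 * RFDIV encoding used above: a divider of 1..6 is stored in the field as
 * 6 - div, and a divider of 7 is stored as 6. imx_uart_console_get_options()
 * below applies the inverse mapping when decoding the register.
 */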
#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
#define USR1_RTSS (1<<14) /* RTS pin status */
#define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */
#define USR1_RTSD (1<<12) /* RTS delta */
#define USR1_ESCF (1<<11) /* Escape seq interrupt flag */
#define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */
#define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */
#define USR1_AGTIM (1<<8) /* Ageing timer interrupt flag */
#define USR1_DTRD (1<<7) /* DTR Delta */
#define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */
#define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */
#define USR1_AWAKE (1<<4) /* Async wake interrupt flag */
#define USR2_ADET (1<<15) /* Auto baud rate detect complete */
#define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */
#define USR2_DTRF (1<<13) /* DTR edge interrupt flag */
#define USR2_IDLE (1<<12) /* Idle condition */
#define USR2_RIDELT (1<<10) /* Ring Interrupt Delta */
#define USR2_RIIN (1<<9) /* Ring Indicator Input */
#define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */
#define USR2_WAKE (1<<7) /* Wake */
#define USR2_DCDIN (1<<5) /* Data Carrier Detect Input */
#define USR2_RTSF (1<<4) /* RTS edge interrupt flag */
#define USR2_TXDC (1<<3) /* Transmitter complete */
#define USR2_BRCD (1<<2) /* Break condition */
#define USR2_ORE (1<<1) /* Overrun error */
#define USR2_RDR (1<<0) /* Recv data ready */
#define UTS_FRCPERR (1<<13) /* Force parity error */
#define UTS_LOOP (1<<12) /* Loop tx and rx */
#define UTS_TXEMPTY (1<<6) /* TxFIFO empty */
#define UTS_RXEMPTY (1<<5) /* RxFIFO empty */
#define UTS_TXFULL (1<<4) /* TxFIFO full */
#define UTS_RXFULL (1<<3) /* RxFIFO full */
#define UTS_SOFTRST (1<<0) /* Software reset */
/* We've been assigned a range on the "Low-density serial ports" major */
#define SERIAL_IMX_MAJOR 207
#define MINOR_START 16
#define DEV_NAME "ttymxc"
/*
* This determines how often we check the modem status signals
* for any change. They generally aren't connected to an IRQ
* so we have to poll them. We also check immediately before
 * filling the TX fifo in case CTS has been dropped.
*/
#define MCTRL_TIMEOUT (250*HZ/1000)
#define DRIVER_NAME "IMX-uart"
#define UART_NR 8
/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
enum imx_uart_type {
IMX1_UART,
IMX21_UART,
IMX53_UART,
IMX6Q_UART,
};
/* device type dependent stuff */
struct imx_uart_data {
unsigned uts_reg;
enum imx_uart_type devtype;
};
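/*
 * RS-485 transmit state machine, driven from imx_uart_start_tx(),
 * imx_uart_stop_tx() and the trigger_start_tx/trigger_stop_tx hrtimers:
 *
 *   OFF -> WAIT_AFTER_RTS  (RTS set up, optional delay_rts_before_send)
 *       -> SEND            (transmitter active)
 *       -> WAIT_AFTER_SEND (shifter empty, optional delay_rts_after_send)
 *       -> OFF             (RTS restored, receiver re-enabled)
 */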
enum imx_tx_state {
OFF,
WAIT_AFTER_RTS,
SEND,
WAIT_AFTER_SEND,
};
struct imx_port {
struct uart_port port;
struct timer_list timer;
unsigned int old_status;
unsigned int have_rtscts:1;
unsigned int have_rtsgpio:1;
unsigned int dte_mode:1;
unsigned int inverted_tx:1;
unsigned int inverted_rx:1;
struct clk *clk_ipg;
struct clk *clk_per;
const struct imx_uart_data *devdata;
struct mctrl_gpios *gpios;
/* counter to stop 0xff flood */
int idle_counter;
/* DMA fields */
unsigned int dma_is_enabled:1;
unsigned int dma_is_rxing:1;
unsigned int dma_is_txing:1;
struct dma_chan *dma_chan_rx, *dma_chan_tx;
struct scatterlist rx_sgl, tx_sgl[2];
void *rx_buf;
struct circ_buf rx_ring;
unsigned int rx_buf_size;
unsigned int rx_period_length;
unsigned int rx_periods;
dma_cookie_t rx_cookie;
unsigned int tx_bytes;
unsigned int dma_tx_nents;
unsigned int saved_reg[10];
bool context_saved;
enum imx_tx_state tx_state;
struct hrtimer trigger_start_tx;
struct hrtimer trigger_stop_tx;
};
struct imx_port_ucrs {
unsigned int ucr1;
unsigned int ucr2;
unsigned int ucr3;
};
static struct imx_uart_data imx_uart_devdata[] = {
[IMX1_UART] = {
.uts_reg = IMX1_UTS,
.devtype = IMX1_UART,
},
[IMX21_UART] = {
.uts_reg = IMX21_UTS,
.devtype = IMX21_UART,
},
[IMX53_UART] = {
.uts_reg = IMX21_UTS,
.devtype = IMX53_UART,
},
[IMX6Q_UART] = {
.uts_reg = IMX21_UTS,
.devtype = IMX6Q_UART,
},
};
static const struct of_device_id imx_uart_dt_ids[] = {
{ .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
{ .compatible = "fsl,imx53-uart", .data = &imx_uart_devdata[IMX53_UART], },
{ .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
{ .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
static inline void imx_uart_writel(struct imx_port *sport, u32 val, u32 offset)
{
writel(val, sport->port.membase + offset);
}
static inline u32 imx_uart_readl(struct imx_port *sport, u32 offset)
{
return readl(sport->port.membase + offset);
}
static inline unsigned imx_uart_uts_reg(struct imx_port *sport)
{
return sport->devdata->uts_reg;
}
static inline int imx_uart_is_imx1(struct imx_port *sport)
{
return sport->devdata->devtype == IMX1_UART;
}
/*
* Save and restore functions for UCR1, UCR2 and UCR3 registers
*/
#if IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE)
static void imx_uart_ucrs_save(struct imx_port *sport,
struct imx_port_ucrs *ucr)
{
/* save control registers */
ucr->ucr1 = imx_uart_readl(sport, UCR1);
ucr->ucr2 = imx_uart_readl(sport, UCR2);
ucr->ucr3 = imx_uart_readl(sport, UCR3);
}
static void imx_uart_ucrs_restore(struct imx_port *sport,
struct imx_port_ucrs *ucr)
{
/* restore control registers */
imx_uart_writel(sport, ucr->ucr1, UCR1);
imx_uart_writel(sport, ucr->ucr2, UCR2);
imx_uart_writel(sport, ucr->ucr3, UCR3);
}
#endif
/* called with port.lock taken and irqs caller dependent */
static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
{
*ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS);
}
/* called with port.lock taken and irqs caller dependent */
static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
{
*ucr2 &= ~UCR2_CTSC;
*ucr2 |= UCR2_CTS;
mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS);
}
static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
{
hrtimer_start(hrt, ms_to_ktime(msec), HRTIMER_MODE_REL);
}
/* called with port.lock taken and irqs off */
static void imx_uart_soft_reset(struct imx_port *sport)
{
int i = 10;
u32 ucr2, ubir, ubmr, uts;
/*
* According to the Reference Manual description of the UART SRST bit:
*
* "Reset the transmit and receive state machines,
* all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD
* and UTS[6-3]".
*
* We don't need to restore the old values from USR1, USR2, URXD and
* UTXD. UBRC is read only, so only save/restore the other three
* registers.
*/
ubir = imx_uart_readl(sport, UBIR);
ubmr = imx_uart_readl(sport, UBMR);
uts = imx_uart_readl(sport, IMX21_UTS);
ucr2 = imx_uart_readl(sport, UCR2);
imx_uart_writel(sport, ucr2 & ~UCR2_SRST, UCR2);
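/* SRST reads back as 1 once the reset has completed; poll for that, for at most about 10 us */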
while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0))
udelay(1);
/* Restore the registers */
imx_uart_writel(sport, ubir, UBIR);
imx_uart_writel(sport, ubmr, UBMR);
imx_uart_writel(sport, uts, IMX21_UTS);
sport->idle_counter = 0;
}
static void imx_uart_disable_loopback_rs485(struct imx_port *sport)
{
unsigned int uts;
/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
uts &= ~UTS_LOOP;
imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
}
/* called with port.lock taken and irqs off */
static void imx_uart_start_rx(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned int ucr1, ucr2;
ucr1 = imx_uart_readl(sport, UCR1);
ucr2 = imx_uart_readl(sport, UCR2);
ucr2 |= UCR2_RXEN;
if (sport->dma_is_enabled) {
ucr1 |= UCR1_RXDMAEN | UCR1_ATDMAEN;
} else {
ucr1 |= UCR1_RRDYEN;
ucr2 |= UCR2_ATEN;
}
/* Write UCR2 first as it includes RXEN */
imx_uart_writel(sport, ucr2, UCR2);
imx_uart_writel(sport, ucr1, UCR1);
imx_uart_disable_loopback_rs485(sport);
}
/* called with port.lock taken and irqs off */
static void imx_uart_stop_tx(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr1, ucr4, usr2;
if (sport->tx_state == OFF)
return;
/*
 * We may be running in SMP context, so if the DMA TX is still in
 * progress on another CPU, just return and let it finish.
*/
if (sport->dma_is_txing)
return;
ucr1 = imx_uart_readl(sport, UCR1);
imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1);
usr2 = imx_uart_readl(sport, USR2);
if (!(usr2 & USR2_TXDC)) {
/* The shifter is still busy, so retry once TC triggers */
return;
}
ucr4 = imx_uart_readl(sport, UCR4);
ucr4 &= ~UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);
/* in rs485 mode disable transmitter */
if (port->rs485.flags & SER_RS485_ENABLED) {
if (sport->tx_state == SEND) {
sport->tx_state = WAIT_AFTER_SEND;
if (port->rs485.delay_rts_after_send > 0) {
start_hrtimer_ms(&sport->trigger_stop_tx,
port->rs485.delay_rts_after_send);
return;
}
/* continue without any delay */
}
if (sport->tx_state == WAIT_AFTER_RTS ||
sport->tx_state == WAIT_AFTER_SEND) {
u32 ucr2;
hrtimer_try_to_cancel(&sport->trigger_start_tx);
ucr2 = imx_uart_readl(sport, UCR2);
if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
imx_uart_rts_active(sport, &ucr2);
else
imx_uart_rts_inactive(sport, &ucr2);
imx_uart_writel(sport, ucr2, UCR2);
if (!port->rs485_rx_during_tx_gpio)
imx_uart_start_rx(port);
sport->tx_state = OFF;
}
} else {
sport->tx_state = OFF;
}
}
/* called with port.lock taken and irqs off */
static void imx_uart_stop_rx(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr1, ucr2, ucr4, uts;
ucr1 = imx_uart_readl(sport, UCR1);
ucr2 = imx_uart_readl(sport, UCR2);
ucr4 = imx_uart_readl(sport, UCR4);
if (sport->dma_is_enabled) {
ucr1 &= ~(UCR1_RXDMAEN | UCR1_ATDMAEN);
} else {
ucr1 &= ~UCR1_RRDYEN;
ucr2 &= ~UCR2_ATEN;
ucr4 &= ~UCR4_OREN;
}
imx_uart_writel(sport, ucr1, UCR1);
imx_uart_writel(sport, ucr4, UCR4);
/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
if (port->rs485.flags & SER_RS485_ENABLED &&
port->rs485.flags & SER_RS485_RTS_ON_SEND &&
sport->have_rtscts && !sport->have_rtsgpio) {
uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
uts |= UTS_LOOP;
imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
ucr2 |= UCR2_RXEN;
} else {
ucr2 &= ~UCR2_RXEN;
}
imx_uart_writel(sport, ucr2, UCR2);
}
/* called with port.lock taken and irqs off */
static void imx_uart_enable_ms(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
mod_timer(&sport->timer, jiffies);
mctrl_gpio_enable_ms(sport->gpios);
}
static void imx_uart_dma_tx(struct imx_port *sport);
/* called with port.lock taken and irqs off */
static inline void imx_uart_transmit_buffer(struct imx_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
if (sport->port.x_char) {
/* Send next char */
imx_uart_writel(sport, sport->port.x_char, URTX0);
sport->port.icount.tx++;
sport->port.x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
imx_uart_stop_tx(&sport->port);
return;
}
if (sport->dma_is_enabled) {
u32 ucr1;
/*
 * We've just sent an X-char. Ensure the TX DMA is enabled
 * and the TX IRQ is disabled.
 */
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~UCR1_TRDYEN;
if (sport->dma_is_txing) {
ucr1 |= UCR1_TXDMAEN;
imx_uart_writel(sport, ucr1, UCR1);
} else {
imx_uart_writel(sport, ucr1, UCR1);
imx_uart_dma_tx(sport);
}
return;
}
while (!uart_circ_empty(xmit) &&
!(imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL)) {
/* send xmit->buf[xmit->tail] out the port here */
imx_uart_writel(sport, xmit->buf[xmit->tail], URTX0);
uart_xmit_advance(&sport->port, 1);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&sport->port);
if (uart_circ_empty(xmit))
imx_uart_stop_tx(&sport->port);
}
static void imx_uart_dma_tx_callback(void *data)
{
struct imx_port *sport = data;
struct scatterlist *sgl = &sport->tx_sgl[0];
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long flags;
u32 ucr1;
spin_lock_irqsave(&sport->port.lock, flags);
dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~UCR1_TXDMAEN;
imx_uart_writel(sport, ucr1, UCR1);
uart_xmit_advance(&sport->port, sport->tx_bytes);
dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
sport->dma_is_txing = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&sport->port);
if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
imx_uart_dma_tx(sport);
else if (sport->port.rs485.flags & SER_RS485_ENABLED) {
u32 ucr4 = imx_uart_readl(sport, UCR4);
ucr4 |= UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);
}
spin_unlock_irqrestore(&sport->port.lock, flags);
}
/* called with port.lock taken and irqs off */
static void imx_uart_dma_tx(struct imx_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
struct scatterlist *sgl = sport->tx_sgl;
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = sport->dma_chan_tx;
struct device *dev = sport->port.dev;
u32 ucr1, ucr4;
int ret;
if (sport->dma_is_txing)
return;
ucr4 = imx_uart_readl(sport, UCR4);
ucr4 &= ~UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);
sport->tx_bytes = uart_circ_chars_pending(xmit);
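/*
 * The circular xmit buffer may wrap around: if the pending data is
 * contiguous (tail < head, or head == 0 so the data runs up to the
 * buffer end), one sg entry is enough; otherwise map the tail..end
 * and start..head chunks separately.
 */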
if (xmit->tail < xmit->head || xmit->head == 0) {
sport->dma_tx_nents = 1;
sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
} else {
sport->dma_tx_nents = 2;
sg_init_table(sgl, 2);
sg_set_buf(sgl, xmit->buf + xmit->tail,
UART_XMIT_SIZE - xmit->tail);
sg_set_buf(sgl + 1, xmit->buf, xmit->head);
}
ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
if (ret == 0) {
dev_err(dev, "DMA mapping error for TX.\n");
return;
}
desc = dmaengine_prep_slave_sg(chan, sgl, ret,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!desc) {
dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
DMA_TO_DEVICE);
dev_err(dev, "We cannot prepare for the TX slave dma!\n");
return;
}
desc->callback = imx_uart_dma_tx_callback;
desc->callback_param = sport;
dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
uart_circ_chars_pending(xmit));
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 |= UCR1_TXDMAEN;
imx_uart_writel(sport, ucr1, UCR1);
/* fire it */
sport->dma_is_txing = 1;
dmaengine_submit(desc);
dma_async_issue_pending(chan);
return;
}
/* called with port.lock taken and irqs off */
static void imx_uart_start_tx(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr1;
if (!sport->port.x_char && uart_circ_empty(&port->state->xmit))
return;
/*
* We cannot simply do nothing here if sport->tx_state == SEND already
* because UCR1_TXMPTYEN might already have been cleared in
* imx_uart_stop_tx(), but tx_state is still SEND.
*/
if (port->rs485.flags & SER_RS485_ENABLED) {
if (sport->tx_state == OFF) {
u32 ucr2 = imx_uart_readl(sport, UCR2);
if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
imx_uart_rts_active(sport, &ucr2);
else
imx_uart_rts_inactive(sport, &ucr2);
imx_uart_writel(sport, ucr2, UCR2);
if (!(port->rs485.flags & SER_RS485_RX_DURING_TX) &&
!port->rs485_rx_during_tx_gpio)
imx_uart_stop_rx(port);
sport->tx_state = WAIT_AFTER_RTS;
if (port->rs485.delay_rts_before_send > 0) {
start_hrtimer_ms(&sport->trigger_start_tx,
port->rs485.delay_rts_before_send);
return;
}
/* continue without any delay */
}
if (sport->tx_state == WAIT_AFTER_SEND
|| sport->tx_state == WAIT_AFTER_RTS) {
hrtimer_try_to_cancel(&sport->trigger_stop_tx);
/*
* Enable transmitter and shifter empty irq only if DMA
* is off. In the DMA case this is done in the
* tx-callback.
*/
if (!sport->dma_is_enabled) {
u32 ucr4 = imx_uart_readl(sport, UCR4);
ucr4 |= UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);
}
sport->tx_state = SEND;
}
} else {
sport->tx_state = SEND;
}
if (!sport->dma_is_enabled) {
ucr1 = imx_uart_readl(sport, UCR1);
imx_uart_writel(sport, ucr1 | UCR1_TRDYEN, UCR1);
}
if (sport->dma_is_enabled) {
if (sport->port.x_char) {
/* We have X-char to send, so enable TX IRQ and
* disable TX DMA to let TX interrupt to send X-char */
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~UCR1_TXDMAEN;
ucr1 |= UCR1_TRDYEN;
imx_uart_writel(sport, ucr1, UCR1);
return;
}
if (!uart_circ_empty(&port->state->xmit) &&
!uart_tx_stopped(port))
imx_uart_dma_tx(sport);
return;
}
}
static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
u32 usr1;
imx_uart_writel(sport, USR1_RTSD, USR1);
usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS;
uart_handle_cts_change(&sport->port, usr1);
wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
return IRQ_HANDLED;
}
static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
irqreturn_t ret;
spin_lock(&sport->port.lock);
ret = __imx_uart_rtsint(irq, dev_id);
spin_unlock(&sport->port.lock);
return ret;
}
static irqreturn_t imx_uart_txint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
spin_lock(&sport->port.lock);
imx_uart_transmit_buffer(sport);
spin_unlock(&sport->port.lock);
return IRQ_HANDLED;
}
/* Check if hardware Rx flood is in progress, and issue soft reset to stop it.
* This is to be called from Rx ISRs only when some bytes were actually
* received.
*
* A way to reproduce the flood (checked on iMX6SX) is: open iMX UART at 9600
* 8N1, and from external source send 0xf0 char at 115200 8N1. In about 90% of
* cases this starts a flood of "receiving" of 0xff characters by the iMX6 UART
* that is terminated by any activity on RxD line, or could be stopped by
* issuing soft reset to the UART (just stop/start of RX does not help). Note
 * that this reproduction amounts to sending an isolated start bit about
 * 2.4 times shorter than a start bit at the configured baud rate.
*/
static void imx_uart_check_flood(struct imx_port *sport, u32 usr2)
{
/* To detect hardware 0xff flood we monitor RxD line between RX
* interrupts to isolate "receiving" of char(s) with no activity
* on RxD line, that'd never happen on actual data transfers.
*
* We use USR2_WAKE bit to check for activity on RxD line, but we have a
* race here if we clear USR2_WAKE when receiving of a char is in
* progress, so we might get RX interrupt later with USR2_WAKE bit
* cleared. Note though that as we don't try to clear USR2_WAKE when we
* detected no activity, this race may hide actual activity only once.
*
 * Yet another case where a receive interrupt may occur without RxD
 * activity is expiration of the aging timer, so we consider this as well.
*
 * We use 'idle_counter' to ensure that we saw at least that many RX
 * interrupts without any detected activity on the RxD line. The two
 * cases described plus one to be on the safe side give us the margin
 * of 3 used below. In practice I was not able to produce a false
 * positive that induced a soft reset during regular data transfers
 * even with a margin of 1, so 3 is actually very strong.
 *
 * We count interrupts, not chars, in 'idle_counter' for simplicity.
*/
if (usr2 & USR2_WAKE) {
imx_uart_writel(sport, USR2_WAKE, USR2);
sport->idle_counter = 0;
} else if (++sport->idle_counter > 3) {
dev_warn(sport->port.dev, "RX flood detected: soft reset.");
imx_uart_soft_reset(sport); /* also clears 'sport->idle_counter' */
}
}
static irqreturn_t __imx_uart_rxint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
struct tty_port *port = &sport->port.state->port;
u32 usr2, rx;
/* If we received something, check for 0xff flood */
usr2 = imx_uart_readl(sport, USR2);
if (usr2 & USR2_RDR)
imx_uart_check_flood(sport, usr2);
while ((rx = imx_uart_readl(sport, URXD0)) & URXD_CHARRDY) {
unsigned int flg = TTY_NORMAL;
sport->port.icount.rx++;
if (unlikely(rx & URXD_ERR)) {
if (rx & URXD_BRK) {
sport->port.icount.brk++;
if (uart_handle_break(&sport->port))
continue;
}
else if (rx & URXD_PRERR)
sport->port.icount.parity++;
else if (rx & URXD_FRMERR)
sport->port.icount.frame++;
if (rx & URXD_OVRRUN)
sport->port.icount.overrun++;
if (rx & sport->port.ignore_status_mask)
continue;
rx &= (sport->port.read_status_mask | 0xFF);
if (rx & URXD_BRK)
flg = TTY_BREAK;
else if (rx & URXD_PRERR)
flg = TTY_PARITY;
else if (rx & URXD_FRMERR)
flg = TTY_FRAME;
if (rx & URXD_OVRRUN)
flg = TTY_OVERRUN;
sport->port.sysrq = 0;
} else if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx)) {
continue;
}
if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
continue;
if (tty_insert_flip_char(port, rx, flg) == 0)
sport->port.icount.buf_overrun++;
}
tty_flip_buffer_push(port);
return IRQ_HANDLED;
}
static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
irqreturn_t ret;
spin_lock(&sport->port.lock);
ret = __imx_uart_rxint(irq, dev_id);
spin_unlock(&sport->port.lock);
return ret;
}
static void imx_uart_clear_rx_errors(struct imx_port *sport);
/*
* We have a modem side uart, so the meanings of RTS and CTS are inverted.
*/
static unsigned int imx_uart_get_hwmctrl(struct imx_port *sport)
{
unsigned int tmp = TIOCM_DSR;
unsigned usr1 = imx_uart_readl(sport, USR1);
unsigned usr2 = imx_uart_readl(sport, USR2);
if (usr1 & USR1_RTSS)
tmp |= TIOCM_CTS;
/* in DCE mode DCDIN is always 0 */
if (!(usr2 & USR2_DCDIN))
tmp |= TIOCM_CAR;
if (sport->dte_mode)
if (!(imx_uart_readl(sport, USR2) & USR2_RIIN))
tmp |= TIOCM_RI;
return tmp;
}
/*
* Handle any change of modem status signal since we were last called.
*/
static void imx_uart_mctrl_check(struct imx_port *sport)
{
unsigned int status, changed;
status = imx_uart_get_hwmctrl(sport);
changed = status ^ sport->old_status;
if (changed == 0)
return;
sport->old_status = status;
if (changed & TIOCM_RI && status & TIOCM_RI)
sport->port.icount.rng++;
if (changed & TIOCM_DSR)
sport->port.icount.dsr++;
if (changed & TIOCM_CAR)
uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
if (changed & TIOCM_CTS)
uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}
static irqreturn_t imx_uart_int(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
irqreturn_t ret = IRQ_NONE;
spin_lock(&sport->port.lock);
usr1 = imx_uart_readl(sport, USR1);
usr2 = imx_uart_readl(sport, USR2);
ucr1 = imx_uart_readl(sport, UCR1);
ucr2 = imx_uart_readl(sport, UCR2);
ucr3 = imx_uart_readl(sport, UCR3);
ucr4 = imx_uart_readl(sport, UCR4);
/*
* Even if a condition is true that can trigger an irq only handle it if
* the respective irq source is enabled. This prevents some undesired
 * actions, for example fetching via PIO a character that sits in the RX
 * FIFO waiting to be fetched via DMA, or reading from URXD0 while the
 * receiver is off, which results in an exception. So just mask the (raw)
 * status bits for disabled irqs.
*/
if ((ucr1 & UCR1_RRDYEN) == 0)
usr1 &= ~USR1_RRDY;
if ((ucr2 & UCR2_ATEN) == 0)
usr1 &= ~USR1_AGTIM;
if ((ucr1 & UCR1_TRDYEN) == 0)
usr1 &= ~USR1_TRDY;
if ((ucr4 & UCR4_TCEN) == 0)
usr2 &= ~USR2_TXDC;
if ((ucr3 & UCR3_DTRDEN) == 0)
usr1 &= ~USR1_DTRD;
if ((ucr1 & UCR1_RTSDEN) == 0)
usr1 &= ~USR1_RTSD;
if ((ucr3 & UCR3_AWAKEN) == 0)
usr1 &= ~USR1_AWAKE;
if ((ucr4 & UCR4_OREN) == 0)
usr2 &= ~USR2_ORE;
if (usr1 & (USR1_RRDY | USR1_AGTIM)) {
imx_uart_writel(sport, USR1_AGTIM, USR1);
__imx_uart_rxint(irq, dev_id);
ret = IRQ_HANDLED;
}
if ((usr1 & USR1_TRDY) || (usr2 & USR2_TXDC)) {
imx_uart_transmit_buffer(sport);
ret = IRQ_HANDLED;
}
if (usr1 & USR1_DTRD) {
imx_uart_writel(sport, USR1_DTRD, USR1);
imx_uart_mctrl_check(sport);
ret = IRQ_HANDLED;
}
if (usr1 & USR1_RTSD) {
__imx_uart_rtsint(irq, dev_id);
ret = IRQ_HANDLED;
}
if (usr1 & USR1_AWAKE) {
imx_uart_writel(sport, USR1_AWAKE, USR1);
ret = IRQ_HANDLED;
}
if (usr2 & USR2_ORE) {
sport->port.icount.overrun++;
imx_uart_writel(sport, USR2_ORE, USR2);
ret = IRQ_HANDLED;
}
spin_unlock(&sport->port.lock);
return ret;
}
/*
* Return TIOCSER_TEMT when transmitter is not busy.
*/
static unsigned int imx_uart_tx_empty(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned int ret;
ret = (imx_uart_readl(sport, USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0;
/* If the TX DMA is working, return 0. */
if (sport->dma_is_txing)
ret = 0;
return ret;
}
/* called with port.lock taken and irqs off */
static unsigned int imx_uart_get_mctrl(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned int ret = imx_uart_get_hwmctrl(sport);
mctrl_gpio_get(sport->gpios, &ret);
return ret;
}
/* called with port.lock taken and irqs off */
static void imx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr3, uts;
if (!(port->rs485.flags & SER_RS485_ENABLED)) {
u32 ucr2;
/*
* Turn off autoRTS if RTS is lowered and restore autoRTS
* setting if RTS is raised.
*/
ucr2 = imx_uart_readl(sport, UCR2);
ucr2 &= ~(UCR2_CTS | UCR2_CTSC);
if (mctrl & TIOCM_RTS) {
ucr2 |= UCR2_CTS;
/*
* UCR2_IRTS is unset if and only if the port is
* configured for CRTSCTS, so we use inverted UCR2_IRTS
* to get the state to restore to.
*/
if (!(ucr2 & UCR2_IRTS))
ucr2 |= UCR2_CTSC;
}
imx_uart_writel(sport, ucr2, UCR2);
}
ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_DSR;
if (!(mctrl & TIOCM_DTR))
ucr3 |= UCR3_DSR;
imx_uart_writel(sport, ucr3, UCR3);
uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)) & ~UTS_LOOP;
if (mctrl & TIOCM_LOOP)
uts |= UTS_LOOP;
imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
mctrl_gpio_set(sport->gpios, mctrl);
}
/*
* Interrupts always disabled.
*/
static void imx_uart_break_ctl(struct uart_port *port, int break_state)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned long flags;
u32 ucr1;
spin_lock_irqsave(&sport->port.lock, flags);
ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK;
if (break_state != 0)
ucr1 |= UCR1_SNDBRK;
imx_uart_writel(sport, ucr1, UCR1);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
/*
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
static void imx_uart_timeout(struct timer_list *t)
{
struct imx_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
spin_lock_irqsave(&sport->port.lock, flags);
imx_uart_mctrl_check(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
}
}
/*
 * There are two kinds of RX DMA interrupts (such as in the MX6Q):
 * [1] the RX DMA buffer is full.
 * [2] the aging timer expires.
*
* Condition [2] is triggered when a character has been sitting in the FIFO
* for at least 8 byte durations.
*/
static void imx_uart_dma_rx_callback(void *data)
{
struct imx_port *sport = data;
struct dma_chan *chan = sport->dma_chan_rx;
struct scatterlist *sgl = &sport->rx_sgl;
struct tty_port *port = &sport->port.state->port;
struct dma_tx_state state;
struct circ_buf *rx_ring = &sport->rx_ring;
enum dma_status status;
unsigned int w_bytes = 0;
unsigned int r_bytes;
unsigned int bd_size;
status = dmaengine_tx_status(chan, sport->rx_cookie, &state);
if (status == DMA_ERROR) {
spin_lock(&sport->port.lock);
imx_uart_clear_rx_errors(sport);
spin_unlock(&sport->port.lock);
return;
}
/*
 * The state.residue variable represents the empty space
 * relative to the entire buffer. Taking this into consideration,
 * the head is always calculated as the total buffer
 * length minus the DMA transaction residue. The UART script from the
 * SDMA firmware jumps to the next buffer descriptor
 * once a DMA transaction is finalized (IMX53 RM - A.4.1.2.4).
 * Consequently, the tail is always at the
 * beginning of the buffer descriptor that contains the head.
*/
/* Calculate the head */
rx_ring->head = sg_dma_len(sgl) - state.residue;
/* Calculate the tail. */
bd_size = sg_dma_len(sgl) / sport->rx_periods;
rx_ring->tail = ((rx_ring->head-1) / bd_size) * bd_size;
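/*
 * Illustrative example (assuming the default 16 periods of
 * PAGE_SIZE/4 bytes each, i.e. a 16 KiB buffer with 4 KiB pages):
 * bd_size is 1024, so a head of 2500 yields a tail of 2048, the
 * start of the descriptor the head currently lies in.
 */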
if (rx_ring->head <= sg_dma_len(sgl) &&
rx_ring->head > rx_ring->tail) {
/* Move data from tail to head */
r_bytes = rx_ring->head - rx_ring->tail;
/* If we received something, check for 0xff flood */
spin_lock(&sport->port.lock);
imx_uart_check_flood(sport, imx_uart_readl(sport, USR2));
spin_unlock(&sport->port.lock);
if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
/* CPU claims ownership of RX DMA buffer */
dma_sync_sg_for_cpu(sport->port.dev, sgl, 1,
DMA_FROM_DEVICE);
w_bytes = tty_insert_flip_string(port,
sport->rx_buf + rx_ring->tail, r_bytes);
/* UART retrieves ownership of RX DMA buffer */
dma_sync_sg_for_device(sport->port.dev, sgl, 1,
DMA_FROM_DEVICE);
if (w_bytes != r_bytes)
sport->port.icount.buf_overrun++;
sport->port.icount.rx += w_bytes;
}
} else {
WARN_ON(rx_ring->head > sg_dma_len(sgl));
WARN_ON(rx_ring->head <= rx_ring->tail);
}
if (w_bytes) {
tty_flip_buffer_push(port);
dev_dbg(sport->port.dev, "We get %d bytes.\n", w_bytes);
}
}
static int imx_uart_start_rx_dma(struct imx_port *sport)
{
struct scatterlist *sgl = &sport->rx_sgl;
struct dma_chan *chan = sport->dma_chan_rx;
struct device *dev = sport->port.dev;
struct dma_async_tx_descriptor *desc;
int ret;
sport->rx_ring.head = 0;
sport->rx_ring.tail = 0;
sg_init_one(sgl, sport->rx_buf, sport->rx_buf_size);
ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
if (ret == 0) {
dev_err(dev, "DMA mapping error for RX.\n");
return -EINVAL;
}
desc = dmaengine_prep_dma_cyclic(chan, sg_dma_address(sgl),
sg_dma_len(sgl), sg_dma_len(sgl) / sport->rx_periods,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (!desc) {
dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE);
dev_err(dev, "We cannot prepare for the RX slave dma!\n");
return -EINVAL;
}
desc->callback = imx_uart_dma_rx_callback;
desc->callback_param = sport;
dev_dbg(dev, "RX: prepare for the DMA.\n");
sport->dma_is_rxing = 1;
sport->rx_cookie = dmaengine_submit(desc);
dma_async_issue_pending(chan);
return 0;
}
static void imx_uart_clear_rx_errors(struct imx_port *sport)
{
struct tty_port *port = &sport->port.state->port;
u32 usr1, usr2;
usr1 = imx_uart_readl(sport, USR1);
usr2 = imx_uart_readl(sport, USR2);
if (usr2 & USR2_BRCD) {
sport->port.icount.brk++;
imx_uart_writel(sport, USR2_BRCD, USR2);
uart_handle_break(&sport->port);
if (tty_insert_flip_char(port, 0, TTY_BREAK) == 0)
sport->port.icount.buf_overrun++;
tty_flip_buffer_push(port);
} else {
if (usr1 & USR1_FRAMERR) {
sport->port.icount.frame++;
imx_uart_writel(sport, USR1_FRAMERR, USR1);
} else if (usr1 & USR1_PARITYERR) {
sport->port.icount.parity++;
imx_uart_writel(sport, USR1_PARITYERR, USR1);
}
}
if (usr2 & USR2_ORE) {
sport->port.icount.overrun++;
imx_uart_writel(sport, USR2_ORE, USR2);
}
sport->idle_counter = 0;
}
#define TXTL_DEFAULT 2 /* reset default */
#define RXTL_DEFAULT 8 /* 8 characters or aging timer */
#define TXTL_DMA 8 /* DMA burst setting */
#define RXTL_DMA 9 /* DMA burst setting */
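/*
 * RXTL_DMA is the RX FIFO watermark in DMA mode; imx_uart_dma_init() sets
 * the DMA burst size one below it so that the aging timer can still fire.
 */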
static void imx_uart_setup_ufcr(struct imx_port *sport,
unsigned char txwl, unsigned char rxwl)
{
unsigned int val;
/* set receiver / transmitter trigger level */
val = imx_uart_readl(sport, UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
val |= txwl << UFCR_TXTL_SHF | rxwl;
imx_uart_writel(sport, val, UFCR);
}
static void imx_uart_dma_exit(struct imx_port *sport)
{
if (sport->dma_chan_rx) {
dmaengine_terminate_sync(sport->dma_chan_rx);
dma_release_channel(sport->dma_chan_rx);
sport->dma_chan_rx = NULL;
sport->rx_cookie = -EINVAL;
kfree(sport->rx_buf);
sport->rx_buf = NULL;
}
if (sport->dma_chan_tx) {
dmaengine_terminate_sync(sport->dma_chan_tx);
dma_release_channel(sport->dma_chan_tx);
sport->dma_chan_tx = NULL;
}
}
static int imx_uart_dma_init(struct imx_port *sport)
{
struct dma_slave_config slave_config = {};
struct device *dev = sport->port.dev;
int ret;
/* Prepare for RX : */
sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
if (!sport->dma_chan_rx) {
dev_dbg(dev, "cannot get the DMA channel.\n");
ret = -EINVAL;
goto err;
}
slave_config.direction = DMA_DEV_TO_MEM;
slave_config.src_addr = sport->port.mapbase + URXD0;
slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
/* one byte less than the watermark level to enable the aging timer */
slave_config.src_maxburst = RXTL_DMA - 1;
ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
if (ret) {
dev_err(dev, "error in RX dma configuration.\n");
goto err;
}
sport->rx_buf_size = sport->rx_period_length * sport->rx_periods;
sport->rx_buf = kzalloc(sport->rx_buf_size, GFP_KERNEL);
if (!sport->rx_buf) {
ret = -ENOMEM;
goto err;
}
sport->rx_ring.buf = sport->rx_buf;
/* Prepare for TX : */
sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
if (!sport->dma_chan_tx) {
dev_err(dev, "cannot get the TX DMA channel!\n");
ret = -EINVAL;
goto err;
}
slave_config.direction = DMA_MEM_TO_DEV;
slave_config.dst_addr = sport->port.mapbase + URTX0;
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
slave_config.dst_maxburst = TXTL_DMA;
ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
if (ret) {
dev_err(dev, "error in TX dma configuration.");
goto err;
}
return 0;
err:
imx_uart_dma_exit(sport);
return ret;
}
static void imx_uart_enable_dma(struct imx_port *sport)
{
u32 ucr1;
imx_uart_setup_ufcr(sport, TXTL_DMA, RXTL_DMA);
/* set UCR1 */
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 |= UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN;
imx_uart_writel(sport, ucr1, UCR1);
sport->dma_is_enabled = 1;
}
static void imx_uart_disable_dma(struct imx_port *sport)
{
u32 ucr1;
/* clear UCR1 */
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~(UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN);
imx_uart_writel(sport, ucr1, UCR1);
imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
sport->dma_is_enabled = 0;
}
/* half the RX buffer size */
#define CTSTL 16
static int imx_uart_startup(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
int retval;
unsigned long flags;
int dma_is_inited = 0;
u32 ucr1, ucr2, ucr3, ucr4;
retval = clk_prepare_enable(sport->clk_per);
if (retval)
return retval;
retval = clk_prepare_enable(sport->clk_ipg);
if (retval) {
clk_disable_unprepare(sport->clk_per);
return retval;
}
imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
/* disable the DREN bit (Data Ready interrupt enable) before
* requesting IRQs
*/
ucr4 = imx_uart_readl(sport, UCR4);
/* set the trigger level for CTS */
ucr4 &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
ucr4 |= CTSTL << UCR4_CTSTL_SHF;
imx_uart_writel(sport, ucr4 & ~UCR4_DREN, UCR4);
/* Can we enable the DMA support? */
if (!uart_console(port) && imx_uart_dma_init(sport) == 0)
dma_is_inited = 1;
spin_lock_irqsave(&sport->port.lock, flags);
/* Reset fifo's and state machines */
imx_uart_soft_reset(sport);
/*
* Finally, clear and enable interrupts
*/
imx_uart_writel(sport, USR1_RTSD | USR1_DTRD, USR1);
imx_uart_writel(sport, USR2_ORE, USR2);
ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_RRDYEN;
ucr1 |= UCR1_UARTEN;
if (sport->have_rtscts)
ucr1 |= UCR1_RTSDEN;
imx_uart_writel(sport, ucr1, UCR1);
ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR);
if (!dma_is_inited)
ucr4 |= UCR4_OREN;
if (sport->inverted_rx)
ucr4 |= UCR4_INVR;
imx_uart_writel(sport, ucr4, UCR4);
ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_INVT;
/*
* configure tx polarity before enabling tx
*/
if (sport->inverted_tx)
ucr3 |= UCR3_INVT;
if (!imx_uart_is_imx1(sport)) {
ucr3 |= UCR3_DTRDEN | UCR3_RI | UCR3_DCD;
if (sport->dte_mode)
/* disable broken interrupts */
ucr3 &= ~(UCR3_RI | UCR3_DCD);
}
imx_uart_writel(sport, ucr3, UCR3);
ucr2 = imx_uart_readl(sport, UCR2) & ~UCR2_ATEN;
ucr2 |= (UCR2_RXEN | UCR2_TXEN);
if (!sport->have_rtscts)
ucr2 |= UCR2_IRTS;
/*
* make sure the edge sensitive RTS-irq is disabled,
* we're using RTSD instead.
*/
if (!imx_uart_is_imx1(sport))
ucr2 &= ~UCR2_RTSEN;
imx_uart_writel(sport, ucr2, UCR2);
/*
* Enable modem status interrupts
*/
imx_uart_enable_ms(&sport->port);
if (dma_is_inited) {
imx_uart_enable_dma(sport);
imx_uart_start_rx_dma(sport);
} else {
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 |= UCR1_RRDYEN;
imx_uart_writel(sport, ucr1, UCR1);
ucr2 = imx_uart_readl(sport, UCR2);
ucr2 |= UCR2_ATEN;
imx_uart_writel(sport, ucr2, UCR2);
}
imx_uart_disable_loopback_rs485(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
return 0;
}
static void imx_uart_shutdown(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned long flags;
u32 ucr1, ucr2, ucr4, uts;
if (sport->dma_is_enabled) {
dmaengine_terminate_sync(sport->dma_chan_tx);
if (sport->dma_is_txing) {
dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0],
sport->dma_tx_nents, DMA_TO_DEVICE);
sport->dma_is_txing = 0;
}
dmaengine_terminate_sync(sport->dma_chan_rx);
if (sport->dma_is_rxing) {
dma_unmap_sg(sport->port.dev, &sport->rx_sgl,
1, DMA_FROM_DEVICE);
sport->dma_is_rxing = 0;
}
spin_lock_irqsave(&sport->port.lock, flags);
imx_uart_stop_tx(port);
imx_uart_stop_rx(port);
imx_uart_disable_dma(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
imx_uart_dma_exit(sport);
}
mctrl_gpio_disable_ms(sport->gpios);
spin_lock_irqsave(&sport->port.lock, flags);
ucr2 = imx_uart_readl(sport, UCR2);
ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
imx_uart_writel(sport, ucr2, UCR2);
spin_unlock_irqrestore(&sport->port.lock, flags);
/*
* Stop our timer.
*/
del_timer_sync(&sport->timer);
/*
* Disable all interrupts, port and break condition.
*/
spin_lock_irqsave(&sport->port.lock, flags);
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN |
UCR1_ATDMAEN | UCR1_SNDBRK);
/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
if (port->rs485.flags & SER_RS485_ENABLED &&
port->rs485.flags & SER_RS485_RTS_ON_SEND &&
sport->have_rtscts && !sport->have_rtsgpio) {
uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
uts |= UTS_LOOP;
imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
ucr1 |= UCR1_UARTEN;
} else {
ucr1 &= ~UCR1_UARTEN;
}
imx_uart_writel(sport, ucr1, UCR1);
ucr4 = imx_uart_readl(sport, UCR4);
ucr4 &= ~UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);
spin_unlock_irqrestore(&sport->port.lock, flags);
clk_disable_unprepare(sport->clk_per);
clk_disable_unprepare(sport->clk_ipg);
}
/* called with port.lock taken and irqs off */
static void imx_uart_flush_buffer(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
struct scatterlist *sgl = &sport->tx_sgl[0];
if (!sport->dma_chan_tx)
return;
sport->tx_bytes = 0;
dmaengine_terminate_all(sport->dma_chan_tx);
if (sport->dma_is_txing) {
u32 ucr1;
dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents,
DMA_TO_DEVICE);
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~UCR1_TXDMAEN;
imx_uart_writel(sport, ucr1, UCR1);
sport->dma_is_txing = 0;
}
imx_uart_soft_reset(sport);
}
static void
imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned long flags;
u32 ucr2, old_ucr2, ufcr;
unsigned int baud, quot;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
unsigned long div;
unsigned long num, denom, old_ubir, old_ubmr;
uint64_t tdiv64;
/*
* We only support CS7 and CS8.
*/
while ((termios->c_cflag & CSIZE) != CS7 &&
(termios->c_cflag & CSIZE) != CS8) {
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= old_csize;
old_csize = CS8;
}
del_timer_sync(&sport->timer);
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
quot = uart_get_divisor(port, baud);
spin_lock_irqsave(&sport->port.lock, flags);
/*
* Read current UCR2 and save it for future use, then clear all the bits
* except those we will or may need to preserve.
*/
old_ucr2 = imx_uart_readl(sport, UCR2);
ucr2 = old_ucr2 & (UCR2_TXEN | UCR2_RXEN | UCR2_ATEN | UCR2_CTS);
ucr2 |= UCR2_SRST | UCR2_IRTS;
if ((termios->c_cflag & CSIZE) == CS8)
ucr2 |= UCR2_WS;
if (!sport->have_rtscts)
termios->c_cflag &= ~CRTSCTS;
if (port->rs485.flags & SER_RS485_ENABLED) {
/*
* RTS is mandatory for rs485 operation, so keep
* it under manual control and keep transmitter
* disabled.
*/
if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
imx_uart_rts_active(sport, &ucr2);
else
imx_uart_rts_inactive(sport, &ucr2);
} else if (termios->c_cflag & CRTSCTS) {
/*
* Only let receiver control RTS output if we were not requested
* to have RTS inactive (which then should take precedence).
*/
if (ucr2 & UCR2_CTS)
ucr2 |= UCR2_CTSC;
}
if (termios->c_cflag & CRTSCTS)
ucr2 &= ~UCR2_IRTS;
if (termios->c_cflag & CSTOPB)
ucr2 |= UCR2_STPB;
if (termios->c_cflag & PARENB) {
ucr2 |= UCR2_PREN;
if (termios->c_cflag & PARODD)
ucr2 |= UCR2_PROE;
}
sport->port.read_status_mask = 0;
if (termios->c_iflag & INPCK)
sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
if (termios->c_iflag & (BRKINT | PARMRK))
sport->port.read_status_mask |= URXD_BRK;
/*
* Characters to ignore
*/
sport->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
sport->port.ignore_status_mask |= URXD_PRERR | URXD_FRMERR;
if (termios->c_iflag & IGNBRK) {
sport->port.ignore_status_mask |= URXD_BRK;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
sport->port.ignore_status_mask |= URXD_OVRRUN;
}
if ((termios->c_cflag & CREAD) == 0)
sport->port.ignore_status_mask |= URXD_DUMMY_READ;
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
/* custom-baudrate handling */
div = sport->port.uartclk / (baud * 16);
if (baud == 38400 && quot != div)
baud = sport->port.uartclk / (quot * 16);
div = sport->port.uartclk / (baud * 16);
if (div > 7)
div = 7;
if (!div)
div = 1;
rational_best_approximation(16 * div * baud, sport->port.uartclk,
1 << 16, 1 << 16, &num, &denom);
tdiv64 = sport->port.uartclk;
tdiv64 *= num;
do_div(tdiv64, denom * 16 * div);
tty_termios_encode_baud_rate(termios,
(speed_t)tdiv64, (speed_t)tdiv64);
num -= 1;
denom -= 1;
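/*
 * Worked example (illustrative numbers): with uartclk = 80 MHz and a
 * requested 115200 baud, div clamps to 7 and
 * rational_best_approximation() returns num/denom = 504/3125, so
 * UBIR = 503, UBMR = 3124 and the resulting rate is exactly
 * 80000000 * 504 / (3125 * 16 * 7) = 115200 baud.
 */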
ufcr = imx_uart_readl(sport, UFCR);
ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
imx_uart_writel(sport, ufcr, UFCR);
/*
 * The two registers below must always be written together, and in this
 * particular order. One consequence is that we need to check whether
 * either of them changes and then update both. We do need the check for
 * change, as even rewriting the same values seems to "restart" the
 * transmit/receive logic in the hardware, which leads to data corruption
 * even when the rate does not actually change. E.g., a user toggles
 * RTS/CTS handshaking and suddenly gets broken bytes.
*/
old_ubir = imx_uart_readl(sport, UBIR);
old_ubmr = imx_uart_readl(sport, UBMR);
if (old_ubir != num || old_ubmr != denom) {
imx_uart_writel(sport, num, UBIR);
imx_uart_writel(sport, denom, UBMR);
}
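/*
 * IMX21_ONEMS holds the number of post-divider module clock cycles
 * in one millisecond; the i.MX1 has no such register, hence the
 * devtype check below.
 */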
if (!imx_uart_is_imx1(sport))
imx_uart_writel(sport, sport->port.uartclk / div / 1000,
IMX21_ONEMS);
imx_uart_writel(sport, ucr2, UCR2);
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
imx_uart_enable_ms(&sport->port);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
static const char *imx_uart_type(struct uart_port *port)
{
return port->type == PORT_IMX ? "IMX" : NULL;
}
/*
* Configure/autoconfigure the port.
*/
static void imx_uart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_IMX;
}
/*
* Verify the new serial_struct (for TIOCSSERIAL).
* The only change we allow are to the flags and type, and
* even then only between PORT_IMX and PORT_UNKNOWN
*/
static int
imx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
ret = -EINVAL;
if (port->irq != ser->irq)
ret = -EINVAL;
if (ser->io_type != UPIO_MEM)
ret = -EINVAL;
if (port->uartclk / 16 != ser->baud_base)
ret = -EINVAL;
if (port->mapbase != (unsigned long)ser->iomem_base)
ret = -EINVAL;
if (port->iobase != ser->port)
ret = -EINVAL;
if (ser->hub6 != 0)
ret = -EINVAL;
return ret;
}
#if defined(CONFIG_CONSOLE_POLL)
static int imx_uart_poll_init(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned long flags;
u32 ucr1, ucr2;
int retval;
retval = clk_prepare_enable(sport->clk_ipg);
if (retval)
return retval;
retval = clk_prepare_enable(sport->clk_per);
if (retval)
clk_disable_unprepare(sport->clk_ipg);
imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
spin_lock_irqsave(&sport->port.lock, flags);
/*
* Be careful about the order of enabling bits here. First enable the
* receiver (UARTEN + RXEN) and only then the corresponding irqs.
 * This prevents a character that already sits in the RX FIFO from
 * triggering an irq whose handler then faults trying to fetch it
 * because UARTEN or RXEN is still off.
*/
ucr1 = imx_uart_readl(sport, UCR1);
ucr2 = imx_uart_readl(sport, UCR2);
if (imx_uart_is_imx1(sport))
ucr1 |= IMX1_UCR1_UARTCLKEN;
ucr1 |= UCR1_UARTEN;
ucr1 &= ~(UCR1_TRDYEN | UCR1_RTSDEN | UCR1_RRDYEN);
ucr2 |= UCR2_RXEN | UCR2_TXEN;
ucr2 &= ~UCR2_ATEN;
imx_uart_writel(sport, ucr1, UCR1);
imx_uart_writel(sport, ucr2, UCR2);
/* now enable irqs */
imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1);
imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2);
spin_unlock_irqrestore(&sport->port.lock, flags);
return 0;
}
static int imx_uart_poll_get_char(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
if (!(imx_uart_readl(sport, USR2) & USR2_RDR))
return NO_POLL_CHAR;
return imx_uart_readl(sport, URXD0) & URXD_RX_DATA;
}
static void imx_uart_poll_put_char(struct uart_port *port, unsigned char c)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned int status;
/* drain */
do {
status = imx_uart_readl(sport, USR1);
} while (~status & USR1_TRDY);
/* write */
imx_uart_writel(sport, c, URTX0);
/* flush */
do {
status = imx_uart_readl(sport, USR2);
} while (~status & USR2_TXDC);
}
#endif
/* called with port.lock taken and irqs off or from .probe without locking */
static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr2;
if (rs485conf->flags & SER_RS485_ENABLED) {
/* Enable receiver if low-active RTS signal is requested */
if (sport->have_rtscts && !sport->have_rtsgpio &&
!(rs485conf->flags & SER_RS485_RTS_ON_SEND))
rs485conf->flags |= SER_RS485_RX_DURING_TX;
/* disable transmitter */
ucr2 = imx_uart_readl(sport, UCR2);
if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
imx_uart_rts_active(sport, &ucr2);
else
imx_uart_rts_inactive(sport, &ucr2);
imx_uart_writel(sport, ucr2, UCR2);
}
/* Make sure Rx is enabled in case Tx is active with Rx disabled */
if (!(rs485conf->flags & SER_RS485_ENABLED) ||
rs485conf->flags & SER_RS485_RX_DURING_TX)
imx_uart_start_rx(port);
if (port->rs485_rx_during_tx_gpio)
gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
!!(rs485conf->flags & SER_RS485_RX_DURING_TX));
return 0;
}
static const struct uart_ops imx_uart_pops = {
.tx_empty = imx_uart_tx_empty,
.set_mctrl = imx_uart_set_mctrl,
.get_mctrl = imx_uart_get_mctrl,
.stop_tx = imx_uart_stop_tx,
.start_tx = imx_uart_start_tx,
.stop_rx = imx_uart_stop_rx,
.enable_ms = imx_uart_enable_ms,
.break_ctl = imx_uart_break_ctl,
.startup = imx_uart_startup,
.shutdown = imx_uart_shutdown,
.flush_buffer = imx_uart_flush_buffer,
.set_termios = imx_uart_set_termios,
.type = imx_uart_type,
.config_port = imx_uart_config_port,
.verify_port = imx_uart_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
.poll_init = imx_uart_poll_init,
.poll_get_char = imx_uart_poll_get_char,
.poll_put_char = imx_uart_poll_put_char,
#endif
};
static struct imx_port *imx_uart_ports[UART_NR];
#if IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE)
static void imx_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
struct imx_port *sport = (struct imx_port *)port;
while (imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL)
barrier();
imx_uart_writel(sport, ch, URTX0);
}
/*
* Interrupts are disabled on entering
*/
static void
imx_uart_console_write(struct console *co, const char *s, unsigned int count)
{
struct imx_port *sport = imx_uart_ports[co->index];
struct imx_port_ucrs old_ucr;
unsigned long flags;
unsigned int ucr1;
int locked = 1;
if (sport->port.sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock_irqsave(&sport->port.lock, flags);
else
spin_lock_irqsave(&sport->port.lock, flags);
/*
* First, save UCR1/2/3 and then disable interrupts
*/
imx_uart_ucrs_save(sport, &old_ucr);
ucr1 = old_ucr.ucr1;
if (imx_uart_is_imx1(sport))
ucr1 |= IMX1_UCR1_UARTCLKEN;
ucr1 |= UCR1_UARTEN;
ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN);
imx_uart_writel(sport, ucr1, UCR1);
imx_uart_writel(sport, old_ucr.ucr2 | UCR2_TXEN, UCR2);
uart_console_write(&sport->port, s, count, imx_uart_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore UCR1/2/3
*/
while (!(imx_uart_readl(sport, USR2) & USR2_TXDC));
imx_uart_ucrs_restore(sport, &old_ucr);
if (locked)
spin_unlock_irqrestore(&sport->port.lock, flags);
}
/*
 * If the port was already initialised (e.g., by a boot loader),
* try to determine the current setup.
*/
static void
imx_uart_console_get_options(struct imx_port *sport, int *baud,
int *parity, int *bits)
{
if (imx_uart_readl(sport, UCR1) & UCR1_UARTEN) {
/* ok, the port was enabled */
unsigned int ucr2, ubir, ubmr, uartclk;
unsigned int baud_raw;
unsigned int ucfr_rfdiv;
ucr2 = imx_uart_readl(sport, UCR2);
*parity = 'n';
if (ucr2 & UCR2_PREN) {
if (ucr2 & UCR2_PROE)
*parity = 'o';
else
*parity = 'e';
}
if (ucr2 & UCR2_WS)
*bits = 8;
else
*bits = 7;
ubir = imx_uart_readl(sport, UBIR) & 0xffff;
ubmr = imx_uart_readl(sport, UBMR) & 0xffff;
ucfr_rfdiv = (imx_uart_readl(sport, UFCR) & UFCR_RFDIV) >> 7;
if (ucfr_rfdiv == 6)
ucfr_rfdiv = 7;
else
ucfr_rfdiv = 6 - ucfr_rfdiv;
uartclk = clk_get_rate(sport->clk_per);
uartclk /= ucfr_rfdiv;
{ /*
* The next code provides exact computation of
* baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
 * without needing float support or the long long division that
 * would otherwise be required to avoid 32-bit arithmetic overflow.
*/
unsigned int mul = ubir + 1;
unsigned int div = 16 * (ubmr + 1);
unsigned int rem = uartclk % div;
baud_raw = (uartclk / div) * mul;
baud_raw += (rem * mul + div / 2) / div;
*baud = (baud_raw + 50) / 100 * 100;
}
if (*baud != baud_raw)
dev_info(sport->port.dev, "Console IMX rounded baud rate from %d to %d\n",
baud_raw, *baud);
}
}
static int
imx_uart_console_setup(struct console *co, char *options)
{
struct imx_port *sport;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
int retval;
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
* console support.
*/
if (co->index == -1 || co->index >= ARRAY_SIZE(imx_uart_ports))
co->index = 0;
sport = imx_uart_ports[co->index];
if (sport == NULL)
return -ENODEV;
/* For setting the registers, we only need to enable the ipg clock. */
retval = clk_prepare_enable(sport->clk_ipg);
if (retval)
goto error_console;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
imx_uart_console_get_options(sport, &baud, &parity, &bits);
imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
if (retval) {
clk_disable_unprepare(sport->clk_ipg);
goto error_console;
}
retval = clk_prepare_enable(sport->clk_per);
if (retval)
clk_disable_unprepare(sport->clk_ipg);
error_console:
return retval;
}
static int
imx_uart_console_exit(struct console *co)
{
struct imx_port *sport = imx_uart_ports[co->index];
clk_disable_unprepare(sport->clk_per);
clk_disable_unprepare(sport->clk_ipg);
return 0;
}
static struct uart_driver imx_uart_uart_driver;
static struct console imx_uart_console = {
.name = DEV_NAME,
.write = imx_uart_console_write,
.device = uart_console_device,
.setup = imx_uart_console_setup,
.exit = imx_uart_console_exit,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &imx_uart_uart_driver,
};
#define IMX_CONSOLE &imx_uart_console
#else
#define IMX_CONSOLE NULL
#endif
static struct uart_driver imx_uart_uart_driver = {
.owner = THIS_MODULE,
.driver_name = DRIVER_NAME,
.dev_name = DEV_NAME,
.major = SERIAL_IMX_MAJOR,
.minor = MINOR_START,
.nr = ARRAY_SIZE(imx_uart_ports),
.cons = IMX_CONSOLE,
};
static enum hrtimer_restart imx_trigger_start_tx(struct hrtimer *t)
{
struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx);
unsigned long flags;
spin_lock_irqsave(&sport->port.lock, flags);
if (sport->tx_state == WAIT_AFTER_RTS)
imx_uart_start_tx(&sport->port);
spin_unlock_irqrestore(&sport->port.lock, flags);
return HRTIMER_NORESTART;
}
static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
{
struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx);
unsigned long flags;
spin_lock_irqsave(&sport->port.lock, flags);
if (sport->tx_state == WAIT_AFTER_SEND)
imx_uart_stop_tx(&sport->port);
spin_unlock_irqrestore(&sport->port.lock, flags);
return HRTIMER_NORESTART;
}
static const struct serial_rs485 imx_no_rs485 = {}; /* No RS485 if no RTS */
static const struct serial_rs485 imx_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
SER_RS485_RX_DURING_TX,
.delay_rts_before_send = 1,
.delay_rts_after_send = 1,
};
/* Default RX DMA buffer configuration */
#define RX_DMA_PERIODS 16
#define RX_DMA_PERIOD_LEN (PAGE_SIZE / 4)
static int imx_uart_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct imx_port *sport;
void __iomem *base;
u32 dma_buf_conf[2];
int ret = 0;
u32 ucr1, ucr2, uts;
struct resource *res;
int txirq, rxirq, rtsirq;
sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
if (!sport)
return -ENOMEM;
sport->devdata = of_device_get_match_data(&pdev->dev);
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
return ret;
}
sport->port.line = ret;
sport->have_rtscts = of_property_read_bool(np, "uart-has-rtscts") ||
of_property_read_bool(np, "fsl,uart-has-rtscts"); /* deprecated */
sport->dte_mode = of_property_read_bool(np, "fsl,dte-mode");
sport->have_rtsgpio = of_property_present(np, "rts-gpios");
sport->inverted_tx = of_property_read_bool(np, "fsl,inverted-tx");
sport->inverted_rx = of_property_read_bool(np, "fsl,inverted-rx");
if (!of_property_read_u32_array(np, "fsl,dma-info", dma_buf_conf, 2)) {
sport->rx_period_length = dma_buf_conf[0];
sport->rx_periods = dma_buf_conf[1];
} else {
sport->rx_period_length = RX_DMA_PERIOD_LEN;
sport->rx_periods = RX_DMA_PERIODS;
}
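/*
* Illustrative device tree fragment (values are made-up examples):
*
*     fsl,dma-info = <1024 4>;
*
* selects a 1024-byte RX DMA period repeated over 4 periods; without
* the property the driver falls back to RX_DMA_PERIOD_LEN and
* RX_DMA_PERIODS defined above.
*/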
if (sport->port.line >= ARRAY_SIZE(imx_uart_ports)) {
dev_err(&pdev->dev, "serial%d out of range\n",
sport->port.line);
return -EINVAL;
}
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
rxirq = platform_get_irq(pdev, 0);
if (rxirq < 0)
return rxirq;
txirq = platform_get_irq_optional(pdev, 1);
rtsirq = platform_get_irq_optional(pdev, 2);
sport->port.dev = &pdev->dev;
sport->port.mapbase = res->start;
sport->port.membase = base;
sport->port.type = PORT_IMX;
sport->port.iotype = UPIO_MEM;
sport->port.irq = rxirq;
sport->port.fifosize = 32;
sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE);
sport->port.ops = &imx_uart_pops;
sport->port.rs485_config = imx_uart_rs485_config;
/* RTS is required to control the RS485 transmitter */
if (sport->have_rtscts || sport->have_rtsgpio)
sport->port.rs485_supported = imx_rs485_supported;
else
sport->port.rs485_supported = imx_no_rs485;
sport->port.flags = UPF_BOOT_AUTOCONF;
timer_setup(&sport->timer, imx_uart_timeout, 0);
sport->gpios = mctrl_gpio_init(&sport->port, 0);
if (IS_ERR(sport->gpios))
return PTR_ERR(sport->gpios);
sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->clk_ipg)) {
ret = PTR_ERR(sport->clk_ipg);
dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
return ret;
}
sport->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(sport->clk_per)) {
ret = PTR_ERR(sport->clk_per);
dev_err(&pdev->dev, "failed to get per clk: %d\n", ret);
return ret;
}
sport->port.uartclk = clk_get_rate(sport->clk_per);
/* For register access, we only need to enable the ipg clock. */
ret = clk_prepare_enable(sport->clk_ipg);
if (ret) {
dev_err(&pdev->dev, "failed to enable per clk: %d\n", ret);
return ret;
}
ret = uart_get_rs485_mode(&sport->port);
if (ret) {
clk_disable_unprepare(sport->clk_ipg);
return ret;
}
if (sport->port.rs485.flags & SER_RS485_ENABLED &&
(!sport->have_rtscts && !sport->have_rtsgpio))
dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
/*
* If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
* signal cannot be set low during transmission in case the
* receiver is off (limitation of the i.MX UART IP).
*/
if (sport->port.rs485.flags & SER_RS485_ENABLED &&
sport->have_rtscts && !sport->have_rtsgpio &&
(!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) &&
!(sport->port.rs485.flags & SER_RS485_RX_DURING_TX)))
dev_err(&pdev->dev,
"low-active RTS not possible when receiver is off, enabling receiver\n");
/* Disable interrupts before requesting them */
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN);
imx_uart_writel(sport, ucr1, UCR1);
/* Disable Ageing Timer interrupt */
ucr2 = imx_uart_readl(sport, UCR2);
ucr2 &= ~UCR2_ATEN;
imx_uart_writel(sport, ucr2, UCR2);
/*
* In case RS485 is enabled without GPIO RTS control, the UART IP
* is used to control the CTS signal. Keep both the UART and the
* receiver enabled, otherwise the UART IP pulls the CTS signal
* HIGH no matter how the UCR2 CTSC and CTS bits are set. To
* prevent any data from being fed into the RX FIFO, enable
* loopback mode in the UTS register, which disconnects the RX
* path from the external RXD pin and connects it to the
* transceiver, which is disabled, so no data can be fed to the
* RX FIFO that way.
*/
if (sport->port.rs485.flags & SER_RS485_ENABLED &&
sport->have_rtscts && !sport->have_rtsgpio) {
uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
uts |= UTS_LOOP;
imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 |= UCR1_UARTEN;
imx_uart_writel(sport, ucr1, UCR1);
ucr2 = imx_uart_readl(sport, UCR2);
ucr2 |= UCR2_RXEN;
imx_uart_writel(sport, ucr2, UCR2);
}
if (!imx_uart_is_imx1(sport) && sport->dte_mode) {
/*
* The DCEDTE bit changes the direction of DSR, DCD, DTR and RI
* and influences whether UCR3_RI and UCR3_DCD change the level of
* RI and DCD (when they are outputs) or enable the respective
* irqs. So set this bit early, i.e. before requesting irqs.
*/
u32 ufcr = imx_uart_readl(sport, UFCR);
if (!(ufcr & UFCR_DCEDTE))
imx_uart_writel(sport, ufcr | UFCR_DCEDTE, UFCR);
/*
* Disable UCR3_RI and UCR3_DCD irqs. They are also not
* enabled later because they cannot be cleared
* (confirmed on i.MX25) which makes them unusable.
*/
imx_uart_writel(sport,
IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP | UCR3_DSR,
UCR3);
} else {
u32 ucr3 = UCR3_DSR;
u32 ufcr = imx_uart_readl(sport, UFCR);
if (ufcr & UFCR_DCEDTE)
imx_uart_writel(sport, ufcr & ~UFCR_DCEDTE, UFCR);
if (!imx_uart_is_imx1(sport))
ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
imx_uart_writel(sport, ucr3, UCR3);
}
clk_disable_unprepare(sport->clk_ipg);
hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sport->trigger_start_tx.function = imx_trigger_start_tx;
sport->trigger_stop_tx.function = imx_trigger_stop_tx;
/*
* Allocate the IRQ(s). i.MX1 has three interrupts, whereas later
* chips have only one.
*/
if (txirq > 0) {
ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_rxint, 0,
dev_name(&pdev->dev), sport);
if (ret) {
dev_err(&pdev->dev, "failed to request rx irq: %d\n",
ret);
return ret;
}
ret = devm_request_irq(&pdev->dev, txirq, imx_uart_txint, 0,
dev_name(&pdev->dev), sport);
if (ret) {
dev_err(&pdev->dev, "failed to request tx irq: %d\n",
ret);
return ret;
}
ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
dev_name(&pdev->dev), sport);
if (ret) {
dev_err(&pdev->dev, "failed to request rts irq: %d\n",
ret);
return ret;
}
} else {
ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
dev_name(&pdev->dev), sport);
if (ret) {
dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
return ret;
}
}
imx_uart_ports[sport->port.line] = sport;
platform_set_drvdata(pdev, sport);
return uart_add_one_port(&imx_uart_uart_driver, &sport->port);
}
static int imx_uart_remove(struct platform_device *pdev)
{
struct imx_port *sport = platform_get_drvdata(pdev);
uart_remove_one_port(&imx_uart_uart_driver, &sport->port);
return 0;
}
static void imx_uart_restore_context(struct imx_port *sport)
{
unsigned long flags;
spin_lock_irqsave(&sport->port.lock, flags);
if (!sport->context_saved) {
spin_unlock_irqrestore(&sport->port.lock, flags);
return;
}
imx_uart_writel(sport, sport->saved_reg[4], UFCR);
imx_uart_writel(sport, sport->saved_reg[5], UESC);
imx_uart_writel(sport, sport->saved_reg[6], UTIM);
imx_uart_writel(sport, sport->saved_reg[7], UBIR);
imx_uart_writel(sport, sport->saved_reg[8], UBMR);
imx_uart_writel(sport, sport->saved_reg[9], IMX21_UTS);
imx_uart_writel(sport, sport->saved_reg[0], UCR1);
imx_uart_writel(sport, sport->saved_reg[1] | UCR2_SRST, UCR2);
imx_uart_writel(sport, sport->saved_reg[2], UCR3);
imx_uart_writel(sport, sport->saved_reg[3], UCR4);
sport->context_saved = false;
spin_unlock_irqrestore(&sport->port.lock, flags);
}
static void imx_uart_save_context(struct imx_port *sport)
{
unsigned long flags;
/* Save necessary regs */
spin_lock_irqsave(&sport->port.lock, flags);
sport->saved_reg[0] = imx_uart_readl(sport, UCR1);
sport->saved_reg[1] = imx_uart_readl(sport, UCR2);
sport->saved_reg[2] = imx_uart_readl(sport, UCR3);
sport->saved_reg[3] = imx_uart_readl(sport, UCR4);
sport->saved_reg[4] = imx_uart_readl(sport, UFCR);
sport->saved_reg[5] = imx_uart_readl(sport, UESC);
sport->saved_reg[6] = imx_uart_readl(sport, UTIM);
sport->saved_reg[7] = imx_uart_readl(sport, UBIR);
sport->saved_reg[8] = imx_uart_readl(sport, UBMR);
sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS);
sport->context_saved = true;
spin_unlock_irqrestore(&sport->port.lock, flags);
}
static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
{
u32 ucr3;
ucr3 = imx_uart_readl(sport, UCR3);
if (on) {
imx_uart_writel(sport, USR1_AWAKE, USR1);
ucr3 |= UCR3_AWAKEN;
} else {
ucr3 &= ~UCR3_AWAKEN;
}
imx_uart_writel(sport, ucr3, UCR3);
if (sport->have_rtscts) {
u32 ucr1 = imx_uart_readl(sport, UCR1);
if (on) {
imx_uart_writel(sport, USR1_RTSD, USR1);
ucr1 |= UCR1_RTSDEN;
} else {
ucr1 &= ~UCR1_RTSDEN;
}
imx_uart_writel(sport, ucr1, UCR1);
}
}
static int imx_uart_suspend_noirq(struct device *dev)
{
struct imx_port *sport = dev_get_drvdata(dev);
imx_uart_save_context(sport);
clk_disable(sport->clk_ipg);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int imx_uart_resume_noirq(struct device *dev)
{
struct imx_port *sport = dev_get_drvdata(dev);
int ret;
pinctrl_pm_select_default_state(dev);
ret = clk_enable(sport->clk_ipg);
if (ret)
return ret;
imx_uart_restore_context(sport);
return 0;
}
static int imx_uart_suspend(struct device *dev)
{
struct imx_port *sport = dev_get_drvdata(dev);
int ret;
uart_suspend_port(&imx_uart_uart_driver, &sport->port);
disable_irq(sport->port.irq);
ret = clk_prepare_enable(sport->clk_ipg);
if (ret)
return ret;
/* enable wakeup from i.MX UART */
imx_uart_enable_wakeup(sport, true);
return 0;
}
static int imx_uart_resume(struct device *dev)
{
struct imx_port *sport = dev_get_drvdata(dev);
/* disable wakeup from i.MX UART */
imx_uart_enable_wakeup(sport, false);
uart_resume_port(&imx_uart_uart_driver, &sport->port);
enable_irq(sport->port.irq);
clk_disable_unprepare(sport->clk_ipg);
return 0;
}
static int imx_uart_freeze(struct device *dev)
{
struct imx_port *sport = dev_get_drvdata(dev);
uart_suspend_port(&imx_uart_uart_driver, &sport->port);
return clk_prepare_enable(sport->clk_ipg);
}
static int imx_uart_thaw(struct device *dev)
{
struct imx_port *sport = dev_get_drvdata(dev);
uart_resume_port(&imx_uart_uart_driver, &sport->port);
clk_disable_unprepare(sport->clk_ipg);
return 0;
}
static const struct dev_pm_ops imx_uart_pm_ops = {
.suspend_noirq = imx_uart_suspend_noirq,
.resume_noirq = imx_uart_resume_noirq,
.freeze_noirq = imx_uart_suspend_noirq,
.thaw_noirq = imx_uart_resume_noirq,
.restore_noirq = imx_uart_resume_noirq,
.suspend = imx_uart_suspend,
.resume = imx_uart_resume,
.freeze = imx_uart_freeze,
.thaw = imx_uart_thaw,
.restore = imx_uart_thaw,
};
static struct platform_driver imx_uart_platform_driver = {
.probe = imx_uart_probe,
.remove = imx_uart_remove,
.driver = {
.name = "imx-uart",
.of_match_table = imx_uart_dt_ids,
.pm = &imx_uart_pm_ops,
},
};
static int __init imx_uart_init(void)
{
int ret = uart_register_driver(&imx_uart_uart_driver);
if (ret)
return ret;
ret = platform_driver_register(&imx_uart_platform_driver);
if (ret != 0)
uart_unregister_driver(&imx_uart_uart_driver);
return ret;
}
static void __exit imx_uart_exit(void)
{
platform_driver_unregister(&imx_uart_platform_driver);
uart_unregister_driver(&imx_uart_uart_driver);
}
module_init(imx_uart_init);
module_exit(imx_uart_exit);
MODULE_AUTHOR("Sascha Hauer");
MODULE_DESCRIPTION("IMX generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-uart");
| linux-master | drivers/tty/serial/imx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Serial core controller driver
*
* Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
* Author: Tony Lindgren <[email protected]>
*
* This driver manages the serial core controller struct device instances.
* The serial core controller devices are children of the physical serial
* port device.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>
#include "serial_base.h"
static int serial_ctrl_probe(struct device *dev)
{
pm_runtime_enable(dev);
return 0;
}
static int serial_ctrl_remove(struct device *dev)
{
pm_runtime_disable(dev);
return 0;
}
/*
* Serial core controller device init functions. Note that the physical
* serial port device driver may not have completed probe at this point.
*/
int serial_ctrl_register_port(struct uart_driver *drv, struct uart_port *port)
{
return serial_core_register_port(drv, port);
}
void serial_ctrl_unregister_port(struct uart_driver *drv, struct uart_port *port)
{
serial_core_unregister_port(drv, port);
}
static struct device_driver serial_ctrl_driver = {
.name = "ctrl",
.suppress_bind_attrs = true,
.probe = serial_ctrl_probe,
.remove = serial_ctrl_remove,
};
int serial_base_ctrl_init(void)
{
return serial_base_driver_register(&serial_ctrl_driver);
}
void serial_base_ctrl_exit(void)
{
serial_base_driver_unregister(&serial_ctrl_driver);
}
MODULE_AUTHOR("Tony Lindgren <[email protected]>");
MODULE_DESCRIPTION("Serial core controller driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/serial_ctrl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Based on the same principle as kgdboe using the NETPOLL api, this
* driver uses a console polling api to implement a gdb serial interface
* which is multiplexed on a console port.
*
* Maintainer: Jason Wessel <[email protected]>
*
* 2007-2008 (c) Jason Wessel - Wind River Systems, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/vt_kern.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#define MAX_CONFIG_LEN 40
static struct kgdb_io kgdboc_io_ops;
/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
static int configured = -1;
static DEFINE_MUTEX(config_mutex);
static char config[MAX_CONFIG_LEN];
static struct kparam_string kps = {
.string = config,
.maxlen = MAX_CONFIG_LEN,
};
static int kgdboc_use_kms; /* 1 if we use kernel mode switching */
static struct tty_driver *kgdb_tty_driver;
static int kgdb_tty_line;
static struct platform_device *kgdboc_pdev;
#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
static struct kgdb_io kgdboc_earlycon_io_ops;
static int (*earlycon_orig_exit)(struct console *con);
#endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
#ifdef CONFIG_KDB_KEYBOARD
static int kgdboc_reset_connect(struct input_handler *handler,
struct input_dev *dev,
const struct input_device_id *id)
{
input_reset_device(dev);
/* Return an error - we do not want to bind, just to reset */
return -ENODEV;
}
static void kgdboc_reset_disconnect(struct input_handle *handle)
{
/* We do not expect anyone to actually bind to us */
BUG();
}
static const struct input_device_id kgdboc_reset_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
.evbit = { BIT_MASK(EV_KEY) },
},
{ }
};
static struct input_handler kgdboc_reset_handler = {
.connect = kgdboc_reset_connect,
.disconnect = kgdboc_reset_disconnect,
.name = "kgdboc_reset",
.id_table = kgdboc_reset_ids,
};
static DEFINE_MUTEX(kgdboc_reset_mutex);
static void kgdboc_restore_input_helper(struct work_struct *dummy)
{
/*
* We need to take a mutex to prevent several instances of
* this work running on different CPUs, so they don't try
* to register an already registered handler again.
*/
mutex_lock(&kgdboc_reset_mutex);
if (input_register_handler(&kgdboc_reset_handler) == 0)
input_unregister_handler(&kgdboc_reset_handler);
mutex_unlock(&kgdboc_reset_mutex);
}
static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
static void kgdboc_restore_input(void)
{
if (likely(system_state == SYSTEM_RUNNING))
schedule_work(&kgdboc_restore_input_work);
}
static int kgdboc_register_kbd(char **cptr)
{
if (strncmp(*cptr, "kbd", 3) == 0 ||
strncmp(*cptr, "kdb", 3) == 0) {
if (kdb_poll_idx < KDB_POLL_FUNC_MAX) {
kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char;
kdb_poll_idx++;
if (cptr[0][3] == ',')
*cptr += 4;
else
return 1;
}
}
return 0;
}
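/*
* Illustrative config strings handled above (examples, not an
* exhaustive list): "kbd" registers only the keyboard poll hook and
* returns 1, so no tty lookup happens; "kbd,ttyS0,115200" registers
* the keyboard and advances *cptr past "kbd," so the caller can
* still locate the serial polling driver.
*/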
static void kgdboc_unregister_kbd(void)
{
int i;
for (i = 0; i < kdb_poll_idx; i++) {
if (kdb_poll_funcs[i] == kdb_get_kbd_char) {
kdb_poll_idx--;
kdb_poll_funcs[i] = kdb_poll_funcs[kdb_poll_idx];
kdb_poll_funcs[kdb_poll_idx] = NULL;
i--;
}
}
flush_work(&kgdboc_restore_input_work);
}
#else /* ! CONFIG_KDB_KEYBOARD */
#define kgdboc_register_kbd(x) 0
#define kgdboc_unregister_kbd()
#define kgdboc_restore_input()
#endif /* ! CONFIG_KDB_KEYBOARD */
#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
static void cleanup_earlycon(void)
{
if (kgdboc_earlycon_io_ops.cons)
kgdb_unregister_io_module(&kgdboc_earlycon_io_ops);
}
#else /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
static inline void cleanup_earlycon(void) { }
#endif /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
static void cleanup_kgdboc(void)
{
cleanup_earlycon();
if (configured != 1)
return;
if (kgdb_unregister_nmi_console())
return;
kgdboc_unregister_kbd();
kgdb_unregister_io_module(&kgdboc_io_ops);
}
static int configure_kgdboc(void)
{
struct tty_driver *p;
int tty_line = 0;
int err = -ENODEV;
char *cptr = config;
struct console *cons;
int cookie;
if (!strlen(config) || isspace(config[0])) {
err = 0;
goto noconfig;
}
kgdboc_io_ops.cons = NULL;
kgdb_tty_driver = NULL;
kgdboc_use_kms = 0;
if (strncmp(cptr, "kms,", 4) == 0) {
cptr += 4;
kgdboc_use_kms = 1;
}
if (kgdboc_register_kbd(&cptr))
goto do_register;
p = tty_find_polling_driver(cptr, &tty_line);
if (!p)
goto noconfig;
/*
* Take console_lock to serialize device() callback with
* other console operations. For example, fg_console is
* modified under console_lock when switching vt.
*/
console_lock();
cookie = console_srcu_read_lock();
for_each_console_srcu(cons) {
int idx;
if (cons->device && cons->device(cons, &idx) == p &&
idx == tty_line) {
kgdboc_io_ops.cons = cons;
break;
}
}
console_srcu_read_unlock(cookie);
console_unlock();
kgdb_tty_driver = p;
kgdb_tty_line = tty_line;
do_register:
err = kgdb_register_io_module(&kgdboc_io_ops);
if (err)
goto noconfig;
err = kgdb_register_nmi_console();
if (err)
goto nmi_con_failed;
configured = 1;
return 0;
nmi_con_failed:
kgdb_unregister_io_module(&kgdboc_io_ops);
noconfig:
kgdboc_unregister_kbd();
configured = 0;
return err;
}
static int kgdboc_probe(struct platform_device *pdev)
{
int ret = 0;
mutex_lock(&config_mutex);
if (configured != 1) {
ret = configure_kgdboc();
/* Convert "no device" to "defer" so we'll keep trying */
if (ret == -ENODEV)
ret = -EPROBE_DEFER;
}
mutex_unlock(&config_mutex);
return ret;
}
static struct platform_driver kgdboc_platform_driver = {
.probe = kgdboc_probe,
.driver = {
.name = "kgdboc",
.suppress_bind_attrs = true,
},
};
static int __init init_kgdboc(void)
{
int ret;
/*
* kgdboc is a little bit of an odd "platform_driver". It can be
* up and running long before the platform_driver object is
* created and thus doesn't actually store anything in it. There's
* only one instance of kgdb, so everything is stored as global state.
* The platform_driver is only created so that we can leverage the
* kernel's mechanisms (like -EPROBE_DEFER) to call us when our
* underlying tty is ready. Here we init our platform driver and
* then create the single kgdboc instance.
*/
ret = platform_driver_register(&kgdboc_platform_driver);
if (ret)
return ret;
kgdboc_pdev = platform_device_alloc("kgdboc", PLATFORM_DEVID_NONE);
if (!kgdboc_pdev) {
ret = -ENOMEM;
goto err_did_register;
}
ret = platform_device_add(kgdboc_pdev);
if (!ret)
return 0;
platform_device_put(kgdboc_pdev);
err_did_register:
platform_driver_unregister(&kgdboc_platform_driver);
return ret;
}
static void exit_kgdboc(void)
{
mutex_lock(&config_mutex);
cleanup_kgdboc();
mutex_unlock(&config_mutex);
platform_device_unregister(kgdboc_pdev);
platform_driver_unregister(&kgdboc_platform_driver);
}
static int kgdboc_get_char(void)
{
if (!kgdb_tty_driver)
return -1;
return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver,
kgdb_tty_line);
}
static void kgdboc_put_char(u8 chr)
{
if (!kgdb_tty_driver)
return;
kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver,
kgdb_tty_line, chr);
}
static int param_set_kgdboc_var(const char *kmessage,
const struct kernel_param *kp)
{
size_t len = strlen(kmessage);
int ret = 0;
if (len >= MAX_CONFIG_LEN) {
pr_err("config string too long\n");
return -ENOSPC;
}
if (kgdb_connected) {
pr_err("Cannot reconfigure while KGDB is connected.\n");
return -EBUSY;
}
mutex_lock(&config_mutex);
strcpy(config, kmessage);
/* Chop out \n char as a result of echo */
if (len && config[len - 1] == '\n')
config[len - 1] = '\0';
if (configured == 1)
cleanup_kgdboc();
/*
* Configure with the new params as long as init already ran.
* Note that we can get called before init if someone loads us
* with "modprobe kgdboc kgdboc=..." or if they happen to use
* the odd syntax of "kgdboc.kgdboc=..." on the kernel command line.
*/
if (configured >= 0)
ret = configure_kgdboc();
/*
* If we couldn't configure then clear out the config. Note that
* specifying an invalid config on the kernel command line vs.
* through sysfs have slightly different behaviors. If we fail
* to configure what was specified on the kernel command line
* we'll leave it in the 'config' and return -EPROBE_DEFER from
* our probe. When specified through sysfs userspace is
* responsible for loading the tty driver before setting up.
*/
if (ret)
config[0] = '\0';
mutex_unlock(&config_mutex);
return ret;
}
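/*
* For reference, runtime (re)configuration typically goes through the
* module parameter registered at the end of this file, e.g.
* (illustrative shell command):
*
*     echo ttyS0,115200 > /sys/module/kgdboc/parameters/kgdboc
*
* and writing an empty string tears the configuration down via
* cleanup_kgdboc().
*/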
static int dbg_restore_graphics;
static void kgdboc_pre_exp_handler(void)
{
if (!dbg_restore_graphics && kgdboc_use_kms) {
dbg_restore_graphics = 1;
con_debug_enter(vc_cons[fg_console].d);
}
/* Increment the module count when the debugger is active */
if (!kgdb_connected)
try_module_get(THIS_MODULE);
}
static void kgdboc_post_exp_handler(void)
{
/* decrement the module count when the debugger detaches */
if (!kgdb_connected)
module_put(THIS_MODULE);
if (kgdboc_use_kms && dbg_restore_graphics) {
dbg_restore_graphics = 0;
con_debug_leave();
}
kgdboc_restore_input();
}
static struct kgdb_io kgdboc_io_ops = {
.name = "kgdboc",
.read_char = kgdboc_get_char,
.write_char = kgdboc_put_char,
.pre_exception = kgdboc_pre_exp_handler,
.post_exception = kgdboc_post_exp_handler,
};
#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
static int kgdboc_option_setup(char *opt)
{
if (!opt) {
pr_err("config string not provided\n");
return 1;
}
if (strlen(opt) >= MAX_CONFIG_LEN) {
pr_err("config string too long\n");
return 1;
}
strcpy(config, opt);
return 1;
}
__setup("kgdboc=", kgdboc_option_setup);
/* This is only available if kgdboc is a built in for early debugging */
static int __init kgdboc_early_init(char *opt)
{
kgdboc_option_setup(opt);
configure_kgdboc();
return 0;
}
early_param("ekgdboc", kgdboc_early_init);
static int kgdboc_earlycon_get_char(void)
{
char c;
if (!kgdboc_earlycon_io_ops.cons->read(kgdboc_earlycon_io_ops.cons,
&c, 1))
return NO_POLL_CHAR;
return c;
}
static void kgdboc_earlycon_put_char(u8 chr)
{
kgdboc_earlycon_io_ops.cons->write(kgdboc_earlycon_io_ops.cons, &chr,
1);
}
static void kgdboc_earlycon_pre_exp_handler(void)
{
struct console *con;
static bool already_warned;
int cookie;
if (already_warned)
return;
/*
* When the first normal console comes up the kernel will take all
* the boot consoles out of the list. Really, we should stop using
* the boot console when it does that, but until a TTY is registered
* we have no other choice, so we keep using it. Since not all
* serial drivers might be OK with this, print a warning once per
* boot if we detect this case.
*/
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
if (con == kgdboc_earlycon_io_ops.cons)
break;
}
console_srcu_read_unlock(cookie);
if (con)
return;
already_warned = true;
pr_warn("kgdboc_earlycon is still using bootconsole\n");
}
static int kgdboc_earlycon_deferred_exit(struct console *con)
{
/*
* If we get here it means the boot console is going away but we
* don't yet have a suitable replacement. Don't pass through to
* the original exit routine. We'll call it later in our deinit()
* function. For now, restore the original exit() function pointer
* as a sentinel that we've hit this point.
*/
con->exit = earlycon_orig_exit;
return 0;
}
static void kgdboc_earlycon_deinit(void)
{
if (!kgdboc_earlycon_io_ops.cons)
return;
if (kgdboc_earlycon_io_ops.cons->exit == kgdboc_earlycon_deferred_exit)
/*
* kgdboc_earlycon is exiting but original boot console exit
* was never called (AKA kgdboc_earlycon_deferred_exit()
* didn't ever run). Undo our trap.
*/
kgdboc_earlycon_io_ops.cons->exit = earlycon_orig_exit;
else if (kgdboc_earlycon_io_ops.cons->exit)
/*
* We skipped calling the exit() routine so we could try to
* keep using the boot console even after it went away. We're
* finally done so call the function now.
*/
kgdboc_earlycon_io_ops.cons->exit(kgdboc_earlycon_io_ops.cons);
kgdboc_earlycon_io_ops.cons = NULL;
}
static struct kgdb_io kgdboc_earlycon_io_ops = {
.name = "kgdboc_earlycon",
.read_char = kgdboc_earlycon_get_char,
.write_char = kgdboc_earlycon_put_char,
.pre_exception = kgdboc_earlycon_pre_exp_handler,
.deinit = kgdboc_earlycon_deinit,
};
#define MAX_CONSOLE_NAME_LEN (sizeof((struct console *) 0)->name)
static char kgdboc_earlycon_param[MAX_CONSOLE_NAME_LEN] __initdata;
static bool kgdboc_earlycon_late_enable __initdata;
static int __init kgdboc_earlycon_init(char *opt)
{
struct console *con;
kdb_init(KDB_INIT_EARLY);
/*
* Look for a matching console, or if the name was left blank just
* pick the first one we find.
*/
/*
* Hold the console_list_lock to guarantee that no consoles are
* unregistered until the kgdboc_earlycon setup is complete.
* Trapping the exit() callback relies on exit() not being
* called until the trap is setup. This also allows safe
* traversal of the console list and race-free reading of @flags.
*/
console_list_lock();
for_each_console(con) {
if (con->write && con->read &&
(con->flags & (CON_BOOT | CON_ENABLED)) &&
(!opt || !opt[0] || strcmp(con->name, opt) == 0))
break;
}
if (!con) {
/*
* Both earlycon and kgdboc_earlycon are initialized during
* early parameter parsing. We cannot guarantee earlycon gets
* in first and, in any case, on ACPI systems earlycon may
* defer its own initialization (usually to somewhere within
* setup_arch() ). To cope with either of these situations
* we can defer our own initialization to a little later in
* the boot.
*/
if (!kgdboc_earlycon_late_enable) {
pr_info("No suitable earlycon yet, will try later\n");
if (opt)
strscpy(kgdboc_earlycon_param, opt,
sizeof(kgdboc_earlycon_param));
kgdboc_earlycon_late_enable = true;
} else {
pr_info("Couldn't find kgdb earlycon\n");
}
goto unlock;
}
kgdboc_earlycon_io_ops.cons = con;
pr_info("Going to register kgdb with earlycon '%s'\n", con->name);
if (kgdb_register_io_module(&kgdboc_earlycon_io_ops) != 0) {
kgdboc_earlycon_io_ops.cons = NULL;
pr_info("Failed to register kgdb with earlycon\n");
} else {
/* Trap exit so we can keep earlycon longer if needed. */
earlycon_orig_exit = con->exit;
con->exit = kgdboc_earlycon_deferred_exit;
}
unlock:
console_list_unlock();
/* Non-zero means malformed option so we always return zero */
return 0;
}
early_param("kgdboc_earlycon", kgdboc_earlycon_init);
/*
* This is only intended for the late adoption of an early console.
*
* It is not a reliable way to adopt regular consoles because we can not
* control what order console initcalls are made and, in any case, many
* regular consoles are registered much later in the boot process than
* the console initcalls!
*/
static int __init kgdboc_earlycon_late_init(void)
{
if (kgdboc_earlycon_late_enable)
kgdboc_earlycon_init(kgdboc_earlycon_param);
return 0;
}
console_initcall(kgdboc_earlycon_late_init);
#endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
module_init(init_kgdboc);
module_exit(exit_kgdboc);
module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644);
MODULE_PARM_DESC(kgdboc, "<serial_device>[,baud]");
MODULE_DESCRIPTION("KGDB Console TTY Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/kgdboc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Maxime Coquelin 2015
* Copyright (C) STMicroelectronics SA 2017
* Authors: Maxime Coquelin <[email protected]>
* Gerald Baeza <[email protected]>
* Erwan Le Ray <[email protected]>
*
* Inspired by st-asc.c from STMicroelectronics (c)
*/
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/tty.h>
#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"
/* Register offsets */
static struct stm32_usart_info __maybe_unused stm32f4_info = {
.ofs = {
.isr = 0x00,
.rdr = 0x04,
.tdr = 0x04,
.brr = 0x08,
.cr1 = 0x0c,
.cr2 = 0x10,
.cr3 = 0x14,
.gtpr = 0x18,
.rtor = UNDEF_REG,
.rqr = UNDEF_REG,
.icr = UNDEF_REG,
},
.cfg = {
.uart_enable_bit = 13,
.has_7bits_data = false,
.fifosize = 1,
}
};
static struct stm32_usart_info __maybe_unused stm32f7_info = {
.ofs = {
.cr1 = 0x00,
.cr2 = 0x04,
.cr3 = 0x08,
.brr = 0x0c,
.gtpr = 0x10,
.rtor = 0x14,
.rqr = 0x18,
.isr = 0x1c,
.icr = 0x20,
.rdr = 0x24,
.tdr = 0x28,
},
.cfg = {
.uart_enable_bit = 0,
.has_7bits_data = true,
.has_swap = true,
.fifosize = 1,
}
};
static struct stm32_usart_info __maybe_unused stm32h7_info = {
.ofs = {
.cr1 = 0x00,
.cr2 = 0x04,
.cr3 = 0x08,
.brr = 0x0c,
.gtpr = 0x10,
.rtor = 0x14,
.rqr = 0x18,
.isr = 0x1c,
.icr = 0x20,
.rdr = 0x24,
.tdr = 0x28,
},
.cfg = {
.uart_enable_bit = 0,
.has_7bits_data = true,
.has_swap = true,
.has_wakeup = true,
.has_fifo = true,
.fifosize = 16,
}
};
static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
return container_of(port, struct stm32_port, port);
}
static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
{
u32 val;
val = readl_relaxed(port->membase + reg);
val |= bits;
writel_relaxed(val, port->membase + reg);
}
static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
{
u32 val;
val = readl_relaxed(port->membase + reg);
val &= ~bits;
writel_relaxed(val, port->membase + reg);
}
static unsigned int stm32_usart_tx_empty(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
return TIOCSER_TEMT;
return 0;
}
static void stm32_usart_rs485_rts_enable(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
struct serial_rs485 *rs485conf = &port->rs485;
if (stm32_port->hw_flow_control ||
!(rs485conf->flags & SER_RS485_ENABLED))
return;
if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
mctrl_gpio_set(stm32_port->gpios,
stm32_port->port.mctrl | TIOCM_RTS);
} else {
mctrl_gpio_set(stm32_port->gpios,
stm32_port->port.mctrl & ~TIOCM_RTS);
}
}
static void stm32_usart_rs485_rts_disable(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
struct serial_rs485 *rs485conf = &port->rs485;
if (stm32_port->hw_flow_control ||
!(rs485conf->flags & SER_RS485_ENABLED))
return;
if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
mctrl_gpio_set(stm32_port->gpios,
stm32_port->port.mctrl & ~TIOCM_RTS);
} else {
mctrl_gpio_set(stm32_port->gpios,
stm32_port->port.mctrl | TIOCM_RTS);
}
}
static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
u32 delay_DDE, u32 baud)
{
u32 rs485_deat_dedt;
u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
bool over8;
*cr3 |= USART_CR3_DEM;
over8 = *cr1 & USART_CR1_OVER8;
*cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
if (over8)
rs485_deat_dedt = delay_ADE * baud * 8;
else
rs485_deat_dedt = delay_ADE * baud * 16;
rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
rs485_deat_dedt_max : rs485_deat_dedt;
rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
USART_CR1_DEAT_MASK;
*cr1 |= rs485_deat_dedt;
if (over8)
rs485_deat_dedt = delay_DDE * baud * 8;
else
rs485_deat_dedt = delay_DDE * baud * 16;
rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
rs485_deat_dedt_max : rs485_deat_dedt;
rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
USART_CR1_DEDT_MASK;
*cr1 |= rs485_deat_dedt;
}
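/*
* Worked example (illustrative numbers): with 16x oversampling
* (over8 == 0), a 1 ms delay at 115200 baud requests
* 1 * 115200 * 16 / 1000 = 1843 sample-time units, which the code
* above clamps to rs485_deat_dedt_max (31 for a 5-bit DEAT/DEDT
* field, i.e. just under two bit times, ~17 us here), so long
* driver-enable delays saturate quickly at high baud rates.
*/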
static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
u32 usartdiv, baud, cr1, cr3;
bool over8;
stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
if (port->rs485_rx_during_tx_gpio)
gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
!!(rs485conf->flags & SER_RS485_RX_DURING_TX));
else
rs485conf->flags |= SER_RS485_RX_DURING_TX;
if (rs485conf->flags & SER_RS485_ENABLED) {
cr1 = readl_relaxed(port->membase + ofs->cr1);
cr3 = readl_relaxed(port->membase + ofs->cr3);
usartdiv = readl_relaxed(port->membase + ofs->brr);
usartdiv = usartdiv & GENMASK(15, 0);
over8 = cr1 & USART_CR1_OVER8;
if (over8)
usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
<< USART_BRR_04_R_SHIFT;
baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
stm32_usart_config_reg_rs485(&cr1, &cr3,
rs485conf->delay_rts_before_send,
rs485conf->delay_rts_after_send,
baud);
if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
cr3 &= ~USART_CR3_DEP;
else
cr3 |= USART_CR3_DEP;
writel_relaxed(cr3, port->membase + ofs->cr3);
writel_relaxed(cr1, port->membase + ofs->cr1);
} else {
stm32_usart_clr_bits(port, ofs->cr3,
USART_CR3_DEM | USART_CR3_DEP);
stm32_usart_clr_bits(port, ofs->cr1,
USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
}
stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
/* Adjust RTS polarity in case it's driven in software */
if (stm32_usart_tx_empty(port))
stm32_usart_rs485_rts_disable(port);
else
stm32_usart_rs485_rts_enable(port);
return 0;
}
static int stm32_usart_init_rs485(struct uart_port *port,
struct platform_device *pdev)
{
struct serial_rs485 *rs485conf = &port->rs485;
rs485conf->flags = 0;
rs485conf->delay_rts_before_send = 0;
rs485conf->delay_rts_after_send = 0;
if (!pdev->dev.of_node)
return -ENODEV;
return uart_get_rs485_mode(port);
}
static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port)
{
return stm32_port->rx_ch ? stm32_port->rx_dma_busy : false;
}
static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port)
{
dmaengine_terminate_async(stm32_port->rx_ch);
stm32_port->rx_dma_busy = false;
}
static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
struct dma_chan *chan,
enum dma_status expected_status,
int dmaengine_pause_or_resume(struct dma_chan *),
bool stm32_usart_xx_dma_started(struct stm32_port *),
void stm32_usart_xx_dma_terminate(struct stm32_port *))
{
struct uart_port *port = &stm32_port->port;
enum dma_status dma_status;
int ret;
if (!stm32_usart_xx_dma_started(stm32_port))
return -EPERM;
dma_status = dmaengine_tx_status(chan, chan->cookie, NULL);
if (dma_status != expected_status)
return -EAGAIN;
ret = dmaengine_pause_or_resume(chan);
if (ret) {
dev_err(port->dev, "DMA failed with error code: %d\n", ret);
stm32_usart_xx_dma_terminate(stm32_port);
}
return ret;
}
static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port)
{
return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
DMA_IN_PROGRESS, dmaengine_pause,
stm32_usart_rx_dma_started,
stm32_usart_rx_dma_terminate);
}
static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port)
{
return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
DMA_PAUSED, dmaengine_resume,
stm32_usart_rx_dma_started,
stm32_usart_rx_dma_terminate);
}
/* Return true when data is pending (in pio mode), and false when no data is pending. */
static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
*sr = readl_relaxed(port->membase + ofs->isr);
/* Get pending characters in RDR or FIFO */
if (*sr & USART_SR_RXNE) {
/* Get all pending characters from the RDR or the FIFO when using interrupts */
if (!stm32_usart_rx_dma_started(stm32_port))
return true;
/* Handle only RX data errors when using DMA */
if (*sr & USART_SR_ERR_MASK)
return true;
}
return false;
}
static u8 stm32_usart_get_char_pio(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long c;
c = readl_relaxed(port->membase + ofs->rdr);
/* Apply RDR data mask */
c &= stm32_port->rdr_mask;
return c;
}
static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned int size = 0;
u32 sr;
u8 c, flag;
while (stm32_usart_pending_rx_pio(port, &sr)) {
sr |= USART_SR_DUMMY_RX;
flag = TTY_NORMAL;
/*
* Status bits have to be cleared before reading the RDR:
* in FIFO mode, reading the RDR will pop the next data
* (if any) along with its status bits into the SR.
* Not doing so leads to misalignment between RDR and SR,
* and clears the status bits of the next RX data.
*
* Clear error flags for stm32f7 and stm32h7 compatible
* devices. On stm32f4 compatible devices, the error bit is
* cleared by the sequence [read SR - read DR].
*/
if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
writel_relaxed(sr & USART_SR_ERR_MASK,
port->membase + ofs->icr);
c = stm32_usart_get_char_pio(port);
port->icount.rx++;
size++;
if (sr & USART_SR_ERR_MASK) {
if (sr & USART_SR_ORE) {
port->icount.overrun++;
} else if (sr & USART_SR_PE) {
port->icount.parity++;
} else if (sr & USART_SR_FE) {
/* Break detection if character is null */
if (!c) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
} else {
port->icount.frame++;
}
}
sr &= port->read_status_mask;
if (sr & USART_SR_PE) {
flag = TTY_PARITY;
} else if (sr & USART_SR_FE) {
if (!c)
flag = TTY_BREAK;
else
flag = TTY_FRAME;
}
}
if (uart_prepare_sysrq_char(port, c))
continue;
uart_insert_char(port, sr, USART_SR_ORE, c, flag);
}
return size;
}
static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
{
struct stm32_port *stm32_port = to_stm32_port(port);
struct tty_port *ttyport = &stm32_port->port.state->port;
unsigned char *dma_start;
int dma_count, i;
dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
/*
* Apply rdr_mask to the buffer in order to mask the parity bit.
* This loop is unnecessary in cs8 mode because DMA copies only
* 8 bits and already ignores the parity bit.
*/
if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
for (i = 0; i < dma_size; i++)
*(dma_start + i) &= stm32_port->rdr_mask;
dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
port->icount.rx += dma_count;
if (dma_count != dma_size)
port->icount.buf_overrun++;
stm32_port->last_res -= dma_count;
if (stm32_port->last_res == 0)
stm32_port->last_res = RX_BUF_L;
}
static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
unsigned int dma_size, size = 0;
/* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
/* Conditional first part: from last_res to end of DMA buffer */
dma_size = stm32_port->last_res;
stm32_usart_push_buffer_dma(port, dma_size);
size = dma_size;
}
dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
stm32_usart_push_buffer_dma(port, dma_size);
size += dma_size;
return size;
}
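/*
* Worked example of the residue bookkeeping (illustrative numbers):
* with RX_BUF_L = 4096, last_res = 100 and a reported residue of
* 4000, the DMA write pointer has wrapped: the 100 bytes up to the
* end of the ring are pushed first (stm32_usart_push_buffer_dma()
* resets last_res to 4096), then 4096 - 4000 = 96 bytes from the
* start of the buffer.
*/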
static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
enum dma_status rx_dma_status;
u32 sr;
unsigned int size = 0;
if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) {
rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
stm32_port->rx_ch->cookie,
&stm32_port->rx_dma_state);
if (rx_dma_status == DMA_IN_PROGRESS ||
rx_dma_status == DMA_PAUSED) {
/* Empty DMA buffer */
size = stm32_usart_receive_chars_dma(port);
sr = readl_relaxed(port->membase + ofs->isr);
if (sr & USART_SR_ERR_MASK) {
/* Disable DMA request line */
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
/* Switch to PIO mode to handle the errors */
size += stm32_usart_receive_chars_pio(port);
/* Switch back to DMA mode */
stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
}
} else {
/* Disable RX DMA */
stm32_usart_rx_dma_terminate(stm32_port);
/* Fall back to interrupt mode */
dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
size = stm32_usart_receive_chars_pio(port);
}
} else {
size = stm32_usart_receive_chars_pio(port);
}
return size;
}
static void stm32_usart_rx_dma_complete(void *arg)
{
struct uart_port *port = arg;
struct tty_port *tport = &port->state->port;
unsigned int size;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq_irqrestore(port, flags);
if (size)
tty_flip_buffer_push(tport);
}
static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
struct dma_async_tx_descriptor *desc;
enum dma_status rx_dma_status;
int ret;
if (stm32_port->throttled)
return 0;
if (stm32_port->rx_dma_busy) {
rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
stm32_port->rx_ch->cookie,
NULL);
if (rx_dma_status == DMA_IN_PROGRESS)
return 0;
if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port))
return 0;
dev_err(port->dev, "DMA failed : status error.\n");
stm32_usart_rx_dma_terminate(stm32_port);
}
stm32_port->rx_dma_busy = true;
stm32_port->last_res = RX_BUF_L;
/* Prepare a DMA cyclic transaction */
desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
stm32_port->rx_dma_buf,
RX_BUF_L, RX_BUF_P,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(port->dev, "rx dma prep cyclic failed\n");
stm32_port->rx_dma_busy = false;
return -ENODEV;
}
desc->callback = stm32_usart_rx_dma_complete;
desc->callback_param = port;
/* Push current DMA transaction in the pending queue */
ret = dma_submit_error(dmaengine_submit(desc));
if (ret) {
dmaengine_terminate_sync(stm32_port->rx_ch);
stm32_port->rx_dma_busy = false;
return ret;
}
/* Issue pending DMA requests */
dma_async_issue_pending(stm32_port->rx_ch);
return 0;
}
static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
{
dmaengine_terminate_async(stm32_port->tx_ch);
stm32_port->tx_dma_busy = false;
}
static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
{
/*
* We cannot use the function "dmaengine_tx_status" to know the
* status of DMA. This function does not show whether the "dma
* complete" callback of the DMA transaction has been called. So we
* prefer to use the "tx_dma_busy" flag to prevent two DMA
* transactions at the same time.
*/
return stm32_port->tx_dma_busy;
}
static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port)
{
return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
DMA_IN_PROGRESS, dmaengine_pause,
stm32_usart_tx_dma_started,
stm32_usart_tx_dma_terminate);
}
static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port)
{
return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
DMA_PAUSED, dmaengine_resume,
stm32_usart_tx_dma_started,
stm32_usart_tx_dma_terminate);
}
static void stm32_usart_tx_dma_complete(void *arg)
{
struct uart_port *port = arg;
struct stm32_port *stm32port = to_stm32_port(port);
unsigned long flags;
stm32_usart_tx_dma_terminate(stm32port);
/* Let's see if we have pending data to send */
spin_lock_irqsave(&port->lock, flags);
stm32_usart_transmit_chars(port);
spin_unlock_irqrestore(&port->lock, flags);
}
static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
/*
* Enables the TX FIFO threshold irq when the FIFO is enabled,
* or the TX empty irq when the FIFO is disabled.
*/
if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
else
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
}
static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
else
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
}
static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
while (!uart_circ_empty(xmit)) {
/* Check that TDR is empty before filling FIFO */
if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
break;
writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
uart_xmit_advance(port, 1);
}
/* rely on TXE irq (mask or unmask) for sending remaining data */
if (uart_circ_empty(xmit))
stm32_usart_tx_interrupt_disable(port);
else
stm32_usart_tx_interrupt_enable(port);
}
static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
struct stm32_port *stm32port = to_stm32_port(port);
struct circ_buf *xmit = &port->state->xmit;
struct dma_async_tx_descriptor *desc = NULL;
unsigned int count;
int ret;
if (stm32_usart_tx_dma_started(stm32port)) {
ret = stm32_usart_tx_dma_resume(stm32port);
if (ret < 0 && ret != -EAGAIN)
goto fallback_err;
return;
}
count = uart_circ_chars_pending(xmit);
if (count > TX_BUF_L)
count = TX_BUF_L;
if (xmit->tail < xmit->head) {
memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
} else {
size_t one = UART_XMIT_SIZE - xmit->tail;
size_t two;
if (one > count)
one = count;
two = count - one;
memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
if (two)
memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
}
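/*
* Worked example of the copy above (illustrative numbers): with
* UART_XMIT_SIZE = 4096, xmit->tail = 4090, xmit->head = 10 and
* count = 16, "one" covers the 6 bytes up to the end of the circular
* buffer and "two" the remaining 10 bytes from its start, so tx_buf
* holds the data linearized for the single DMA descriptor below.
*/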
desc = dmaengine_prep_slave_single(stm32port->tx_ch,
stm32port->tx_dma_buf,
count,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!desc)
goto fallback_err;
/*
* Set the "tx_dma_busy" flag. This flag is cleared when
* dmaengine_terminate_async() is called. It keeps
* transmit_chars_dma() from starting another DMA transaction
* while the callback of the previous one has not run yet.
*/
stm32port->tx_dma_busy = true;
desc->callback = stm32_usart_tx_dma_complete;
desc->callback_param = port;
/* Push current DMA TX transaction in the pending queue */
/* DMA not yet started, safe to free resources */
ret = dma_submit_error(dmaengine_submit(desc));
if (ret) {
dev_err(port->dev, "DMA failed with error code: %d\n", ret);
stm32_usart_tx_dma_terminate(stm32port);
goto fallback_err;
}
/* Issue pending DMA TX requests */
dma_async_issue_pending(stm32port->tx_ch);
uart_xmit_advance(port, count);
return;
fallback_err:
stm32_usart_transmit_chars_pio(port);
}
static void stm32_usart_transmit_chars(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
u32 isr;
int ret;
if (!stm32_port->hw_flow_control &&
port->rs485.flags & SER_RS485_ENABLED &&
(port->x_char ||
!(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_enable(port);
}
if (port->x_char) {
/* dma terminate may have been called in case of dma pause failure */
stm32_usart_tx_dma_pause(stm32_port);
/* Check that TDR is empty before filling FIFO */
ret =
readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
isr,
(isr & USART_SR_TXE),
10, 1000);
if (ret)
dev_warn(port->dev, "1 character may be erased\n");
writel_relaxed(port->x_char, port->membase + ofs->tdr);
port->x_char = 0;
port->icount.tx++;
/* dma terminate may have been called in case of dma resume failure */
stm32_usart_tx_dma_resume(stm32_port);
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
stm32_usart_tx_interrupt_disable(port);
return;
}
if (ofs->icr == UNDEF_REG)
stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
else
writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
if (stm32_port->tx_ch)
stm32_usart_transmit_chars_dma(port);
else
stm32_usart_transmit_chars_pio(port);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (uart_circ_empty(xmit)) {
stm32_usart_tx_interrupt_disable(port);
if (!stm32_port->hw_flow_control &&
port->rs485.flags & SER_RS485_ENABLED) {
stm32_usart_tc_interrupt_enable(port);
}
}
}
static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
{
struct uart_port *port = ptr;
struct tty_port *tport = &port->state->port;
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 sr;
unsigned int size;
sr = readl_relaxed(port->membase + ofs->isr);
if (!stm32_port->hw_flow_control &&
port->rs485.flags & SER_RS485_ENABLED &&
(sr & USART_SR_TC)) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_disable(port);
}
if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
/* Clear wake up flag and disable wake up interrupt */
writel_relaxed(USART_ICR_WUCF,
port->membase + ofs->icr);
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
pm_wakeup_event(tport->tty->dev, 0);
}
/*
* RX errors in DMA mode have to be handled ASAP to avoid an overrun, as the DMA
* request line has been masked by HW and RX data is stacking up in the FIFO.
*/
if (!stm32_port->throttled) {
if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
spin_lock(&port->lock);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
}
}
if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
spin_lock(&port->lock);
stm32_usart_transmit_chars(port);
spin_unlock(&port->lock);
}
/* Receiver timeout irq for DMA RX */
if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
spin_lock(&port->lock);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
}
return IRQ_HANDLED;
}
static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
else
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
mctrl_gpio_set(stm32_port->gpios, mctrl);
}
static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
unsigned int ret;
/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
return mctrl_gpio_get(stm32_port->gpios, &ret);
}
static void stm32_usart_enable_ms(struct uart_port *port)
{
mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
}
static void stm32_usart_disable_ms(struct uart_port *port)
{
mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
}
/* Transmit stop */
static void stm32_usart_stop_tx(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
stm32_usart_tx_interrupt_disable(port);
/* dma terminate may have been called in case of dma pause failure */
stm32_usart_tx_dma_pause(stm32_port);
stm32_usart_rs485_rts_disable(port);
}
/* There are probably characters waiting to be transmitted. */
static void stm32_usart_start_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
if (uart_circ_empty(xmit) && !port->x_char) {
stm32_usart_rs485_rts_disable(port);
return;
}
stm32_usart_rs485_rts_enable(port);
stm32_usart_transmit_chars(port);
}
/* Flush the transmit buffer. */
static void stm32_usart_flush_buffer(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
if (stm32_port->tx_ch)
stm32_usart_tx_dma_terminate(stm32_port);
}
/* Throttle the remote when input buffer is about to overflow. */
static void stm32_usart_throttle(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/*
* Pause DMA transfer, so the RX data gets queued into the FIFO.
* Hardware flow control is triggered when RX FIFO is full.
*/
stm32_usart_rx_dma_pause(stm32_port);
stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
stm32_port->throttled = true;
spin_unlock_irqrestore(&port->lock, flags);
}
/* Unthrottle the remote, the input buffer can now accept data. */
static void stm32_usart_unthrottle(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
stm32_port->throttled = false;
/*
* Switch back to DMA mode (resume DMA).
* Hardware flow control is stopped when FIFO is not full any more.
*/
if (stm32_port->rx_ch)
stm32_usart_rx_dma_start_or_resume(port);
spin_unlock_irqrestore(&port->lock, flags);
}
/* Receive stop */
static void stm32_usart_stop_rx(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
/* Disable DMA request line. */
stm32_usart_rx_dma_pause(stm32_port);
stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}
/* Handle breaks - ignored by us */
static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
}
static int stm32_usart_startup(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
const char *name = to_platform_device(port->dev)->name;
u32 val;
int ret;
ret = request_irq(port->irq, stm32_usart_interrupt,
IRQF_NO_SUSPEND, name, port);
if (ret)
return ret;
if (stm32_port->swap) {
val = readl_relaxed(port->membase + ofs->cr2);
val |= USART_CR2_SWAP;
writel_relaxed(val, port->membase + ofs->cr2);
}
/* RX FIFO Flush */
if (ofs->rqr != UNDEF_REG)
writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
if (stm32_port->rx_ch) {
ret = stm32_usart_rx_dma_start_or_resume(port);
if (ret) {
free_irq(port->irq, port);
return ret;
}
}
/* RX enabling */
val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
stm32_usart_set_bits(port, ofs->cr1, val);
return 0;
}
static void stm32_usart_shutdown(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
u32 val, isr;
int ret;
if (stm32_usart_tx_dma_started(stm32_port))
stm32_usart_tx_dma_terminate(stm32_port);
if (stm32_port->tx_ch)
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
/* Disable modem control interrupts */
stm32_usart_disable_ms(port);
val = USART_CR1_TXEIE | USART_CR1_TE;
val |= stm32_port->cr1_irq | USART_CR1_RE;
val |= BIT(cfg->uart_enable_bit);
if (stm32_port->fifoen)
val |= USART_CR1_FIFOEN;
ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
isr, (isr & USART_SR_TC),
10, 100000);
/* Send the TC error message only when ISR_TC is not set */
if (ret)
dev_err(port->dev, "Transmission is not complete\n");
/* Disable RX DMA. */
if (stm32_port->rx_ch) {
stm32_usart_rx_dma_terminate(stm32_port);
dmaengine_synchronize(stm32_port->rx_ch);
}
/* flush RX & TX FIFO */
if (ofs->rqr != UNDEF_REG)
writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
port->membase + ofs->rqr);
stm32_usart_clr_bits(port, ofs->cr1, val);
free_irq(port->irq, port);
}
static void stm32_usart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
struct serial_rs485 *rs485conf = &port->rs485;
unsigned int baud, bits;
u32 usartdiv, mantissa, fraction, oversampling;
tcflag_t cflag = termios->c_cflag;
u32 cr1, cr2, cr3, isr;
unsigned long flags;
int ret;
if (!stm32_port->hw_flow_control)
cflag &= ~CRTSCTS;
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
spin_lock_irqsave(&port->lock, flags);
ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
isr,
(isr & USART_SR_TC),
10, 100000);
/* Send the TC error message only when ISR_TC is not set. */
if (ret)
dev_err(port->dev, "Transmission is not complete\n");
/* Stop serial port and reset value */
writel_relaxed(0, port->membase + ofs->cr1);
/* flush RX & TX FIFO */
if (ofs->rqr != UNDEF_REG)
writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
port->membase + ofs->rqr);
cr1 = USART_CR1_TE | USART_CR1_RE;
if (stm32_port->fifoen)
cr1 |= USART_CR1_FIFOEN;
cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;
/* TX and RX FIFO configuration */
cr3 = readl_relaxed(port->membase + ofs->cr3);
cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
if (stm32_port->fifoen) {
if (stm32_port->txftcfg >= 0)
cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
if (stm32_port->rxftcfg >= 0)
cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
}
if (cflag & CSTOPB)
cr2 |= USART_CR2_STOP_2B;
bits = tty_get_char_size(cflag);
stm32_port->rdr_mask = (BIT(bits) - 1);
if (cflag & PARENB) {
bits++;
cr1 |= USART_CR1_PCE;
}
/*
* Word length configuration:
* CS8 + parity, 9 bits word aka [M1:M0] = 0b01
* CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
* CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
* M0 and M1 already cleared by cr1 initialization.
*/
if (bits == 9) {
cr1 |= USART_CR1_M0;
} else if ((bits == 7) && cfg->has_7bits_data) {
cr1 |= USART_CR1_M1;
} else if (bits != 8) {
dev_dbg(port->dev, "Unsupported data bits config: %u bits\n", bits);
cflag &= ~CSIZE;
cflag |= CS8;
termios->c_cflag = cflag;
bits = 8;
if (cflag & PARENB) {
bits++;
cr1 |= USART_CR1_M0;
}
}
if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
(stm32_port->fifoen &&
stm32_port->rxftcfg >= 0))) {
if (cflag & CSTOPB)
bits = bits + 3; /* 1 start bit + 2 stop bits */
else
bits = bits + 2; /* 1 start bit + 1 stop bit */
/* RX timeout irq to occur after last stop bit + bits */
stm32_port->cr1_irq = USART_CR1_RTOIE;
writel_relaxed(bits, port->membase + ofs->rtor);
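/*
 * For example (illustrative, assuming an 8N1 frame): bits = 8 + 2 = 10,
 * so the receiver timeout fires about one idle character time after the
 * last stop bit.
 */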
cr2 |= USART_CR2_RTOEN;
/*
* Enable the FIFO threshold irq in two cases: either when there is no DMA, or
* when waking up over USART from low power, until the DMA gets re-enabled by
* resume.
*/
stm32_port->cr3_irq = USART_CR3_RXFTIE;
}
cr1 |= stm32_port->cr1_irq;
cr3 |= stm32_port->cr3_irq;
if (cflag & PARODD)
cr1 |= USART_CR1_PS;
port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
if (cflag & CRTSCTS) {
port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
}
usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
/*
* The USART supports 16 or 8 times oversampling.
* By default we prefer 16 times oversampling, so that the receiver
* has a better tolerance to clock deviations.
* 8 times oversampling is only used to achieve higher speeds.
*/
if (usartdiv < 16) {
oversampling = 8;
cr1 |= USART_CR1_OVER8;
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
} else {
oversampling = 16;
cr1 &= ~USART_CR1_OVER8;
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
}
mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
fraction = usartdiv % oversampling;
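/*
 * Worked example (illustrative figures, assuming the usual 4-bit BRR
 * fraction field): uartclk = 64 MHz at 115200 baud gives usartdiv = 556;
 * with 16x oversampling, mantissa = (556 / 16) << 4 = 0x220 and
 * fraction = 556 % 16 = 0xC, so BRR = 0x22C.
 */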
writel_relaxed(mantissa | fraction, port->membase + ofs->brr);
uart_update_timeout(port, cflag, baud);
port->read_status_mask = USART_SR_ORE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= USART_SR_PE | USART_SR_FE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= USART_SR_FE;
/* Characters to ignore */
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= USART_SR_FE;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= USART_SR_ORE;
}
/* Ignore all characters if CREAD is not set */
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= USART_SR_DUMMY_RX;
if (stm32_port->rx_ch) {
/*
* Setup DMA to collect only valid data and enable error irqs.
* This also enables break reception when using DMA.
*/
cr1 |= USART_CR1_PEIE;
cr3 |= USART_CR3_EIE;
cr3 |= USART_CR3_DMAR;
cr3 |= USART_CR3_DDRE;
}
if (stm32_port->tx_ch)
cr3 |= USART_CR3_DMAT;
if (rs485conf->flags & SER_RS485_ENABLED) {
stm32_usart_config_reg_rs485(&cr1, &cr3,
rs485conf->delay_rts_before_send,
rs485conf->delay_rts_after_send,
baud);
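/*
 * DEP selects the driver-enable polarity: RTS_ON_SEND means DE is
 * active high (DEP cleared), otherwise DE is active low (DEP set).
 */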
if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
cr3 &= ~USART_CR3_DEP;
rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
} else {
cr3 |= USART_CR3_DEP;
rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
}
} else {
cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
}
/* Configure wake up from low power on start bit detection */
if (stm32_port->wakeup_src) {
cr3 &= ~USART_CR3_WUS_MASK;
cr3 |= USART_CR3_WUS_START_BIT;
}
writel_relaxed(cr3, port->membase + ofs->cr3);
writel_relaxed(cr2, port->membase + ofs->cr2);
writel_relaxed(cr1, port->membase + ofs->cr1);
stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
spin_unlock_irqrestore(&port->lock, flags);
/* Handle modem control interrupts */
if (UART_ENABLE_MS(port, termios->c_cflag))
stm32_usart_enable_ms(port);
else
stm32_usart_disable_ms(port);
}
static const char *stm32_usart_type(struct uart_port *port)
{
return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
}
static void stm32_usart_release_port(struct uart_port *port)
{
}
static int stm32_usart_request_port(struct uart_port *port)
{
return 0;
}
static void stm32_usart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_STM32;
}
static int
stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
/* No user changeable parameters */
return -EINVAL;
}
static void stm32_usart_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct stm32_port *stm32port = container_of(port,
struct stm32_port, port);
const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
const struct stm32_usart_config *cfg = &stm32port->info->cfg;
unsigned long flags;
switch (state) {
case UART_PM_STATE_ON:
pm_runtime_get_sync(port->dev);
break;
case UART_PM_STATE_OFF:
spin_lock_irqsave(&port->lock, flags);
stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
spin_unlock_irqrestore(&port->lock, flags);
pm_runtime_put_sync(port->dev);
break;
}
}
#if defined(CONFIG_CONSOLE_POLL)
/* Callbacks for character polling in debug context (e.g. KGDB). */
static int stm32_usart_poll_init(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
return clk_prepare_enable(stm32_port->clk);
}
static int stm32_usart_poll_get_char(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
return NO_POLL_CHAR;
return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
}
static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
{
stm32_usart_console_putchar(port, ch);
}
#endif /* CONFIG_CONSOLE_POLL */
static const struct uart_ops stm32_uart_ops = {
.tx_empty = stm32_usart_tx_empty,
.set_mctrl = stm32_usart_set_mctrl,
.get_mctrl = stm32_usart_get_mctrl,
.stop_tx = stm32_usart_stop_tx,
.start_tx = stm32_usart_start_tx,
.throttle = stm32_usart_throttle,
.unthrottle = stm32_usart_unthrottle,
.stop_rx = stm32_usart_stop_rx,
.enable_ms = stm32_usart_enable_ms,
.break_ctl = stm32_usart_break_ctl,
.startup = stm32_usart_startup,
.shutdown = stm32_usart_shutdown,
.flush_buffer = stm32_usart_flush_buffer,
.set_termios = stm32_usart_set_termios,
.pm = stm32_usart_pm,
.type = stm32_usart_type,
.release_port = stm32_usart_release_port,
.request_port = stm32_usart_request_port,
.config_port = stm32_usart_config_port,
.verify_port = stm32_usart_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
.poll_init = stm32_usart_poll_init,
.poll_get_char = stm32_usart_poll_get_char,
.poll_put_char = stm32_usart_poll_put_char,
#endif /* CONFIG_CONSOLE_POLL */
};
/*
* STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
* Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
* RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
* So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1.
*/
static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };
static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
int *ftcfg)
{
u32 bytes, i;
/* DT option to get RX & TX FIFO threshold (defaults to 8 bytes) */
if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
bytes = 8;
for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
break;
if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
stm32h7_usart_fifo_thresh_cfg[i]);
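/*
 * Example: a DT threshold of 8 bytes matches index 3 in the table
 * above, so *ftcfg becomes 2; a threshold of 1 byte matches index 0
 * and *ftcfg is set to -EINVAL below (threshold irq unused).
 */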
/* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
if (i)
*ftcfg = i - 1;
else
*ftcfg = -EINVAL;
}
static void stm32_usart_deinit_port(struct stm32_port *stm32port)
{
clk_disable_unprepare(stm32port->clk);
}
static const struct serial_rs485 stm32_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
SER_RS485_RX_DURING_TX,
.delay_rts_before_send = 1,
.delay_rts_after_send = 1,
};
static int stm32_usart_init_port(struct stm32_port *stm32port,
struct platform_device *pdev)
{
struct uart_port *port = &stm32port->port;
struct resource *res;
int ret, irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &stm32_uart_ops;
port->dev = &pdev->dev;
port->fifosize = stm32port->info->cfg.fifosize;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
port->irq = irq;
port->rs485_config = stm32_usart_config_rs485;
port->rs485_supported = stm32_rs485_supported;
ret = stm32_usart_init_rs485(port, pdev);
if (ret)
return ret;
stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
of_property_read_bool(pdev->dev.of_node, "wakeup-source");
stm32port->swap = stm32port->info->cfg.has_swap &&
of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");
stm32port->fifoen = stm32port->info->cfg.has_fifo;
if (stm32port->fifoen) {
stm32_usart_get_ftcfg(pdev, "rx-threshold",
&stm32port->rxftcfg);
stm32_usart_get_ftcfg(pdev, "tx-threshold",
&stm32port->txftcfg);
}
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
port->mapbase = res->start;
spin_lock_init(&port->lock);
stm32port->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(stm32port->clk))
return PTR_ERR(stm32port->clk);
/* Ensure that clk rate is correct by enabling the clk */
ret = clk_prepare_enable(stm32port->clk);
if (ret)
return ret;
stm32port->port.uartclk = clk_get_rate(stm32port->clk);
if (!stm32port->port.uartclk) {
ret = -EINVAL;
goto err_clk;
}
stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
if (IS_ERR(stm32port->gpios)) {
ret = PTR_ERR(stm32port->gpios);
goto err_clk;
}
/*
* CTS/RTS gpios must not be specified together with the (deprecated)
* "st,hw-flow-ctrl" property or with "uart-has-rtscts".
*/
if (stm32port->hw_flow_control) {
if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
ret = -EINVAL;
goto err_clk;
}
}
return ret;
err_clk:
clk_disable_unprepare(stm32port->clk);
return ret;
}
static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
int id;
if (!np)
return NULL;
id = of_alias_get_id(np, "serial");
if (id < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
return NULL;
}
if (WARN_ON(id >= STM32_MAX_PORTS))
return NULL;
stm32_ports[id].hw_flow_control =
of_property_read_bool(np, "st,hw-flow-ctrl") /* deprecated */ ||
of_property_read_bool(np, "uart-has-rtscts");
stm32_ports[id].port.line = id;
stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
stm32_ports[id].cr3_irq = 0;
stm32_ports[id].last_res = RX_BUF_L;
return &stm32_ports[id];
}
#ifdef CONFIG_OF
static const struct of_device_id stm32_match[] = {
{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
{},
};
MODULE_DEVICE_TABLE(of, stm32_match);
#endif
static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
struct platform_device *pdev)
{
if (stm32port->rx_buf)
dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
stm32port->rx_dma_buf);
}
static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
struct platform_device *pdev)
{
const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
struct uart_port *port = &stm32port->port;
struct device *dev = &pdev->dev;
struct dma_slave_config config;
int ret;
stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
&stm32port->rx_dma_buf,
GFP_KERNEL);
if (!stm32port->rx_buf)
return -ENOMEM;
/* Configure DMA channel */
memset(&config, 0, sizeof(config));
config.src_addr = port->mapbase + ofs->rdr;
config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
ret = dmaengine_slave_config(stm32port->rx_ch, &config);
if (ret < 0) {
dev_err(dev, "rx dma channel config failed\n");
stm32_usart_of_dma_rx_remove(stm32port, pdev);
return ret;
}
return 0;
}
static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
struct platform_device *pdev)
{
if (stm32port->tx_buf)
dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
stm32port->tx_dma_buf);
}
static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
struct platform_device *pdev)
{
const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
struct uart_port *port = &stm32port->port;
struct device *dev = &pdev->dev;
struct dma_slave_config config;
int ret;
stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
&stm32port->tx_dma_buf,
GFP_KERNEL);
if (!stm32port->tx_buf)
return -ENOMEM;
/* Configure DMA channel */
memset(&config, 0, sizeof(config));
config.dst_addr = port->mapbase + ofs->tdr;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
ret = dmaengine_slave_config(stm32port->tx_ch, &config);
if (ret < 0) {
dev_err(dev, "tx dma channel config failed\n");
stm32_usart_of_dma_tx_remove(stm32port, pdev);
return ret;
}
return 0;
}
static int stm32_usart_serial_probe(struct platform_device *pdev)
{
struct stm32_port *stm32port;
int ret;
stm32port = stm32_usart_of_get_port(pdev);
if (!stm32port)
return -ENODEV;
stm32port->info = of_device_get_match_data(&pdev->dev);
if (!stm32port->info)
return -EINVAL;
stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER)
return -EPROBE_DEFER;
/* Fall back to interrupt mode for any non-deferral error */
if (IS_ERR(stm32port->rx_ch))
stm32port->rx_ch = NULL;
stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_dma_rx;
}
/* Fall back to interrupt mode for any non-deferral error */
if (IS_ERR(stm32port->tx_ch))
stm32port->tx_ch = NULL;
ret = stm32_usart_init_port(stm32port, pdev);
if (ret)
goto err_dma_tx;
if (stm32port->wakeup_src) {
device_set_wakeup_capable(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
if (ret)
goto err_deinit_port;
}
if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
/* Fall back to interrupt mode */
dma_release_channel(stm32port->rx_ch);
stm32port->rx_ch = NULL;
}
if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
/* Fall back to interrupt mode */
dma_release_channel(stm32port->tx_ch);
stm32port->tx_ch = NULL;
}
if (!stm32port->rx_ch)
dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
if (!stm32port->tx_ch)
dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");
platform_set_drvdata(pdev, &stm32port->port);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
if (ret)
goto err_port;
pm_runtime_put_sync(&pdev->dev);
return 0;
err_port:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
if (stm32port->tx_ch)
stm32_usart_of_dma_tx_remove(stm32port, pdev);
if (stm32port->rx_ch)
stm32_usart_of_dma_rx_remove(stm32port, pdev);
if (stm32port->wakeup_src)
dev_pm_clear_wake_irq(&pdev->dev);
err_deinit_port:
if (stm32port->wakeup_src)
device_set_wakeup_capable(&pdev->dev, false);
stm32_usart_deinit_port(stm32port);
err_dma_tx:
if (stm32port->tx_ch)
dma_release_channel(stm32port->tx_ch);
err_dma_rx:
if (stm32port->rx_ch)
dma_release_channel(stm32port->rx_ch);
return ret;
}
static int stm32_usart_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 cr3;
pm_runtime_get_sync(&pdev->dev);
uart_remove_one_port(&stm32_usart_driver, port);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
if (stm32_port->tx_ch) {
stm32_usart_of_dma_tx_remove(stm32_port, pdev);
dma_release_channel(stm32_port->tx_ch);
}
if (stm32_port->rx_ch) {
stm32_usart_of_dma_rx_remove(stm32_port, pdev);
dma_release_channel(stm32_port->rx_ch);
}
cr3 = readl_relaxed(port->membase + ofs->cr3);
cr3 &= ~USART_CR3_EIE;
cr3 &= ~USART_CR3_DMAR;
cr3 &= ~USART_CR3_DMAT;
cr3 &= ~USART_CR3_DDRE;
writel_relaxed(cr3, port->membase + ofs->cr3);
if (stm32_port->wakeup_src) {
dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
}
stm32_usart_deinit_port(stm32_port);
return 0;
}
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 isr;
int ret;
ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
(isr & USART_SR_TXE), 100,
STM32_USART_TIMEOUT_USEC);
if (ret != 0) {
dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret);
return;
}
writel_relaxed(ch, port->membase + ofs->tdr);
}
#ifdef CONFIG_SERIAL_STM32_CONSOLE
static void stm32_usart_console_write(struct console *co, const char *s,
unsigned int cnt)
{
struct uart_port *port = &stm32_ports[co->index].port;
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
unsigned long flags;
u32 old_cr1, new_cr1;
int locked = 1;
if (oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
/* Save and disable interrupts, enable the transmitter */
old_cr1 = readl_relaxed(port->membase + ofs->cr1);
new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
writel_relaxed(new_cr1, port->membase + ofs->cr1);
uart_console_write(port, s, cnt, stm32_usart_console_putchar);
/* Restore interrupt state */
writel_relaxed(old_cr1, port->membase + ofs->cr1);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
}
static int stm32_usart_console_setup(struct console *co, char *options)
{
struct stm32_port *stm32port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index >= STM32_MAX_PORTS)
return -ENODEV;
stm32port = &stm32_ports[co->index];
/*
* This driver does not support early console initialization
* (use ARM early printk support instead), so we only expect
* this to be called during uart port registration, when the
* driver gets probed; the port should be mapped at that point.
*/
if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
return -ENXIO;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}
static struct console stm32_console = {
.name = STM32_SERIAL_NAME,
.device = uart_console_device,
.write = stm32_usart_console_write,
.setup = stm32_usart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &stm32_usart_driver,
};
#define STM32_SERIAL_CONSOLE (&stm32_console)
#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */
#ifdef CONFIG_SERIAL_EARLYCON
static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
struct stm32_usart_info *info = port->private_data;
while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
cpu_relax();
writel_relaxed(ch, port->membase + info->ofs.tdr);
}
static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
{
struct earlycon_device *device = console->data;
struct uart_port *port = &device->port;
uart_console_write(port, s, count, early_stm32_usart_console_putchar);
}
static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
{
if (!(device->port.membase || device->port.iobase))
return -ENODEV;
device->port.private_data = &stm32h7_info;
device->con->write = early_stm32_serial_write;
return 0;
}
static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
{
if (!(device->port.membase || device->port.iobase))
return -ENODEV;
device->port.private_data = &stm32f7_info;
device->con->write = early_stm32_serial_write;
return 0;
}
static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
{
if (!(device->port.membase || device->port.iobase))
return -ENODEV;
device->port.private_data = &stm32f4_info;
device->con->write = early_stm32_serial_write;
return 0;
}
OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
#endif /* CONFIG_SERIAL_EARLYCON */
static struct uart_driver stm32_usart_driver = {
.driver_name = DRIVER_NAME,
.dev_name = STM32_SERIAL_NAME,
.major = 0,
.minor = 0,
.nr = STM32_MAX_PORTS,
.cons = STM32_SERIAL_CONSOLE,
};
static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
bool enable)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct tty_port *tport = &port->state->port;
int ret;
unsigned int size = 0;
unsigned long flags;
if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
return 0;
/*
* Enable low-power wake-up and the wake-up irq if the argument is set to
* "enable"; otherwise disable both.
*/
if (enable) {
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
mctrl_gpio_enable_irq_wake(stm32_port->gpios);
/*
* When DMA is used for reception, it must be disabled before
* entering low-power mode and re-enabled when exiting from
* low-power mode.
*/
if (stm32_port->rx_ch) {
spin_lock_irqsave(&port->lock, flags);
/* Poll data from DMA RX buffer if any */
if (!stm32_usart_rx_dma_pause(stm32_port))
size += stm32_usart_receive_chars(port, true);
stm32_usart_rx_dma_terminate(stm32_port);
uart_unlock_and_check_sysrq_irqrestore(port, flags);
if (size)
tty_flip_buffer_push(tport);
}
/* Poll data from RX FIFO if any */
stm32_usart_receive_chars(port, false);
} else {
if (stm32_port->rx_ch) {
ret = stm32_usart_rx_dma_start_or_resume(port);
if (ret)
return ret;
}
mctrl_gpio_disable_irq_wake(stm32_port->gpios);
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
}
return 0;
}
static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
int ret;
uart_suspend_port(&stm32_usart_driver, port);
if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
ret = stm32_usart_serial_en_wakeup(port, true);
if (ret)
return ret;
}
/*
* When "no_console_suspend" is enabled, keep the pinctrl default state
* and rely on the bootloader stage to restore this state upon resume.
* Otherwise, apply the idle or sleep states depending on wakeup
* capabilities.
*/
if (console_suspend_enabled || !uart_console(port)) {
if (device_may_wakeup(dev) || device_wakeup_path(dev))
pinctrl_pm_select_idle_state(dev);
else
pinctrl_pm_select_sleep_state(dev);
}
return 0;
}
static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
int ret;
pinctrl_pm_select_default_state(dev);
if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
ret = stm32_usart_serial_en_wakeup(port, false);
if (ret)
return ret;
}
return uart_resume_port(&stm32_usart_driver, port);
}
static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
struct stm32_port *stm32port = container_of(port,
struct stm32_port, port);
clk_disable_unprepare(stm32port->clk);
return 0;
}
static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
struct stm32_port *stm32port = container_of(port,
struct stm32_port, port);
return clk_prepare_enable(stm32port->clk);
}
static const struct dev_pm_ops stm32_serial_pm_ops = {
SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
stm32_usart_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
stm32_usart_serial_resume)
};
static struct platform_driver stm32_serial_driver = {
.probe = stm32_usart_serial_probe,
.remove = stm32_usart_serial_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &stm32_serial_pm_ops,
.of_match_table = of_match_ptr(stm32_match),
},
};
static int __init stm32_usart_init(void)
{
static char banner[] __initdata = "STM32 USART driver initialized";
int ret;
pr_info("%s\n", banner);
ret = uart_register_driver(&stm32_usart_driver);
if (ret)
return ret;
ret = platform_driver_register(&stm32_serial_driver);
if (ret)
uart_unregister_driver(&stm32_usart_driver);
return ret;
}
static void __exit stm32_usart_exit(void)
{
platform_driver_unregister(&stm32_serial_driver);
uart_unregister_driver(&stm32_usart_driver);
}
module_init(stm32_usart_init);
module_exit(stm32_usart_exit);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/tty/serial/stm32-usart.c |
// SPDX-License-Identifier: GPL-2.0+
/*
** mux.c:
** serial driver for the Mux console found in some PA-RISC servers.
**
** (c) Copyright 2002 Ryan Bradetich
** (c) Copyright 2002 Hewlett-Packard Company
**
** This driver currently only supports the console (port 0) on the MUX.
** Additional work will be needed on this driver to enable the full
** functionality of the MUX.
**
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/console.h>
#include <linux/delay.h> /* for udelay */
#include <linux/device.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/parisc-device.h>
#include <linux/sysrq.h>
#include <linux/serial_core.h>
#define MUX_OFFSET 0x800
#define MUX_LINE_OFFSET 0x80
#define MUX_FIFO_SIZE 255
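/* Poll interval: 30 ms, expressed in jiffies */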
#define MUX_POLL_DELAY (30 * HZ / 1000)
#define IO_DATA_REG_OFFSET 0x3c
#define IO_DCOUNT_REG_OFFSET 0x40
#define MUX_EOFIFO(status) ((status & 0xF000) == 0xF000)
#define MUX_STATUS(status) ((status & 0xF000) == 0x8000)
#define MUX_BREAK(status) ((status & 0xF000) == 0x2000)
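/* The high nibble of each receive word carries status: 0xF000 flags an
 * empty FIFO, 0x8000 a status-only word and 0x2000 a break condition;
 * the low byte holds the received character.
 */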
#define MUX_NR 256
static unsigned int port_cnt __read_mostly;
struct mux_port {
struct uart_port port;
int enabled;
};
static struct mux_port mux_ports[MUX_NR];
static struct uart_driver mux_driver = {
.owner = THIS_MODULE,
.driver_name = "ttyB",
.dev_name = "ttyB",
.major = MUX_MAJOR,
.minor = 0,
.nr = MUX_NR,
};
static struct timer_list mux_timer;
#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET)
#define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET)
/**
* get_mux_port_count - Get the number of available ports on the Mux.
* @dev: The parisc device.
*
* This function is used to determine the number of ports the Mux
* supports. The IODC data reports the number of ports the Mux
* can support, but there are cases where not all the Mux ports
* are connected. This function can override the IODC and
* return the true port count.
*/
static int __init get_mux_port_count(struct parisc_device *dev)
{
int status;
u8 iodc_data[32];
unsigned long bytecnt;
/* If this is the built-in Mux for the K-Class (Eole CAP/MUX),
* we only need to allocate resources for 1 port since the
* other 7 ports are not connected.
*/
if(dev->id.hversion == 0x15)
return 1;
status = pdc_iodc_read(&bytecnt, dev->hpa.start, 0, iodc_data, 32);
BUG_ON(status != PDC_OK);
/* Return the number of ports specified in the iodc data. */
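/* Bits [7:4] of IODC byte 4 encode the number of additional 8-port
 * blocks; the Mux always provides at least 8 ports.
 */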
return ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8;
}
/**
* mux_tx_empty - Check if the transmitter fifo is empty.
* @port: Ptr to the uart_port.
*
* This function tests whether the transmitter fifo for the port
* described by 'port' is empty. If it is empty, this function
* returns TIOCSER_TEMT, otherwise it returns 0.
*/
static unsigned int mux_tx_empty(struct uart_port *port)
{
return UART_GET_FIFO_CNT(port) ? 0 : TIOCSER_TEMT;
}
/**
* mux_set_mctrl - Set the current state of the modem control inputs.
* @port: Ptr to the uart_port.
* @mctrl: Modem control bits.
*
* The Serial MUX does not support CTS, DCD or DSR, so this function
* is a no-op.
*/
static void mux_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
/**
* mux_get_mctrl - Returns the current state of modem control inputs.
* @port: Ptr to the uart_port.
*
* The Serial MUX does not support CTS, DCD or DSR so these lines are
* treated as permanently active.
*/
static unsigned int mux_get_mctrl(struct uart_port *port)
{
return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}
/**
* mux_stop_tx - Stop transmitting characters.
* @port: Ptr to the uart_port.
*
* The Serial MUX does not support this function.
*/
static void mux_stop_tx(struct uart_port *port)
{
}
/**
* mux_start_tx - Start transmitting characters.
* @port: Ptr to the uart_port.
*
* The Serial Mux does not support this function.
*/
static void mux_start_tx(struct uart_port *port)
{
}
/**
* mux_stop_rx - Stop receiving characters.
* @port: Ptr to the uart_port.
*
* The Serial Mux does not support this function.
*/
static void mux_stop_rx(struct uart_port *port)
{
}
/**
* mux_break_ctl - Control the transmission of a break signal.
* @port: Ptr to the uart_port.
* @break_state: Raise/Lower the break signal.
*
* The Serial Mux does not support this function.
*/
static void mux_break_ctl(struct uart_port *port, int break_state)
{
}
static void mux_tx_done(struct uart_port *port)
{
/* FIXME js: really needs to wait? */
while (UART_GET_FIFO_CNT(port))
udelay(1);
}
/**
* mux_write - Write chars to the mux fifo.
* @port: Ptr to the uart_port.
*
* This function writes all the data from the uart buffer to
* the mux fifo.
*/
static void mux_write(struct uart_port *port)
{
u8 ch;
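/* uart_port_tx_limited() drains up to the remaining FIFO space worth
 * of characters from the xmit buffer, writing each one with
 * UART_PUT_CHAR() and finishing with mux_tx_done().
 */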
uart_port_tx_limited(port, ch,
port->fifosize - UART_GET_FIFO_CNT(port),
true,
UART_PUT_CHAR(port, ch),
mux_tx_done(port));
}
/**
* mux_read - Read chars from the mux fifo.
* @port: Ptr to the uart_port.
*
* This reads all available data from the mux's fifo and pushes
* the data to the tty layer.
*/
static void mux_read(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
int data;
__u32 start_count = port->icount.rx;
while(1) {
data = __raw_readl(port->membase + IO_DATA_REG_OFFSET);
if (MUX_STATUS(data))
continue;
if (MUX_EOFIFO(data))
break;
port->icount.rx++;
if (MUX_BREAK(data)) {
port->icount.brk++;
if(uart_handle_break(port))
continue;
}
if (uart_handle_sysrq_char(port, data & 0xffu))
continue;
tty_insert_flip_char(tport, data & 0xFF, TTY_NORMAL);
}
if (start_count != port->icount.rx)
tty_flip_buffer_push(tport);
}
/**
* mux_startup - Initialize the port.
* @port: Ptr to the uart_port.
*
* Grab any resources needed for this port and start the
* mux timer.
*/
static int mux_startup(struct uart_port *port)
{
mux_ports[port->line].enabled = 1;
return 0;
}
/**
* mux_shutdown - Disable the port.
* @port: Ptr to the uart_port.
*
* Release any resources needed for the port.
*/
static void mux_shutdown(struct uart_port *port)
{
mux_ports[port->line].enabled = 0;
}
/**
* mux_set_termios - Change port parameters.
* @port: Ptr to the uart_port.
* @termios: new termios settings.
* @old: old termios settings.
*
* The Serial Mux does not support this function.
*/
static void
mux_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
}
/**
* mux_type - Describe the port.
* @port: Ptr to the uart_port.
*
* Return a pointer to a string constant describing the
* specified port.
*/
static const char *mux_type(struct uart_port *port)
{
return "Mux";
}
/**
* mux_release_port - Release memory and IO regions.
* @port: Ptr to the uart_port.
*
* Release any memory and IO region resources currently in use by
* the port.
*/
static void mux_release_port(struct uart_port *port)
{
}
/**
* mux_request_port - Request memory and IO regions.
* @port: Ptr to the uart_port.
*
* Request any memory and IO region resources required by the port.
* If any fail, no resources should be registered when this function
* returns, and it should return -EBUSY on failure.
*/
static int mux_request_port(struct uart_port *port)
{
return 0;
}
/**
* mux_config_port - Perform port autoconfiguration.
* @port: Ptr to the uart_port.
* @type: Bitmask of required configurations.
*
* Perform any autoconfiguration steps for the port. This function is
* called if the UPF_BOOT_AUTOCONF flag is specified for the port.
* [Note: This is required for now because of a bug in the Serial core.
* rmk has already submitted a patch to linus, should be available for
* 2.5.47.]
*/
static void mux_config_port(struct uart_port *port, int type)
{
port->type = PORT_MUX;
}
/**
* mux_verify_port - Verify the port information.
* @port: Ptr to the uart_port.
* @ser: Ptr to the serial information.
*
* Verify the new serial port information contained within serinfo is
* suitable for this port type.
*/
static int mux_verify_port(struct uart_port *port, struct serial_struct *ser)
{
if(port->membase == NULL)
return -EINVAL;
return 0;
}
/**
* mux_poll - Mux poll function.
* @unused: Unused variable
*
* This function periodically polls the Serial MUX to check for new data.
*/
static void mux_poll(struct timer_list *unused)
{
int i;
for(i = 0; i < port_cnt; ++i) {
if(!mux_ports[i].enabled)
continue;
mux_read(&mux_ports[i].port);
mux_write(&mux_ports[i].port);
}
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
}
#ifdef CONFIG_SERIAL_MUX_CONSOLE
static void mux_console_write(struct console *co, const char *s, unsigned count)
{
/* Wait until the FIFO drains. */
while(UART_GET_FIFO_CNT(&mux_ports[0].port))
udelay(1);
while(count--) {
if(*s == '\n') {
UART_PUT_CHAR(&mux_ports[0].port, '\r');
}
UART_PUT_CHAR(&mux_ports[0].port, *s++);
}
}
static int mux_console_setup(struct console *co, char *options)
{
return 0;
}
static struct console mux_console = {
.name = "ttyB",
.write = mux_console_write,
.device = uart_console_device,
.setup = mux_console_setup,
.flags = CON_ENABLED | CON_PRINTBUFFER,
.index = 0,
.data = &mux_driver,
};
#define MUX_CONSOLE &mux_console
#else
#define MUX_CONSOLE NULL
#endif
static const struct uart_ops mux_pops = {
.tx_empty = mux_tx_empty,
.set_mctrl = mux_set_mctrl,
.get_mctrl = mux_get_mctrl,
.stop_tx = mux_stop_tx,
.start_tx = mux_start_tx,
.stop_rx = mux_stop_rx,
.break_ctl = mux_break_ctl,
.startup = mux_startup,
.shutdown = mux_shutdown,
.set_termios = mux_set_termios,
.type = mux_type,
.release_port = mux_release_port,
.request_port = mux_request_port,
.config_port = mux_config_port,
.verify_port = mux_verify_port,
};
/**
* mux_probe - Determine if the Serial Mux should claim this device.
* @dev: The parisc device.
*
* Determine if the Serial Mux should claim this chip (return 0)
* or not (return 1).
*/
static int __init mux_probe(struct parisc_device *dev)
{
int i, status;
int port_count = get_mux_port_count(dev);
printk(KERN_INFO "Serial mux driver (%d ports) Revision: 0.6\n", port_count);
dev_set_drvdata(&dev->dev, (void *)(long)port_count);
request_mem_region(dev->hpa.start + MUX_OFFSET,
port_count * MUX_LINE_OFFSET, "Mux");
if(!port_cnt) {
mux_driver.cons = MUX_CONSOLE;
status = uart_register_driver(&mux_driver);
if(status) {
printk(KERN_ERR "Serial mux: Unable to register driver.\n");
return 1;
}
}
for(i = 0; i < port_count; ++i, ++port_cnt) {
struct uart_port *port = &mux_ports[port_cnt].port;
port->iobase = 0;
port->mapbase = dev->hpa.start + MUX_OFFSET +
(i * MUX_LINE_OFFSET);
port->membase = ioremap(port->mapbase, MUX_LINE_OFFSET);
port->iotype = UPIO_MEM;
port->type = PORT_MUX;
port->irq = 0;
port->uartclk = 0;
port->fifosize = MUX_FIFO_SIZE;
port->ops = &mux_pops;
port->flags = UPF_BOOT_AUTOCONF;
port->line = port_cnt;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MUX_CONSOLE);
spin_lock_init(&port->lock);
status = uart_add_one_port(&mux_driver, port);
BUG_ON(status);
}
return 0;
}
static void __exit mux_remove(struct parisc_device *dev)
{
int i, j;
int port_count = (long)dev_get_drvdata(&dev->dev);
/* Find Port 0 for this card in the mux_ports list. */
for(i = 0; i < port_cnt; ++i) {
if(mux_ports[i].port.mapbase == dev->hpa.start + MUX_OFFSET)
break;
}
BUG_ON(i + port_count > port_cnt);
/* Release the resources associated with each port on the device. */
for(j = 0; j < port_count; ++j, ++i) {
struct uart_port *port = &mux_ports[i].port;
uart_remove_one_port(&mux_driver, port);
if(port->membase)
iounmap(port->membase);
}
release_mem_region(dev->hpa.start + MUX_OFFSET, port_count * MUX_LINE_OFFSET);
}
/* Hack. This idea was taken from 8250_gsc.c, on how to order the
 * serial port detection properly. The idea is that we always
 * want the builtin mux to be detected before add-in mux cards, so we
* specifically probe for the builtin mux cards first.
*
* This table only contains the parisc_device_id of known builtin mux
* devices. All other mux cards will be detected by the generic mux_tbl.
*/
static const struct parisc_device_id builtin_mux_tbl[] __initconst = {
{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x15, 0x0000D }, /* All K-class */
{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x44, 0x0000D }, /* E35, E45, and E55 */
{ 0, }
};
static const struct parisc_device_id mux_tbl[] __initconst = {
{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0000D },
{ 0, }
};
MODULE_DEVICE_TABLE(parisc, builtin_mux_tbl);
MODULE_DEVICE_TABLE(parisc, mux_tbl);
static struct parisc_driver builtin_serial_mux_driver __refdata = {
.name = "builtin_serial_mux",
.id_table = builtin_mux_tbl,
.probe = mux_probe,
.remove = __exit_p(mux_remove),
};
static struct parisc_driver serial_mux_driver __refdata = {
.name = "serial_mux",
.id_table = mux_tbl,
.probe = mux_probe,
.remove = __exit_p(mux_remove),
};
/**
* mux_init - Serial MUX initialization procedure.
*
* Register the Serial MUX driver.
*/
static int __init mux_init(void)
{
register_parisc_driver(&builtin_serial_mux_driver);
register_parisc_driver(&serial_mux_driver);
if(port_cnt > 0) {
/* Start the Mux timer */
timer_setup(&mux_timer, mux_poll, 0);
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
#ifdef CONFIG_SERIAL_MUX_CONSOLE
register_console(&mux_console);
#endif
}
return 0;
}
/**
* mux_exit - Serial MUX cleanup procedure.
*
* Unregister the Serial MUX driver from the tty layer.
*/
static void __exit mux_exit(void)
{
/* Delete the Mux timer. */
if(port_cnt > 0) {
del_timer_sync(&mux_timer);
#ifdef CONFIG_SERIAL_MUX_CONSOLE
unregister_console(&mux_console);
#endif
}
unregister_parisc_driver(&builtin_serial_mux_driver);
unregister_parisc_driver(&serial_mux_driver);
uart_unregister_driver(&mux_driver);
}
module_init(mux_init);
module_exit(mux_exit);
MODULE_AUTHOR("Ryan Bradetich");
MODULE_DESCRIPTION("Serial MUX driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(MUX_MAJOR);
| linux-master | drivers/tty/serial/mux.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs.
*
* FIXME According to the user manual the status bits in the status register
* are only updated when the peripherals access the FIFO and not when the
* CPU accesses them. So since we use these bits to know when to stop writing
* and reading, they may not be updated in time and a race condition may
* exist. But I haven't been able to prove this and I don't care. But if
* any problem arises, it might be worth checking. The TX/RX FIFO Stats
* registers should be used in addition.
* Update: Actually, they seem to be updated ... at least the bits we use.
*
*
* Maintainer : Sylvain Munaut <[email protected]>
*
* Some of the code has been inspired/copied from the 2.4 code written
* by Dale Farnsworth <[email protected]>.
*
* Copyright (C) 2008 Freescale Semiconductor Inc.
* John Rigby <[email protected]>
* Added support for MPC5121
* Copyright (C) 2006 Secret Lab Technologies Ltd.
* Grant Likely <[email protected]>
* Copyright (C) 2004-2006 Sylvain Munaut <[email protected]>
* Copyright (C) 2003 MontaVista, Software, Inc.
*/
#undef DEBUG
#include <linux/device.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/sysrq.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <asm/mpc52xx.h>
#include <asm/mpc52xx_psc.h>
#include <linux/serial_core.h>
/* We've been assigned a range on the "Low-density serial ports" major */
#define SERIAL_PSC_MAJOR 204
#define SERIAL_PSC_MINOR 148
#define ISR_PASS_LIMIT 256 /* Max number of iterations in the interrupt */
static struct uart_port mpc52xx_uart_ports[MPC52xx_PSC_MAXNUM];
/* Rem: - We use the read_status_mask as a shadow of
* psc->mpc52xx_psc_imr
* - It's important that this array is all zero on start, as we
* use it to know if it's initialized or not! If it's not certain
* it's cleared, then a memset(...,0,...) should be added to
* the console_init
*/
/* lookup table for matching device nodes to index numbers */
static struct device_node *mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM];
static void mpc52xx_uart_of_enumerate(void);
#define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase))
/* Forward declaration of the interrupt handling routine */
static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id);
static irqreturn_t mpc5xxx_uart_process_int(struct uart_port *port);
/* ======================================================================== */
/* PSC fifo operations for isolating differences between 52xx and 512x */
/* ======================================================================== */
struct psc_ops {
void (*fifo_init)(struct uart_port *port);
unsigned int (*raw_rx_rdy)(struct uart_port *port);
unsigned int (*raw_tx_rdy)(struct uart_port *port);
unsigned int (*rx_rdy)(struct uart_port *port);
unsigned int (*tx_rdy)(struct uart_port *port);
unsigned int (*tx_empty)(struct uart_port *port);
void (*stop_rx)(struct uart_port *port);
void (*start_tx)(struct uart_port *port);
void (*stop_tx)(struct uart_port *port);
void (*rx_clr_irq)(struct uart_port *port);
void (*tx_clr_irq)(struct uart_port *port);
void (*write_char)(struct uart_port *port, unsigned char c);
unsigned char (*read_char)(struct uart_port *port);
void (*cw_disable_ints)(struct uart_port *port);
void (*cw_restore_ints)(struct uart_port *port);
unsigned int (*set_baudrate)(struct uart_port *port,
struct ktermios *new,
const struct ktermios *old);
int (*clock_alloc)(struct uart_port *port);
void (*clock_relse)(struct uart_port *port);
int (*clock)(struct uart_port *port, int enable);
int (*fifoc_init)(void);
void (*fifoc_uninit)(void);
void (*get_irq)(struct uart_port *, struct device_node *);
irqreturn_t (*handle_irq)(struct uart_port *port);
u16 (*get_status)(struct uart_port *port);
u8 (*get_ipcr)(struct uart_port *port);
void (*command)(struct uart_port *port, u8 cmd);
void (*set_mode)(struct uart_port *port, u8 mr1, u8 mr2);
void (*set_rts)(struct uart_port *port, int state);
void (*enable_ms)(struct uart_port *port);
void (*set_sicr)(struct uart_port *port, u32 val);
void (*set_imr)(struct uart_port *port, u16 val);
u8 (*get_mr1)(struct uart_port *port);
};
/* setting the prescaler and divisor reg is common for all chips */
static inline void mpc52xx_set_divisor(struct mpc52xx_psc __iomem *psc,
u16 prescaler, unsigned int divisor)
{
/* select prescaler */
out_be16(&psc->mpc52xx_psc_clock_select, prescaler);
out_8(&psc->ctur, divisor >> 8);
out_8(&psc->ctlr, divisor & 0xff);
}
static u16 mpc52xx_psc_get_status(struct uart_port *port)
{
return in_be16(&PSC(port)->mpc52xx_psc_status);
}
static u8 mpc52xx_psc_get_ipcr(struct uart_port *port)
{
return in_8(&PSC(port)->mpc52xx_psc_ipcr);
}
static void mpc52xx_psc_command(struct uart_port *port, u8 cmd)
{
out_8(&PSC(port)->command, cmd);
}
static void mpc52xx_psc_set_mode(struct uart_port *port, u8 mr1, u8 mr2)
{
out_8(&PSC(port)->command, MPC52xx_PSC_SEL_MODE_REG_1);
out_8(&PSC(port)->mode, mr1);
out_8(&PSC(port)->mode, mr2);
}
static void mpc52xx_psc_set_rts(struct uart_port *port, int state)
{
if (state)
out_8(&PSC(port)->op1, MPC52xx_PSC_OP_RTS);
else
out_8(&PSC(port)->op0, MPC52xx_PSC_OP_RTS);
}
static void mpc52xx_psc_enable_ms(struct uart_port *port)
{
struct mpc52xx_psc __iomem *psc = PSC(port);
/* clear D_*-bits by reading them */
in_8(&psc->mpc52xx_psc_ipcr);
/* enable CTS and DCD as IPC interrupts */
out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD);
port->read_status_mask |= MPC52xx_PSC_IMR_IPC;
out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
}
static void mpc52xx_psc_set_sicr(struct uart_port *port, u32 val)
{
out_be32(&PSC(port)->sicr, val);
}
static void mpc52xx_psc_set_imr(struct uart_port *port, u16 val)
{
out_be16(&PSC(port)->mpc52xx_psc_imr, val);
}
static u8 mpc52xx_psc_get_mr1(struct uart_port *port)
{
out_8(&PSC(port)->command, MPC52xx_PSC_SEL_MODE_REG_1);
return in_8(&PSC(port)->mode);
}
#ifdef CONFIG_PPC_MPC52xx
#define FIFO_52xx(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1))
static void mpc52xx_psc_fifo_init(struct uart_port *port)
{
struct mpc52xx_psc __iomem *psc = PSC(port);
struct mpc52xx_psc_fifo __iomem *fifo = FIFO_52xx(port);
out_8(&fifo->rfcntl, 0x00);
out_be16(&fifo->rfalarm, 0x1ff);
out_8(&fifo->tfcntl, 0x07);
out_be16(&fifo->tfalarm, 0x80);
port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
}
static unsigned int mpc52xx_psc_raw_rx_rdy(struct uart_port *port)
{
return in_be16(&PSC(port)->mpc52xx_psc_status)
& MPC52xx_PSC_SR_RXRDY;
}
static unsigned int mpc52xx_psc_raw_tx_rdy(struct uart_port *port)
{
return in_be16(&PSC(port)->mpc52xx_psc_status)
& MPC52xx_PSC_SR_TXRDY;
}
static unsigned int mpc52xx_psc_rx_rdy(struct uart_port *port)
{
return in_be16(&PSC(port)->mpc52xx_psc_isr)
& port->read_status_mask
& MPC52xx_PSC_IMR_RXRDY;
}
static unsigned int mpc52xx_psc_tx_rdy(struct uart_port *port)
{
return in_be16(&PSC(port)->mpc52xx_psc_isr)
& port->read_status_mask
& MPC52xx_PSC_IMR_TXRDY;
}
static unsigned int mpc52xx_psc_tx_empty(struct uart_port *port)
{
u16 sts = in_be16(&PSC(port)->mpc52xx_psc_status);
return (sts & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0;
}
static void mpc52xx_psc_start_tx(struct uart_port *port)
{
port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
}
static void mpc52xx_psc_stop_tx(struct uart_port *port)
{
port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY;
out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
}
static void mpc52xx_psc_stop_rx(struct uart_port *port)
{
port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY;
out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
}
static void mpc52xx_psc_rx_clr_irq(struct uart_port *port)
{
}
static void mpc52xx_psc_tx_clr_irq(struct uart_port *port)
{
}
static void mpc52xx_psc_write_char(struct uart_port *port, unsigned char c)
{
out_8(&PSC(port)->mpc52xx_psc_buffer_8, c);
}
static unsigned char mpc52xx_psc_read_char(struct uart_port *port)
{
return in_8(&PSC(port)->mpc52xx_psc_buffer_8);
}
static void mpc52xx_psc_cw_disable_ints(struct uart_port *port)
{
out_be16(&PSC(port)->mpc52xx_psc_imr, 0);
}
static void mpc52xx_psc_cw_restore_ints(struct uart_port *port)
{
out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
}
static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
/* The 5200 has a fixed /32 prescaler, uartclk contains the ipb freq */
baud = uart_get_baud_rate(port, new, old,
port->uartclk / (32 * 0xffff) + 1,
port->uartclk / 32);
divisor = (port->uartclk + 16 * baud) / (32 * baud);
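/*
 * Adding 16 * baud before dividing rounds to the nearest divisor.
 * Worked example (illustrative clock figure): a 132 MHz IPB clock at
 * 115200 baud gives divisor = 133843200 / 3686400 = 36.
 */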
/* enable the /32 prescaler and set the divisor */
mpc52xx_set_divisor(PSC(port), 0xdd00, divisor);
return baud;
}
static unsigned int mpc5200b_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
u16 prescaler;
/* The 5200B has a selectable /4 or /32 prescaler, uartclk contains the
* ipb freq */
baud = uart_get_baud_rate(port, new, old,
port->uartclk / (32 * 0xffff) + 1,
port->uartclk / 4);
divisor = (port->uartclk + 2 * baud) / (4 * baud);
/* Select the proper prescaler and set the divisor;
 * prefer the high prescaler for more tolerance at low baud rates. */
if (divisor > 0xffff || baud <= 115200) {
divisor = (divisor + 4) / 8;
prescaler = 0xdd00; /* /32 */
} else
prescaler = 0xff00; /* /4 */
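/*
 * Note: the divisor computed above assumed the /4 prescaler; dividing
 * it by 8 (with rounding) in the branch above converts it to the
 * equivalent /32 divisor.
 */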
mpc52xx_set_divisor(PSC(port), prescaler, divisor);
return baud;
}
static void mpc52xx_psc_get_irq(struct uart_port *port, struct device_node *np)
{
port->irqflags = 0;
port->irq = irq_of_parse_and_map(np, 0);
}
/* 52xx specific interrupt handler. The caller holds the port lock */
static irqreturn_t mpc52xx_psc_handle_irq(struct uart_port *port)
{
return mpc5xxx_uart_process_int(port);
}
static const struct psc_ops mpc52xx_psc_ops = {
.fifo_init = mpc52xx_psc_fifo_init,
.raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
.raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
.rx_rdy = mpc52xx_psc_rx_rdy,
.tx_rdy = mpc52xx_psc_tx_rdy,
.tx_empty = mpc52xx_psc_tx_empty,
.stop_rx = mpc52xx_psc_stop_rx,
.start_tx = mpc52xx_psc_start_tx,
.stop_tx = mpc52xx_psc_stop_tx,
.rx_clr_irq = mpc52xx_psc_rx_clr_irq,
.tx_clr_irq = mpc52xx_psc_tx_clr_irq,
.write_char = mpc52xx_psc_write_char,
.read_char = mpc52xx_psc_read_char,
.cw_disable_ints = mpc52xx_psc_cw_disable_ints,
.cw_restore_ints = mpc52xx_psc_cw_restore_ints,
.set_baudrate = mpc5200_psc_set_baudrate,
.get_irq = mpc52xx_psc_get_irq,
.handle_irq = mpc52xx_psc_handle_irq,
.get_status = mpc52xx_psc_get_status,
.get_ipcr = mpc52xx_psc_get_ipcr,
.command = mpc52xx_psc_command,
.set_mode = mpc52xx_psc_set_mode,
.set_rts = mpc52xx_psc_set_rts,
.enable_ms = mpc52xx_psc_enable_ms,
.set_sicr = mpc52xx_psc_set_sicr,
.set_imr = mpc52xx_psc_set_imr,
.get_mr1 = mpc52xx_psc_get_mr1,
};
static const struct psc_ops mpc5200b_psc_ops = {
.fifo_init = mpc52xx_psc_fifo_init,
.raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
.raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
.rx_rdy = mpc52xx_psc_rx_rdy,
.tx_rdy = mpc52xx_psc_tx_rdy,
.tx_empty = mpc52xx_psc_tx_empty,
.stop_rx = mpc52xx_psc_stop_rx,
.start_tx = mpc52xx_psc_start_tx,
.stop_tx = mpc52xx_psc_stop_tx,
.rx_clr_irq = mpc52xx_psc_rx_clr_irq,
.tx_clr_irq = mpc52xx_psc_tx_clr_irq,
.write_char = mpc52xx_psc_write_char,
.read_char = mpc52xx_psc_read_char,
.cw_disable_ints = mpc52xx_psc_cw_disable_ints,
.cw_restore_ints = mpc52xx_psc_cw_restore_ints,
.set_baudrate = mpc5200b_psc_set_baudrate,
.get_irq = mpc52xx_psc_get_irq,
.handle_irq = mpc52xx_psc_handle_irq,
.get_status = mpc52xx_psc_get_status,
.get_ipcr = mpc52xx_psc_get_ipcr,
.command = mpc52xx_psc_command,
.set_mode = mpc52xx_psc_set_mode,
.set_rts = mpc52xx_psc_set_rts,
.enable_ms = mpc52xx_psc_enable_ms,
.set_sicr = mpc52xx_psc_set_sicr,
.set_imr = mpc52xx_psc_set_imr,
.get_mr1 = mpc52xx_psc_get_mr1,
};
#endif /* CONFIG_PPC_MPC52xx */
#ifdef CONFIG_PPC_MPC512x
#define FIFO_512x(port) ((struct mpc512x_psc_fifo __iomem *)(PSC(port)+1))
/* PSC FIFO Controller for mpc512x */
struct psc_fifoc {
u32 fifoc_cmd;
u32 fifoc_int;
u32 fifoc_dma;
u32 fifoc_axe;
u32 fifoc_debug;
};
static struct psc_fifoc __iomem *psc_fifoc;
static unsigned int psc_fifoc_irq;
static struct clk *psc_fifoc_clk;
static void mpc512x_psc_fifo_init(struct uart_port *port)
{
/* /32 prescaler */
out_be16(&PSC(port)->mpc52xx_psc_clock_select, 0xdd00);
out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE);
out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
out_be32(&FIFO_512x(port)->txalarm, 1);
out_be32(&FIFO_512x(port)->tximr, 0);
out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_RESET_SLICE);
out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
out_be32(&FIFO_512x(port)->rxalarm, 1);
out_be32(&FIFO_512x(port)->rximr, 0);
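/* both slices are now reset with an alarm level of 1; unmask the alarm interrupts */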
out_be32(&FIFO_512x(port)->tximr, MPC512x_PSC_FIFO_ALARM);
out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM);
}
static unsigned int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_512x(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
}
static unsigned int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL);
}
static unsigned int mpc512x_psc_rx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_512x(port)->rxsr)
& in_be32(&FIFO_512x(port)->rximr)
& MPC512x_PSC_FIFO_ALARM;
}
static unsigned int mpc512x_psc_tx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_512x(port)->txsr)
& in_be32(&FIFO_512x(port)->tximr)
& MPC512x_PSC_FIFO_ALARM;
}
static unsigned int mpc512x_psc_tx_empty(struct uart_port *port)
{
return in_be32(&FIFO_512x(port)->txsr)
& MPC512x_PSC_FIFO_EMPTY;
}
static void mpc512x_psc_stop_rx(struct uart_port *port)
{
unsigned long rx_fifo_imr;
rx_fifo_imr = in_be32(&FIFO_512x(port)->rximr);
rx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
out_be32(&FIFO_512x(port)->rximr, rx_fifo_imr);
}
static void mpc512x_psc_start_tx(struct uart_port *port)
{
unsigned long tx_fifo_imr;
tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr);
tx_fifo_imr |= MPC512x_PSC_FIFO_ALARM;
out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr);
}
static void mpc512x_psc_stop_tx(struct uart_port *port)
{
unsigned long tx_fifo_imr;
tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr);
tx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr);
}
static void mpc512x_psc_rx_clr_irq(struct uart_port *port)
{
out_be32(&FIFO_512x(port)->rxisr, in_be32(&FIFO_512x(port)->rxisr));
}
static void mpc512x_psc_tx_clr_irq(struct uart_port *port)
{
out_be32(&FIFO_512x(port)->txisr, in_be32(&FIFO_512x(port)->txisr));
}
static void mpc512x_psc_write_char(struct uart_port *port, unsigned char c)
{
out_8(&FIFO_512x(port)->txdata_8, c);
}
static unsigned char mpc512x_psc_read_char(struct uart_port *port)
{
return in_8(&FIFO_512x(port)->rxdata_8);
}
static void mpc512x_psc_cw_disable_ints(struct uart_port *port)
{
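/*
 * Stash both FIFO interrupt masks in read_status_mask (TX in the upper
 * 16 bits) so that cw_restore_ints() can put them back after the
 * console write.
 */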
port->read_status_mask =
in_be32(&FIFO_512x(port)->tximr) << 16 |
in_be32(&FIFO_512x(port)->rximr);
out_be32(&FIFO_512x(port)->tximr, 0);
out_be32(&FIFO_512x(port)->rximr, 0);
}
static void mpc512x_psc_cw_restore_ints(struct uart_port *port)
{
out_be32(&FIFO_512x(port)->tximr,
(port->read_status_mask >> 16) & 0x7f);
out_be32(&FIFO_512x(port)->rximr, port->read_status_mask & 0x7f);
}
static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
/*
 * The "MPC5121e Microcontroller Reference Manual, Rev. 3" says on
 * pg. 30-10 that the chip supports a /32 and a /10 prescaler.
 * Furthermore, it states that "After reset, the prescaler by 10
 * for the UART mode is selected", but the reset register value is
 * 0x0000, which means a /32 prescaler. This is wrong.
 *
 * In reality the /32 prescaler does not work, as it is not supported!
 * Use the /16 or /10 prescaler instead; see the "MPC5121e Hardware
 * Design Guide", Chapter 4.1 "PSC in UART Mode".
 * Calculate with a /16 prescaler here.
 */
/* uartclk contains the ips freq */
baud = uart_get_baud_rate(port, new, old,
port->uartclk / (16 * 0xffff) + 1,
port->uartclk / 16);
divisor = (port->uartclk + 8 * baud) / (16 * baud);
/* enable the /16 prescaler and set the divisor */
mpc52xx_set_divisor(PSC(port), 0xdd00, divisor);
return baud;
}
/* Init PSC FIFO Controller */
static int __init mpc512x_psc_fifoc_init(void)
{
int err;
struct device_node *np;
struct clk *clk;
/* default error code, potentially overwritten by clock calls */
err = -ENODEV;
np = of_find_compatible_node(NULL, NULL,
"fsl,mpc5121-psc-fifo");
if (!np) {
pr_err("%s: Can't find FIFOC node\n", __func__);
goto out_err;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
/* backwards compat with device trees that lack clock specs */
clk = clk_get_sys(np->name, "ipg");
}
if (IS_ERR(clk)) {
pr_err("%s: Can't lookup FIFO clock\n", __func__);
err = PTR_ERR(clk);
goto out_ofnode_put;
}
if (clk_prepare_enable(clk)) {
pr_err("%s: Can't enable FIFO clock\n", __func__);
clk_put(clk);
goto out_ofnode_put;
}
psc_fifoc_clk = clk;
psc_fifoc = of_iomap(np, 0);
if (!psc_fifoc) {
pr_err("%s: Can't map FIFOC\n", __func__);
goto out_clk_disable;
}
psc_fifoc_irq = irq_of_parse_and_map(np, 0);
if (psc_fifoc_irq == 0) {
pr_err("%s: Can't get FIFOC irq\n", __func__);
goto out_unmap;
}
of_node_put(np);
return 0;
out_unmap:
iounmap(psc_fifoc);
out_clk_disable:
clk_disable_unprepare(psc_fifoc_clk);
clk_put(psc_fifoc_clk);
out_ofnode_put:
of_node_put(np);
out_err:
return err;
}
static void __exit mpc512x_psc_fifoc_uninit(void)
{
iounmap(psc_fifoc);
/* disable the clock, errors are not fatal */
if (psc_fifoc_clk) {
clk_disable_unprepare(psc_fifoc_clk);
clk_put(psc_fifoc_clk);
psc_fifoc_clk = NULL;
}
}
/* 512x specific interrupt handler. The caller holds the port lock */
static irqreturn_t mpc512x_psc_handle_irq(struct uart_port *port)
{
unsigned long fifoc_int;
int psc_num;
/* Read pending PSC FIFOC interrupts */
fifoc_int = in_be32(&psc_fifoc->fifoc_int);
/* Check if it is an interrupt for this port */
psc_num = (port->mapbase & 0xf00) >> 8;
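/* the low and high halves of the register carry per-PSC bits, presumably one per FIFO slice (RX/TX) */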
if (test_bit(psc_num, &fifoc_int) ||
test_bit(psc_num + 16, &fifoc_int))
return mpc5xxx_uart_process_int(port);
return IRQ_NONE;
}
static struct clk *psc_mclk_clk[MPC52xx_PSC_MAXNUM];
static struct clk *psc_ipg_clk[MPC52xx_PSC_MAXNUM];
/* called from within the .request_port() callback (allocation) */
static int mpc512x_psc_alloc_clock(struct uart_port *port)
{
int psc_num;
struct clk *clk;
int err;
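/* PSC register blocks are spaced 0x100 apart, so bits 8..11 of the base address identify the controller */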
psc_num = (port->mapbase & 0xf00) >> 8;
clk = devm_clk_get(port->dev, "mclk");
if (IS_ERR(clk)) {
dev_err(port->dev, "Failed to get MCLK!\n");
err = PTR_ERR(clk);
goto out_err;
}
err = clk_prepare_enable(clk);
if (err) {
dev_err(port->dev, "Failed to enable MCLK!\n");
goto out_err;
}
psc_mclk_clk[psc_num] = clk;
clk = devm_clk_get(port->dev, "ipg");
if (IS_ERR(clk)) {
dev_err(port->dev, "Failed to get IPG clock!\n");
err = PTR_ERR(clk);
goto out_err;
}
err = clk_prepare_enable(clk);
if (err) {
dev_err(port->dev, "Failed to enable IPG clock!\n");
goto out_err;
}
psc_ipg_clk[psc_num] = clk;
return 0;
out_err:
if (psc_mclk_clk[psc_num]) {
clk_disable_unprepare(psc_mclk_clk[psc_num]);
psc_mclk_clk[psc_num] = NULL;
}
if (psc_ipg_clk[psc_num]) {
clk_disable_unprepare(psc_ipg_clk[psc_num]);
psc_ipg_clk[psc_num] = NULL;
}
return err;
}
/* called from within the .release_port() callback (release) */
static void mpc512x_psc_relse_clock(struct uart_port *port)
{
int psc_num;
struct clk *clk;
psc_num = (port->mapbase & 0xf00) >> 8;
clk = psc_mclk_clk[psc_num];
if (clk) {
clk_disable_unprepare(clk);
psc_mclk_clk[psc_num] = NULL;
}
if (psc_ipg_clk[psc_num]) {
clk_disable_unprepare(psc_ipg_clk[psc_num]);
psc_ipg_clk[psc_num] = NULL;
}
}
/* implementation of the .clock() callback (enable/disable) */
static int mpc512x_psc_endis_clock(struct uart_port *port, int enable)
{
int psc_num;
struct clk *psc_clk;
int ret;
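/* leave the console port's clock permanently enabled; the console may print at any time */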
if (uart_console(port))
return 0;
psc_num = (port->mapbase & 0xf00) >> 8;
psc_clk = psc_mclk_clk[psc_num];
if (!psc_clk) {
dev_err(port->dev, "Failed to get PSC clock entry!\n");
return -ENODEV;
}
dev_dbg(port->dev, "mclk %sable\n", enable ? "en" : "dis");
if (enable) {
ret = clk_enable(psc_clk);
if (ret)
dev_err(port->dev, "Failed to enable MCLK!\n");
return ret;
} else {
clk_disable(psc_clk);
return 0;
}
}
static void mpc512x_psc_get_irq(struct uart_port *port, struct device_node *np)
{
port->irqflags = IRQF_SHARED;
port->irq = psc_fifoc_irq;
}
#define PSC_5125(port) ((struct mpc5125_psc __iomem *)((port)->membase))
#define FIFO_5125(port) ((struct mpc512x_psc_fifo __iomem *)(PSC_5125(port)+1))
static void mpc5125_psc_fifo_init(struct uart_port *port)
{
/* /32 prescaler */
out_8(&PSC_5125(port)->mpc52xx_psc_clock_select, 0xdd);
out_be32(&FIFO_5125(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE);
out_be32(&FIFO_5125(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
out_be32(&FIFO_5125(port)->txalarm, 1);
out_be32(&FIFO_5125(port)->tximr, 0);
out_be32(&FIFO_5125(port)->rxcmd, MPC512x_PSC_FIFO_RESET_SLICE);
out_be32(&FIFO_5125(port)->rxcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
out_be32(&FIFO_5125(port)->rxalarm, 1);
out_be32(&FIFO_5125(port)->rximr, 0);
out_be32(&FIFO_5125(port)->tximr, MPC512x_PSC_FIFO_ALARM);
out_be32(&FIFO_5125(port)->rximr, MPC512x_PSC_FIFO_ALARM);
}
static unsigned int mpc5125_psc_raw_rx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_5125(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
}
static unsigned int mpc5125_psc_raw_tx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_FULL);
}
static unsigned int mpc5125_psc_rx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_5125(port)->rxsr) &
in_be32(&FIFO_5125(port)->rximr) & MPC512x_PSC_FIFO_ALARM;
}
static unsigned int mpc5125_psc_tx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_5125(port)->txsr) &
in_be32(&FIFO_5125(port)->tximr) & MPC512x_PSC_FIFO_ALARM;
}
static unsigned int mpc5125_psc_tx_empty(struct uart_port *port)
{
return in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_EMPTY;
}
static void mpc5125_psc_stop_rx(struct uart_port *port)
{
unsigned long rx_fifo_imr;
rx_fifo_imr = in_be32(&FIFO_5125(port)->rximr);
rx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
out_be32(&FIFO_5125(port)->rximr, rx_fifo_imr);
}
static void mpc5125_psc_start_tx(struct uart_port *port)
{
unsigned long tx_fifo_imr;
tx_fifo_imr = in_be32(&FIFO_5125(port)->tximr);
tx_fifo_imr |= MPC512x_PSC_FIFO_ALARM;
out_be32(&FIFO_5125(port)->tximr, tx_fifo_imr);
}
static void mpc5125_psc_stop_tx(struct uart_port *port)
{
unsigned long tx_fifo_imr;
tx_fifo_imr = in_be32(&FIFO_5125(port)->tximr);
tx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
out_be32(&FIFO_5125(port)->tximr, tx_fifo_imr);
}
static void mpc5125_psc_rx_clr_irq(struct uart_port *port)
{
out_be32(&FIFO_5125(port)->rxisr, in_be32(&FIFO_5125(port)->rxisr));
}
static void mpc5125_psc_tx_clr_irq(struct uart_port *port)
{
out_be32(&FIFO_5125(port)->txisr, in_be32(&FIFO_5125(port)->txisr));
}
static void mpc5125_psc_write_char(struct uart_port *port, unsigned char c)
{
out_8(&FIFO_5125(port)->txdata_8, c);
}
static unsigned char mpc5125_psc_read_char(struct uart_port *port)
{
return in_8(&FIFO_5125(port)->rxdata_8);
}
static void mpc5125_psc_cw_disable_ints(struct uart_port *port)
{
port->read_status_mask =
in_be32(&FIFO_5125(port)->tximr) << 16 |
in_be32(&FIFO_5125(port)->rximr);
out_be32(&FIFO_5125(port)->tximr, 0);
out_be32(&FIFO_5125(port)->rximr, 0);
}
static void mpc5125_psc_cw_restore_ints(struct uart_port *port)
{
out_be32(&FIFO_5125(port)->tximr,
(port->read_status_mask >> 16) & 0x7f);
out_be32(&FIFO_5125(port)->rximr, port->read_status_mask & 0x7f);
}
static inline void mpc5125_set_divisor(struct mpc5125_psc __iomem *psc,
u8 prescaler, unsigned int divisor)
{
/* select prescaler */
out_8(&psc->mpc52xx_psc_clock_select, prescaler);
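/* the 16-bit divisor is split across the CTUR (high byte) and CTLR (low byte) registers */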
out_8(&psc->ctur, divisor >> 8);
out_8(&psc->ctlr, divisor & 0xff);
}
static unsigned int mpc5125_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
/*
* Calculate with a /16 prescaler here.
*/
/* uartclk contains the ips freq */
baud = uart_get_baud_rate(port, new, old,
port->uartclk / (16 * 0xffff) + 1,
port->uartclk / 16);
divisor = (port->uartclk + 8 * baud) / (16 * baud);
/* enable the /16 prescaler and set the divisor */
mpc5125_set_divisor(PSC_5125(port), 0xdd, divisor);
return baud;
}
/*
 * The MPC5125 has a compatible PSC FIFO controller.
 * No special init is needed.
 */
static u16 mpc5125_psc_get_status(struct uart_port *port)
{
return in_be16(&PSC_5125(port)->mpc52xx_psc_status);
}
static u8 mpc5125_psc_get_ipcr(struct uart_port *port)
{
return in_8(&PSC_5125(port)->mpc52xx_psc_ipcr);
}
static void mpc5125_psc_command(struct uart_port *port, u8 cmd)
{
out_8(&PSC_5125(port)->command, cmd);
}
static void mpc5125_psc_set_mode(struct uart_port *port, u8 mr1, u8 mr2)
{
out_8(&PSC_5125(port)->mr1, mr1);
out_8(&PSC_5125(port)->mr2, mr2);
}
static void mpc5125_psc_set_rts(struct uart_port *port, int state)
{
if (state & TIOCM_RTS)
out_8(&PSC_5125(port)->op1, MPC52xx_PSC_OP_RTS);
else
out_8(&PSC_5125(port)->op0, MPC52xx_PSC_OP_RTS);
}
static void mpc5125_psc_enable_ms(struct uart_port *port)
{
struct mpc5125_psc __iomem *psc = PSC_5125(port);
/* clear D_*-bits by reading them */
in_8(&psc->mpc52xx_psc_ipcr);
/* enable CTS and DCD as IPC interrupts */
out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD);
port->read_status_mask |= MPC52xx_PSC_IMR_IPC;
out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
}
static void mpc5125_psc_set_sicr(struct uart_port *port, u32 val)
{
out_be32(&PSC_5125(port)->sicr, val);
}
static void mpc5125_psc_set_imr(struct uart_port *port, u16 val)
{
out_be16(&PSC_5125(port)->mpc52xx_psc_imr, val);
}
static u8 mpc5125_psc_get_mr1(struct uart_port *port)
{
return in_8(&PSC_5125(port)->mr1);
}
static const struct psc_ops mpc5125_psc_ops = {
.fifo_init = mpc5125_psc_fifo_init,
.raw_rx_rdy = mpc5125_psc_raw_rx_rdy,
.raw_tx_rdy = mpc5125_psc_raw_tx_rdy,
.rx_rdy = mpc5125_psc_rx_rdy,
.tx_rdy = mpc5125_psc_tx_rdy,
.tx_empty = mpc5125_psc_tx_empty,
.stop_rx = mpc5125_psc_stop_rx,
.start_tx = mpc5125_psc_start_tx,
.stop_tx = mpc5125_psc_stop_tx,
.rx_clr_irq = mpc5125_psc_rx_clr_irq,
.tx_clr_irq = mpc5125_psc_tx_clr_irq,
.write_char = mpc5125_psc_write_char,
.read_char = mpc5125_psc_read_char,
.cw_disable_ints = mpc5125_psc_cw_disable_ints,
.cw_restore_ints = mpc5125_psc_cw_restore_ints,
.set_baudrate = mpc5125_psc_set_baudrate,
.clock_alloc = mpc512x_psc_alloc_clock,
.clock_relse = mpc512x_psc_relse_clock,
.clock = mpc512x_psc_endis_clock,
.fifoc_init = mpc512x_psc_fifoc_init,
.fifoc_uninit = mpc512x_psc_fifoc_uninit,
.get_irq = mpc512x_psc_get_irq,
.handle_irq = mpc512x_psc_handle_irq,
.get_status = mpc5125_psc_get_status,
.get_ipcr = mpc5125_psc_get_ipcr,
.command = mpc5125_psc_command,
.set_mode = mpc5125_psc_set_mode,
.set_rts = mpc5125_psc_set_rts,
.enable_ms = mpc5125_psc_enable_ms,
.set_sicr = mpc5125_psc_set_sicr,
.set_imr = mpc5125_psc_set_imr,
.get_mr1 = mpc5125_psc_get_mr1,
};
static const struct psc_ops mpc512x_psc_ops = {
.fifo_init = mpc512x_psc_fifo_init,
.raw_rx_rdy = mpc512x_psc_raw_rx_rdy,
.raw_tx_rdy = mpc512x_psc_raw_tx_rdy,
.rx_rdy = mpc512x_psc_rx_rdy,
.tx_rdy = mpc512x_psc_tx_rdy,
.tx_empty = mpc512x_psc_tx_empty,
.stop_rx = mpc512x_psc_stop_rx,
.start_tx = mpc512x_psc_start_tx,
.stop_tx = mpc512x_psc_stop_tx,
.rx_clr_irq = mpc512x_psc_rx_clr_irq,
.tx_clr_irq = mpc512x_psc_tx_clr_irq,
.write_char = mpc512x_psc_write_char,
.read_char = mpc512x_psc_read_char,
.cw_disable_ints = mpc512x_psc_cw_disable_ints,
.cw_restore_ints = mpc512x_psc_cw_restore_ints,
.set_baudrate = mpc512x_psc_set_baudrate,
.clock_alloc = mpc512x_psc_alloc_clock,
.clock_relse = mpc512x_psc_relse_clock,
.clock = mpc512x_psc_endis_clock,
.fifoc_init = mpc512x_psc_fifoc_init,
.fifoc_uninit = mpc512x_psc_fifoc_uninit,
.get_irq = mpc512x_psc_get_irq,
.handle_irq = mpc512x_psc_handle_irq,
.get_status = mpc52xx_psc_get_status,
.get_ipcr = mpc52xx_psc_get_ipcr,
.command = mpc52xx_psc_command,
.set_mode = mpc52xx_psc_set_mode,
.set_rts = mpc52xx_psc_set_rts,
.enable_ms = mpc52xx_psc_enable_ms,
.set_sicr = mpc52xx_psc_set_sicr,
.set_imr = mpc52xx_psc_set_imr,
.get_mr1 = mpc52xx_psc_get_mr1,
};
#endif /* CONFIG_PPC_MPC512x */
static const struct psc_ops *psc_ops;
/* ======================================================================== */
/* UART operations */
/* ======================================================================== */
static unsigned int
mpc52xx_uart_tx_empty(struct uart_port *port)
{
return psc_ops->tx_empty(port) ? TIOCSER_TEMT : 0;
}
static void
mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
psc_ops->set_rts(port, mctrl & TIOCM_RTS);
}
static unsigned int
mpc52xx_uart_get_mctrl(struct uart_port *port)
{
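/* CTS and DCD are active-low in the IPCR; DSR is reported as permanently asserted */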
unsigned int ret = TIOCM_DSR;
u8 status = psc_ops->get_ipcr(port);
if (!(status & MPC52xx_PSC_CTS))
ret |= TIOCM_CTS;
if (!(status & MPC52xx_PSC_DCD))
ret |= TIOCM_CAR;
return ret;
}
static void
mpc52xx_uart_stop_tx(struct uart_port *port)
{
/* port->lock taken by caller */
psc_ops->stop_tx(port);
}
static void
mpc52xx_uart_start_tx(struct uart_port *port)
{
/* port->lock taken by caller */
psc_ops->start_tx(port);
}
static void
mpc52xx_uart_stop_rx(struct uart_port *port)
{
/* port->lock taken by caller */
psc_ops->stop_rx(port);
}
static void
mpc52xx_uart_enable_ms(struct uart_port *port)
{
psc_ops->enable_ms(port);
}
static void
mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (ctl == -1)
psc_ops->command(port, MPC52xx_PSC_START_BRK);
else
psc_ops->command(port, MPC52xx_PSC_STOP_BRK);
spin_unlock_irqrestore(&port->lock, flags);
}
static int
mpc52xx_uart_startup(struct uart_port *port)
{
int ret;
if (psc_ops->clock) {
ret = psc_ops->clock(port, 1);
if (ret)
return ret;
}
/* Request IRQ */
ret = request_irq(port->irq, mpc52xx_uart_int,
port->irqflags, "mpc52xx_psc_uart", port);
if (ret)
return ret;
/* Reset/activate the port, clear and enable interrupts */
psc_ops->command(port, MPC52xx_PSC_RST_RX);
psc_ops->command(port, MPC52xx_PSC_RST_TX);
/*
 * According to Freescale support, the RST_TX command can produce a
 * spike on the TX pin, so they recommend delaying "for one character".
 * One millisecond should be enough for everyone.
 */
msleep(1);
psc_ops->set_sicr(port, 0); /* UART mode DCD ignored */
psc_ops->fifo_init(port);
psc_ops->command(port, MPC52xx_PSC_TX_ENABLE);
psc_ops->command(port, MPC52xx_PSC_RX_ENABLE);
return 0;
}
static void
mpc52xx_uart_shutdown(struct uart_port *port)
{
/* Shut down the port. Leave TX active if on a console port */
psc_ops->command(port, MPC52xx_PSC_RST_RX);
if (!uart_console(port))
psc_ops->command(port, MPC52xx_PSC_RST_TX);
port->read_status_mask = 0;
psc_ops->set_imr(port, port->read_status_mask);
if (psc_ops->clock)
psc_ops->clock(port, 0);
/* Disable interrupt */
psc_ops->cw_disable_ints(port);
/* Release interrupt */
free_irq(port->irq, port);
}
static void
mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
const struct ktermios *old)
{
unsigned long flags;
unsigned char mr1, mr2;
unsigned int j;
unsigned int baud;
/* Prepare the mode register values we're going to write */
mr1 = 0;
switch (new->c_cflag & CSIZE) {
case CS5: mr1 |= MPC52xx_PSC_MODE_5_BITS;
break;
case CS6: mr1 |= MPC52xx_PSC_MODE_6_BITS;
break;
case CS7: mr1 |= MPC52xx_PSC_MODE_7_BITS;
break;
case CS8:
default: mr1 |= MPC52xx_PSC_MODE_8_BITS;
}
if (new->c_cflag & PARENB) {
if (new->c_cflag & CMSPAR)
mr1 |= MPC52xx_PSC_MODE_PARFORCE;
/* With CMSPAR, PARODD also means high parity (same as termios) */
mr1 |= (new->c_cflag & PARODD) ?
MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN;
} else {
mr1 |= MPC52xx_PSC_MODE_PARNONE;
}
mr2 = 0;
if (new->c_cflag & CSTOPB)
mr2 |= MPC52xx_PSC_MODE_TWO_STOP;
else
mr2 |= ((new->c_cflag & CSIZE) == CS5) ?
MPC52xx_PSC_MODE_ONE_STOP_5_BITS :
MPC52xx_PSC_MODE_ONE_STOP;
if (new->c_cflag & CRTSCTS) {
mr1 |= MPC52xx_PSC_MODE_RXRTS;
mr2 |= MPC52xx_PSC_MODE_TXCTS;
}
/* Get the lock */
spin_lock_irqsave(&port->lock, flags);
/* Do our best to flush TX & RX, so we don't lose anything */
/* But we don't wait indefinitely! */
j = 5000000; /* Maximum wait */
/* FIXME: we can't receive chars here, since set_termios may be called at
 * early boot for the console; not everything is ready to receive at that
 * point, and trying anyway just makes the kernel oops */
/* while (j-- && mpc52xx_uart_int_rx_chars(port)); */
while (!mpc52xx_uart_tx_empty(port) && --j)
udelay(1);
if (!j)
printk(KERN_ERR "mpc52xx_uart.c: "
"Unable to flush RX & TX fifos in time in set_termios. "
"Some chars may have been lost.\n");
/* Reset the TX & RX */
psc_ops->command(port, MPC52xx_PSC_RST_RX);
psc_ops->command(port, MPC52xx_PSC_RST_TX);
/* Send new mode settings */
psc_ops->set_mode(port, mr1, mr2);
baud = psc_ops->set_baudrate(port, new, old);
/* Update the per-port timeout */
uart_update_timeout(port, new->c_cflag, baud);
if (UART_ENABLE_MS(port, new->c_cflag))
mpc52xx_uart_enable_ms(port);
/* Reenable TX & RX */
psc_ops->command(port, MPC52xx_PSC_TX_ENABLE);
psc_ops->command(port, MPC52xx_PSC_RX_ENABLE);
/* We're all set, release the lock */
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *
mpc52xx_uart_type(struct uart_port *port)
{
/*
 * We keep using PORT_MPC52xx for historical reasons although it
 * applies to the MPC512x too, but print "MPC5xxx" so as not to
 * confuse users
 */
return port->type == PORT_MPC52xx ? "MPC5xxx PSC" : NULL;
}
static void
mpc52xx_uart_release_port(struct uart_port *port)
{
if (psc_ops->clock_relse)
psc_ops->clock_relse(port);
/* Did we remap it ourselves? */
if (port->flags & UPF_IOREMAP) {
iounmap(port->membase);
port->membase = NULL;
}
release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
}
static int
mpc52xx_uart_request_port(struct uart_port *port)
{
int err;
if (port->flags & UPF_IOREMAP) /* Need to remap? */
port->membase = ioremap(port->mapbase,
sizeof(struct mpc52xx_psc));
if (!port->membase)
return -EINVAL;
err = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc),
"mpc52xx_psc_uart") != NULL ? 0 : -EBUSY;
if (err)
goto out_membase;
if (psc_ops->clock_alloc) {
err = psc_ops->clock_alloc(port);
if (err)
goto out_mapregion;
}
return 0;
out_mapregion:
release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
out_membase:
if (port->flags & UPF_IOREMAP) {
iounmap(port->membase);
port->membase = NULL;
}
return err;
}
static void
mpc52xx_uart_config_port(struct uart_port *port, int flags)
{
if ((flags & UART_CONFIG_TYPE)
&& (mpc52xx_uart_request_port(port) == 0))
port->type = PORT_MPC52xx;
}
static int
mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPC52xx)
return -EINVAL;
if ((ser->irq != port->irq) ||
(ser->io_type != UPIO_MEM) ||
(ser->baud_base != port->uartclk) ||
(ser->iomem_base != (void *)port->mapbase) ||
(ser->hub6 != 0))
return -EINVAL;
return 0;
}
static const struct uart_ops mpc52xx_uart_ops = {
.tx_empty = mpc52xx_uart_tx_empty,
.set_mctrl = mpc52xx_uart_set_mctrl,
.get_mctrl = mpc52xx_uart_get_mctrl,
.stop_tx = mpc52xx_uart_stop_tx,
.start_tx = mpc52xx_uart_start_tx,
.stop_rx = mpc52xx_uart_stop_rx,
.enable_ms = mpc52xx_uart_enable_ms,
.break_ctl = mpc52xx_uart_break_ctl,
.startup = mpc52xx_uart_startup,
.shutdown = mpc52xx_uart_shutdown,
.set_termios = mpc52xx_uart_set_termios,
/* .pm = mpc52xx_uart_pm, Not supported yet */
.type = mpc52xx_uart_type,
.release_port = mpc52xx_uart_release_port,
.request_port = mpc52xx_uart_request_port,
.config_port = mpc52xx_uart_config_port,
.verify_port = mpc52xx_uart_verify_port
};
/* ======================================================================== */
/* Interrupt handling */
/* ======================================================================== */
static inline bool
mpc52xx_uart_int_rx_chars(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
unsigned char ch, flag;
unsigned short status;
/* While there are chars to read, do so */
while (psc_ops->raw_rx_rdy(port)) {
/* Get the char */
ch = psc_ops->read_char(port);
/* Handle sysreq char */
if (uart_handle_sysrq_char(port, ch))
continue;
/* Store it */
flag = TTY_NORMAL;
port->icount.rx++;
status = psc_ops->get_status(port);
if (status & (MPC52xx_PSC_SR_PE |
MPC52xx_PSC_SR_FE |
MPC52xx_PSC_SR_RB)) {
if (status & MPC52xx_PSC_SR_RB) {
flag = TTY_BREAK;
uart_handle_break(port);
port->icount.brk++;
} else if (status & MPC52xx_PSC_SR_PE) {
flag = TTY_PARITY;
port->icount.parity++;
} else if (status & MPC52xx_PSC_SR_FE) {
flag = TTY_FRAME;
port->icount.frame++;
}
/* Clear error condition */
psc_ops->command(port, MPC52xx_PSC_RST_ERR_STAT);
}
tty_insert_flip_char(tport, ch, flag);
if (status & MPC52xx_PSC_SR_OE) {
/*
* Overrun is special, since it's
* reported immediately, and doesn't
* affect the current character
*/
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
port->icount.overrun++;
}
}
tty_flip_buffer_push(tport);
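/* report whether more data is already pending, so the caller keeps looping */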
return psc_ops->raw_rx_rdy(port);
}
static inline bool
mpc52xx_uart_int_tx_chars(struct uart_port *port)
{
u8 ch;
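/*
 * uart_port_tx() drains the xmit buffer: the second expression gates
 * each character on FIFO readiness, the third performs the register write.
 */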
return uart_port_tx(port, ch,
psc_ops->raw_tx_rdy(port),
psc_ops->write_char(port, ch));
}
static irqreturn_t
mpc5xxx_uart_process_int(struct uart_port *port)
{
unsigned long pass = ISR_PASS_LIMIT;
bool keepgoing;
u8 status;
/* While we have stuff to do, we continue */
do {
/* If we don't find anything to do, we stop */
keepgoing = false;
psc_ops->rx_clr_irq(port);
if (psc_ops->rx_rdy(port))
keepgoing |= mpc52xx_uart_int_rx_chars(port);
psc_ops->tx_clr_irq(port);
if (psc_ops->tx_rdy(port))
keepgoing |= mpc52xx_uart_int_tx_chars(port);
status = psc_ops->get_ipcr(port);
if (status & MPC52xx_PSC_D_DCD)
uart_handle_dcd_change(port, !(status & MPC52xx_PSC_DCD));
if (status & MPC52xx_PSC_D_CTS)
uart_handle_cts_change(port, !(status & MPC52xx_PSC_CTS));
/* Limit the number of iterations */
if (!(--pass))
keepgoing = false;
} while (keepgoing);
return IRQ_HANDLED;
}
static irqreturn_t
mpc52xx_uart_int(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
irqreturn_t ret;
spin_lock(&port->lock);
ret = psc_ops->handle_irq(port);
spin_unlock(&port->lock);
return ret;
}
/* ======================================================================== */
/* Console ( if applicable ) */
/* ======================================================================== */
#ifdef CONFIG_SERIAL_MPC52xx_CONSOLE
static void __init
mpc52xx_console_get_options(struct uart_port *port,
int *baud, int *parity, int *bits, int *flow)
{
unsigned char mr1;
pr_debug("mpc52xx_console_get_options(port=%p)\n", port);
/* Read the mode registers */
mr1 = psc_ops->get_mr1(port);
/* CT{U,L}R are write-only, so the baud rate cannot be read back */
*baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
/* Parse them */
switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) {
case MPC52xx_PSC_MODE_5_BITS:
*bits = 5;
break;
case MPC52xx_PSC_MODE_6_BITS:
*bits = 6;
break;
case MPC52xx_PSC_MODE_7_BITS:
*bits = 7;
break;
case MPC52xx_PSC_MODE_8_BITS:
default:
*bits = 8;
}
if (mr1 & MPC52xx_PSC_MODE_PARNONE)
*parity = 'n';
else
*parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 'o' : 'e';
}
static void
mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_port *port = &mpc52xx_uart_ports[co->index];
unsigned int i, j;
/* Disable interrupts */
psc_ops->cw_disable_ints(port);
/* Wait for the TX buffer to be empty */
j = 5000000; /* Maximum wait */
while (!mpc52xx_uart_tx_empty(port) && --j)
udelay(1);
/* Write all the chars */
for (i = 0; i < count; i++, s++) {
/* Line return handling */
if (*s == '\n')
psc_ops->write_char(port, '\r');
/* Send the char */
psc_ops->write_char(port, *s);
/* Wait for the TX buffer to be empty */
j = 20000; /* Maximum wait */
while (!mpc52xx_uart_tx_empty(port) && --j)
udelay(1);
}
/* Restore interrupt state */
psc_ops->cw_restore_ints(port);
}
static int __init
mpc52xx_console_setup(struct console *co, char *options)
{
struct uart_port *port = &mpc52xx_uart_ports[co->index];
struct device_node *np = mpc52xx_uart_nodes[co->index];
unsigned int uartclk;
struct resource res;
int ret;
int baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
int bits = 8;
int parity = 'n';
int flow = 'n';
pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
co, co->index, options);
if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
pr_debug("PSC%x out of range\n", co->index);
return -EINVAL;
}
if (!np) {
pr_debug("PSC%x not found in device tree\n", co->index);
return -EINVAL;
}
pr_debug("Console on ttyPSC%x is %pOF\n",
co->index, mpc52xx_uart_nodes[co->index]);
/* Fetch register locations */
ret = of_address_to_resource(np, 0, &res);
if (ret) {
pr_debug("Could not get resources for PSC%x\n", co->index);
return ret;
}
uartclk = mpc5xxx_fwnode_get_bus_frequency(of_fwnode_handle(np));
if (uartclk == 0) {
pr_debug("Could not find uart clock frequency!\n");
return -EINVAL;
}
/* Basic port init. Needed since we use some uart_*() functions before
 * the real init, for early access */
spin_lock_init(&port->lock);
port->uartclk = uartclk;
port->ops = &mpc52xx_uart_ops;
port->mapbase = res.start;
port->membase = ioremap(res.start, sizeof(struct mpc52xx_psc));
port->irq = irq_of_parse_and_map(np, 0);
if (port->membase == NULL)
return -EINVAL;
pr_debug("mpc52xx-psc uart at %p, mapped to %p, irq=%x, freq=%i\n",
(void *)port->mapbase, port->membase,
port->irq, port->uartclk);
/* Set up the port parameters according to the options */
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow);
pr_debug("Setting console parameters: %i %i%c1 flow=%c\n",
baud, bits, parity, flow);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver mpc52xx_uart_driver;
static struct console mpc52xx_console = {
.name = "ttyPSC",
.write = mpc52xx_console_write,
.device = uart_console_device,
.setup = mpc52xx_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1, /* Specified on the cmdline (e.g. console=ttyPSC0) */
.data = &mpc52xx_uart_driver,
};
static int __init
mpc52xx_console_init(void)
{
mpc52xx_uart_of_enumerate();
register_console(&mpc52xx_console);
return 0;
}
console_initcall(mpc52xx_console_init);
#define MPC52xx_PSC_CONSOLE &mpc52xx_console
#else
#define MPC52xx_PSC_CONSOLE NULL
#endif
/* ======================================================================== */
/* UART Driver */
/* ======================================================================== */
static struct uart_driver mpc52xx_uart_driver = {
.driver_name = "mpc52xx_psc_uart",
.dev_name = "ttyPSC",
.major = SERIAL_PSC_MAJOR,
.minor = SERIAL_PSC_MINOR,
.nr = MPC52xx_PSC_MAXNUM,
.cons = MPC52xx_PSC_CONSOLE,
};
/* ======================================================================== */
/* OF Platform Driver */
/* ======================================================================== */
static const struct of_device_id mpc52xx_uart_of_match[] = {
#ifdef CONFIG_PPC_MPC52xx
{ .compatible = "fsl,mpc5200b-psc-uart", .data = &mpc5200b_psc_ops, },
{ .compatible = "fsl,mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
/* binding used by old lite5200 device trees: */
{ .compatible = "mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
/* binding used by efika: */
{ .compatible = "mpc5200-serial", .data = &mpc52xx_psc_ops, },
#endif
#ifdef CONFIG_PPC_MPC512x
{ .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, },
{ .compatible = "fsl,mpc5125-psc-uart", .data = &mpc5125_psc_ops, },
#endif
{},
};
static int mpc52xx_uart_of_probe(struct platform_device *op)
{
int idx = -1;
unsigned int uartclk;
struct uart_port *port = NULL;
struct resource res;
int ret;
/* Check validity & presence */
for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++)
if (mpc52xx_uart_nodes[idx] == op->dev.of_node)
break;
if (idx >= MPC52xx_PSC_MAXNUM)
return -EINVAL;
pr_debug("Found %pOF assigned to ttyPSC%x\n",
mpc52xx_uart_nodes[idx], idx);
/* set the uart clock to the input clock of the PSC; the different
 * prescalers are taken into account in the set_baudrate() methods
 * of the respective chips */
uartclk = mpc5xxx_get_bus_frequency(&op->dev);
if (uartclk == 0) {
dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
return -EINVAL;
}
/* Init the port structure */
port = &mpc52xx_uart_ports[idx];
spin_lock_init(&port->lock);
port->uartclk = uartclk;
port->fifosize = 512;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MPC52xx_CONSOLE);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF |
(uart_console(port) ? 0 : UPF_IOREMAP);
port->line = idx;
port->ops = &mpc52xx_uart_ops;
port->dev = &op->dev;
/* Search for IRQ and mapbase */
ret = of_address_to_resource(op->dev.of_node, 0, &res);
if (ret)
return ret;
port->mapbase = res.start;
if (!port->mapbase) {
dev_dbg(&op->dev, "Could not allocate resources for PSC\n");
return -EINVAL;
}
psc_ops->get_irq(port, op->dev.of_node);
if (port->irq == 0) {
dev_dbg(&op->dev, "Could not get irq\n");
return -EINVAL;
}
dev_dbg(&op->dev, "mpc52xx-psc uart at %p, irq=%x, freq=%i\n",
(void *)port->mapbase, port->irq, port->uartclk);
/* Add the port to the uart sub-system */
ret = uart_add_one_port(&mpc52xx_uart_driver, port);
if (ret)
return ret;
platform_set_drvdata(op, (void *)port);
return 0;
}
static int
mpc52xx_uart_of_remove(struct platform_device *op)
{
struct uart_port *port = platform_get_drvdata(op);
if (port)
uart_remove_one_port(&mpc52xx_uart_driver, port);
return 0;
}
#ifdef CONFIG_PM
static int
mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state)
{
struct uart_port *port = platform_get_drvdata(op);
if (port)
uart_suspend_port(&mpc52xx_uart_driver, port);
return 0;
}
static int
mpc52xx_uart_of_resume(struct platform_device *op)
{
struct uart_port *port = platform_get_drvdata(op);
if (port)
uart_resume_port(&mpc52xx_uart_driver, port);
return 0;
}
#endif
static void
mpc52xx_uart_of_assign(struct device_node *np)
{
int i;
/* Find the first free PSC number */
for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) {
if (mpc52xx_uart_nodes[i] == NULL) {
of_node_get(np);
mpc52xx_uart_nodes[i] = np;
return;
}
}
}
static void
mpc52xx_uart_of_enumerate(void)
{
static int enum_done;
struct device_node *np;
const struct of_device_id *match;
int i;
if (enum_done)
return;
/* Assign index to each PSC in device tree */
for_each_matching_node(np, mpc52xx_uart_of_match) {
match = of_match_node(mpc52xx_uart_of_match, np);
psc_ops = match->data;
mpc52xx_uart_of_assign(np);
}
enum_done = 1;
for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) {
if (mpc52xx_uart_nodes[i])
pr_debug("%pOF assigned to ttyPSC%x\n",
mpc52xx_uart_nodes[i], i);
}
}
MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match);
static struct platform_driver mpc52xx_uart_of_driver = {
.probe = mpc52xx_uart_of_probe,
.remove = mpc52xx_uart_of_remove,
#ifdef CONFIG_PM
.suspend = mpc52xx_uart_of_suspend,
.resume = mpc52xx_uart_of_resume,
#endif
.driver = {
.name = "mpc52xx-psc-uart",
.of_match_table = mpc52xx_uart_of_match,
},
};
/* ======================================================================== */
/* Module */
/* ======================================================================== */
static int __init
mpc52xx_uart_init(void)
{
int ret;
printk(KERN_INFO "Serial: MPC52xx PSC UART driver\n");
ret = uart_register_driver(&mpc52xx_uart_driver);
if (ret) {
printk(KERN_ERR "%s: uart_register_driver failed (%i)\n",
__FILE__, ret);
return ret;
}
mpc52xx_uart_of_enumerate();
/*
* Map the PSC FIFO Controller and init if on MPC512x.
*/
if (psc_ops && psc_ops->fifoc_init) {
ret = psc_ops->fifoc_init();
if (ret)
goto err_init;
}
ret = platform_driver_register(&mpc52xx_uart_of_driver);
if (ret) {
printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
__FILE__, ret);
goto err_reg;
}
return 0;
err_reg:
if (psc_ops && psc_ops->fifoc_uninit)
psc_ops->fifoc_uninit();
err_init:
uart_unregister_driver(&mpc52xx_uart_driver);
return ret;
}
static void __exit
mpc52xx_uart_exit(void)
{
if (psc_ops->fifoc_uninit)
psc_ops->fifoc_uninit();
platform_driver_unregister(&mpc52xx_uart_of_driver);
uart_unregister_driver(&mpc52xx_uart_driver);
}
module_init(mpc52xx_uart_init);
module_exit(mpc52xx_uart_exit);
MODULE_AUTHOR("Sylvain Munaut <[email protected]>");
MODULE_DESCRIPTION("Freescale MPC52xx PSC UART");
MODULE_LICENSE("GPL");
| linux-master | drivers/tty/serial/mpc52xx_uart.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Support for the asynchronous serial interface (DUART) included
* in the BCM1250 and derived System-On-a-Chip (SOC) devices.
*
* Copyright (c) 2007 Maciej W. Rozycki
*
* Derived from drivers/char/sb1250_duart.c for which the following
* copyright applies:
*
* Copyright (c) 2000, 2001, 2002, 2003, 2004 Broadcom Corporation
*
* References:
*
* "BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
*/
#include <linux/compiler.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/types.h>
#include <linux/refcount.h>
#include <linux/io.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_uart.h>
#include <asm/sibyte/swarm.h>
#if defined(CONFIG_SIBYTE_BCM1x80)
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#define SBD_CHANREGS(line) A_BCM1480_DUART_CHANREG((line), 0)
#define SBD_CTRLREGS(line) A_BCM1480_DUART_CTRLREG((line), 0)
#define SBD_INT(line) (K_BCM1480_INT_UART_0 + (line))
#define DUART_CHANREG_SPACING BCM1480_DUART_CHANREG_SPACING
#define R_DUART_IMRREG(line) R_BCM1480_DUART_IMRREG(line)
#define R_DUART_INCHREG(line) R_BCM1480_DUART_INCHREG(line)
#define R_DUART_ISRREG(line) R_BCM1480_DUART_ISRREG(line)
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#define SBD_CHANREGS(line) A_DUART_CHANREG((line), 0)
#define SBD_CTRLREGS(line) A_DUART_CTRLREG(0)
#define SBD_INT(line) (K_INT_UART_0 + (line))
#else
#error invalid SB1250 UART configuration
#endif
MODULE_AUTHOR("Maciej W. Rozycki <[email protected]>");
MODULE_DESCRIPTION("BCM1xxx on-chip DUART serial driver");
MODULE_LICENSE("GPL");
#define DUART_MAX_CHIP 2
#define DUART_MAX_SIDE 2
/*
* Per-port state.
*/
struct sbd_port {
struct sbd_duart *duart;
struct uart_port port;
unsigned char __iomem *memctrl;
int tx_stopped;
int initialised;
};
/*
* Per-DUART state for the shared register space.
*/
struct sbd_duart {
struct sbd_port sport[2];
unsigned long mapctrl;
refcount_t map_guard;
};
#define to_sport(uport) container_of(uport, struct sbd_port, port)
static struct sbd_duart sbd_duarts[DUART_MAX_CHIP];
/*
* Reading and writing SB1250 DUART registers.
*
* There are three register spaces: two per-channel ones and
* a shared one. We have to define accessors appropriately.
* All registers are 64-bit and all but the Baud Rate Clock
* registers only define 8 least significant bits. There is
* also a workaround to take into account. Raw accessors use
* the full register width, but cooked ones truncate it
* intentionally so that the rest of the driver does not care.
*/
static u64 __read_sbdchn(struct sbd_port *sport, int reg)
{
void __iomem *csr = sport->port.membase + reg;
return __raw_readq(csr);
}
static u64 __read_sbdshr(struct sbd_port *sport, int reg)
{
void __iomem *csr = sport->memctrl + reg;
return __raw_readq(csr);
}
static void __write_sbdchn(struct sbd_port *sport, int reg, u64 value)
{
void __iomem *csr = sport->port.membase + reg;
__raw_writeq(value, csr);
}
static void __write_sbdshr(struct sbd_port *sport, int reg, u64 value)
{
void __iomem *csr = sport->memctrl + reg;
__raw_writeq(value, csr);
}
/*
* In bug 1956, we get glitches that can mess up uart registers. This
* "read-mode-reg after any register access" is an accepted workaround.
*/
static void __war_sbd1956(struct sbd_port *sport)
{
__read_sbdchn(sport, R_DUART_MODE_REG_1);
__read_sbdchn(sport, R_DUART_MODE_REG_2);
}
static unsigned char read_sbdchn(struct sbd_port *sport, int reg)
{
unsigned char retval;
retval = __read_sbdchn(sport, reg);
if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
__war_sbd1956(sport);
return retval;
}
static unsigned char read_sbdshr(struct sbd_port *sport, int reg)
{
unsigned char retval;
retval = __read_sbdshr(sport, reg);
if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
__war_sbd1956(sport);
return retval;
}
static void write_sbdchn(struct sbd_port *sport, int reg, unsigned int value)
{
__write_sbdchn(sport, reg, value);
if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
__war_sbd1956(sport);
}
static void write_sbdshr(struct sbd_port *sport, int reg, unsigned int value)
{
__write_sbdshr(sport, reg, value);
if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
__war_sbd1956(sport);
}
static int sbd_receive_ready(struct sbd_port *sport)
{
return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_RX_RDY;
}
static int sbd_receive_drain(struct sbd_port *sport)
{
int loops = 10000;
while (sbd_receive_ready(sport) && --loops)
read_sbdchn(sport, R_DUART_RX_HOLD);
return loops;
}
static int __maybe_unused sbd_transmit_ready(struct sbd_port *sport)
{
return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_RDY;
}
static int __maybe_unused sbd_transmit_drain(struct sbd_port *sport)
{
int loops = 10000;
while (!sbd_transmit_ready(sport) && --loops)
udelay(2);
return loops;
}
static int sbd_transmit_empty(struct sbd_port *sport)
{
return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_EMT;
}
static int sbd_line_drain(struct sbd_port *sport)
{
int loops = 10000;
while (!sbd_transmit_empty(sport) && --loops)
udelay(2);
return loops;
}
static unsigned int sbd_tx_empty(struct uart_port *uport)
{
struct sbd_port *sport = to_sport(uport);
return sbd_transmit_empty(sport) ? TIOCSER_TEMT : 0;
}
static unsigned int sbd_get_mctrl(struct uart_port *uport)
{
struct sbd_port *sport = to_sport(uport);
unsigned int mctrl, status;
status = read_sbdshr(sport, R_DUART_IN_PORT);
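/* both channels share the input port register; channel B's pins sit one bit above channel A's */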
status >>= (uport->line) % 2;
mctrl = (!(status & M_DUART_IN_PIN0_VAL) ? TIOCM_CTS : 0) |
(!(status & M_DUART_IN_PIN4_VAL) ? TIOCM_CAR : 0) |
(!(status & M_DUART_RIN0_PIN) ? TIOCM_RNG : 0) |
(!(status & M_DUART_IN_PIN2_VAL) ? TIOCM_DSR : 0);
return mctrl;
}
static void sbd_set_mctrl(struct uart_port *uport, unsigned int mctrl)
{
struct sbd_port *sport = to_sport(uport);
unsigned int clr = 0, set = 0, mode2;
if (mctrl & TIOCM_DTR)
set |= M_DUART_SET_OPR2;
else
clr |= M_DUART_CLR_OPR2;
if (mctrl & TIOCM_RTS)
set |= M_DUART_SET_OPR0;
else
clr |= M_DUART_CLR_OPR0;
clr <<= (uport->line) % 2;
set <<= (uport->line) % 2;
mode2 = read_sbdchn(sport, R_DUART_MODE_REG_2);
mode2 &= ~M_DUART_CHAN_MODE;
if (mctrl & TIOCM_LOOP)
mode2 |= V_DUART_CHAN_MODE_LCL_LOOP;
else
mode2 |= V_DUART_CHAN_MODE_NORMAL;
write_sbdshr(sport, R_DUART_CLEAR_OPR, clr);
write_sbdshr(sport, R_DUART_SET_OPR, set);
write_sbdchn(sport, R_DUART_MODE_REG_2, mode2);
}
static void sbd_stop_tx(struct uart_port *uport)
{
struct sbd_port *sport = to_sport(uport);
write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
sport->tx_stopped = 1;
}
static void sbd_start_tx(struct uart_port *uport)
{
struct sbd_port *sport = to_sport(uport);
unsigned int mask;
/* Enable tx interrupts. */
mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
mask |= M_DUART_IMR_TX;
write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
/* Go!, go!, go!... */
write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
sport->tx_stopped = 0;
}
static void sbd_stop_rx(struct uart_port *uport)
{
struct sbd_port *sport = to_sport(uport);
write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
}
static void sbd_enable_ms(struct uart_port *uport)
{
struct sbd_port *sport = to_sport(uport);
write_sbdchn(sport, R_DUART_AUXCTL_X,
M_DUART_CIN_CHNG_ENA | M_DUART_CTS_CHNG_ENA);
}
static void sbd_break_ctl(struct uart_port *uport, int break_state)
{
struct sbd_port *sport = to_sport(uport);
if (break_state == -1)
write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_START_BREAK);
else
write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_STOP_BREAK);
}
static void sbd_receive_chars(struct sbd_port *sport)
{
struct uart_port *uport = &sport->port;
struct uart_icount *icount;
unsigned int status;
int count;
u8 ch, flag;
for (count = 16; count; count--) {
status = read_sbdchn(sport, R_DUART_STATUS);
if (!(status & M_DUART_RX_RDY))
break;
ch = read_sbdchn(sport, R_DUART_RX_HOLD);
flag = TTY_NORMAL;
icount = &uport->icount;
icount->rx++;
if (unlikely(status &
(M_DUART_RCVD_BRK | M_DUART_FRM_ERR |
M_DUART_PARITY_ERR | M_DUART_OVRUN_ERR))) {
if (status & M_DUART_RCVD_BRK) {
icount->brk++;
if (uart_handle_break(uport))
continue;
} else if (status & M_DUART_FRM_ERR)
icount->frame++;
else if (status & M_DUART_PARITY_ERR)
icount->parity++;
if (status & M_DUART_OVRUN_ERR)
icount->overrun++;
status &= uport->read_status_mask;
if (status & M_DUART_RCVD_BRK)
flag = TTY_BREAK;
else if (status & M_DUART_FRM_ERR)
flag = TTY_FRAME;
else if (status & M_DUART_PARITY_ERR)
flag = TTY_PARITY;
}
if (uart_handle_sysrq_char(uport, ch))
continue;
uart_insert_char(uport, status, M_DUART_OVRUN_ERR, ch, flag);
}
tty_flip_buffer_push(&uport->state->port);
}
static void sbd_transmit_chars(struct sbd_port *sport)
{
struct uart_port *uport = &sport->port;
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned int mask;
int stop_tx;
/* XON/XOFF chars. */
if (sport->port.x_char) {
write_sbdchn(sport, R_DUART_TX_HOLD, sport->port.x_char);
sport->port.icount.tx++;
sport->port.x_char = 0;
return;
}
/* Nothing to do if the buffer is empty or TX is stopped (by software or hardware). */
stop_tx = (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port));
/* Send char. */
if (!stop_tx) {
write_sbdchn(sport, R_DUART_TX_HOLD, xmit->buf[xmit->tail]);
uart_xmit_advance(&sport->port, 1);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&sport->port);
}
/* Are we done? */
if (stop_tx || uart_circ_empty(xmit)) {
/* Disable tx interrupts. */
mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
mask &= ~M_DUART_IMR_TX;
write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
}
}
static void sbd_status_handle(struct sbd_port *sport)
{
struct uart_port *uport = &sport->port;
unsigned int delta;
delta = read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
delta >>= (uport->line) % 2;
if (delta & (M_DUART_IN_PIN0_VAL << S_DUART_IN_PIN_CHNG))
uart_handle_cts_change(uport, !(delta & M_DUART_IN_PIN0_VAL));
if (delta & (M_DUART_IN_PIN2_VAL << S_DUART_IN_PIN_CHNG))
uport->icount.dsr++;
if (delta & ((M_DUART_IN_PIN2_VAL | M_DUART_IN_PIN0_VAL) <<
S_DUART_IN_PIN_CHNG))
wake_up_interruptible(&uport->state->port.delta_msr_wait);
}
static irqreturn_t sbd_interrupt(int irq, void *dev_id)
{
struct sbd_port *sport = dev_id;
struct uart_port *uport = &sport->port;
irqreturn_t status = IRQ_NONE;
unsigned int intstat;
int count;
for (count = 16; count; count--) {
intstat = read_sbdshr(sport,
R_DUART_ISRREG((uport->line) % 2));
intstat &= read_sbdshr(sport,
R_DUART_IMRREG((uport->line) % 2));
intstat &= M_DUART_ISR_ALL;
if (!intstat)
break;
if (intstat & M_DUART_ISR_RX)
sbd_receive_chars(sport);
if (intstat & M_DUART_ISR_IN)
sbd_status_handle(sport);
if (intstat & M_DUART_ISR_TX)
sbd_transmit_chars(sport);
status = IRQ_HANDLED;
}
return status;
}
static int sbd_startup(struct uart_port *uport)
{
struct sbd_port *sport = to_sport(uport);
unsigned int mode1;
int ret;
ret = request_irq(sport->port.irq, sbd_interrupt,
IRQF_SHARED, "sb1250-duart", sport);
if (ret)
return ret;
/* Clear the receive FIFO. */
sbd_receive_drain(sport);
/* Clear the interrupt registers. */
write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_BREAK_INT);
read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
/* Set rx/tx interrupt to FIFO available. */
mode1 = read_sbdchn(sport, R_DUART_MODE_REG_1);
mode1 &= ~(M_DUART_RX_IRQ_SEL_RXFULL | M_DUART_TX_IRQ_SEL_TXEMPT);
write_sbdchn(sport, R_DUART_MODE_REG_1, mode1);
/* Disable tx, enable rx. */
write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_EN);
sport->tx_stopped = 1;
/* Enable interrupts. */
write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
M_DUART_IMR_IN | M_DUART_IMR_RX);
return 0;
}
static void sbd_shutdown(struct uart_port *uport)
{
struct sbd_port *sport = to_sport(uport);
write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
sport->tx_stopped = 1;
free_irq(sport->port.irq, sport);
}
static void sbd_init_port(struct sbd_port *sport)
{
struct uart_port *uport = &sport->port;
if (sport->initialised)
return;
/* There is no DUART reset feature, so just set some sane defaults. */
write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_TX);
write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_RX);
write_sbdchn(sport, R_DUART_MODE_REG_1, V_DUART_BITS_PER_CHAR_8);
write_sbdchn(sport, R_DUART_MODE_REG_2, 0);
write_sbdchn(sport, R_DUART_FULL_CTL,
V_DUART_INT_TIME(0) | V_DUART_SIG_FULL(15));
write_sbdchn(sport, R_DUART_OPCR_X, 0);
write_sbdchn(sport, R_DUART_AUXCTL_X, 0);
write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
sport->initialised = 1;
}
static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
const struct ktermios *old_termios)
{
struct sbd_port *sport = to_sport(uport);
unsigned int mode1 = 0, mode2 = 0, aux = 0;
unsigned int mode1mask = 0, mode2mask = 0, auxmask = 0;
unsigned int oldmode1, oldmode2, oldaux;
unsigned int baud, brg;
unsigned int command;
mode1mask |= ~(M_DUART_PARITY_MODE | M_DUART_PARITY_TYPE_ODD |
M_DUART_BITS_PER_CHAR);
mode2mask |= ~M_DUART_STOP_BIT_LEN_2;
auxmask |= ~M_DUART_CTS_CHNG_ENA;
/* Byte size. */
switch (termios->c_cflag & CSIZE) {
case CS5:
case CS6:
/* Unsupported, leave unchanged. */
mode1mask |= M_DUART_PARITY_MODE;
break;
case CS7:
mode1 |= V_DUART_BITS_PER_CHAR_7;
break;
case CS8:
default:
mode1 |= V_DUART_BITS_PER_CHAR_8;
break;
}
/* Parity and stop bits. */
if (termios->c_cflag & CSTOPB)
mode2 |= M_DUART_STOP_BIT_LEN_2;
else
mode2 |= M_DUART_STOP_BIT_LEN_1;
if (termios->c_cflag & PARENB)
mode1 |= V_DUART_PARITY_MODE_ADD;
else
mode1 |= V_DUART_PARITY_MODE_NONE;
if (termios->c_cflag & PARODD)
mode1 |= M_DUART_PARITY_TYPE_ODD;
else
mode1 |= M_DUART_PARITY_TYPE_EVEN;
baud = uart_get_baud_rate(uport, termios, old_termios, 1200, 5000000);
brg = V_DUART_BAUD_RATE(baud);
/* The actual lower bound is 1221 bps, so compensate. */
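/*
 * (A note on the math, assuming V_DUART_BAUD_RATE() derives its value
 * from the 100 MHz / 20 reference and M_DUART_CLK_COUNTER is a 12-bit
 * counter limit: 100e6 / 20 / 4096 ~= 1221 bps, hence the figure above.)
 */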
if (brg > M_DUART_CLK_COUNTER)
brg = M_DUART_CLK_COUNTER;
uart_update_timeout(uport, termios->c_cflag, baud);
uport->read_status_mask = M_DUART_OVRUN_ERR;
if (termios->c_iflag & INPCK)
uport->read_status_mask |= M_DUART_FRM_ERR |
M_DUART_PARITY_ERR;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
uport->read_status_mask |= M_DUART_RCVD_BRK;
uport->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
uport->ignore_status_mask |= M_DUART_FRM_ERR |
M_DUART_PARITY_ERR;
if (termios->c_iflag & IGNBRK) {
uport->ignore_status_mask |= M_DUART_RCVD_BRK;
if (termios->c_iflag & IGNPAR)
uport->ignore_status_mask |= M_DUART_OVRUN_ERR;
}
if (termios->c_cflag & CREAD)
command = M_DUART_RX_EN;
else
command = M_DUART_RX_DIS;
if (termios->c_cflag & CRTSCTS)
aux |= M_DUART_CTS_CHNG_ENA;
else
aux &= ~M_DUART_CTS_CHNG_ENA;
spin_lock(&uport->lock);
if (sport->tx_stopped)
command |= M_DUART_TX_DIS;
else
command |= M_DUART_TX_EN;
oldmode1 = read_sbdchn(sport, R_DUART_MODE_REG_1) & mode1mask;
oldmode2 = read_sbdchn(sport, R_DUART_MODE_REG_2) & mode2mask;
oldaux = read_sbdchn(sport, R_DUART_AUXCTL_X) & auxmask;
if (!sport->tx_stopped)
sbd_line_drain(sport);
write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
write_sbdchn(sport, R_DUART_MODE_REG_1, mode1 | oldmode1);
write_sbdchn(sport, R_DUART_MODE_REG_2, mode2 | oldmode2);
write_sbdchn(sport, R_DUART_CLK_SEL, brg);
write_sbdchn(sport, R_DUART_AUXCTL_X, aux | oldaux);
write_sbdchn(sport, R_DUART_CMD, command);
spin_unlock(&uport->lock);
}
static const char *sbd_type(struct uart_port *uport)
{
return "SB1250 DUART";
}
static void sbd_release_port(struct uart_port *uport)
{
struct sbd_port *sport = to_sport(uport);
struct sbd_duart *duart = sport->duart;
iounmap(sport->memctrl);
sport->memctrl = NULL;
iounmap(uport->membase);
uport->membase = NULL;
if (refcount_dec_and_test(&duart->map_guard))
release_mem_region(duart->mapctrl, DUART_CHANREG_SPACING);
release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
}
static int sbd_map_port(struct uart_port *uport)
{
const char *err = KERN_ERR "sbd: Cannot map MMIO\n";
struct sbd_port *sport = to_sport(uport);
struct sbd_duart *duart = sport->duart;
if (!uport->membase)
uport->membase = ioremap(uport->mapbase,
DUART_CHANREG_SPACING);
if (!uport->membase) {
printk(err);
return -ENOMEM;
}
if (!sport->memctrl)
sport->memctrl = ioremap(duart->mapctrl,
DUART_CHANREG_SPACING);
if (!sport->memctrl) {
printk(err);
iounmap(uport->membase);
uport->membase = NULL;
return -ENOMEM;
}
return 0;
}
static int sbd_request_port(struct uart_port *uport)
{
const char *err = KERN_ERR "sbd: Unable to reserve MMIO resource\n";
struct sbd_duart *duart = to_sport(uport)->duart;
int ret = 0;
if (!request_mem_region(uport->mapbase, DUART_CHANREG_SPACING,
"sb1250-duart")) {
printk(err);
return -EBUSY;
}
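/* only the first user of a DUART pair reserves the shared control block */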
refcount_inc(&duart->map_guard);
if (refcount_read(&duart->map_guard) == 1) {
if (!request_mem_region(duart->mapctrl, DUART_CHANREG_SPACING,
"sb1250-duart")) {
refcount_dec(&duart->map_guard);
printk(err);
ret = -EBUSY;
}
}
if (!ret) {
ret = sbd_map_port(uport);
if (ret) {
if (refcount_dec_and_test(&duart->map_guard))
release_mem_region(duart->mapctrl,
DUART_CHANREG_SPACING);
}
}
if (ret) {
release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
return ret;
}
return 0;
}
static void sbd_config_port(struct uart_port *uport, int flags)
{
struct sbd_port *sport = to_sport(uport);
if (flags & UART_CONFIG_TYPE) {
if (sbd_request_port(uport))
return;
uport->type = PORT_SB1250_DUART;
sbd_init_port(sport);
}
}
static int sbd_verify_port(struct uart_port *uport, struct serial_struct *ser)
{
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_SB1250_DUART)
ret = -EINVAL;
if (ser->irq != uport->irq)
ret = -EINVAL;
if (ser->baud_base != uport->uartclk / 16)
ret = -EINVAL;
return ret;
}
static const struct uart_ops sbd_ops = {
.tx_empty = sbd_tx_empty,
.set_mctrl = sbd_set_mctrl,
.get_mctrl = sbd_get_mctrl,
.stop_tx = sbd_stop_tx,
.start_tx = sbd_start_tx,
.stop_rx = sbd_stop_rx,
.enable_ms = sbd_enable_ms,
.break_ctl = sbd_break_ctl,
.startup = sbd_startup,
.shutdown = sbd_shutdown,
.set_termios = sbd_set_termios,
.type = sbd_type,
.release_port = sbd_release_port,
.request_port = sbd_request_port,
.config_port = sbd_config_port,
.verify_port = sbd_verify_port,
};
/* Initialize SB1250 DUART port structures. */
static void __init sbd_probe_duarts(void)
{
static int probed;
int chip, side;
int max_lines, line;
if (probed)
return;
/* Set the number of available units based on the SOC type. */
switch (soc_type) {
case K_SYS_SOC_TYPE_BCM1x55:
case K_SYS_SOC_TYPE_BCM1x80:
max_lines = 4;
break;
default:
/* Assume at least two serial ports at the normal address. */
max_lines = 2;
break;
}
probed = 1;
for (chip = 0, line = 0; chip < DUART_MAX_CHIP && line < max_lines;
chip++) {
sbd_duarts[chip].mapctrl = SBD_CTRLREGS(line);
for (side = 0; side < DUART_MAX_SIDE && line < max_lines;
side++, line++) {
struct sbd_port *sport = &sbd_duarts[chip].sport[side];
struct uart_port *uport = &sport->port;
sport->duart = &sbd_duarts[chip];
uport->irq = SBD_INT(line);
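/* the serial core treats uartclk as 16x the maximum rate (baud_base =
 * uartclk / 16, cf. sbd_verify_port); 100 MHz / 20 = 5 Mbps is the
 * fastest rate accepted in sbd_set_termios */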
uport->uartclk = 100000000 / 20 * 16;
uport->fifosize = 16;
uport->iotype = UPIO_MEM;
uport->flags = UPF_BOOT_AUTOCONF;
uport->ops = &sbd_ops;
uport->line = line;
uport->mapbase = SBD_CHANREGS(line);
uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SB1250_DUART_CONSOLE);
}
}
}
#ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
/*
* Serial console stuff. Very basic, polling driver for doing serial
* console output. The console_lock is held by the caller, so we
* shouldn't be interrupted for more console activity.
*/
static void sbd_console_putchar(struct uart_port *uport, unsigned char ch)
{
struct sbd_port *sport = to_sport(uport);
sbd_transmit_drain(sport);
write_sbdchn(sport, R_DUART_TX_HOLD, ch);
}
static void sbd_console_write(struct console *co, const char *s,
unsigned int count)
{
int chip = co->index / DUART_MAX_SIDE;
int side = co->index % DUART_MAX_SIDE;
struct sbd_port *sport = &sbd_duarts[chip].sport[side];
struct uart_port *uport = &sport->port;
unsigned long flags;
unsigned int mask;
/* Disable transmit interrupts and enable the transmitter. */
spin_lock_irqsave(&uport->lock, flags);
mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
mask & ~M_DUART_IMR_TX);
write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
spin_unlock_irqrestore(&uport->lock, flags);
uart_console_write(&sport->port, s, count, sbd_console_putchar);
/* Restore transmit interrupts and the transmitter enable. */
spin_lock_irqsave(&uport->lock, flags);
sbd_line_drain(sport);
if (sport->tx_stopped)
write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
spin_unlock_irqrestore(&uport->lock, flags);
}
static int __init sbd_console_setup(struct console *co, char *options)
{
int chip = co->index / DUART_MAX_SIDE;
int side = co->index % DUART_MAX_SIDE;
struct sbd_port *sport = &sbd_duarts[chip].sport[side];
struct uart_port *uport = &sport->port;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
int ret;
if (!sport->duart)
return -ENXIO;
ret = sbd_map_port(uport);
if (ret)
return ret;
sbd_init_port(sport);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(uport, co, baud, parity, bits, flow);
}
static struct uart_driver sbd_reg;
static struct console sbd_console = {
.name = "duart",
.write = sbd_console_write,
.device = uart_console_device,
.setup = sbd_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &sbd_reg
};
static int __init sbd_serial_console_init(void)
{
sbd_probe_duarts();
register_console(&sbd_console);
return 0;
}
console_initcall(sbd_serial_console_init);
#define SERIAL_SB1250_DUART_CONSOLE &sbd_console
#else
#define SERIAL_SB1250_DUART_CONSOLE NULL
#endif /* CONFIG_SERIAL_SB1250_DUART_CONSOLE */
static struct uart_driver sbd_reg = {
.owner = THIS_MODULE,
.driver_name = "sb1250_duart",
.dev_name = "duart",
.major = TTY_MAJOR,
.minor = SB1250_DUART_MINOR_BASE,
.nr = DUART_MAX_CHIP * DUART_MAX_SIDE,
.cons = SERIAL_SB1250_DUART_CONSOLE,
};
/* Set up the driver and register it. */
static int __init sbd_init(void)
{
int i, ret;
sbd_probe_duarts();
ret = uart_register_driver(&sbd_reg);
if (ret)
return ret;
for (i = 0; i < DUART_MAX_CHIP * DUART_MAX_SIDE; i++) {
struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
struct uart_port *uport = &sport->port;
if (sport->duart)
uart_add_one_port(&sbd_reg, uport);
}
return 0;
}
/* Unload the driver. Unregister stuff, get ready to go away. */
static void __exit sbd_exit(void)
{
int i;
for (i = DUART_MAX_CHIP * DUART_MAX_SIDE - 1; i >= 0; i--) {
struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
struct uart_port *uport = &sport->port;
if (sport->duart)
uart_remove_one_port(&sbd_reg, uport);
}
uart_unregister_driver(&sbd_reg);
}
module_init(sbd_init);
module_exit(sbd_exit);
| linux-master | drivers/tty/serial/sb1250-duart.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KGDB NMI serial console
*
* Copyright 2010 Google, Inc.
* Arve Hjønnevåg <[email protected]>
* Colin Cross <[email protected]>
* Copyright 2012 Linaro Ltd.
* Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/atomic.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/kfifo.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
static int kgdb_nmi_knock = 1;
module_param_named(knock, kgdb_nmi_knock, int, 0600);
MODULE_PARM_DESC(knock, "if set to 1 (default), the special '$3#33' command " \
"must be used to enter the debugger; when set to 0, " \
"hitting return key is enough to enter the debugger; " \
"when set to -1, the debugger is entered immediately " \
"upon NMI");
static char *kgdb_nmi_magic = "$3#33";
module_param_named(magic, kgdb_nmi_magic, charp, 0600);
MODULE_PARM_DESC(magic, "magic sequence to enter NMI debugger (default $3#33)");
static atomic_t kgdb_nmi_num_readers = ATOMIC_INIT(0);
static int kgdb_nmi_console_setup(struct console *co, char *options)
{
arch_kgdb_ops.enable_nmi(1);
/* The NMI console uses the dbg_io_ops to issue console messages. To
* avoid duplicate messages during kdb sessions we must inform kdb's
* I/O utilities that messages sent to the console will automatically
* be displayed on the dbg_io.
*/
dbg_io_ops->cons = co;
return 0;
}
static void kgdb_nmi_console_write(struct console *co, const char *s, uint c)
{
int i;
for (i = 0; i < c; i++)
dbg_io_ops->write_char(s[i]);
}
static struct tty_driver *kgdb_nmi_tty_driver;
static struct tty_driver *kgdb_nmi_console_device(struct console *co, int *idx)
{
*idx = co->index;
return kgdb_nmi_tty_driver;
}
static struct console kgdb_nmi_console = {
.name = "ttyNMI",
.setup = kgdb_nmi_console_setup,
.write = kgdb_nmi_console_write,
.device = kgdb_nmi_console_device,
.flags = CON_PRINTBUFFER | CON_ANYTIME,
.index = -1,
};
/*
 * This is usually the maximum rate on debug ports. We make the FIFO large
 * enough to make copy-pasting into the terminal usable.
*/
#define KGDB_NMI_BAUD 115200
#define KGDB_NMI_FIFO_SIZE roundup_pow_of_two(KGDB_NMI_BAUD / 8 / HZ)
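/*
 * Worked example of the sizing above (a sketch, assuming HZ=100):
 * 115200 baud / 8 bits-per-byte / 100 ticks = 144 bytes per tick, rounded
 * up to the next power of two (a kfifo requirement) = 256 bytes.
 */
#if 0
static inline void kgdb_nmi_fifo_size_example(void)
{
	/* The FIFO must hold at least one tick's worth of input. */
	BUILD_BUG_ON(KGDB_NMI_FIFO_SIZE < KGDB_NMI_BAUD / 8 / HZ);
}
#endif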
struct kgdb_nmi_tty_priv {
struct tty_port port;
struct timer_list timer;
STRUCT_KFIFO(char, KGDB_NMI_FIFO_SIZE) fifo;
};
static struct tty_port *kgdb_nmi_port;
static void kgdb_tty_recv(int ch)
{
struct kgdb_nmi_tty_priv *priv;
char c = ch;
if (!kgdb_nmi_port || ch < 0)
return;
/*
	 * Can't use port->tty->driver_data as the tty might not be there. The
	 * timer will check for the tty and take a reference, but here we
	 * don't have to do that, and actually we can't: we're in NMI
	 * context, where no locks are possible.
*/
priv = container_of(kgdb_nmi_port, struct kgdb_nmi_tty_priv, port);
kfifo_in(&priv->fifo, &c, 1);
}
static int kgdb_nmi_poll_one_knock(void)
{
static int n;
int c;
const char *magic = kgdb_nmi_magic;
size_t m = strlen(magic);
bool printch = false;
c = dbg_io_ops->read_char();
if (c == NO_POLL_CHAR)
return c;
if (!kgdb_nmi_knock && (c == '\r' || c == '\n')) {
return 1;
} else if (c == magic[n]) {
n = (n + 1) % m;
if (!n)
return 1;
printch = true;
} else {
n = 0;
}
if (atomic_read(&kgdb_nmi_num_readers)) {
kgdb_tty_recv(c);
return 0;
}
if (printch) {
kdb_printf("%c", c);
return 0;
}
kdb_printf("\r%s %s to enter the debugger> %*s",
kgdb_nmi_knock ? "Type" : "Hit",
kgdb_nmi_knock ? magic : "<return>", (int)m, "");
while (m--)
kdb_printf("\b");
return 0;
}
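/*
 * Hypothetical, stand-alone sketch of the knock matcher above, to show the
 * state machine in isolation: feed it characters one at a time and it
 * returns true once the whole magic string has been seen in sequence.
 */
#if 0
static bool knock_match_example(int c)
{
	static int n;
	const char *magic = "$3#33";

	if (c == magic[n]) {
		n = (n + 1) % (int)strlen(magic);
		return n == 0;	/* wrapped around: full sequence seen */
	}
	n = 0;
	return false;
}
#endif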
/**
* kgdb_nmi_poll_knock - Check if it is time to enter the debugger
*
* "Serial ports are often noisy, especially when muxed over another port (we
* often use serial over the headset connector). Noise on the async command
* line just causes characters that are ignored, on a command line that blocked
* execution noise would be catastrophic." -- Colin Cross
*
 * So, this function implements KGDB/KDB knocking on the serial line: we won't
 * enter the debugger until we receive a known magic phrase, which is by
 * default "$3#33" (known as the "escape to KDB" command). There is also a
 * relaxed variant of knocking, i.e. just pressing the return key is enough
 * to enter the debugger. And if knocking is disabled altogether, the
 * function returns true immediately.
*/
bool kgdb_nmi_poll_knock(void)
{
if (kgdb_nmi_knock < 0)
return true;
while (1) {
int ret;
ret = kgdb_nmi_poll_one_knock();
if (ret == NO_POLL_CHAR)
return false;
else if (ret == 1)
break;
}
return true;
}
/*
 * The timer is cheap: it does not cause wakeups when it re-arms itself;
 * instead it waits for the next tick.
*/
static void kgdb_nmi_tty_receiver(struct timer_list *t)
{
struct kgdb_nmi_tty_priv *priv = from_timer(priv, t, timer);
char ch;
priv->timer.expires = jiffies + (HZ/100);
add_timer(&priv->timer);
if (likely(!atomic_read(&kgdb_nmi_num_readers) ||
!kfifo_len(&priv->fifo)))
return;
while (kfifo_out(&priv->fifo, &ch, 1))
tty_insert_flip_char(&priv->port, ch, TTY_NORMAL);
tty_flip_buffer_push(&priv->port);
}
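/*
 * Timing sketch for the drain above (assuming HZ >= 100): the timer
 * re-arms every HZ/100 jiffies, i.e. roughly every 10 ms, so at 115200
 * baud up to about 115200 / 8 / 100 = 144 bytes may accumulate in the
 * kfifo between two drains.
 */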
static int kgdb_nmi_tty_activate(struct tty_port *port, struct tty_struct *tty)
{
struct kgdb_nmi_tty_priv *priv =
container_of(port, struct kgdb_nmi_tty_priv, port);
kgdb_nmi_port = port;
priv->timer.expires = jiffies + (HZ/100);
add_timer(&priv->timer);
return 0;
}
static void kgdb_nmi_tty_shutdown(struct tty_port *port)
{
struct kgdb_nmi_tty_priv *priv =
container_of(port, struct kgdb_nmi_tty_priv, port);
del_timer(&priv->timer);
kgdb_nmi_port = NULL;
}
static const struct tty_port_operations kgdb_nmi_tty_port_ops = {
.activate = kgdb_nmi_tty_activate,
.shutdown = kgdb_nmi_tty_shutdown,
};
static int kgdb_nmi_tty_install(struct tty_driver *drv, struct tty_struct *tty)
{
struct kgdb_nmi_tty_priv *priv;
int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
INIT_KFIFO(priv->fifo);
timer_setup(&priv->timer, kgdb_nmi_tty_receiver, 0);
tty_port_init(&priv->port);
priv->port.ops = &kgdb_nmi_tty_port_ops;
tty->driver_data = priv;
ret = tty_port_install(&priv->port, drv, tty);
if (ret) {
pr_err("%s: can't install tty port: %d\n", __func__, ret);
goto err;
}
return 0;
err:
tty_port_destroy(&priv->port);
kfree(priv);
return ret;
}
static void kgdb_nmi_tty_cleanup(struct tty_struct *tty)
{
struct kgdb_nmi_tty_priv *priv = tty->driver_data;
tty->driver_data = NULL;
tty_port_destroy(&priv->port);
kfree(priv);
}
static int kgdb_nmi_tty_open(struct tty_struct *tty, struct file *file)
{
struct kgdb_nmi_tty_priv *priv = tty->driver_data;
unsigned int mode = file->f_flags & O_ACCMODE;
int ret;
ret = tty_port_open(&priv->port, tty, file);
if (!ret && (mode == O_RDONLY || mode == O_RDWR))
atomic_inc(&kgdb_nmi_num_readers);
return ret;
}
static void kgdb_nmi_tty_close(struct tty_struct *tty, struct file *file)
{
struct kgdb_nmi_tty_priv *priv = tty->driver_data;
unsigned int mode = file->f_flags & O_ACCMODE;
if (mode == O_RDONLY || mode == O_RDWR)
atomic_dec(&kgdb_nmi_num_readers);
tty_port_close(&priv->port, tty, file);
}
static void kgdb_nmi_tty_hangup(struct tty_struct *tty)
{
struct kgdb_nmi_tty_priv *priv = tty->driver_data;
tty_port_hangup(&priv->port);
}
static unsigned int kgdb_nmi_tty_write_room(struct tty_struct *tty)
{
/* Actually, we can handle any amount as we use polled writes. */
return 2048;
}
static ssize_t kgdb_nmi_tty_write(struct tty_struct *tty, const u8 *buf,
size_t c)
{
int i;
for (i = 0; i < c; i++)
dbg_io_ops->write_char(buf[i]);
return c;
}
static const struct tty_operations kgdb_nmi_tty_ops = {
.open = kgdb_nmi_tty_open,
.close = kgdb_nmi_tty_close,
.install = kgdb_nmi_tty_install,
.cleanup = kgdb_nmi_tty_cleanup,
.hangup = kgdb_nmi_tty_hangup,
.write_room = kgdb_nmi_tty_write_room,
.write = kgdb_nmi_tty_write,
};
int kgdb_register_nmi_console(void)
{
int ret;
if (!arch_kgdb_ops.enable_nmi)
return 0;
kgdb_nmi_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
if (IS_ERR(kgdb_nmi_tty_driver)) {
pr_err("%s: cannot allocate tty\n", __func__);
return PTR_ERR(kgdb_nmi_tty_driver);
}
kgdb_nmi_tty_driver->driver_name = "ttyNMI";
kgdb_nmi_tty_driver->name = "ttyNMI";
kgdb_nmi_tty_driver->num = 1;
kgdb_nmi_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
kgdb_nmi_tty_driver->subtype = SERIAL_TYPE_NORMAL;
kgdb_nmi_tty_driver->init_termios = tty_std_termios;
tty_termios_encode_baud_rate(&kgdb_nmi_tty_driver->init_termios,
KGDB_NMI_BAUD, KGDB_NMI_BAUD);
tty_set_operations(kgdb_nmi_tty_driver, &kgdb_nmi_tty_ops);
ret = tty_register_driver(kgdb_nmi_tty_driver);
if (ret) {
pr_err("%s: can't register tty driver: %d\n", __func__, ret);
goto err_drv_reg;
}
register_console(&kgdb_nmi_console);
return 0;
err_drv_reg:
tty_driver_kref_put(kgdb_nmi_tty_driver);
return ret;
}
EXPORT_SYMBOL_GPL(kgdb_register_nmi_console);
int kgdb_unregister_nmi_console(void)
{
int ret;
if (!arch_kgdb_ops.enable_nmi)
return 0;
arch_kgdb_ops.enable_nmi(0);
ret = unregister_console(&kgdb_nmi_console);
if (ret)
return ret;
tty_unregister_driver(kgdb_nmi_tty_driver);
tty_driver_kref_put(kgdb_nmi_tty_driver);
return 0;
}
EXPORT_SYMBOL_GPL(kgdb_unregister_nmi_console);
| linux-master | drivers/tty/serial/kgdb_nmi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* timbuart.c timberdale FPGA UART driver
* Copyright (c) 2009 Intel Corporation
*/
/* Supports:
* Timberdale FPGA UART
*/
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "timbuart.h"
struct timbuart_port {
struct uart_port port;
struct tasklet_struct tasklet;
int usedma;
u32 last_ier;
struct platform_device *dev;
};
static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
921600, 1843200, 3250000};
static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier);
static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
static void timbuart_stop_rx(struct uart_port *port)
{
/* spin lock held by upper layer, disable all RX interrupts */
u32 ier = ioread32(port->membase + TIMBUART_IER) & ~RXFLAGS;
iowrite32(ier, port->membase + TIMBUART_IER);
}
static void timbuart_stop_tx(struct uart_port *port)
{
/* spinlock held by upper layer, disable TX interrupt */
u32 ier = ioread32(port->membase + TIMBUART_IER) & ~TXBAE;
iowrite32(ier, port->membase + TIMBUART_IER);
}
static void timbuart_start_tx(struct uart_port *port)
{
struct timbuart_port *uart =
container_of(port, struct timbuart_port, port);
/* do not transfer anything here -> fire off the tasklet */
tasklet_schedule(&uart->tasklet);
}
static unsigned int timbuart_tx_empty(struct uart_port *port)
{
u32 isr = ioread32(port->membase + TIMBUART_ISR);
return (isr & TXBE) ? TIOCSER_TEMT : 0;
}
static void timbuart_flush_buffer(struct uart_port *port)
{
if (!timbuart_tx_empty(port)) {
u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
TIMBUART_CTRL_FLSHTX;
iowrite8(ctl, port->membase + TIMBUART_CTRL);
iowrite32(TXBF, port->membase + TIMBUART_ISR);
}
}
static void timbuart_rx_chars(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
while (ioread32(port->membase + TIMBUART_ISR) & RXDP) {
u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
port->icount.rx++;
tty_insert_flip_char(tport, ch, TTY_NORMAL);
}
tty_flip_buffer_push(tport);
dev_dbg(port->dev, "%s - total read %d bytes\n",
__func__, port->icount.rx);
}
static void timbuart_tx_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
while (!(ioread32(port->membase + TIMBUART_ISR) & TXBF) &&
!uart_circ_empty(xmit)) {
iowrite8(xmit->buf[xmit->tail],
port->membase + TIMBUART_TXFIFO);
uart_xmit_advance(port, 1);
}
dev_dbg(port->dev,
"%s - total written %d bytes, CTL: %x, RTS: %x, baud: %x\n",
__func__,
port->icount.tx,
ioread8(port->membase + TIMBUART_CTRL),
port->mctrl & TIOCM_RTS,
ioread8(port->membase + TIMBUART_BAUDRATE));
}
static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier)
{
struct timbuart_port *uart =
container_of(port, struct timbuart_port, port);
struct circ_buf *xmit = &port->state->xmit;
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
if (port->x_char)
return;
if (isr & TXFLAGS) {
timbuart_tx_chars(port);
/* clear all TX interrupts */
iowrite32(TXFLAGS, port->membase + TIMBUART_ISR);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
} else
/* Re-enable any tx interrupt */
*ier |= uart->last_ier & TXFLAGS;
	/* Keep the almost-empty interrupt enabled while there are still
	 * chars in the transmit buffer; the upper layer is then woken up
	 * later, from the interrupt, to give the data some time to go out.
	 */
if (!uart_circ_empty(xmit))
*ier |= TXBAE;
dev_dbg(port->dev, "%s - leaving\n", __func__);
}
static void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
{
if (isr & RXFLAGS) {
/* Some RX status is set */
if (isr & RXBF) {
u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
TIMBUART_CTRL_FLSHRX;
iowrite8(ctl, port->membase + TIMBUART_CTRL);
port->icount.overrun++;
} else if (isr & (RXDP))
timbuart_rx_chars(port);
/* ack all RX interrupts */
iowrite32(RXFLAGS, port->membase + TIMBUART_ISR);
}
/* always have the RX interrupts enabled */
*ier |= RXBAF | RXBF | RXTT;
dev_dbg(port->dev, "%s - leaving\n", __func__);
}
static void timbuart_tasklet(struct tasklet_struct *t)
{
struct timbuart_port *uart = from_tasklet(uart, t, tasklet);
u32 isr, ier = 0;
spin_lock(&uart->port.lock);
isr = ioread32(uart->port.membase + TIMBUART_ISR);
dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
if (!uart->usedma)
timbuart_handle_tx_port(&uart->port, isr, &ier);
timbuart_mctrl_check(&uart->port, isr, &ier);
if (!uart->usedma)
timbuart_handle_rx_port(&uart->port, isr, &ier);
iowrite32(ier, uart->port.membase + TIMBUART_IER);
spin_unlock(&uart->port.lock);
dev_dbg(uart->port.dev, "%s leaving\n", __func__);
}
static unsigned int timbuart_get_mctrl(struct uart_port *port)
{
u8 cts = ioread8(port->membase + TIMBUART_CTRL);
dev_dbg(port->dev, "%s - cts %x\n", __func__, cts);
if (cts & TIMBUART_CTRL_CTS)
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
else
return TIOCM_DSR | TIOCM_CAR;
}
static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
dev_dbg(port->dev, "%s - %x\n", __func__, mctrl);
if (mctrl & TIOCM_RTS)
iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
else
iowrite8(0, port->membase + TIMBUART_CTRL);
}
static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier)
{
unsigned int cts;
if (isr & CTS_DELTA) {
/* ack */
iowrite32(CTS_DELTA, port->membase + TIMBUART_ISR);
cts = timbuart_get_mctrl(port);
uart_handle_cts_change(port, cts & TIOCM_CTS);
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
*ier |= CTS_DELTA;
}
static void timbuart_break_ctl(struct uart_port *port, int ctl)
{
/* N/A */
}
static int timbuart_startup(struct uart_port *port)
{
struct timbuart_port *uart =
container_of(port, struct timbuart_port, port);
dev_dbg(port->dev, "%s\n", __func__);
iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
iowrite32(0x1ff, port->membase + TIMBUART_ISR);
/* Enable all but TX interrupts */
iowrite32(RXBAF | RXBF | RXTT | CTS_DELTA,
port->membase + TIMBUART_IER);
return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
"timb-uart", uart);
}
static void timbuart_shutdown(struct uart_port *port)
{
struct timbuart_port *uart =
container_of(port, struct timbuart_port, port);
dev_dbg(port->dev, "%s\n", __func__);
free_irq(port->irq, uart);
iowrite32(0, port->membase + TIMBUART_IER);
timbuart_flush_buffer(port);
}
static int get_bindex(int baud)
{
int i;
for (i = 0; i < ARRAY_SIZE(baudrates); i++)
if (baud <= baudrates[i])
return i;
return -1;
}
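/*
 * Usage sketch for get_bindex() (illustrative only): requested rates are
 * rounded up to the nearest supported rate, and the returned index is what
 * set_termios() below programs into the baud-rate register.
 */
#if 0
static void timbuart_bindex_example(void)
{
	int a = get_bindex(57600);	/* 3: exact match */
	int b = get_bindex(60000);	/* 4: rounded up to 115200 */
	int c = get_bindex(4000000);	/* -1: above all supported rates */
}
#endif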
static void timbuart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
unsigned int baud;
short bindex;
unsigned long flags;
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
bindex = get_bindex(baud);
dev_dbg(port->dev, "%s - bindex %d\n", __func__, bindex);
if (bindex < 0)
bindex = 0;
baud = baudrates[bindex];
/* The serial layer calls into this once with old = NULL when setting
up initially */
if (old)
tty_termios_copy_hw(termios, old);
tty_termios_encode_baud_rate(termios, baud, baud);
spin_lock_irqsave(&port->lock, flags);
iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
uart_update_timeout(port, termios->c_cflag, baud);
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *timbuart_type(struct uart_port *port)
{
	return port->type == PORT_TIMBUART ? "timbuart" : NULL;
}
/* We do not request/release mappings of the registers here,
 * currently it's done in the probe function.
*/
static void timbuart_release_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
int size =
resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (port->flags & UPF_IOREMAP) {
iounmap(port->membase);
port->membase = NULL;
}
release_mem_region(port->mapbase, size);
}
static int timbuart_request_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
int size =
resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (!request_mem_region(port->mapbase, size, "timb-uart"))
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
port->membase = ioremap(port->mapbase, size);
if (port->membase == NULL) {
release_mem_region(port->mapbase, size);
return -ENOMEM;
}
}
return 0;
}
static irqreturn_t timbuart_handleinterrupt(int irq, void *devid)
{
struct timbuart_port *uart = (struct timbuart_port *)devid;
if (ioread8(uart->port.membase + TIMBUART_IPR)) {
uart->last_ier = ioread32(uart->port.membase + TIMBUART_IER);
/* disable interrupts, the tasklet enables them again */
iowrite32(0, uart->port.membase + TIMBUART_IER);
/* fire off bottom half */
tasklet_schedule(&uart->tasklet);
return IRQ_HANDLED;
} else
return IRQ_NONE;
}
/*
* Configure/autoconfigure the port.
*/
static void timbuart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_TIMBUART;
timbuart_request_port(port);
}
}
static int timbuart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
/* we don't want the core code to modify any port params */
return -EINVAL;
}
static const struct uart_ops timbuart_ops = {
.tx_empty = timbuart_tx_empty,
.set_mctrl = timbuart_set_mctrl,
.get_mctrl = timbuart_get_mctrl,
.stop_tx = timbuart_stop_tx,
.start_tx = timbuart_start_tx,
.flush_buffer = timbuart_flush_buffer,
.stop_rx = timbuart_stop_rx,
.break_ctl = timbuart_break_ctl,
.startup = timbuart_startup,
.shutdown = timbuart_shutdown,
.set_termios = timbuart_set_termios,
.type = timbuart_type,
.release_port = timbuart_release_port,
.request_port = timbuart_request_port,
.config_port = timbuart_config_port,
.verify_port = timbuart_verify_port
};
static struct uart_driver timbuart_driver = {
.owner = THIS_MODULE,
.driver_name = "timberdale_uart",
.dev_name = "ttyTU",
.major = TIMBUART_MAJOR,
.minor = TIMBUART_MINOR,
.nr = 1
};
static int timbuart_probe(struct platform_device *dev)
{
int err, irq;
struct timbuart_port *uart;
struct resource *iomem;
dev_dbg(&dev->dev, "%s\n", __func__);
uart = kzalloc(sizeof(*uart), GFP_KERNEL);
if (!uart) {
		err = -ENOMEM;
goto err_mem;
}
uart->usedma = 0;
uart->port.uartclk = 3250000 * 16;
uart->port.fifosize = TIMBUART_FIFO_SIZE;
uart->port.regshift = 2;
uart->port.iotype = UPIO_MEM;
uart->port.ops = &timbuart_ops;
uart->port.irq = 0;
uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
uart->port.line = 0;
uart->port.dev = &dev->dev;
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!iomem) {
		err = -ENODEV;
goto err_register;
}
uart->port.mapbase = iomem->start;
uart->port.membase = NULL;
irq = platform_get_irq(dev, 0);
if (irq < 0) {
err = -EINVAL;
goto err_register;
}
uart->port.irq = irq;
tasklet_setup(&uart->tasklet, timbuart_tasklet);
err = uart_register_driver(&timbuart_driver);
if (err)
goto err_register;
err = uart_add_one_port(&timbuart_driver, &uart->port);
if (err)
goto err_add_port;
platform_set_drvdata(dev, uart);
return 0;
err_add_port:
uart_unregister_driver(&timbuart_driver);
err_register:
kfree(uart);
err_mem:
printk(KERN_ERR "timberdale: Failed to register Timberdale UART: %d\n",
err);
return err;
}
static int timbuart_remove(struct platform_device *dev)
{
struct timbuart_port *uart = platform_get_drvdata(dev);
tasklet_kill(&uart->tasklet);
uart_remove_one_port(&timbuart_driver, &uart->port);
uart_unregister_driver(&timbuart_driver);
kfree(uart);
return 0;
}
static struct platform_driver timbuart_platform_driver = {
.driver = {
.name = "timb-uart",
},
.probe = timbuart_probe,
.remove = timbuart_remove,
};
module_platform_driver(timbuart_platform_driver);
MODULE_DESCRIPTION("Timberdale UART driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:timb-uart");
| linux-master | drivers/tty/serial/timbuart.c |