Message ID | 1594966857-5215-3-git-send-email-sumit.garg@linaro.org
---|---
State | New
Series | Introduce NMI aware serial drivers
On Fri, Jul 17, 2020 at 11:50:55AM +0530, Sumit Garg wrote:
> With the advent of pseudo NMIs on arm64 platforms, it will be possible
> to have NMI driven serial drivers which enables us to have magic sysrq
> running in NMI context. So add NMI framework APIs in serial core that
> can be leveraged by serial drivers operating in polling mode to have
> NMI driven serial transfers.

As previously, the whole "with the advent of pseudo NMI" isn't really
very important to what the patch actually does.

Also, do we need to mention polling mode here? It is something of a
distraction, if only because this framework is largely here to support
the non-polled features of the serial driver.

> The general idea is to intercept RX characters in NMI context, if those
> are specific to magic sysrq then allow corresponding handler to run in
> NMI context. Otherwise defer all NMI unsafe RX and TX operations to IRQ
                                   ^^^^^^^^^^^^
> work queue in order to run those in normal interrupt context.

Perhaps remove "NMI unsafe" here.

The deciding factor on what to run in NMI mode is not whether or not
something is NMI safe. Instead we follow the principle that if
something does not *need* to run from NMI then don't run it from NMI.

> Also, since magic sysrq entry APIs can be invoked from NMI context, so
> make those APIs NMI safe via deferring NMI unsafe work to IRQ work queue.

s/can/will need to/ for much the same reasons.


Daniel.
On Mon, 20 Jul 2020 at 19:07, Daniel Thompson <daniel.thompson@linaro.org> wrote:
>
> On Fri, Jul 17, 2020 at 11:50:55AM +0530, Sumit Garg wrote:
> > With the advent of pseudo NMIs on arm64 platforms, it will be possible
> > to have NMI driven serial drivers which enables us to have magic sysrq
> > running in NMI context. So add NMI framework APIs in serial core that
> > can be leveraged by serial drivers operating in polling mode to have
> > NMI driven serial transfers.
>
> As previously, the whole "with the advent of pseudo NMI" isn't really
> very important to what the patch actually does.

Okay, will get rid of this text.

> Also, do we need to mention polling mode here? It is something of a
> distraction, if only because this framework is largely here to support
> the non-polled features of the serial driver.
>

Okay, will remove the mention of polling mode and instead add the
following paragraph to explain why we have to put it under
CONFIG_CONSOLE_POLL:

    NMI framework APIs are kept under CONFIG_CONSOLE_POLL as currently
    kgdb operating in polling mode is the only user to enable NMI driven
    serial drivers.

> > The general idea is to intercept RX characters in NMI context, if those
> > are specific to magic sysrq then allow corresponding handler to run in
> > NMI context. Otherwise defer all NMI unsafe RX and TX operations to IRQ
>                                      ^^^^^^^^^^^^
> > work queue in order to run those in normal interrupt context.
>
> Perhaps remove "NMI unsafe" here.
>
> The deciding factor on what to run in NMI mode is not whether or not
> something is NMI safe. Instead we follow the principle that if
> something does not *need* to run from NMI then don't run it from NMI.
>

Okay.

> > Also, since magic sysrq entry APIs can be invoked from NMI context, so
> > make those APIs NMI safe via deferring NMI unsafe work to IRQ work queue.
>
> s/can/will need to/ for much the same reasons.
>

Okay.

-Sumit

>
> Daniel.
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 57840cf..6342e90 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -3181,8 +3181,14 @@ static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
 		return true;
 	}
 
+#ifdef CONFIG_CONSOLE_POLL
+	if (in_nmi())
+		irq_work_queue(&port->nmi_state.sysrq_toggle_work);
+	else
+		schedule_work(&sysrq_enable_work);
+#else
 	schedule_work(&sysrq_enable_work);
-
+#endif
 	port->sysrq = 0;
 	return true;
 }
@@ -3273,12 +3279,122 @@ int uart_handle_break(struct uart_port *port)
 		port->sysrq = 0;
 	}
 
-	if (port->flags & UPF_SAK)
+	if (port->flags & UPF_SAK) {
+#ifdef CONFIG_CONSOLE_POLL
+		if (in_nmi())
+			irq_work_queue(&port->nmi_state.sysrq_sak_work);
+		else
+			do_SAK(state->port.tty);
+#else
 		do_SAK(state->port.tty);
+#endif
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(uart_handle_break);
 
+#ifdef CONFIG_CONSOLE_POLL
+int uart_nmi_handle_char(struct uart_port *port, unsigned int status,
+			 unsigned int overrun, unsigned int ch,
+			 unsigned int flag)
+{
+	struct uart_nmi_rx_data rx_data;
+
+	if (!in_nmi())
+		return 0;
+
+	rx_data.status = status;
+	rx_data.overrun = overrun;
+	rx_data.ch = ch;
+	rx_data.flag = flag;
+
+	if (!kfifo_in(&port->nmi_state.rx_fifo, &rx_data, 1))
+		++port->icount.buf_overrun;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(uart_nmi_handle_char);
+
+static void uart_nmi_rx_work(struct irq_work *rx_work)
+{
+	struct uart_nmi_state *nmi_state =
+			container_of(rx_work, struct uart_nmi_state, rx_work);
+	struct uart_port *port =
+			container_of(nmi_state, struct uart_port, nmi_state);
+	struct uart_nmi_rx_data rx_data;
+
+	/*
+	 * In polling mode, serial device is initialized much prior to
+	 * TTY port becoming active. This scenario is especially useful
+	 * from debugging perspective such that magic sysrq or debugger
+	 * entry would still be possible even when TTY port isn't
+	 * active (consider a boot hang case or if a user hasn't opened
+	 * the serial port). So we discard any other RX data apart from
+	 * magic sysrq commands in case TTY port isn't active.
+	 */
+	if (!port->state || !tty_port_active(&port->state->port)) {
+		kfifo_reset(&nmi_state->rx_fifo);
+		return;
+	}
+
+	spin_lock(&port->lock);
+	while (kfifo_out(&nmi_state->rx_fifo, &rx_data, 1))
+		uart_insert_char(port, rx_data.status, rx_data.overrun,
+				 rx_data.ch, rx_data.flag);
+	spin_unlock(&port->lock);
+
+	tty_flip_buffer_push(&port->state->port);
+}
+
+static void uart_nmi_tx_work(struct irq_work *tx_work)
+{
+	struct uart_nmi_state *nmi_state =
+			container_of(tx_work, struct uart_nmi_state, tx_work);
+	struct uart_port *port =
+			container_of(nmi_state, struct uart_port, nmi_state);
+
+	spin_lock(&port->lock);
+	if (nmi_state->tx_irq_callback)
+		nmi_state->tx_irq_callback(port);
+	spin_unlock(&port->lock);
+}
+
+static void uart_nmi_sak_work(struct irq_work *work)
+{
+	struct uart_nmi_state *nmi_state =
+			container_of(work, struct uart_nmi_state, sysrq_sak_work);
+	struct uart_port *port =
+			container_of(nmi_state, struct uart_port, nmi_state);
+
+	do_SAK(port->state->port.tty);
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
+static void uart_nmi_toggle_work(struct irq_work *work)
+{
+	schedule_work(&sysrq_enable_work);
+}
+#endif
+
+int uart_nmi_state_init(struct uart_port *port)
+{
+	int ret;
+
+	ret = kfifo_alloc(&port->nmi_state.rx_fifo, 256, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	init_irq_work(&port->nmi_state.rx_work, uart_nmi_rx_work);
+	init_irq_work(&port->nmi_state.tx_work, uart_nmi_tx_work);
+	init_irq_work(&port->nmi_state.sysrq_sak_work, uart_nmi_sak_work);
+#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
+	init_irq_work(&port->nmi_state.sysrq_toggle_work, uart_nmi_toggle_work);
+#endif
+	return ret;
+}
+EXPORT_SYMBOL_GPL(uart_nmi_state_init);
+#endif
+
 EXPORT_SYMBOL(uart_write_wakeup);
 EXPORT_SYMBOL(uart_register_driver);
 EXPORT_SYMBOL(uart_unregister_driver);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 9fd550e..84487a9 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -18,6 +18,8 @@
 #include <linux/tty.h>
 #include <linux/mutex.h>
 #include <linux/sysrq.h>
+#include <linux/irq_work.h>
+#include <linux/kfifo.h>
 #include <uapi/linux/serial_core.h>
 
 #ifdef CONFIG_SERIAL_CORE_CONSOLE
@@ -103,6 +105,28 @@ struct uart_icount {
 typedef unsigned int __bitwise upf_t;
 typedef unsigned int __bitwise upstat_t;
 
+#ifdef CONFIG_CONSOLE_POLL
+struct uart_nmi_rx_data {
+	unsigned int		status;
+	unsigned int		overrun;
+	unsigned int		ch;
+	unsigned int		flag;
+};
+
+struct uart_nmi_state {
+	bool			active;
+
+	struct irq_work		tx_work;
+	void			(*tx_irq_callback)(struct uart_port *port);
+
+	struct irq_work		rx_work;
+	DECLARE_KFIFO_PTR(rx_fifo, struct uart_nmi_rx_data);
+
+	struct irq_work		sysrq_sak_work;
+	struct irq_work		sysrq_toggle_work;
+};
+#endif
+
 struct uart_port {
 	spinlock_t		lock;			/* port lock */
 	unsigned long		iobase;			/* in/out[bwl] */
@@ -255,6 +279,9 @@ struct uart_port {
 	struct gpio_desc	*rs485_term_gpio;	/* enable RS485 bus termination */
 	struct serial_iso7816   iso7816;
 	void			*private_data;		/* generic platform data pointer */
+#ifdef CONFIG_CONSOLE_POLL
+	struct uart_nmi_state   nmi_state;
+#endif
 };
 
 static inline int serial_port_in(struct uart_port *up, int offset)
@@ -475,4 +502,44 @@ extern int uart_handle_break(struct uart_port *port);
 					 !((cflag) & CLOCAL))
 
 int uart_get_rs485_mode(struct uart_port *port);
+
+/*
+ * The following are helper functions for the NMI aware serial drivers.
+ * Currently NMI support is only enabled under polling mode.
+ */
+
+#ifdef CONFIG_CONSOLE_POLL
+int uart_nmi_state_init(struct uart_port *port);
+int uart_nmi_handle_char(struct uart_port *port, unsigned int status,
+			 unsigned int overrun, unsigned int ch,
+			 unsigned int flag);
+
+static inline bool uart_nmi_active(struct uart_port *port)
+{
+	return port->nmi_state.active;
+}
+
+static inline void uart_set_nmi_active(struct uart_port *port, bool val)
+{
+	port->nmi_state.active = val;
+}
+#else
+static inline int uart_nmi_handle_char(struct uart_port *port,
+				       unsigned int status,
+				       unsigned int overrun,
+				       unsigned int ch, unsigned int flag)
+{
+	return 0;
+}
+
+static inline bool uart_nmi_active(struct uart_port *port)
+{
+	return false;
+}
+
+static inline void uart_set_nmi_active(struct uart_port *port, bool val)
+{
+}
+#endif
+
 #endif /* LINUX_SERIAL_CORE_H */
With the advent of pseudo NMIs on arm64 platforms, it will be possible
to have NMI driven serial drivers which enables us to have magic sysrq
running in NMI context. So add NMI framework APIs in serial core that
can be leveraged by serial drivers operating in polling mode to have
NMI driven serial transfers.

The general idea is to intercept RX characters in NMI context, if those
are specific to magic sysrq then allow corresponding handler to run in
NMI context. Otherwise defer all NMI unsafe RX and TX operations to IRQ
work queue in order to run those in normal interrupt context.

Also, since magic sysrq entry APIs can be invoked from NMI context, so
make those APIs NMI safe via deferring NMI unsafe work to IRQ work queue.

Signed-off-by: Sumit Garg <sumit.garg@linaro.org>
---
 drivers/tty/serial/serial_core.c | 120 ++++++++++++++++++++++++++++++++++++++-
 include/linux/serial_core.h      |  67 ++++++++++++++++++++++
 2 files changed, 185 insertions(+), 2 deletions(-)

-- 
2.7.4
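
For illustration only (not part of this patch): a rough sketch of how the RX
path of an NMI aware UART driver might sit on top of these APIs. The
foo_uart_* helpers and FOO_STATUS_OVERRUN are placeholder assumptions; only
the serial-core calls (uart_handle_sysrq_char(), uart_nmi_handle_char(),
uart_insert_char(), irq_work_queue() on port->nmi_state.rx_work) come from
the patch above, and the driver is assumed to have called
uart_nmi_state_init() and uart_set_nmi_active() during port setup.

#include <linux/serial_core.h>
#include <linux/tty_flip.h>
#include <linux/irq_work.h>

#define FOO_STATUS_OVERRUN	0x8	/* placeholder overrun status bit */

/* Placeholder register accessors for an imaginary "foo" UART. */
static bool foo_uart_rx_ready(struct uart_port *port)
{
	return false;	/* would test the RX FIFO not-empty flag */
}

static unsigned int foo_uart_read_char(struct uart_port *port,
				       unsigned int *status)
{
	*status = 0;	/* would read the status register */
	return 0;	/* would read the data register */
}

/*
 * Called both from the (pseudo) NMI handler and from the normal IRQ
 * handler (the latter is assumed to hold port->lock, as usual).
 */
static void foo_uart_rx_chars(struct uart_port *port)
{
	unsigned int ch, status;
	bool deferred = false;

	while (foo_uart_rx_ready(port)) {
		ch = foo_uart_read_char(port, &status);

		/* Magic sysrq is recognised immediately, even in NMI. */
		if (uart_handle_sysrq_char(port, ch))
			continue;

		/*
		 * In NMI context this parks the character in the NMI kfifo
		 * and everything else is deferred to rx_work; outside NMI
		 * it returns 0 and we take the usual path below.
		 */
		if (uart_nmi_handle_char(port, status, FOO_STATUS_OVERRUN,
					 ch, TTY_NORMAL)) {
			deferred = true;
			continue;
		}

		uart_insert_char(port, status, FOO_STATUS_OVERRUN, ch,
				 TTY_NORMAL);
	}

	if (deferred)
		irq_work_queue(&port->nmi_state.rx_work);
	else
		tty_flip_buffer_push(&port->state->port);
}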