// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdio.h>
#include <unistd.h>
#include <stdarg.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <net_user.h>
#include <os.h>
#include <um_malloc.h>
int tap_open_common(void *dev, char *gate_addr)
{
int tap_addr[4];
if (gate_addr == NULL)
return 0;
if (sscanf(gate_addr, "%d.%d.%d.%d", &tap_addr[0],
&tap_addr[1], &tap_addr[2], &tap_addr[3]) != 4) {
printk(UM_KERN_ERR "Invalid tap IP address - '%s'\n",
gate_addr);
return -EINVAL;
}
return 0;
}
void tap_check_ips(char *gate_addr, unsigned char *eth_addr)
{
int tap_addr[4];
if ((gate_addr != NULL) &&
(sscanf(gate_addr, "%d.%d.%d.%d", &tap_addr[0],
&tap_addr[1], &tap_addr[2], &tap_addr[3]) == 4) &&
(eth_addr[0] == tap_addr[0]) &&
(eth_addr[1] == tap_addr[1]) &&
(eth_addr[2] == tap_addr[2]) &&
(eth_addr[3] == tap_addr[3])) {
printk(UM_KERN_ERR "The tap IP address and the UML eth IP "
"address must be different\n");
}
}
/* Do reliable error handling as this fails frequently enough. */
void read_output(int fd, char *output, int len)
{
int remain, ret, expected;
char c;
char *str;
if (output == NULL) {
output = &c;
len = sizeof(c);
}
*output = '\0';
ret = read(fd, &remain, sizeof(remain));
if (ret != sizeof(remain)) {
if (ret < 0)
ret = -errno;
expected = sizeof(remain);
str = "length";
goto err;
}
while (remain != 0) {
expected = (remain < len) ? remain : len;
ret = read(fd, output, expected);
if (ret != expected) {
if (ret < 0)
ret = -errno;
str = "data";
goto err;
}
remain -= ret;
}
return;
err:
if (ret < 0)
printk(UM_KERN_ERR "read_output - read of %s failed, "
"errno = %d\n", str, -ret);
else
printk(UM_KERN_ERR "read_output - read of %s failed, read only "
"%d of %d bytes\n", str, ret, expected);
}
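/*
 * Illustrative sketch only (not part of net_user.c): read_output() above
 * expects a length-prefixed frame on the pipe -- a native-endian int byte
 * count followed by that many bytes of text.  A matching writer on the
 * helper side of the pipe could look roughly like this; the function name
 * is hypothetical.
 */
static int example_write_output(int fd, const char *msg)
{
	int len = strlen(msg) + 1;	/* include the terminating NUL */

	if (write(fd, &len, sizeof(len)) != sizeof(len))
		return -1;
	if (write(fd, msg, len) != len)
		return -1;
	return 0;
}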
int net_read(int fd, void *buf, int len)
{
int n;
n = read(fd, buf, len);
if ((n < 0) && (errno == EAGAIN))
return 0;
else if (n == 0)
return -ENOTCONN;
return n;
}
int net_recvfrom(int fd, void *buf, int len)
{
int n;
CATCH_EINTR(n = recvfrom(fd, buf, len, 0, NULL, NULL));
if (n < 0) {
if (errno == EAGAIN)
return 0;
return -errno;
}
else if (n == 0)
return -ENOTCONN;
return n;
}
int net_write(int fd, void *buf, int len)
{
int n;
n = write(fd, buf, len);
if ((n < 0) && (errno == EAGAIN))
return 0;
else if (n == 0)
return -ENOTCONN;
return n;
}
int net_send(int fd, void *buf, int len)
{
int n;
CATCH_EINTR(n = send(fd, buf, len, 0));
if (n < 0) {
if (errno == EAGAIN)
return 0;
return -errno;
}
else if (n == 0)
return -ENOTCONN;
return n;
}
int net_sendto(int fd, void *buf, int len, void *to, int sock_len)
{
int n;
CATCH_EINTR(n = sendto(fd, buf, len, 0, (struct sockaddr *) to,
sock_len));
if (n < 0) {
if (errno == EAGAIN)
return 0;
return -errno;
}
else if (n == 0)
return -ENOTCONN;
return n;
}
struct change_pre_exec_data {
int close_me;
int stdout_fd;
};
static void change_pre_exec(void *arg)
{
struct change_pre_exec_data *data = arg;
close(data->close_me);
dup2(data->stdout_fd, 1);
}
static int change_tramp(char **argv, char *output, int output_len)
{
int pid, fds[2], err;
struct change_pre_exec_data pe_data;
err = os_pipe(fds, 1, 0);
if (err < 0) {
printk(UM_KERN_ERR "change_tramp - pipe failed, err = %d\n",
-err);
return err;
}
pe_data.close_me = fds[0];
pe_data.stdout_fd = fds[1];
pid = run_helper(change_pre_exec, &pe_data, argv);
if (pid > 0) /* Avoid hang as we won't get data in failure case. */
read_output(fds[0], output, output_len);
close(fds[0]);
close(fds[1]);
if (pid > 0)
helper_wait(pid);
return pid;
}
static void change(char *dev, char *what, unsigned char *addr,
unsigned char *netmask)
{
char addr_buf[sizeof("255.255.255.255\0")];
char netmask_buf[sizeof("255.255.255.255\0")];
char version[sizeof("nnnnn\0")];
char *argv[] = { "uml_net", version, what, dev, addr_buf,
netmask_buf, NULL };
char *output;
int output_len, pid;
sprintf(version, "%d", UML_NET_VERSION);
sprintf(addr_buf, "%d.%d.%d.%d", addr[0], addr[1], addr[2], addr[3]);
sprintf(netmask_buf, "%d.%d.%d.%d", netmask[0], netmask[1],
netmask[2], netmask[3]);
output_len = UM_KERN_PAGE_SIZE;
output = uml_kmalloc(output_len, UM_GFP_KERNEL);
if (output == NULL)
printk(UM_KERN_ERR "change : failed to allocate output "
"buffer\n");
pid = change_tramp(argv, output, output_len);
if (pid < 0) {
kfree(output);
return;
}
if (output != NULL) {
printk("%s", output);
kfree(output);
}
}
void open_addr(unsigned char *addr, unsigned char *netmask, void *arg)
{
change(arg, "add", addr, netmask);
}
void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)
{
change(arg, "del", addr, netmask);
}
char *split_if_spec(char *str, ...)
{
char **arg, *end, *ret = NULL;
va_list ap;
va_start(ap, str);
while ((arg = va_arg(ap, char **)) != NULL) {
if (*str == '\0')
goto out;
end = strchr(str, ',');
if (end != str)
*arg = str;
if (end == NULL)
goto out;
*end++ = '\0';
str = end;
}
ret = str;
out:
va_end(ap);
return ret;
}
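/*
 * Illustrative sketch only (not part of net_user.c): how a transport setup
 * path (such as tap_setup_common() in net_kern.c further below) can use
 * split_if_spec() to pull the comma-separated fields out of an interface
 * specification string.  The function and variable names here are
 * hypothetical.
 */
static int example_parse_if_spec(char *spec)
{
	char *dev_name = NULL, *mac = NULL, *gate_addr = NULL, *remain;

	/*
	 * Each char ** argument receives one comma-separated field, in
	 * order; the NULL sentinel ends the argument list.  A non-NULL
	 * return value points at unconsumed trailing text.
	 */
	remain = split_if_spec(spec, &dev_name, &mac, &gate_addr, NULL);
	if (remain != NULL)
		return -EINVAL;	/* extra garbage after the last field */

	/* Missing fields are simply left NULL, e.g. "tap0,,10.0.0.1". */
	return 0;
}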
/* ==== end of file: arch/um/drivers/net_user.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
*/
#include <stddef.h>
#include <errno.h>
#include <fcntl.h>
#include "chan_user.h"
#include <os.h>
/* This address is used only as a unique identifier */
static int null_chan;
static void *null_init(char *str, int device, const struct chan_opts *opts)
{
return &null_chan;
}
static int null_open(int input, int output, int primary, void *d,
char **dev_out)
{
int fd;
*dev_out = NULL;
fd = open(DEV_NULL, O_RDWR);
return (fd < 0) ? -errno : fd;
}
static int null_read(int fd, char *c_out, void *unused)
{
return -ENODEV;
}
static void null_free(void *data)
{
}
const struct chan_ops null_ops = {
.type = "null",
.init = null_init,
.open = null_open,
.close = generic_close,
.read = null_read,
.write = generic_write,
.console_write = generic_console_write,
.window_size = generic_window_size,
.free = null_free,
.winch = 0,
};
/* ==== end of file: arch/um/drivers/null.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/console.h>
#include "chan_user.h"
/* ----------------------------------------------------------------------------- */
/* trivial console driver -- simply dump everything to stderr */
/*
* Don't register by default -- as this registers very early in the
* boot process it becomes the default console.
*
* Initialized at init time.
*/
static int use_stderr_console = 0;
static void stderr_console_write(struct console *console, const char *string,
unsigned len)
{
generic_write(2 /* stderr */, string, len, NULL);
}
static struct console stderr_console = {
.name = "stderr",
.write = stderr_console_write,
.flags = CON_PRINTBUFFER,
};
static int __init stderr_console_init(void)
{
if (use_stderr_console)
register_console(&stderr_console);
return 0;
}
console_initcall(stderr_console_init);
static int stderr_setup(char *str)
{
if (!str)
return 0;
use_stderr_console = simple_strtoul(str,&str,0);
return 1;
}
__setup("stderr=", stderr_setup);
/* The previous behavior of not unregistering led to /dev/console being
* impossible to open. My FC5 filesystem started having init die, and the
* system panicking because of this. Unregistering causes the real
* console to become the default console, and /dev/console can then be
* opened. Making this an initcall makes this happen late enough that
* there is no added value in dumping everything to stderr, and the
* normal console is good enough to show you all available output.
*/
static int __init unregister_stderr(void)
{
unregister_console(&stderr_console);
return 0;
}
__initcall(unregister_stderr);
/* ==== end of file: arch/um/drivers/stderr_console.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
*/
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <init.h>
#include <irq_kern.h>
#include <os.h>
#include "port.h"
struct port_list {
struct list_head list;
atomic_t wait_count;
int has_connection;
struct completion done;
int port;
int fd;
spinlock_t lock;
struct list_head pending;
struct list_head connections;
};
struct port_dev {
struct port_list *port;
int helper_pid;
int telnetd_pid;
};
struct connection {
struct list_head list;
int fd;
int helper_pid;
int socket[2];
int telnetd_pid;
struct port_list *port;
};
static irqreturn_t pipe_interrupt(int irq, void *data)
{
struct connection *conn = data;
int fd;
fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
if (fd < 0) {
if (fd == -EAGAIN)
return IRQ_NONE;
printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
-fd);
os_close_file(conn->fd);
}
list_del(&conn->list);
conn->fd = fd;
list_add(&conn->list, &conn->port->connections);
complete(&conn->port->done);
return IRQ_HANDLED;
}
#define NO_WAITER_MSG \
"****\n" \
"There are currently no UML consoles waiting for port connections.\n" \
"Either disconnect from one to make it available or activate some more\n" \
"by enabling more consoles in the UML /etc/inittab.\n" \
"****\n"
static int port_accept(struct port_list *port)
{
struct connection *conn;
int fd, socket[2], pid;
fd = port_connection(port->fd, socket, &pid);
if (fd < 0) {
if (fd != -EAGAIN)
printk(KERN_ERR "port_accept : port_connection "
"returned %d\n", -fd);
goto out;
}
conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
if (conn == NULL) {
printk(KERN_ERR "port_accept : failed to allocate "
"connection\n");
goto out_close;
}
*conn = ((struct connection)
{ .list = LIST_HEAD_INIT(conn->list),
.fd = fd,
.socket = { socket[0], socket[1] },
.telnetd_pid = pid,
.port = port });
if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
IRQF_SHARED, "telnetd", conn) < 0) {
printk(KERN_ERR "port_accept : failed to get IRQ for "
"telnetd\n");
goto out_free;
}
if (atomic_read(&port->wait_count) == 0) {
os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
printk(KERN_ERR "No one waiting for port\n");
}
list_add(&conn->list, &port->pending);
return 1;
out_free:
kfree(conn);
out_close:
os_close_file(fd);
os_kill_process(pid, 1);
out:
return 0;
}
static DEFINE_MUTEX(ports_mutex);
static LIST_HEAD(ports);
static void port_work_proc(struct work_struct *unused)
{
struct port_list *port;
struct list_head *ele;
unsigned long flags;
local_irq_save(flags);
list_for_each(ele, &ports) {
port = list_entry(ele, struct port_list, list);
if (!port->has_connection)
continue;
while (port_accept(port))
;
port->has_connection = 0;
}
local_irq_restore(flags);
}
static DECLARE_WORK(port_work, port_work_proc);
static irqreturn_t port_interrupt(int irq, void *data)
{
struct port_list *port = data;
port->has_connection = 1;
schedule_work(&port_work);
return IRQ_HANDLED;
}
void *port_data(int port_num)
{
struct list_head *ele;
struct port_list *port;
struct port_dev *dev = NULL;
int fd;
mutex_lock(&ports_mutex);
list_for_each(ele, &ports) {
port = list_entry(ele, struct port_list, list);
if (port->port == port_num)
goto found;
}
port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
if (port == NULL) {
printk(KERN_ERR "Allocation of port list failed\n");
goto out;
}
fd = port_listen_fd(port_num);
if (fd < 0) {
printk(KERN_ERR "binding to port %d failed, errno = %d\n",
port_num, -fd);
goto out_free;
}
if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
IRQF_SHARED, "port", port) < 0) {
printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
goto out_close;
}
*port = ((struct port_list)
{ .list = LIST_HEAD_INIT(port->list),
.wait_count = ATOMIC_INIT(0),
.has_connection = 0,
.port = port_num,
.fd = fd,
.pending = LIST_HEAD_INIT(port->pending),
.connections = LIST_HEAD_INIT(port->connections) });
spin_lock_init(&port->lock);
init_completion(&port->done);
list_add(&port->list, &ports);
found:
dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
if (dev == NULL) {
printk(KERN_ERR "Allocation of port device entry failed\n");
goto out;
}
*dev = ((struct port_dev) { .port = port,
.helper_pid = -1,
.telnetd_pid = -1 });
goto out;
out_close:
os_close_file(fd);
out_free:
kfree(port);
out:
mutex_unlock(&ports_mutex);
return dev;
}
int port_wait(void *data)
{
struct port_dev *dev = data;
struct connection *conn;
struct port_list *port = dev->port;
int fd;
atomic_inc(&port->wait_count);
while (1) {
fd = -ERESTARTSYS;
if (wait_for_completion_interruptible(&port->done))
goto out;
spin_lock(&port->lock);
conn = list_entry(port->connections.next, struct connection,
list);
list_del(&conn->list);
spin_unlock(&port->lock);
os_shutdown_socket(conn->socket[0], 1, 1);
os_close_file(conn->socket[0]);
os_shutdown_socket(conn->socket[1], 1, 1);
os_close_file(conn->socket[1]);
/* This is done here because freeing an IRQ can't be done
* within the IRQ handler. So, pipe_interrupt always ups
* the semaphore regardless of whether it got a successful
* connection. Then we loop here throwing out failed
* connections until a good one is found.
*/
um_free_irq(TELNETD_IRQ, conn);
if (conn->fd >= 0)
break;
os_close_file(conn->fd);
kfree(conn);
}
fd = conn->fd;
dev->helper_pid = conn->helper_pid;
dev->telnetd_pid = conn->telnetd_pid;
kfree(conn);
out:
atomic_dec(&port->wait_count);
return fd;
}
void port_remove_dev(void *d)
{
struct port_dev *dev = d;
if (dev->helper_pid != -1)
os_kill_process(dev->helper_pid, 0);
if (dev->telnetd_pid != -1)
os_kill_process(dev->telnetd_pid, 1);
dev->helper_pid = -1;
dev->telnetd_pid = -1;
}
void port_kern_free(void *d)
{
struct port_dev *dev = d;
port_remove_dev(dev);
kfree(dev);
}
static void free_port(void)
{
struct list_head *ele;
struct port_list *port;
list_for_each(ele, &ports) {
port = list_entry(ele, struct port_list, list);
free_irq_by_fd(port->fd);
os_close_file(port->fd);
}
}
__uml_exitcall(free_port);
/* ==== end of file: arch/um/drivers/port_kern.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 Anton Ivanov ([email protected])
* Copyright (C) 2000, 2001, 2002 Jeff Dike ([email protected])
* Copyright (C) 2001 Ridgerun,Inc ([email protected])
*/
#include <stddef.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <endian.h>
#include <byteswap.h>
#include "ubd.h"
#include <os.h>
#include <poll.h>
struct pollfd kernel_pollfd;
int start_io_thread(unsigned long sp, int *fd_out)
{
int pid, fds[2], err;
err = os_pipe(fds, 1, 1);
if(err < 0){
printk("start_io_thread - os_pipe failed, err = %d\n", -err);
goto out;
}
kernel_fd = fds[0];
kernel_pollfd.fd = kernel_fd;
kernel_pollfd.events = POLLIN;
*fd_out = fds[1];
err = os_set_fd_block(*fd_out, 0);
err = os_set_fd_block(kernel_fd, 0);
if (err) {
printk("start_io_thread - failed to set nonblocking I/O.\n");
goto out_close;
}
pid = clone(io_thread, (void *) sp, CLONE_FILES | CLONE_VM, NULL);
if(pid < 0){
err = -errno;
printk("start_io_thread - clone failed : errno = %d\n", errno);
goto out_close;
}
return(pid);
out_close:
os_close_file(fds[0]);
os_close_file(fds[1]);
kernel_fd = -1;
*fd_out = -1;
out:
return err;
}
int ubd_read_poll(int timeout)
{
kernel_pollfd.events = POLLIN;
return poll(&kernel_pollfd, 1, timeout);
}
int ubd_write_poll(int timeout)
{
kernel_pollfd.events = POLLOUT;
return poll(&kernel_pollfd, 1, timeout);
}
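/*
 * Illustrative sketch only (not part of ubd_user.c): the two poll helpers
 * above are meant to be called before reading completions from, or writing
 * requests to, the I/O thread pipe.  A hypothetical caller might wait like
 * this before draining completed requests from kernel_fd; the real consumer
 * lives in ubd_kern.c.
 */
static int example_wait_for_completions(int timeout_ms)
{
	int n;

	/* > 0: kernel_fd is readable, 0: timed out, < 0: poll() failed */
	n = ubd_read_poll(timeout_ms);
	if (n <= 0)
		return n;

	/* ...read completed request pointers from kernel_fd here... */
	return n;
}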
/* ==== end of file: arch/um/drivers/ubd_user.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 Lennert Buytenhek ([email protected])
* Copyright (C) 2001 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/sched/debug.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <asm/switch_to.h>
#include <init.h>
#include <irq_kern.h>
#include <irq_user.h>
#include <kern_util.h>
#include "mconsole.h"
#include "mconsole_kern.h"
#include <os.h>
static struct vfsmount *proc_mnt = NULL;
static int do_unlink_socket(struct notifier_block *notifier,
unsigned long what, void *data)
{
return mconsole_unlink_socket();
}
static struct notifier_block reboot_notifier = {
.notifier_call = do_unlink_socket,
.priority = 0,
};
/* Safe without explicit locking for now. Tasklets provide their own
* locking, and the interrupt handler is safe because it can't interrupt
* itself and it can only happen on CPU 0.
*/
static LIST_HEAD(mc_requests);
static void mc_work_proc(struct work_struct *unused)
{
struct mconsole_entry *req;
unsigned long flags;
while (!list_empty(&mc_requests)) {
local_irq_save(flags);
req = list_entry(mc_requests.next, struct mconsole_entry, list);
list_del(&req->list);
local_irq_restore(flags);
req->request.cmd->handler(&req->request);
kfree(req);
}
}
static DECLARE_WORK(mconsole_work, mc_work_proc);
static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
{
/* long to avoid size mismatch warnings from gcc */
long fd;
struct mconsole_entry *new;
static struct mc_request req; /* that's OK */
fd = (long) dev_id;
while (mconsole_get_request(fd, &req)) {
if (req.cmd->context == MCONSOLE_INTR)
(*req.cmd->handler)(&req);
else {
new = kmalloc(sizeof(*new), GFP_NOWAIT);
if (new == NULL)
mconsole_reply(&req, "Out of memory", 1, 0);
else {
new->request = req;
new->request.regs = get_irq_regs()->regs;
list_add(&new->list, &mc_requests);
}
}
}
if (!list_empty(&mc_requests))
schedule_work(&mconsole_work);
return IRQ_HANDLED;
}
void mconsole_version(struct mc_request *req)
{
char version[256];
sprintf(version, "%s %s %s %s %s", utsname()->sysname,
utsname()->nodename, utsname()->release, utsname()->version,
utsname()->machine);
mconsole_reply(req, version, 0, 0);
}
void mconsole_log(struct mc_request *req)
{
int len;
char *ptr = req->request.data;
ptr += strlen("log ");
len = req->len - (ptr - req->request.data);
printk(KERN_WARNING "%.*s", len, ptr);
mconsole_reply(req, "", 0, 0);
}
void mconsole_proc(struct mc_request *req)
{
struct vfsmount *mnt = proc_mnt;
char *buf;
int len;
struct file *file;
int first_chunk = 1;
char *ptr = req->request.data;
loff_t pos = 0;
ptr += strlen("proc");
ptr = skip_spaces(ptr);
if (!mnt) {
mconsole_reply(req, "Proc not available", 1, 0);
goto out;
}
file = file_open_root_mnt(mnt, ptr, O_RDONLY, 0);
if (IS_ERR(file)) {
mconsole_reply(req, "Failed to open file", 1, 0);
printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
goto out;
}
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (buf == NULL) {
mconsole_reply(req, "Failed to allocate buffer", 1, 0);
goto out_fput;
}
do {
len = kernel_read(file, buf, PAGE_SIZE - 1, &pos);
if (len < 0) {
mconsole_reply(req, "Read of file failed", 1, 0);
goto out_free;
}
/* Begin the file content on its own line. */
if (first_chunk) {
mconsole_reply(req, "\n", 0, 1);
first_chunk = 0;
}
buf[len] = '\0';
mconsole_reply(req, buf, 0, (len > 0));
} while (len > 0);
out_free:
kfree(buf);
out_fput:
fput(file);
out: ;
}
#define UML_MCONSOLE_HELPTEXT \
"Commands: \n\
version - Get kernel version \n\
help - Print this message \n\
halt - Halt UML \n\
reboot - Reboot UML \n\
config <dev>=<config> - Add a new device to UML; \n\
same syntax as command line \n\
config <dev> - Query the configuration of a device \n\
remove <dev> - Remove a device from UML \n\
sysrq <letter> - Performs the SysRq action controlled by the letter \n\
cad - invoke the Ctrl-Alt-Del handler \n\
stop - pause the UML; it will do nothing until it receives a 'go' \n\
go - continue the UML after a 'stop' \n\
log <string> - make UML enter <string> into the kernel log\n\
proc <file> - returns the contents of the UML's /proc/<file>\n\
stack <pid> - returns the stack of the specified pid\n\
"
void mconsole_help(struct mc_request *req)
{
mconsole_reply(req, UML_MCONSOLE_HELPTEXT, 0, 0);
}
void mconsole_halt(struct mc_request *req)
{
mconsole_reply(req, "", 0, 0);
machine_halt();
}
void mconsole_reboot(struct mc_request *req)
{
mconsole_reply(req, "", 0, 0);
machine_restart(NULL);
}
void mconsole_cad(struct mc_request *req)
{
mconsole_reply(req, "", 0, 0);
ctrl_alt_del();
}
void mconsole_go(struct mc_request *req)
{
mconsole_reply(req, "Not stopped", 1, 0);
}
void mconsole_stop(struct mc_request *req)
{
block_signals();
os_set_fd_block(req->originating_fd, 1);
mconsole_reply(req, "stopped", 0, 0);
for (;;) {
if (!mconsole_get_request(req->originating_fd, req))
continue;
if (req->cmd->handler == mconsole_go)
break;
if (req->cmd->handler == mconsole_stop) {
mconsole_reply(req, "Already stopped", 1, 0);
continue;
}
if (req->cmd->handler == mconsole_sysrq) {
struct pt_regs *old_regs;
old_regs = set_irq_regs((struct pt_regs *)&req->regs);
mconsole_sysrq(req);
set_irq_regs(old_regs);
continue;
}
(*req->cmd->handler)(req);
}
os_set_fd_block(req->originating_fd, 0);
mconsole_reply(req, "", 0, 0);
unblock_signals();
}
static DEFINE_SPINLOCK(mc_devices_lock);
static LIST_HEAD(mconsole_devices);
void mconsole_register_dev(struct mc_device *new)
{
spin_lock(&mc_devices_lock);
BUG_ON(!list_empty(&new->list));
list_add(&new->list, &mconsole_devices);
spin_unlock(&mc_devices_lock);
}
static struct mc_device *mconsole_find_dev(char *name)
{
struct list_head *ele;
struct mc_device *dev;
list_for_each(ele, &mconsole_devices) {
dev = list_entry(ele, struct mc_device, list);
if (!strncmp(name, dev->name, strlen(dev->name)))
return dev;
}
return NULL;
}
#define UNPLUGGED_PER_PAGE \
((PAGE_SIZE - sizeof(struct list_head)) / sizeof(unsigned long))
struct unplugged_pages {
struct list_head list;
void *pages[UNPLUGGED_PER_PAGE];
};
static DEFINE_MUTEX(plug_mem_mutex);
static unsigned long long unplugged_pages_count;
static LIST_HEAD(unplugged_pages);
static int unplug_index = UNPLUGGED_PER_PAGE;
static int mem_config(char *str, char **error_out)
{
unsigned long long diff;
int err = -EINVAL, i, add;
char *ret;
if (str[0] != '=') {
*error_out = "Expected '=' after 'mem'";
goto out;
}
str++;
if (str[0] == '-')
add = 0;
else if (str[0] == '+') {
add = 1;
}
else {
*error_out = "Expected increment to start with '-' or '+'";
goto out;
}
str++;
diff = memparse(str, &ret);
if (*ret != '\0') {
*error_out = "Failed to parse memory increment";
goto out;
}
diff /= PAGE_SIZE;
mutex_lock(&plug_mem_mutex);
for (i = 0; i < diff; i++) {
struct unplugged_pages *unplugged;
void *addr;
if (add) {
if (list_empty(&unplugged_pages))
break;
unplugged = list_entry(unplugged_pages.next,
struct unplugged_pages, list);
if (unplug_index > 0)
addr = unplugged->pages[--unplug_index];
else {
list_del(&unplugged->list);
addr = unplugged;
unplug_index = UNPLUGGED_PER_PAGE;
}
free_page((unsigned long) addr);
unplugged_pages_count--;
}
else {
struct page *page;
page = alloc_page(GFP_ATOMIC);
if (page == NULL)
break;
unplugged = page_address(page);
if (unplug_index == UNPLUGGED_PER_PAGE) {
list_add(&unplugged->list, &unplugged_pages);
unplug_index = 0;
}
else {
struct list_head *entry = unplugged_pages.next;
addr = unplugged;
unplugged = list_entry(entry,
struct unplugged_pages,
list);
err = os_drop_memory(addr, PAGE_SIZE);
if (err) {
printk(KERN_ERR "Failed to release "
"memory - errno = %d\n", err);
*error_out = "Failed to release memory";
goto out_unlock;
}
unplugged->pages[unplug_index++] = addr;
}
unplugged_pages_count++;
}
}
err = 0;
out_unlock:
mutex_unlock(&plug_mem_mutex);
out:
return err;
}
static int mem_get_config(char *name, char *str, int size, char **error_out)
{
char buf[sizeof("18446744073709551615")];
int len = 0;
sprintf(buf, "%ld", uml_physmem);
CONFIG_CHUNK(str, size, len, buf, 1);
return len;
}
static int mem_id(char **str, int *start_out, int *end_out)
{
*start_out = 0;
*end_out = 0;
return 0;
}
static int mem_remove(int n, char **error_out)
{
*error_out = "Memory doesn't support the remove operation";
return -EBUSY;
}
static struct mc_device mem_mc = {
.list = LIST_HEAD_INIT(mem_mc.list),
.name = "mem",
.config = mem_config,
.get_config = mem_get_config,
.id = mem_id,
.remove = mem_remove,
};
static int __init mem_mc_init(void)
{
if (can_drop_memory())
mconsole_register_dev(&mem_mc);
else printk(KERN_ERR "Can't release memory to the host - memory "
"hotplug won't be supported\n");
return 0;
}
__initcall(mem_mc_init);
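/*
 * Illustrative usage only: once mem_mc is registered, guest memory can be
 * plugged and unplugged from the host at runtime, for example
 *
 *   uml_mconsole <umid> config mem=-64M
 *   uml_mconsole <umid> config mem=+64M
 *
 * mem_config() above parses the leading '+' or '-' and hands the rest to
 * memparse(), so the usual K/M/G suffixes are accepted.
 */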
#define CONFIG_BUF_SIZE 64
static void mconsole_get_config(int (*get_config)(char *, char *, int,
char **),
struct mc_request *req, char *name)
{
char default_buf[CONFIG_BUF_SIZE], *error, *buf;
int n, size;
if (get_config == NULL) {
mconsole_reply(req, "No get_config routine defined", 1, 0);
return;
}
error = NULL;
size = ARRAY_SIZE(default_buf);
buf = default_buf;
while (1) {
n = (*get_config)(name, buf, size, &error);
if (error != NULL) {
mconsole_reply(req, error, 1, 0);
goto out;
}
if (n <= size) {
mconsole_reply(req, buf, 0, 0);
goto out;
}
if (buf != default_buf)
kfree(buf);
size = n;
buf = kmalloc(size, GFP_KERNEL);
if (buf == NULL) {
mconsole_reply(req, "Failed to allocate buffer", 1, 0);
return;
}
}
out:
if (buf != default_buf)
kfree(buf);
}
void mconsole_config(struct mc_request *req)
{
struct mc_device *dev;
char *ptr = req->request.data, *name, *error_string = "";
int err;
ptr += strlen("config");
ptr = skip_spaces(ptr);
dev = mconsole_find_dev(ptr);
if (dev == NULL) {
mconsole_reply(req, "Bad configuration option", 1, 0);
return;
}
name = &ptr[strlen(dev->name)];
ptr = name;
while ((*ptr != '=') && (*ptr != '\0'))
ptr++;
if (*ptr == '=') {
err = (*dev->config)(name, &error_string);
mconsole_reply(req, error_string, err, 0);
}
else mconsole_get_config(dev->get_config, req, name);
}
void mconsole_remove(struct mc_request *req)
{
struct mc_device *dev;
char *ptr = req->request.data, *err_msg = "";
char error[256];
int err, start, end, n;
ptr += strlen("remove");
ptr = skip_spaces(ptr);
dev = mconsole_find_dev(ptr);
if (dev == NULL) {
mconsole_reply(req, "Bad remove option", 1, 0);
return;
}
ptr = &ptr[strlen(dev->name)];
err = 1;
n = (*dev->id)(&ptr, &start, &end);
if (n < 0) {
err_msg = "Couldn't parse device number";
goto out;
}
else if ((n < start) || (n > end)) {
sprintf(error, "Invalid device number - must be between "
"%d and %d", start, end);
err_msg = error;
goto out;
}
err_msg = NULL;
err = (*dev->remove)(n, &err_msg);
switch(err) {
case 0:
err_msg = "";
break;
case -ENODEV:
if (err_msg == NULL)
err_msg = "Device doesn't exist";
break;
case -EBUSY:
if (err_msg == NULL)
err_msg = "Device is currently open";
break;
default:
break;
}
out:
mconsole_reply(req, err_msg, err, 0);
}
struct mconsole_output {
struct list_head list;
struct mc_request *req;
};
static DEFINE_SPINLOCK(client_lock);
static LIST_HEAD(clients);
static char console_buf[MCONSOLE_MAX_DATA] __nonstring;
static void console_write(struct console *console, const char *string,
unsigned int len)
{
struct list_head *ele;
int n;
if (list_empty(&clients))
return;
while (len > 0) {
n = min((size_t) len, ARRAY_SIZE(console_buf));
memcpy(console_buf, string, n);
string += n;
len -= n;
list_for_each(ele, &clients) {
struct mconsole_output *entry;
entry = list_entry(ele, struct mconsole_output, list);
mconsole_reply_len(entry->req, console_buf, n, 0, 1);
}
}
}
static struct console mc_console = { .name = "mc",
.write = console_write,
.flags = CON_ENABLED,
.index = -1 };
static int mc_add_console(void)
{
register_console(&mc_console);
return 0;
}
late_initcall(mc_add_console);
static void with_console(struct mc_request *req, void (*proc)(void *),
void *arg)
{
struct mconsole_output entry;
unsigned long flags;
entry.req = req;
spin_lock_irqsave(&client_lock, flags);
list_add(&entry.list, &clients);
spin_unlock_irqrestore(&client_lock, flags);
(*proc)(arg);
mconsole_reply_len(req, "", 0, 0, 0);
spin_lock_irqsave(&client_lock, flags);
list_del(&entry.list);
spin_unlock_irqrestore(&client_lock, flags);
}
#ifdef CONFIG_MAGIC_SYSRQ
#include <linux/sysrq.h>
static void sysrq_proc(void *arg)
{
char *op = arg;
handle_sysrq(*op);
}
void mconsole_sysrq(struct mc_request *req)
{
char *ptr = req->request.data;
ptr += strlen("sysrq");
ptr = skip_spaces(ptr);
/*
* With 'b', the system will shut down without a chance to reply,
* so in this case, we reply first.
*/
if (*ptr == 'b')
mconsole_reply(req, "", 0, 0);
with_console(req, sysrq_proc, ptr);
}
#else
void mconsole_sysrq(struct mc_request *req)
{
mconsole_reply(req, "Sysrq not compiled in", 1, 0);
}
#endif
static void stack_proc(void *arg)
{
struct task_struct *task = arg;
show_stack(task, NULL, KERN_INFO);
}
/*
* Mconsole stack trace
* Added by Allan Graves, Jeff Dike
* Dumps a stacks registers to the linux console.
* Usage stack <pid>.
*/
void mconsole_stack(struct mc_request *req)
{
char *ptr = req->request.data;
int pid_requested= -1;
struct task_struct *to = NULL;
/*
* Would be nice:
* 1) Send showregs output to mconsole.
* 2) Add a way to stack dump all pids.
*/
ptr += strlen("stack");
ptr = skip_spaces(ptr);
/*
* Should really check for multiple pids or reject bad args here
*/
/* What do the arguments in mconsole_reply mean? */
if (sscanf(ptr, "%d", &pid_requested) == 0) {
mconsole_reply(req, "Please specify a pid", 1, 0);
return;
}
to = find_task_by_pid_ns(pid_requested, &init_pid_ns);
if ((to == NULL) || (pid_requested == 0)) {
mconsole_reply(req, "Couldn't find that pid", 1, 0);
return;
}
with_console(req, stack_proc, to);
}
static int __init mount_proc(void)
{
struct file_system_type *proc_fs_type;
struct vfsmount *mnt;
proc_fs_type = get_fs_type("proc");
if (!proc_fs_type)
return -ENODEV;
mnt = kern_mount(proc_fs_type);
put_filesystem(proc_fs_type);
if (IS_ERR(mnt))
return PTR_ERR(mnt);
proc_mnt = mnt;
return 0;
}
/*
* Changed by mconsole_setup, which is __setup, and called before SMP is
* active.
*/
static char *notify_socket = NULL;
static int __init mconsole_init(void)
{
/* long to avoid size mismatch warnings from gcc */
long sock;
int err;
char file[UNIX_PATH_MAX];
mount_proc();
if (umid_file_name("mconsole", file, sizeof(file)))
return -1;
snprintf(mconsole_socket_name, sizeof(file), "%s", file);
sock = os_create_unix_socket(file, sizeof(file), 1);
if (sock < 0) {
printk(KERN_ERR "Failed to initialize management console\n");
return 1;
}
if (os_set_fd_block(sock, 0))
goto out;
register_reboot_notifier(&reboot_notifier);
err = um_request_irq(MCONSOLE_IRQ, sock, IRQ_READ, mconsole_interrupt,
IRQF_SHARED, "mconsole", (void *)sock);
if (err < 0) {
printk(KERN_ERR "Failed to get IRQ for management console\n");
goto out;
}
if (notify_socket != NULL) {
notify_socket = kstrdup(notify_socket, GFP_KERNEL);
if (notify_socket != NULL)
mconsole_notify(notify_socket, MCONSOLE_SOCKET,
mconsole_socket_name,
strlen(mconsole_socket_name) + 1);
else printk(KERN_ERR "mconsole_setup failed to strdup "
"string\n");
}
printk(KERN_INFO "mconsole (version %d) initialized on %s\n",
MCONSOLE_VERSION, mconsole_socket_name);
return 0;
out:
os_close_file(sock);
return 1;
}
__initcall(mconsole_init);
static ssize_t mconsole_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *buf;
buf = memdup_user_nul(buffer, count);
if (IS_ERR(buf))
return PTR_ERR(buf);
mconsole_notify(notify_socket, MCONSOLE_USER_NOTIFY, buf, count);
kfree(buf);
return count;
}
static const struct proc_ops mconsole_proc_ops = {
.proc_write = mconsole_proc_write,
.proc_lseek = noop_llseek,
};
static int create_proc_mconsole(void)
{
struct proc_dir_entry *ent;
if (notify_socket == NULL)
return 0;
ent = proc_create("mconsole", 0200, NULL, &mconsole_proc_ops);
if (ent == NULL) {
printk(KERN_INFO "create_proc_mconsole : proc_create failed\n");
return 0;
}
return 0;
}
static DEFINE_SPINLOCK(notify_spinlock);
void lock_notify(void)
{
spin_lock(&notify_spinlock);
}
void unlock_notify(void)
{
spin_unlock(&notify_spinlock);
}
__initcall(create_proc_mconsole);
#define NOTIFY "notify:"
static int mconsole_setup(char *str)
{
if (!strncmp(str, NOTIFY, strlen(NOTIFY))) {
str += strlen(NOTIFY);
notify_socket = str;
}
else printk(KERN_ERR "mconsole_setup : Unknown option - '%s'\n", str);
return 1;
}
__setup("mconsole=", mconsole_setup);
__uml_help(mconsole_setup,
"mconsole=notify:<socket>\n"
" Requests that the mconsole driver send a message to the named Unix\n"
" socket containing the name of the mconsole socket. This also serves\n"
" to notify outside processes when UML has booted far enough to respond\n"
" to mconsole requests.\n\n"
);
static int notify_panic(struct notifier_block *self, unsigned long unused1,
void *ptr)
{
char *message = ptr;
if (notify_socket == NULL)
return 0;
mconsole_notify(notify_socket, MCONSOLE_PANIC, message,
strlen(message) + 1);
return NOTIFY_DONE;
}
static struct notifier_block panic_exit_notifier = {
.notifier_call = notify_panic,
.priority = INT_MAX, /* run as soon as possible */
};
static int add_notifier(void)
{
atomic_notifier_chain_register(&panic_notifier_list,
&panic_exit_notifier);
return 0;
}
__initcall(add_notifier);
char *mconsole_notify_socket(void)
{
return notify_socket;
}
EXPORT_SYMBOL(mconsole_notify_socket);
/* ==== end of file: arch/um/drivers/mconsole_kern.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/irqreturn.h>
#include <asm/irq.h>
#include <irq_kern.h>
#include <os.h>
#include "xterm.h"
struct xterm_wait {
struct completion ready;
int fd;
int pid;
int new_fd;
};
static irqreturn_t xterm_interrupt(int irq, void *data)
{
struct xterm_wait *xterm = data;
int fd;
fd = os_rcv_fd(xterm->fd, &xterm->pid);
if (fd == -EAGAIN)
return IRQ_NONE;
xterm->new_fd = fd;
complete(&xterm->ready);
return IRQ_HANDLED;
}
int xterm_fd(int socket, int *pid_out)
{
struct xterm_wait *data;
int err, ret;
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL) {
printk(KERN_ERR "xterm_fd : failed to allocate xterm_wait\n");
return -ENOMEM;
}
/* This is a locked semaphore... */
*data = ((struct xterm_wait) { .fd = socket,
.pid = -1,
.new_fd = -1 });
init_completion(&data->ready);
err = um_request_irq(XTERM_IRQ, socket, IRQ_READ, xterm_interrupt,
IRQF_SHARED, "xterm", data);
if (err < 0) {
printk(KERN_ERR "xterm_fd : failed to get IRQ for xterm, "
"err = %d\n", err);
ret = err;
goto out;
}
/* ... so here we wait for an xterm interrupt.
*
* XXX Note, if the xterm doesn't work for some reason (eg. DISPLAY
* isn't set) this will hang... */
wait_for_completion(&data->ready);
um_free_irq(XTERM_IRQ, data);
ret = data->new_fd;
*pid_out = data->pid;
out:
kfree(data);
return ret;
}
/* ==== end of file: arch/um/drivers/xterm_kern.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2001 Lennert Buytenhek ([email protected]) and
* James Leu ([email protected]).
* Copyright (C) 2001 by various other people who didn't put their name here.
*/
#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <init.h>
#include <irq_kern.h>
#include <irq_user.h>
#include "mconsole_kern.h"
#include <net_kern.h>
#include <net_user.h>
#define DRIVER_NAME "uml-netdev"
static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);
/*
* The drop_skb is used when we can't allocate an skb. The
* packet is read into drop_skb in order to get the data off the
* connection to the host.
* It is reallocated whenever a maximum packet size is seen which is
* larger than any seen before. update_drop_skb is called from
* eth_configure when a new interface is added.
*/
static DEFINE_SPINLOCK(drop_lock);
static struct sk_buff *drop_skb;
static int drop_max;
static int update_drop_skb(int max)
{
struct sk_buff *new;
unsigned long flags;
int err = 0;
spin_lock_irqsave(&drop_lock, flags);
if (max <= drop_max)
goto out;
err = -ENOMEM;
new = dev_alloc_skb(max);
if (new == NULL)
goto out;
skb_put(new, max);
kfree_skb(drop_skb);
drop_skb = new;
drop_max = max;
err = 0;
out:
spin_unlock_irqrestore(&drop_lock, flags);
return err;
}
static int uml_net_rx(struct net_device *dev)
{
struct uml_net_private *lp = netdev_priv(dev);
int pkt_len;
struct sk_buff *skb;
/* If we can't allocate memory, try again next round. */
skb = dev_alloc_skb(lp->max_packet);
if (skb == NULL) {
drop_skb->dev = dev;
/* Read a packet into drop_skb and don't do anything with it. */
(*lp->read)(lp->fd, drop_skb, lp);
dev->stats.rx_dropped++;
return 0;
}
skb->dev = dev;
skb_put(skb, lp->max_packet);
skb_reset_mac_header(skb);
pkt_len = (*lp->read)(lp->fd, skb, lp);
if (pkt_len > 0) {
skb_trim(skb, pkt_len);
skb->protocol = (*lp->protocol)(skb);
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
netif_rx(skb);
return pkt_len;
}
kfree_skb(skb);
return pkt_len;
}
static void uml_dev_close(struct work_struct *work)
{
struct uml_net_private *lp =
container_of(work, struct uml_net_private, work);
dev_close(lp->dev);
}
static irqreturn_t uml_net_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct uml_net_private *lp = netdev_priv(dev);
int err;
if (!netif_running(dev))
return IRQ_NONE;
spin_lock(&lp->lock);
while ((err = uml_net_rx(dev)) > 0) ;
if (err < 0) {
printk(KERN_ERR
"Device '%s' read returned %d, shutting it down\n",
dev->name, err);
/* dev_close can't be called in interrupt context, and takes
* again lp->lock.
* And dev_close() can be safely called multiple times on the
* same device, since it tests for (dev->flags & IFF_UP). So
* there's no harm in delaying the device shutdown.
* Furthermore, the workqueue will not re-enqueue an already
* enqueued work item. */
schedule_work(&lp->work);
goto out;
}
out:
spin_unlock(&lp->lock);
return IRQ_HANDLED;
}
static int uml_net_open(struct net_device *dev)
{
struct uml_net_private *lp = netdev_priv(dev);
int err;
if (lp->fd >= 0) {
err = -ENXIO;
goto out;
}
lp->fd = (*lp->open)(&lp->user);
if (lp->fd < 0) {
err = lp->fd;
goto out;
}
err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
err = -ENETUNREACH;
goto out_close;
}
netif_start_queue(dev);
/* clear buffer - it can happen that the host side of the interface
* is full when we get here. In this case, new data is never queued,
* SIGIOs never arrive, and the net never works.
*/
while ((err = uml_net_rx(dev)) > 0) ;
spin_lock(&opened_lock);
list_add(&lp->list, &opened);
spin_unlock(&opened_lock);
return 0;
out_close:
if (lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
lp->fd = -1;
out:
return err;
}
static int uml_net_close(struct net_device *dev)
{
struct uml_net_private *lp = netdev_priv(dev);
netif_stop_queue(dev);
um_free_irq(dev->irq, dev);
if (lp->close != NULL)
(*lp->close)(lp->fd, &lp->user);
lp->fd = -1;
spin_lock(&opened_lock);
list_del(&lp->list);
spin_unlock(&opened_lock);
return 0;
}
static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct uml_net_private *lp = netdev_priv(dev);
unsigned long flags;
int len;
netif_stop_queue(dev);
spin_lock_irqsave(&lp->lock, flags);
len = (*lp->write)(lp->fd, skb, lp);
skb_tx_timestamp(skb);
if (len == skb->len) {
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
netif_trans_update(dev);
netif_start_queue(dev);
/* this is normally done in the interrupt when tx finishes */
netif_wake_queue(dev);
}
else if (len == 0) {
netif_start_queue(dev);
dev->stats.tx_dropped++;
}
else {
netif_start_queue(dev);
printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
}
spin_unlock_irqrestore(&lp->lock, flags);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
static void uml_net_set_multicast_list(struct net_device *dev)
{
return;
}
static void uml_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
netif_trans_update(dev);
netif_wake_queue(dev);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void uml_net_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
uml_net_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
static void uml_net_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
}
static const struct ethtool_ops uml_net_ethtool_ops = {
.get_drvinfo = uml_net_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
};
void uml_net_setup_etheraddr(struct net_device *dev, char *str)
{
u8 addr[ETH_ALEN];
char *end;
int i;
if (str == NULL)
goto random;
for (i = 0; i < 6; i++) {
addr[i] = simple_strtoul(str, &end, 16);
if ((end == str) ||
((*end != ':') && (*end != ',') && (*end != '\0'))) {
printk(KERN_ERR
"setup_etheraddr: failed to parse '%s' "
"as an ethernet address\n", str);
goto random;
}
str = end + 1;
}
if (is_multicast_ether_addr(addr)) {
printk(KERN_ERR
"Attempt to assign a multicast ethernet address to a "
"device disallowed\n");
goto random;
}
if (!is_valid_ether_addr(addr)) {
printk(KERN_ERR
"Attempt to assign an invalid ethernet address to a "
"device disallowed\n");
goto random;
}
if (!is_local_ether_addr(addr)) {
printk(KERN_WARNING
"Warning: Assigning a globally valid ethernet "
"address to a device\n");
printk(KERN_WARNING "You should set the 2nd rightmost bit in "
"the first byte of the MAC,\n");
printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n",
addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4],
addr[5]);
}
eth_hw_addr_set(dev, addr);
return;
random:
printk(KERN_INFO
"Choosing a random ethernet address for device %s\n", dev->name);
eth_hw_addr_random(dev);
}
static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);
static struct platform_driver uml_net_driver = {
.driver = {
.name = DRIVER_NAME,
},
};
static void net_device_release(struct device *dev)
{
struct uml_net *device = dev_get_drvdata(dev);
struct net_device *netdev = device->dev;
struct uml_net_private *lp = netdev_priv(netdev);
if (lp->remove != NULL)
(*lp->remove)(&lp->user);
list_del(&device->list);
kfree(device);
free_netdev(netdev);
}
static const struct net_device_ops uml_netdev_ops = {
.ndo_open = uml_net_open,
.ndo_stop = uml_net_close,
.ndo_start_xmit = uml_net_start_xmit,
.ndo_set_rx_mode = uml_net_set_multicast_list,
.ndo_tx_timeout = uml_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = uml_net_poll_controller,
#endif
};
/*
* Ensures that platform_driver_register is called only once by
* eth_configure. Will be set in an initcall.
*/
static int driver_registered;
static void eth_configure(int n, void *init, char *mac,
struct transport *transport, gfp_t gfp_mask)
{
struct uml_net *device;
struct net_device *dev;
struct uml_net_private *lp;
int err, size;
size = transport->private_size + sizeof(struct uml_net_private);
device = kzalloc(sizeof(*device), gfp_mask);
if (device == NULL) {
printk(KERN_ERR "eth_configure failed to allocate struct "
"uml_net\n");
return;
}
dev = alloc_etherdev(size);
if (dev == NULL) {
printk(KERN_ERR "eth_configure: failed to allocate struct "
"net_device for eth%d\n", n);
goto out_free_device;
}
INIT_LIST_HEAD(&device->list);
device->index = n;
/* If this name ends up conflicting with an existing registered
* netdevice, that is OK, register_netdev{,ice}() will notice this
* and fail.
*/
snprintf(dev->name, sizeof(dev->name), "eth%d", n);
uml_net_setup_etheraddr(dev, mac);
printk(KERN_INFO "Netdevice %d (%pM) : ", n, dev->dev_addr);
lp = netdev_priv(dev);
/* This points to the transport private data. It's still clear, but we
* must memset it to 0 *now*. Let's help the drivers. */
memset(lp, 0, size);
INIT_WORK(&lp->work, uml_dev_close);
/* sysfs register */
if (!driver_registered) {
platform_driver_register(&uml_net_driver);
driver_registered = 1;
}
device->pdev.id = n;
device->pdev.name = DRIVER_NAME;
device->pdev.dev.release = net_device_release;
dev_set_drvdata(&device->pdev.dev, device);
if (platform_device_register(&device->pdev))
goto out_free_netdev;
SET_NETDEV_DEV(dev,&device->pdev.dev);
device->dev = dev;
/*
* These just fill in a data structure, so there's no failure
* to be worried about.
*/
(*transport->kern->init)(dev, init);
*lp = ((struct uml_net_private)
{ .list = LIST_HEAD_INIT(lp->list),
.dev = dev,
.fd = -1,
.mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
.max_packet = transport->user->max_packet,
.protocol = transport->kern->protocol,
.open = transport->user->open,
.close = transport->user->close,
.remove = transport->user->remove,
.read = transport->kern->read,
.write = transport->kern->write,
.add_address = transport->user->add_address,
.delete_address = transport->user->delete_address });
spin_lock_init(&lp->lock);
memcpy(lp->mac, dev->dev_addr, sizeof(lp->mac));
if ((transport->user->init != NULL) &&
((*transport->user->init)(&lp->user, dev) != 0))
goto out_unregister;
dev->mtu = transport->user->mtu;
dev->netdev_ops = &uml_netdev_ops;
dev->ethtool_ops = &uml_net_ethtool_ops;
dev->watchdog_timeo = (HZ >> 1);
dev->irq = UM_ETH_IRQ;
err = update_drop_skb(lp->max_packet);
if (err)
goto out_undo_user_init;
rtnl_lock();
err = register_netdevice(dev);
rtnl_unlock();
if (err)
goto out_undo_user_init;
spin_lock(&devices_lock);
list_add(&device->list, &devices);
spin_unlock(&devices_lock);
return;
out_undo_user_init:
if (transport->user->remove != NULL)
(*transport->user->remove)(&lp->user);
out_unregister:
platform_device_unregister(&device->pdev);
return; /* platform_device_unregister frees dev and device */
out_free_netdev:
free_netdev(dev);
out_free_device:
kfree(device);
}
static struct uml_net *find_device(int n)
{
struct uml_net *device;
struct list_head *ele;
spin_lock(&devices_lock);
list_for_each(ele, &devices) {
device = list_entry(ele, struct uml_net, list);
if (device->index == n)
goto out;
}
device = NULL;
out:
spin_unlock(&devices_lock);
return device;
}
static int eth_parse(char *str, int *index_out, char **str_out,
char **error_out)
{
char *end;
int n, err = -EINVAL;
n = simple_strtoul(str, &end, 0);
if (end == str) {
*error_out = "Bad device number";
return err;
}
str = end;
if (*str != '=') {
*error_out = "Expected '=' after device number";
return err;
}
str++;
if (find_device(n)) {
*error_out = "Device already configured";
return err;
}
*index_out = n;
*str_out = str;
return 0;
}
struct eth_init {
struct list_head list;
char *init;
int index;
};
static DEFINE_SPINLOCK(transports_lock);
static LIST_HEAD(transports);
/* Filled in during early boot */
static LIST_HEAD(eth_cmd_line);
static int check_transport(struct transport *transport, char *eth, int n,
void **init_out, char **mac_out, gfp_t gfp_mask)
{
int len;
len = strlen(transport->name);
if (strncmp(eth, transport->name, len))
return 0;
eth += len;
if (*eth == ',')
eth++;
else if (*eth != '\0')
return 0;
*init_out = kmalloc(transport->setup_size, gfp_mask);
if (*init_out == NULL)
return 1;
if (!transport->setup(eth, mac_out, *init_out)) {
kfree(*init_out);
*init_out = NULL;
}
return 1;
}
void register_transport(struct transport *new)
{
struct list_head *ele, *next;
struct eth_init *eth;
void *init;
char *mac = NULL;
int match;
spin_lock(&transports_lock);
BUG_ON(!list_empty(&new->list));
list_add(&new->list, &transports);
spin_unlock(&transports_lock);
list_for_each_safe(ele, next, &eth_cmd_line) {
eth = list_entry(ele, struct eth_init, list);
match = check_transport(new, eth->init, eth->index, &init,
&mac, GFP_KERNEL);
if (!match)
continue;
else if (init != NULL) {
eth_configure(eth->index, init, mac, new, GFP_KERNEL);
kfree(init);
}
list_del(&eth->list);
}
}
static int eth_setup_common(char *str, int index)
{
struct list_head *ele;
struct transport *transport;
void *init;
char *mac = NULL;
int found = 0;
spin_lock(&transports_lock);
list_for_each(ele, &transports) {
transport = list_entry(ele, struct transport, list);
if (!check_transport(transport, str, index, &init,
&mac, GFP_ATOMIC))
continue;
if (init != NULL) {
eth_configure(index, init, mac, transport, GFP_ATOMIC);
kfree(init);
}
found = 1;
break;
}
spin_unlock(&transports_lock);
return found;
}
static int __init eth_setup(char *str)
{
struct eth_init *new;
char *error;
int n, err;
err = eth_parse(str, &n, &str, &error);
if (err) {
printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n",
str, error);
return 1;
}
new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
if (!new)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(*new));
INIT_LIST_HEAD(&new->list);
new->index = n;
new->init = str;
list_add_tail(&new->list, &eth_cmd_line);
return 1;
}
__setup("eth", eth_setup);
__uml_help(eth_setup,
"eth[0-9]+=<transport>,<options>\n"
" Configure a network device.\n\n"
);
static int net_config(char *str, char **error_out)
{
int n, err;
err = eth_parse(str, &n, &str, error_out);
if (err)
return err;
/* This string is broken up and the pieces used by the underlying
* driver. So, it is freed only if eth_setup_common fails.
*/
str = kstrdup(str, GFP_KERNEL);
if (str == NULL) {
*error_out = "net_config failed to strdup string";
return -ENOMEM;
}
err = !eth_setup_common(str, n);
if (err)
kfree(str);
return err;
}
static int net_id(char **str, int *start_out, int *end_out)
{
char *end;
int n;
n = simple_strtoul(*str, &end, 0);
if ((*end != '\0') || (end == *str))
return -1;
*start_out = n;
*end_out = n;
*str = end;
return n;
}
static int net_remove(int n, char **error_out)
{
struct uml_net *device;
struct net_device *dev;
struct uml_net_private *lp;
device = find_device(n);
if (device == NULL)
return -ENODEV;
dev = device->dev;
lp = netdev_priv(dev);
if (lp->fd > 0)
return -EBUSY;
unregister_netdev(dev);
platform_device_unregister(&device->pdev);
return 0;
}
static struct mc_device net_mc = {
.list = LIST_HEAD_INIT(net_mc.list),
.name = "eth",
.config = net_config,
.get_config = NULL,
.id = net_id,
.remove = net_remove,
};
#ifdef CONFIG_INET
static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct in_ifaddr *ifa = ptr;
struct net_device *dev = ifa->ifa_dev->dev;
struct uml_net_private *lp;
void (*proc)(unsigned char *, unsigned char *, void *);
unsigned char addr_buf[4], netmask_buf[4];
if (dev->netdev_ops->ndo_open != uml_net_open)
return NOTIFY_DONE;
lp = netdev_priv(dev);
proc = NULL;
switch (event) {
case NETDEV_UP:
proc = lp->add_address;
break;
case NETDEV_DOWN:
proc = lp->delete_address;
break;
}
if (proc != NULL) {
memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
(*proc)(addr_buf, netmask_buf, &lp->user);
}
return NOTIFY_DONE;
}
/* uml_net_init shouldn't be called twice on two CPUs at the same time */
static struct notifier_block uml_inetaddr_notifier = {
.notifier_call = uml_inetaddr_event,
};
static void inet_register(void)
{
struct list_head *ele;
struct uml_net_private *lp;
struct in_device *ip;
struct in_ifaddr *in;
register_inetaddr_notifier(&uml_inetaddr_notifier);
/* Devices may have been opened already, so the uml_inetaddr_notifier
* didn't get a chance to run for them. This fakes it so that
* addresses which have already been set up get handled properly.
*/
spin_lock(&opened_lock);
list_for_each(ele, &opened) {
lp = list_entry(ele, struct uml_net_private, list);
ip = lp->dev->ip_ptr;
if (ip == NULL)
continue;
in = ip->ifa_list;
while (in != NULL) {
uml_inetaddr_event(NULL, NETDEV_UP, in);
in = in->ifa_next;
}
}
spin_unlock(&opened_lock);
}
#else
static inline void inet_register(void)
{
}
#endif
static int uml_net_init(void)
{
mconsole_register_dev(&net_mc);
inet_register();
return 0;
}
__initcall(uml_net_init);
static void close_devices(void)
{
struct list_head *ele;
struct uml_net_private *lp;
spin_lock(&opened_lock);
list_for_each(ele, &opened) {
lp = list_entry(ele, struct uml_net_private, list);
um_free_irq(lp->dev->irq, lp->dev);
if ((lp->close != NULL) && (lp->fd >= 0))
(*lp->close)(lp->fd, &lp->user);
if (lp->remove != NULL)
(*lp->remove)(&lp->user);
}
spin_unlock(&opened_lock);
}
__uml_exitcall(close_devices);
void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
void *),
void *arg)
{
struct net_device *dev = d;
struct in_device *ip = dev->ip_ptr;
struct in_ifaddr *in;
unsigned char address[4], netmask[4];
if (ip == NULL) return;
in = ip->ifa_list;
while (in != NULL) {
memcpy(address, &in->ifa_address, sizeof(address));
memcpy(netmask, &in->ifa_mask, sizeof(netmask));
(*cb)(address, netmask, arg);
in = in->ifa_next;
}
}
int dev_netmask(void *d, void *m)
{
struct net_device *dev = d;
struct in_device *ip = dev->ip_ptr;
struct in_ifaddr *in;
__be32 *mask_out = m;
if (ip == NULL)
return 1;
in = ip->ifa_list;
if (in == NULL)
return 1;
*mask_out = in->ifa_mask;
return 0;
}
void *get_output_buffer(int *len_out)
{
void *ret;
ret = (void *) __get_free_pages(GFP_KERNEL, 0);
if (ret) *len_out = PAGE_SIZE;
else *len_out = 0;
return ret;
}
void free_output_buffer(void *buffer)
{
free_pages((unsigned long) buffer, 0);
}
int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
char **gate_addr)
{
char *remain;
remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
if (remain != NULL) {
printk(KERN_ERR "tap_setup_common - Extra garbage on "
"specification : '%s'\n", remain);
return 1;
}
return 0;
}
unsigned short eth_protocol(struct sk_buff *skb)
{
return eth_type_trans(skb, skb->dev);
}
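/*
 * Illustrative usage only (not part of net_kern.c): with a transport such
 * as tuntap registered, a device described by the generic syntax parsed
 * above can be given either on the UML command line, e.g.
 *
 *   eth0=tuntap,tap0,fe:fd:01:02:03:04,192.168.0.254
 *
 * or at runtime through the mconsole "config" command with the same
 * string.  net_config() kstrdup()s the string because the transport keeps
 * pointers into the pieces split out by split_if_spec().
 */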
/* ==== end of file: arch/um/drivers/net_kern.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Intel Corporation
* Author: Johannes Berg <[email protected]>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/logic_iomem.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
#include <linux/virtio_pcidev.h>
#include <linux/virtio-uml.h>
#include <linux/delay.h>
#include <linux/msi.h>
#include <asm/unaligned.h>
#include <irq_kern.h>
#define MAX_DEVICES 8
#define MAX_MSI_VECTORS 32
#define CFG_SPACE_SIZE 4096
/* for MSI-X we have a 32-bit payload */
#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
#define NUM_IRQ_MSGS 10
#define HANDLE_NO_FREE(ptr) ((void *)((unsigned long)(ptr) | 1))
#define HANDLE_IS_NO_FREE(ptr) ((unsigned long)(ptr) & 1)
struct um_pci_device {
struct virtio_device *vdev;
/* for now just standard BARs */
u8 resptr[PCI_STD_NUM_BARS];
struct virtqueue *cmd_vq, *irq_vq;
#define UM_PCI_STAT_WAITING 0
unsigned long status;
int irq;
bool platform;
};
struct um_pci_device_reg {
struct um_pci_device *dev;
void __iomem *iomem;
};
static struct pci_host_bridge *bridge;
static DEFINE_MUTEX(um_pci_mtx);
static struct um_pci_device *um_pci_platform_device;
static struct um_pci_device_reg um_pci_devices[MAX_DEVICES];
static struct fwnode_handle *um_pci_fwnode;
static struct irq_domain *um_pci_inner_domain;
static struct irq_domain *um_pci_msi_domain;
static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)];
static unsigned int um_pci_max_delay_us = 40000;
module_param_named(max_delay_us, um_pci_max_delay_us, uint, 0644);
struct um_pci_message_buffer {
struct virtio_pcidev_msg hdr;
u8 data[8];
};
static struct um_pci_message_buffer __percpu *um_pci_msg_bufs;
static int um_pci_send_cmd(struct um_pci_device *dev,
struct virtio_pcidev_msg *cmd,
unsigned int cmd_size,
const void *extra, unsigned int extra_size,
void *out, unsigned int out_size)
{
struct scatterlist out_sg, extra_sg, in_sg;
struct scatterlist *sgs_list[] = {
[0] = &out_sg,
[1] = extra ? &extra_sg : &in_sg,
[2] = extra ? &in_sg : NULL,
};
struct um_pci_message_buffer *buf;
int delay_count = 0;
int ret, len;
bool posted;
if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf)))
return -EINVAL;
switch (cmd->op) {
case VIRTIO_PCIDEV_OP_CFG_WRITE:
case VIRTIO_PCIDEV_OP_MMIO_WRITE:
case VIRTIO_PCIDEV_OP_MMIO_MEMSET:
/* in PCI, writes are posted, so don't wait */
posted = !out;
WARN_ON(!posted);
break;
default:
posted = false;
break;
}
buf = get_cpu_var(um_pci_msg_bufs);
if (buf)
memcpy(buf, cmd, cmd_size);
if (posted) {
u8 *ncmd = kmalloc(cmd_size + extra_size, GFP_ATOMIC);
if (ncmd) {
memcpy(ncmd, cmd, cmd_size);
if (extra)
memcpy(ncmd + cmd_size, extra, extra_size);
cmd = (void *)ncmd;
cmd_size += extra_size;
extra = NULL;
extra_size = 0;
} else {
/* try without allocating memory */
posted = false;
cmd = (void *)buf;
}
} else {
cmd = (void *)buf;
}
sg_init_one(&out_sg, cmd, cmd_size);
if (extra)
sg_init_one(&extra_sg, extra, extra_size);
if (out)
sg_init_one(&in_sg, out, out_size);
/* add to internal virtio queue */
ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
extra ? 2 : 1,
out ? 1 : 0,
posted ? cmd : HANDLE_NO_FREE(cmd),
GFP_ATOMIC);
if (ret) {
if (posted)
kfree(cmd);
goto out;
}
if (posted) {
virtqueue_kick(dev->cmd_vq);
ret = 0;
goto out;
}
/* kick and poll for getting a response on the queue */
set_bit(UM_PCI_STAT_WAITING, &dev->status);
virtqueue_kick(dev->cmd_vq);
while (1) {
void *completed = virtqueue_get_buf(dev->cmd_vq, &len);
if (completed == HANDLE_NO_FREE(cmd))
break;
if (completed && !HANDLE_IS_NO_FREE(completed))
kfree(completed);
if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
++delay_count > um_pci_max_delay_us,
"um virt-pci delay: %d", delay_count)) {
ret = -EIO;
break;
}
udelay(1);
}
clear_bit(UM_PCI_STAT_WAITING, &dev->status);
out:
put_cpu_var(um_pci_msg_bufs);
return ret;
}
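/*
 * Config space accessors: forward the access to the device as a
 * VIRTIO_PCIDEV_OP_CFG_READ/WRITE message and convert the payload from/to
 * little endian.  Only 1, 2, 4 (and, on 64-bit, 8) byte accesses are
 * supported.
 */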
static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
int size)
{
struct um_pci_device_reg *reg = priv;
struct um_pci_device *dev = reg->dev;
struct virtio_pcidev_msg hdr = {
.op = VIRTIO_PCIDEV_OP_CFG_READ,
.size = size,
.addr = offset,
};
/* buf->data is maximum size - we may only use parts of it */
struct um_pci_message_buffer *buf;
u8 *data;
unsigned long ret = ULONG_MAX;
size_t bytes = sizeof(buf->data);
if (!dev)
return ULONG_MAX;
	buf = get_cpu_var(um_pci_msg_bufs);
	if (!buf)
		goto out;
	data = buf->data;
	memset(data, 0xff, bytes);
switch (size) {
case 1:
case 2:
case 4:
#ifdef CONFIG_64BIT
case 8:
#endif
break;
default:
WARN(1, "invalid config space read size %d\n", size);
goto out;
}
if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, bytes))
goto out;
switch (size) {
case 1:
ret = data[0];
break;
case 2:
ret = le16_to_cpup((void *)data);
break;
case 4:
ret = le32_to_cpup((void *)data);
break;
#ifdef CONFIG_64BIT
case 8:
ret = le64_to_cpup((void *)data);
break;
#endif
default:
break;
}
out:
put_cpu_var(um_pci_msg_bufs);
return ret;
}
static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
unsigned long val)
{
struct um_pci_device_reg *reg = priv;
struct um_pci_device *dev = reg->dev;
struct {
struct virtio_pcidev_msg hdr;
/* maximum size - we may only use parts of it */
u8 data[8];
} msg = {
.hdr = {
.op = VIRTIO_PCIDEV_OP_CFG_WRITE,
.size = size,
.addr = offset,
},
};
if (!dev)
return;
switch (size) {
case 1:
msg.data[0] = (u8)val;
break;
case 2:
put_unaligned_le16(val, (void *)msg.data);
break;
case 4:
put_unaligned_le32(val, (void *)msg.data);
break;
#ifdef CONFIG_64BIT
case 8:
put_unaligned_le64(val, (void *)msg.data);
break;
#endif
default:
WARN(1, "invalid config space write size %d\n", size);
return;
}
WARN_ON(um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0));
}
static const struct logic_iomem_ops um_pci_device_cfgspace_ops = {
.read = um_pci_cfgspace_read,
.write = um_pci_cfgspace_write,
};
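/*
 * BAR accessors.  The logic_iomem 'priv' pointer points into the device's
 * resptr[] array and the value stored there is the BAR index, so subtracting
 * it from the pointer yields &resptr[0] and container_of() recovers the
 * um_pci_device.
 */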
static void um_pci_bar_copy_from(void *priv, void *buffer,
unsigned int offset, int size)
{
u8 *resptr = priv;
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
struct virtio_pcidev_msg hdr = {
.op = VIRTIO_PCIDEV_OP_MMIO_READ,
.bar = *resptr,
.size = size,
.addr = offset,
};
memset(buffer, 0xff, size);
um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size);
}
static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
int size)
{
/* buf->data is maximum size - we may only use parts of it */
struct um_pci_message_buffer *buf;
u8 *data;
unsigned long ret = ULONG_MAX;
buf = get_cpu_var(um_pci_msg_bufs);
data = buf->data;
switch (size) {
case 1:
case 2:
case 4:
#ifdef CONFIG_64BIT
case 8:
#endif
break;
default:
WARN(1, "invalid config space read size %d\n", size);
goto out;
}
um_pci_bar_copy_from(priv, data, offset, size);
switch (size) {
case 1:
ret = data[0];
break;
case 2:
ret = le16_to_cpup((void *)data);
break;
case 4:
ret = le32_to_cpup((void *)data);
break;
#ifdef CONFIG_64BIT
case 8:
ret = le64_to_cpup((void *)data);
break;
#endif
default:
break;
}
out:
put_cpu_var(um_pci_msg_bufs);
return ret;
}
static void um_pci_bar_copy_to(void *priv, unsigned int offset,
const void *buffer, int size)
{
u8 *resptr = priv;
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
struct virtio_pcidev_msg hdr = {
.op = VIRTIO_PCIDEV_OP_MMIO_WRITE,
.bar = *resptr,
.size = size,
.addr = offset,
};
um_pci_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0);
}
static void um_pci_bar_write(void *priv, unsigned int offset, int size,
unsigned long val)
{
/* maximum size - we may only use parts of it */
u8 data[8];
switch (size) {
case 1:
data[0] = (u8)val;
break;
case 2:
put_unaligned_le16(val, (void *)data);
break;
case 4:
put_unaligned_le32(val, (void *)data);
break;
#ifdef CONFIG_64BIT
case 8:
put_unaligned_le64(val, (void *)data);
break;
#endif
default:
WARN(1, "invalid config space write size %d\n", size);
return;
}
um_pci_bar_copy_to(priv, offset, data, size);
}
static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size)
{
u8 *resptr = priv;
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
struct {
struct virtio_pcidev_msg hdr;
u8 data;
} msg = {
.hdr = {
.op = VIRTIO_PCIDEV_OP_CFG_WRITE,
.bar = *resptr,
.size = size,
.addr = offset,
},
.data = value,
};
um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0);
}
static const struct logic_iomem_ops um_pci_device_bar_ops = {
.read = um_pci_bar_read,
.write = um_pci_bar_write,
.set = um_pci_bar_set,
.copy_from = um_pci_bar_copy_from,
.copy_to = um_pci_bar_copy_to,
};
static void __iomem *um_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
struct um_pci_device_reg *dev;
unsigned int busn = bus->number;
if (busn > 0)
return NULL;
/* not allowing functions for now ... */
if (devfn % 8)
return NULL;
if (devfn / 8 >= ARRAY_SIZE(um_pci_devices))
return NULL;
dev = &um_pci_devices[devfn / 8];
if (!dev)
return NULL;
return (void __iomem *)((unsigned long)dev->iomem + where);
}
static struct pci_ops um_pci_ops = {
.map_bus = um_pci_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
};
static void um_pci_rescan(void)
{
pci_lock_rescan_remove();
pci_rescan_bus(bridge->bus);
pci_unlock_rescan_remove();
}
static void um_pci_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick)
{
struct scatterlist sg[1];
sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE);
if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC))
kfree(buf);
else if (kick)
virtqueue_kick(vq);
}
static void um_pci_handle_irq_message(struct virtqueue *vq,
struct virtio_pcidev_msg *msg)
{
struct virtio_device *vdev = vq->vdev;
struct um_pci_device *dev = vdev->priv;
if (!dev->irq)
return;
/* we should properly chain interrupts, but on ARCH=um we don't care */
switch (msg->op) {
case VIRTIO_PCIDEV_OP_INT:
generic_handle_irq(dev->irq);
break;
case VIRTIO_PCIDEV_OP_MSI:
/* our MSI message is just the interrupt number */
if (msg->size == sizeof(u32))
generic_handle_irq(le32_to_cpup((void *)msg->data));
else
generic_handle_irq(le16_to_cpup((void *)msg->data));
break;
case VIRTIO_PCIDEV_OP_PME:
/* nothing to do - we already woke up due to the message */
break;
default:
dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op);
break;
}
}
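/*
 * Command queue callback: free the buffers of completed posted writes.
 * While a synchronous um_pci_send_cmd() is polling the queue
 * (UM_PCI_STAT_WAITING) nothing is consumed here, and buffers tagged
 * HANDLE_NO_FREE belong to the sender and must not be freed.
 */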
static void um_pci_cmd_vq_cb(struct virtqueue *vq)
{
struct virtio_device *vdev = vq->vdev;
struct um_pci_device *dev = vdev->priv;
void *cmd;
int len;
if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
return;
while ((cmd = virtqueue_get_buf(vq, &len))) {
if (WARN_ON(HANDLE_IS_NO_FREE(cmd)))
continue;
kfree(cmd);
}
}
static void um_pci_irq_vq_cb(struct virtqueue *vq)
{
struct virtio_pcidev_msg *msg;
int len;
while ((msg = virtqueue_get_buf(vq, &len))) {
if (len >= sizeof(*msg))
um_pci_handle_irq_message(vq, msg);
/* recycle the message buffer */
um_pci_irq_vq_addbuf(vq, msg, true);
}
}
#ifdef CONFIG_OF
/* Copied from arch/x86/kernel/devicetree.c */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
struct device_node *np;
for_each_node_by_type(np, "pci") {
const void *prop;
unsigned int bus_min;
prop = of_get_property(np, "bus-range", NULL);
if (!prop)
continue;
bus_min = be32_to_cpup(prop);
if (bus->number == bus_min)
return np;
}
return NULL;
}
#endif
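/*
 * Set up the two virtqueues ("cmd" and "irq") and pre-post NUM_IRQ_MSGS
 * receive buffers on the irq queue so the device can deliver INTx, MSI and
 * PME messages at any time.
 */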
static int um_pci_init_vqs(struct um_pci_device *dev)
{
struct virtqueue *vqs[2];
static const char *const names[2] = { "cmd", "irq" };
vq_callback_t *cbs[2] = { um_pci_cmd_vq_cb, um_pci_irq_vq_cb };
int err, i;
err = virtio_find_vqs(dev->vdev, 2, vqs, cbs, names, NULL);
if (err)
return err;
dev->cmd_vq = vqs[0];
dev->irq_vq = vqs[1];
virtio_device_ready(dev->vdev);
for (i = 0; i < NUM_IRQ_MSGS; i++) {
void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);
if (msg)
um_pci_irq_vq_addbuf(dev->irq_vq, msg, false);
}
virtqueue_kick(dev->irq_vq);
return 0;
}
static void __um_pci_virtio_platform_remove(struct virtio_device *vdev,
struct um_pci_device *dev)
{
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
mutex_lock(&um_pci_mtx);
um_pci_platform_device = NULL;
mutex_unlock(&um_pci_mtx);
kfree(dev);
}
static int um_pci_virtio_platform_probe(struct virtio_device *vdev,
struct um_pci_device *dev)
{
int ret;
dev->platform = true;
mutex_lock(&um_pci_mtx);
if (um_pci_platform_device) {
mutex_unlock(&um_pci_mtx);
ret = -EBUSY;
goto out_free;
}
ret = um_pci_init_vqs(dev);
if (ret) {
mutex_unlock(&um_pci_mtx);
goto out_free;
}
um_pci_platform_device = dev;
mutex_unlock(&um_pci_mtx);
ret = of_platform_default_populate(vdev->dev.of_node, NULL, &vdev->dev);
if (ret)
__um_pci_virtio_platform_remove(vdev, dev);
return ret;
out_free:
kfree(dev);
return ret;
}
static int um_pci_virtio_probe(struct virtio_device *vdev)
{
struct um_pci_device *dev;
int i, free = -1;
int err = -ENOSPC;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->vdev = vdev;
vdev->priv = dev;
if (of_device_is_compatible(vdev->dev.of_node, "simple-bus"))
return um_pci_virtio_platform_probe(vdev, dev);
mutex_lock(&um_pci_mtx);
for (i = 0; i < MAX_DEVICES; i++) {
if (um_pci_devices[i].dev)
continue;
free = i;
break;
}
if (free < 0)
goto error;
err = um_pci_init_vqs(dev);
if (err)
goto error;
dev->irq = irq_alloc_desc(numa_node_id());
if (dev->irq < 0) {
err = dev->irq;
goto err_reset;
}
um_pci_devices[free].dev = dev;
vdev->priv = dev;
mutex_unlock(&um_pci_mtx);
device_set_wakeup_enable(&vdev->dev, true);
/*
* In order to do suspend-resume properly, don't allow VQs
* to be suspended.
*/
virtio_uml_set_no_vq_suspend(vdev, true);
um_pci_rescan();
return 0;
err_reset:
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
error:
mutex_unlock(&um_pci_mtx);
kfree(dev);
return err;
}
static void um_pci_virtio_remove(struct virtio_device *vdev)
{
struct um_pci_device *dev = vdev->priv;
int i;
if (dev->platform) {
of_platform_depopulate(&vdev->dev);
__um_pci_virtio_platform_remove(vdev, dev);
return;
}
device_set_wakeup_enable(&vdev->dev, false);
mutex_lock(&um_pci_mtx);
for (i = 0; i < MAX_DEVICES; i++) {
if (um_pci_devices[i].dev != dev)
continue;
um_pci_devices[i].dev = NULL;
irq_free_desc(dev->irq);
break;
}
mutex_unlock(&um_pci_mtx);
if (i < MAX_DEVICES) {
struct pci_dev *pci_dev;
pci_dev = pci_get_slot(bridge->bus, i);
if (pci_dev)
pci_stop_and_remove_bus_device_locked(pci_dev);
}
/* Stop all virtqueues */
virtio_reset_device(vdev);
dev->cmd_vq = NULL;
dev->irq_vq = NULL;
vdev->config->del_vqs(vdev);
kfree(dev);
}
static struct virtio_device_id id_table[] = {
{ CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver um_pci_virtio_driver = {
.driver.name = "virtio-pci",
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = um_pci_virtio_probe,
.remove = um_pci_virtio_remove,
};
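/*
 * The config space window sits directly below the MMIO window at 0xf0000000;
 * each of the MAX_DEVICES slots gets CFG_SPACE_SIZE bytes of it and is mapped
 * on demand through logic_iomem.
 */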
static struct resource virt_cfgspace_resource = {
.name = "PCI config space",
.start = 0xf0000000 - MAX_DEVICES * CFG_SPACE_SIZE,
.end = 0xf0000000 - 1,
.flags = IORESOURCE_MEM,
};
static long um_pci_map_cfgspace(unsigned long offset, size_t size,
const struct logic_iomem_ops **ops,
void **priv)
{
if (WARN_ON(size > CFG_SPACE_SIZE || offset % CFG_SPACE_SIZE))
return -EINVAL;
if (offset / CFG_SPACE_SIZE < MAX_DEVICES) {
*ops = &um_pci_device_cfgspace_ops;
*priv = &um_pci_devices[offset / CFG_SPACE_SIZE];
return 0;
}
WARN(1, "cannot map offset 0x%lx/0x%zx\n", offset, size);
return -ENOENT;
}
static const struct logic_iomem_region_ops um_pci_cfgspace_ops = {
.map = um_pci_map_cfgspace,
};
static struct resource virt_iomem_resource = {
.name = "PCI iomem",
.start = 0xf0000000,
.end = 0xffffffff,
.flags = IORESOURCE_MEM,
};
struct um_pci_map_iomem_data {
unsigned long offset;
size_t size;
const struct logic_iomem_ops **ops;
void **priv;
long ret;
};
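/*
 * Walk the bus looking for the device whose memory BAR fully contains the
 * requested [offset, offset + size) range; on a match, record the BAR index
 * in resptr[] and return the offset relative to the start of that BAR.
 */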
static int um_pci_map_iomem_walk(struct pci_dev *pdev, void *_data)
{
struct um_pci_map_iomem_data *data = _data;
struct um_pci_device_reg *reg = &um_pci_devices[pdev->devfn / 8];
struct um_pci_device *dev;
int i;
if (!reg->dev)
return 0;
for (i = 0; i < ARRAY_SIZE(dev->resptr); i++) {
struct resource *r = &pdev->resource[i];
if ((r->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM)
continue;
/*
* must be the whole or part of the resource,
* not allowed to only overlap
*/
if (data->offset < r->start || data->offset > r->end)
continue;
if (data->offset + data->size - 1 > r->end)
continue;
dev = reg->dev;
*data->ops = &um_pci_device_bar_ops;
dev->resptr[i] = i;
*data->priv = &dev->resptr[i];
data->ret = data->offset - r->start;
/* no need to continue */
return 1;
}
return 0;
}
static long um_pci_map_iomem(unsigned long offset, size_t size,
const struct logic_iomem_ops **ops,
void **priv)
{
struct um_pci_map_iomem_data data = {
/* we want the full address here */
.offset = offset + virt_iomem_resource.start,
.size = size,
.ops = ops,
.priv = priv,
.ret = -ENOENT,
};
pci_walk_bus(bridge->bus, um_pci_map_iomem_walk, &data);
return data.ret;
}
static const struct logic_iomem_region_ops um_pci_iomem_ops = {
.map = um_pci_map_iomem,
};
static void um_pci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
/*
* This is a very low address and not actually valid 'physical' memory
* in UML, so we can simply map MSI(-X) vectors to there, it cannot be
* legitimately written to by the device in any other way.
* We use the (virtual) IRQ number here as the message to simplify the
* code that receives the message, where for now we simply trust the
* device to send the correct message.
*/
msg->address_hi = 0;
msg->address_lo = 0xa0000;
msg->data = data->irq;
}
static struct irq_chip um_pci_msi_bottom_irq_chip = {
.name = "UM virtio MSI",
.irq_compose_msi_msg = um_pci_compose_msi_msg,
};
static int um_pci_inner_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *args)
{
unsigned long bit;
WARN_ON(nr_irqs != 1);
mutex_lock(&um_pci_mtx);
bit = find_first_zero_bit(um_pci_msi_used, MAX_MSI_VECTORS);
if (bit >= MAX_MSI_VECTORS) {
mutex_unlock(&um_pci_mtx);
return -ENOSPC;
}
set_bit(bit, um_pci_msi_used);
mutex_unlock(&um_pci_mtx);
irq_domain_set_info(domain, virq, bit, &um_pci_msi_bottom_irq_chip,
domain->host_data, handle_simple_irq,
NULL, NULL);
return 0;
}
static void um_pci_inner_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
mutex_lock(&um_pci_mtx);
if (!test_bit(d->hwirq, um_pci_msi_used))
pr_err("trying to free unused MSI#%lu\n", d->hwirq);
else
__clear_bit(d->hwirq, um_pci_msi_used);
mutex_unlock(&um_pci_mtx);
}
static const struct irq_domain_ops um_pci_inner_domain_ops = {
.alloc = um_pci_inner_domain_alloc,
.free = um_pci_inner_domain_free,
};
static struct irq_chip um_pci_msi_irq_chip = {
.name = "UM virtio PCIe MSI",
.irq_mask = pci_msi_mask_irq,
.irq_unmask = pci_msi_unmask_irq,
};
static struct msi_domain_info um_pci_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS |
MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_PCI_MSIX,
.chip = &um_pci_msi_irq_chip,
};
static struct resource busn_resource = {
.name = "PCI busn",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUS,
};
static int um_pci_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
struct um_pci_device_reg *reg = &um_pci_devices[pdev->devfn / 8];
if (WARN_ON(!reg->dev))
return -EINVAL;
/* Yes, we map all pins to the same IRQ ... doesn't matter for now. */
return reg->dev->irq;
}
void *pci_root_bus_fwnode(struct pci_bus *bus)
{
return um_pci_fwnode;
}
static long um_pci_map_platform(unsigned long offset, size_t size,
const struct logic_iomem_ops **ops,
void **priv)
{
if (!um_pci_platform_device)
return -ENOENT;
*ops = &um_pci_device_bar_ops;
*priv = &um_pci_platform_device->resptr[0];
return 0;
}
static const struct logic_iomem_region_ops um_pci_platform_ops = {
.map = um_pci_map_platform,
};
static struct resource virt_platform_resource = {
.name = "platform",
.start = 0x10000000,
.end = 0x1fffffff,
.flags = IORESOURCE_MEM,
};
static int __init um_pci_init(void)
{
int err, i;
WARN_ON(logic_iomem_add_region(&virt_cfgspace_resource,
&um_pci_cfgspace_ops));
WARN_ON(logic_iomem_add_region(&virt_iomem_resource,
&um_pci_iomem_ops));
WARN_ON(logic_iomem_add_region(&virt_platform_resource,
&um_pci_platform_ops));
if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0,
"No virtio device ID configured for PCI - no PCI support\n"))
return 0;
um_pci_msg_bufs = alloc_percpu(struct um_pci_message_buffer);
if (!um_pci_msg_bufs)
return -ENOMEM;
bridge = pci_alloc_host_bridge(0);
if (!bridge) {
err = -ENOMEM;
goto free;
}
um_pci_fwnode = irq_domain_alloc_named_fwnode("um-pci");
if (!um_pci_fwnode) {
err = -ENOMEM;
goto free;
}
um_pci_inner_domain = __irq_domain_add(um_pci_fwnode, MAX_MSI_VECTORS,
MAX_MSI_VECTORS, 0,
&um_pci_inner_domain_ops, NULL);
if (!um_pci_inner_domain) {
err = -ENOMEM;
goto free;
}
um_pci_msi_domain = pci_msi_create_irq_domain(um_pci_fwnode,
&um_pci_msi_domain_info,
um_pci_inner_domain);
if (!um_pci_msi_domain) {
err = -ENOMEM;
goto free;
}
pci_add_resource(&bridge->windows, &virt_iomem_resource);
pci_add_resource(&bridge->windows, &busn_resource);
bridge->ops = &um_pci_ops;
bridge->map_irq = um_pci_map_irq;
for (i = 0; i < MAX_DEVICES; i++) {
resource_size_t start;
start = virt_cfgspace_resource.start + i * CFG_SPACE_SIZE;
um_pci_devices[i].iomem = ioremap(start, CFG_SPACE_SIZE);
if (WARN(!um_pci_devices[i].iomem, "failed to map %d\n", i)) {
err = -ENOMEM;
goto free;
}
}
err = pci_host_probe(bridge);
if (err)
goto free;
err = register_virtio_driver(&um_pci_virtio_driver);
if (err)
goto free;
return 0;
free:
if (um_pci_inner_domain)
irq_domain_remove(um_pci_inner_domain);
if (um_pci_fwnode)
irq_domain_free_fwnode(um_pci_fwnode);
if (bridge) {
pci_free_resource_list(&bridge->windows);
pci_free_host_bridge(bridge);
}
free_percpu(um_pci_msg_bufs);
return err;
}
module_init(um_pci_init);
static void __exit um_pci_exit(void)
{
unregister_virtio_driver(&um_pci_virtio_driver);
irq_domain_remove(um_pci_msi_domain);
irq_domain_remove(um_pci_inner_domain);
pci_free_resource_list(&bridge->windows);
pci_free_host_bridge(bridge);
free_percpu(um_pci_msg_bufs);
}
module_exit(um_pci_exit);
| linux-master | arch/um/drivers/virt-pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 Lennert Buytenhek ([email protected])
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/un.h>
#include "mconsole.h"
static struct mconsole_command commands[] = {
/*
* With uts namespaces, uts information becomes process-specific, so
* we need a process context. If we try handling this in interrupt
* context, we may hit an exiting process without a valid uts
* namespace.
*/
{ "version", mconsole_version, MCONSOLE_PROC },
{ "halt", mconsole_halt, MCONSOLE_PROC },
{ "reboot", mconsole_reboot, MCONSOLE_PROC },
{ "config", mconsole_config, MCONSOLE_PROC },
{ "remove", mconsole_remove, MCONSOLE_PROC },
{ "sysrq", mconsole_sysrq, MCONSOLE_INTR },
{ "help", mconsole_help, MCONSOLE_INTR },
{ "cad", mconsole_cad, MCONSOLE_INTR },
{ "stop", mconsole_stop, MCONSOLE_PROC },
{ "go", mconsole_go, MCONSOLE_INTR },
{ "log", mconsole_log, MCONSOLE_INTR },
{ "proc", mconsole_proc, MCONSOLE_PROC },
{ "stack", mconsole_stack, MCONSOLE_INTR },
};
/* Initialized in mconsole_init, which is an initcall */
char mconsole_socket_name[256];
static int mconsole_reply_v0(struct mc_request *req, char *reply)
{
struct iovec iov;
struct msghdr msg;
iov.iov_base = reply;
iov.iov_len = strlen(reply);
msg.msg_name = &(req->origin);
msg.msg_namelen = req->originlen;
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
return sendmsg(req->originating_fd, &msg, 0);
}
static struct mconsole_command *mconsole_parse(struct mc_request *req)
{
struct mconsole_command *cmd;
int i;
for (i = 0; i < ARRAY_SIZE(commands); i++) {
cmd = &commands[i];
if (!strncmp(req->request.data, cmd->command,
strlen(cmd->command))) {
return cmd;
}
}
return NULL;
}
#define MIN(a,b) ((a)<(b) ? (a):(b))
#define STRINGX(x) #x
#define STRING(x) STRINGX(x)
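/*
 * Read one request from the mconsole socket.  Unversioned (version 0)
 * clients, over-long requests and unknown commands get an error reply and
 * are dropped; on success the parsed command is stored in req->cmd and 1 is
 * returned.
 */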
int mconsole_get_request(int fd, struct mc_request *req)
{
int len;
req->originlen = sizeof(req->origin);
req->len = recvfrom(fd, &req->request, sizeof(req->request), 0,
(struct sockaddr *) req->origin, &req->originlen);
if (req->len < 0)
return 0;
req->originating_fd = fd;
if (req->request.magic != MCONSOLE_MAGIC) {
/* Unversioned request */
len = MIN(sizeof(req->request.data) - 1,
strlen((char *) &req->request));
memmove(req->request.data, &req->request, len);
req->request.data[len] = '\0';
req->request.magic = MCONSOLE_MAGIC;
req->request.version = 0;
req->request.len = len;
mconsole_reply_v0(req, "ERR Version 0 mconsole clients are "
"not supported by this driver");
return 0;
}
if (req->request.len >= MCONSOLE_MAX_DATA) {
mconsole_reply(req, "Request too large", 1, 0);
return 0;
}
if (req->request.version != MCONSOLE_VERSION) {
mconsole_reply(req, "This driver only supports version "
STRING(MCONSOLE_VERSION) " clients", 1, 0);
}
req->request.data[req->request.len] = '\0';
req->cmd = mconsole_parse(req);
if (req->cmd == NULL) {
mconsole_reply(req, "Unknown command", 1, 0);
return 0;
}
return 1;
}
int mconsole_reply_len(struct mc_request *req, const char *str, int total,
int err, int more)
{
/*
* XXX This is a stack consumption problem. It'd be nice to
* make it global and serialize access to it, but there are a
* ton of callers to this function.
*/
struct mconsole_reply reply;
int len, n;
do {
reply.err = err;
/* err can only be true on the first packet */
err = 0;
len = MIN(total, MCONSOLE_MAX_DATA - 1);
if (len == total) reply.more = more;
else reply.more = 1;
memcpy(reply.data, str, len);
reply.data[len] = '\0';
total -= len;
str += len;
reply.len = len + 1;
len = sizeof(reply) + reply.len - sizeof(reply.data);
n = sendto(req->originating_fd, &reply, len, 0,
(struct sockaddr *) req->origin, req->originlen);
if (n < 0)
return -errno;
} while (total > 0);
return 0;
}
int mconsole_reply(struct mc_request *req, const char *str, int err, int more)
{
return mconsole_reply_len(req, str, strlen(str), err, more);
}
int mconsole_unlink_socket(void)
{
unlink(mconsole_socket_name);
return 0;
}
static int notify_sock = -1;
int mconsole_notify(char *sock_name, int type, const void *data, int len)
{
struct sockaddr_un target;
struct mconsole_notify packet;
int n, err = 0;
lock_notify();
if (notify_sock < 0) {
notify_sock = socket(PF_UNIX, SOCK_DGRAM, 0);
if (notify_sock < 0) {
err = -errno;
printk(UM_KERN_ERR "mconsole_notify - socket failed, "
"errno = %d\n", errno);
}
}
unlock_notify();
if (err)
return err;
target.sun_family = AF_UNIX;
strcpy(target.sun_path, sock_name);
packet.magic = MCONSOLE_MAGIC;
packet.version = MCONSOLE_VERSION;
packet.type = type;
len = (len > sizeof(packet.data)) ? sizeof(packet.data) : len;
packet.len = len;
memcpy(packet.data, data, len);
err = 0;
len = sizeof(packet) + packet.len - sizeof(packet.data);
n = sendto(notify_sock, &packet, len, 0, (struct sockaddr *) &target,
sizeof(target));
if (n < 0) {
err = -errno;
printk(UM_KERN_ERR "mconsole_notify - sendto failed, "
"errno = %d\n", errno);
}
return err;
}
| linux-master | arch/um/drivers/mconsole_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 - Cambridge Greys Limited
* Copyright (C) 2011 - 2014 Cisco Systems Inc
*/
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/virtio_net.h>
#include <linux/virtio_net.h>
#include <linux/virtio_byteorder.h>
#include <linux/netdev_features.h>
#include "vector_user.h"
#include "vector_kern.h"
#define GOOD_LINEAR 512
#define GSO_ERROR "Incoming GSO frames and GRO disabled on the interface"
struct gre_minimal_header {
uint16_t header;
uint16_t arptype;
};
struct uml_gre_data {
uint32_t rx_key;
uint32_t tx_key;
uint32_t sequence;
bool ipv6;
bool has_sequence;
bool pin_sequence;
bool checksum;
bool key;
struct gre_minimal_header expected_header;
uint32_t checksum_offset;
uint32_t key_offset;
uint32_t sequence_offset;
};
struct uml_l2tpv3_data {
uint64_t rx_cookie;
uint64_t tx_cookie;
uint64_t rx_session;
uint64_t tx_session;
uint32_t counter;
bool udp;
bool ipv6;
bool has_counter;
bool pin_counter;
bool cookie;
bool cookie_is_64;
uint32_t cookie_offset;
uint32_t session_offset;
uint32_t counter_offset;
};
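/*
 * Build the L2TPv3 encapsulation header for an outgoing frame: the session
 * ID, an optional 32/64-bit cookie and an optional counter (incremented per
 * packet or pinned to zero).  When running over UDP, the L2TPV3_DATA_PACKET
 * marker is written first.
 */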
static int l2tpv3_form_header(uint8_t *header,
struct sk_buff *skb, struct vector_private *vp)
{
struct uml_l2tpv3_data *td = vp->transport_data;
uint32_t *counter;
if (td->udp)
*(uint32_t *) header = cpu_to_be32(L2TPV3_DATA_PACKET);
(*(uint32_t *) (header + td->session_offset)) = td->tx_session;
if (td->cookie) {
if (td->cookie_is_64)
(*(uint64_t *)(header + td->cookie_offset)) =
td->tx_cookie;
else
(*(uint32_t *)(header + td->cookie_offset)) =
td->tx_cookie;
}
if (td->has_counter) {
counter = (uint32_t *)(header + td->counter_offset);
if (td->pin_counter) {
*counter = 0;
} else {
td->counter++;
*counter = cpu_to_be32(td->counter);
}
}
return 0;
}
static int gre_form_header(uint8_t *header,
struct sk_buff *skb, struct vector_private *vp)
{
struct uml_gre_data *td = vp->transport_data;
uint32_t *sequence;
*((uint32_t *) header) = *((uint32_t *) &td->expected_header);
if (td->key)
(*(uint32_t *) (header + td->key_offset)) = td->tx_key;
if (td->has_sequence) {
sequence = (uint32_t *)(header + td->sequence_offset);
if (td->pin_sequence)
*sequence = 0;
else
*sequence = cpu_to_be32(++td->sequence);
}
return 0;
}
static int raw_form_header(uint8_t *header,
struct sk_buff *skb, struct vector_private *vp)
{
struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;
virtio_net_hdr_from_skb(
skb,
vheader,
virtio_legacy_is_little_endian(),
false,
0
);
return 0;
}
static int l2tpv3_verify_header(
uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
struct uml_l2tpv3_data *td = vp->transport_data;
uint32_t *session;
uint64_t cookie;
if ((!td->udp) && (!td->ipv6))
header += sizeof(struct iphdr) /* fix for ipv4 raw */;
/* we do not do a strict check for "data" packets as per
* the RFC spec because the pure IP spec does not have
* that anyway.
*/
if (td->cookie) {
if (td->cookie_is_64)
cookie = *(uint64_t *)(header + td->cookie_offset);
else
cookie = *(uint32_t *)(header + td->cookie_offset);
if (cookie != td->rx_cookie) {
if (net_ratelimit())
netdev_err(vp->dev, "uml_l2tpv3: unknown cookie id");
return -1;
}
}
session = (uint32_t *) (header + td->session_offset);
if (*session != td->rx_session) {
if (net_ratelimit())
netdev_err(vp->dev, "uml_l2tpv3: session mismatch");
return -1;
}
return 0;
}
static int gre_verify_header(
uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
uint32_t key;
struct uml_gre_data *td = vp->transport_data;
if (!td->ipv6)
header += sizeof(struct iphdr) /* fix for ipv4 raw */;
if (*((uint32_t *) header) != *((uint32_t *) &td->expected_header)) {
if (net_ratelimit())
netdev_err(vp->dev, "header type disagreement, expecting %0x, got %0x",
*((uint32_t *) &td->expected_header),
*((uint32_t *) header)
);
return -1;
}
if (td->key) {
key = (*(uint32_t *)(header + td->key_offset));
if (key != td->rx_key) {
if (net_ratelimit())
netdev_err(vp->dev, "unknown key id %0x, expecting %0x",
key, td->rx_key);
return -1;
}
}
return 0;
}
static int raw_verify_header(
uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;
if ((vheader->gso_type != VIRTIO_NET_HDR_GSO_NONE) &&
(vp->req_size != 65536)) {
if (net_ratelimit())
netdev_err(
vp->dev,
GSO_ERROR
);
}
if ((vheader->flags & VIRTIO_NET_HDR_F_DATA_VALID) > 0)
return 1;
virtio_net_hdr_to_skb(skb, vheader, virtio_legacy_is_little_endian());
return 0;
}
static bool get_uint_param(
struct arglist *def, char *param, unsigned int *result)
{
char *arg = uml_vector_fetch_arg(def, param);
if (arg != NULL) {
if (kstrtoint(arg, 0, result) == 0)
return true;
}
return false;
}
static bool get_ulong_param(
struct arglist *def, char *param, unsigned long *result)
{
char *arg = uml_vector_fetch_arg(def, param);
if (arg != NULL) {
		if (kstrtoul(arg, 0, result) == 0)
			return true;
}
return false;
}
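/*
 * Parse the GRE command line options (v6, rx_key/tx_key, sequence,
 * pin_sequence) and precompute the header size and the offsets of the
 * optional key and sequence fields.
 */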
static int build_gre_transport_data(struct vector_private *vp)
{
struct uml_gre_data *td;
int temp_int;
int temp_rx;
int temp_tx;
	vp->transport_data = kzalloc(sizeof(struct uml_gre_data), GFP_KERNEL);
if (vp->transport_data == NULL)
return -ENOMEM;
td = vp->transport_data;
td->sequence = 0;
td->expected_header.arptype = GRE_IRB;
td->expected_header.header = 0;
vp->form_header = &gre_form_header;
vp->verify_header = &gre_verify_header;
vp->header_size = 4;
td->key_offset = 4;
td->sequence_offset = 4;
td->checksum_offset = 4;
td->ipv6 = false;
if (get_uint_param(vp->parsed, "v6", &temp_int)) {
if (temp_int > 0)
td->ipv6 = true;
}
td->key = false;
if (get_uint_param(vp->parsed, "rx_key", &temp_rx)) {
if (get_uint_param(vp->parsed, "tx_key", &temp_tx)) {
td->key = true;
td->expected_header.header |= GRE_MODE_KEY;
td->rx_key = cpu_to_be32(temp_rx);
td->tx_key = cpu_to_be32(temp_tx);
vp->header_size += 4;
td->sequence_offset += 4;
} else {
return -EINVAL;
}
}
	td->has_sequence = false;
if (get_uint_param(vp->parsed, "sequence", &temp_int)) {
if (temp_int > 0) {
vp->header_size += 4;
td->has_sequence = true;
td->expected_header.header |= GRE_MODE_SEQUENCE;
if (get_uint_param(
vp->parsed, "pin_sequence", &temp_int)) {
if (temp_int > 0)
td->pin_sequence = true;
}
}
}
vp->rx_header_size = vp->header_size;
if (!td->ipv6)
vp->rx_header_size += sizeof(struct iphdr);
return 0;
}
static int build_l2tpv3_transport_data(struct vector_private *vp)
{
struct uml_l2tpv3_data *td;
int temp_int, temp_rxs, temp_txs;
unsigned long temp_rx;
unsigned long temp_tx;
	vp->transport_data = kzalloc(
		sizeof(struct uml_l2tpv3_data), GFP_KERNEL);
if (vp->transport_data == NULL)
return -ENOMEM;
td = vp->transport_data;
vp->form_header = &l2tpv3_form_header;
vp->verify_header = &l2tpv3_verify_header;
td->counter = 0;
vp->header_size = 4;
td->session_offset = 0;
td->cookie_offset = 4;
td->counter_offset = 4;
td->ipv6 = false;
if (get_uint_param(vp->parsed, "v6", &temp_int)) {
if (temp_int > 0)
td->ipv6 = true;
}
if (get_uint_param(vp->parsed, "rx_session", &temp_rxs)) {
if (get_uint_param(vp->parsed, "tx_session", &temp_txs)) {
td->tx_session = cpu_to_be32(temp_txs);
td->rx_session = cpu_to_be32(temp_rxs);
} else {
return -EINVAL;
}
} else {
return -EINVAL;
}
td->cookie_is_64 = false;
if (get_uint_param(vp->parsed, "cookie64", &temp_int)) {
if (temp_int > 0)
td->cookie_is_64 = true;
}
td->cookie = false;
if (get_ulong_param(vp->parsed, "rx_cookie", &temp_rx)) {
if (get_ulong_param(vp->parsed, "tx_cookie", &temp_tx)) {
td->cookie = true;
if (td->cookie_is_64) {
td->rx_cookie = cpu_to_be64(temp_rx);
td->tx_cookie = cpu_to_be64(temp_tx);
vp->header_size += 8;
td->counter_offset += 8;
} else {
td->rx_cookie = cpu_to_be32(temp_rx);
td->tx_cookie = cpu_to_be32(temp_tx);
vp->header_size += 4;
td->counter_offset += 4;
}
} else {
return -EINVAL;
}
}
td->has_counter = false;
if (get_uint_param(vp->parsed, "counter", &temp_int)) {
if (temp_int > 0) {
td->has_counter = true;
vp->header_size += 4;
if (get_uint_param(
vp->parsed, "pin_counter", &temp_int)) {
if (temp_int > 0)
td->pin_counter = true;
}
}
}
if (get_uint_param(vp->parsed, "udp", &temp_int)) {
if (temp_int > 0) {
td->udp = true;
vp->header_size += 4;
td->counter_offset += 4;
td->session_offset += 4;
td->cookie_offset += 4;
}
}
vp->rx_header_size = vp->header_size;
if ((!td->ipv6) && (!td->udp))
vp->rx_header_size += sizeof(struct iphdr);
return 0;
}
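/*
 * For the raw, tap and hybrid transports below, try to enable vnet headers
 * on the host fd(s); if that succeeds, advertise TSO/GRO and checksum
 * offloads to the stack and use the virtio_net_hdr form/verify helpers
 * above.
 */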
static int build_raw_transport_data(struct vector_private *vp)
{
if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
if (!uml_raw_enable_vnet_headers(vp->fds->tx_fd))
return -1;
vp->form_header = &raw_form_header;
vp->verify_header = &raw_verify_header;
vp->header_size = sizeof(struct virtio_net_hdr);
vp->rx_header_size = sizeof(struct virtio_net_hdr);
vp->dev->hw_features |= (NETIF_F_TSO | NETIF_F_GRO);
vp->dev->features |=
(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_GRO);
netdev_info(
vp->dev,
"raw: using vnet headers for tso and tx/rx checksum"
);
}
return 0;
}
static int build_hybrid_transport_data(struct vector_private *vp)
{
if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
vp->form_header = &raw_form_header;
vp->verify_header = &raw_verify_header;
vp->header_size = sizeof(struct virtio_net_hdr);
vp->rx_header_size = sizeof(struct virtio_net_hdr);
vp->dev->hw_features |=
(NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
vp->dev->features |=
(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
netdev_info(
vp->dev,
"tap/raw hybrid: using vnet headers for tso and tx/rx checksum"
);
} else {
return 0; /* do not try to enable tap too if raw failed */
}
if (uml_tap_enable_vnet_headers(vp->fds->tx_fd))
return 0;
return -1;
}
static int build_tap_transport_data(struct vector_private *vp)
{
/* "Pure" tap uses the same fd for rx and tx */
if (uml_tap_enable_vnet_headers(vp->fds->tx_fd)) {
vp->form_header = &raw_form_header;
vp->verify_header = &raw_verify_header;
vp->header_size = sizeof(struct virtio_net_hdr);
vp->rx_header_size = sizeof(struct virtio_net_hdr);
vp->dev->hw_features |=
(NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
vp->dev->features |=
(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
netdev_info(
vp->dev,
"tap: using vnet headers for tso and tx/rx checksum"
);
return 0;
}
return -1;
}
static int build_bess_transport_data(struct vector_private *vp)
{
vp->form_header = NULL;
vp->verify_header = NULL;
vp->header_size = 0;
vp->rx_header_size = 0;
return 0;
}
int build_transport_data(struct vector_private *vp)
{
char *transport = uml_vector_fetch_arg(vp->parsed, "transport");
if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
return build_gre_transport_data(vp);
if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
return build_l2tpv3_transport_data(vp);
if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
return build_raw_transport_data(vp);
if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
return build_tap_transport_data(vp);
if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
return build_hybrid_transport_data(vp);
if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0)
return build_bess_transport_data(vp);
return 0;
}
| linux-master | arch/um/drivers/vector_transports.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Cambridge Greys Ltd
* Copyright (C) 2015-2016 Anton Ivanov ([email protected])
* Copyright (C) 2000 Jeff Dike ([email protected])
*/
/* 2001-09-28...2002-04-17
* Partition stuff by [email protected]
* old style ubd by setting UBD_SHIFT to 0
* 2002-09-27...2002-10-18 massive tinkering for 2.5
* partitions have changed in 2.5
* 2003-01-29 more tinkering for 2.5.59-1
* This should now address the sysfs problems and has
* the symlink for devfs to allow for booting with
* the common /dev/ubd/discX/... names rather than
* only /dev/ubdN/discN this version also has lots of
* clean ups preparing for ubd-many.
* James McMechan
*/
#define UBD_SHIFT 4
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/major.h>
#include <linux/cdrom.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <asm/tlbflush.h>
#include <kern_util.h>
#include "mconsole_kern.h"
#include <init.h>
#include <irq_kern.h>
#include "ubd.h"
#include <os.h>
#include "cow.h"
/* Max request size is determined by sector mask - 32K */
#define UBD_MAX_REQUEST (8 * sizeof(long))
struct io_desc {
char *buffer;
unsigned long length;
unsigned long sector_mask;
unsigned long long cow_offset;
unsigned long bitmap_words[2];
};
struct io_thread_req {
struct request *req;
int fds[2];
unsigned long offsets[2];
unsigned long long offset;
int sectorsize;
int error;
int desc_cnt;
/* io_desc has to be the last element of the struct */
struct io_desc io_desc[];
};
static struct io_thread_req * (*irq_req_buffer)[];
static struct io_thread_req *irq_remainder;
static int irq_remainder_size;
static struct io_thread_req * (*io_req_buffer)[];
static struct io_thread_req *io_remainder;
static int io_remainder_size;
static inline int ubd_test_bit(__u64 bit, unsigned char *data)
{
__u64 n;
int bits, off;
bits = sizeof(data[0]) * 8;
n = bit / bits;
off = bit % bits;
return (data[n] & (1 << off)) != 0;
}
static inline void ubd_set_bit(__u64 bit, unsigned char *data)
{
__u64 n;
int bits, off;
bits = sizeof(data[0]) * 8;
n = bit / bits;
off = bit % bits;
data[n] |= (1 << off);
}
/*End stuff from ubd_user.h*/
#define DRIVER_NAME "uml-blkdev"
static DEFINE_MUTEX(ubd_lock);
static DEFINE_MUTEX(ubd_mutex); /* replaces BKL, might not be needed */
static int ubd_open(struct gendisk *disk, blk_mode_t mode);
static void ubd_release(struct gendisk *disk);
static int ubd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg);
static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
#define MAX_DEV (16)
static const struct block_device_operations ubd_blops = {
.owner = THIS_MODULE,
.open = ubd_open,
.release = ubd_release,
.ioctl = ubd_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
.getgeo = ubd_getgeo,
};
/* Protected by ubd_lock */
static struct gendisk *ubd_gendisk[MAX_DEV];
#ifdef CONFIG_BLK_DEV_UBD_SYNC
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \
.cl = 1 })
#else
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 0, .c = 0, \
.cl = 1 })
#endif
static struct openflags global_openflags = OPEN_FLAGS;
struct cow {
/* backing file name */
char *file;
/* backing file fd */
int fd;
unsigned long *bitmap;
unsigned long bitmap_len;
int bitmap_offset;
int data_offset;
};
#define MAX_SG 64
struct ubd {
/* name (and fd, below) of the file opened for writing, either the
* backing or the cow file. */
char *file;
char *serial;
int count;
int fd;
__u64 size;
struct openflags boot_openflags;
struct openflags openflags;
unsigned shared:1;
unsigned no_cow:1;
unsigned no_trim:1;
struct cow cow;
struct platform_device pdev;
struct request_queue *queue;
struct blk_mq_tag_set tag_set;
spinlock_t lock;
};
#define DEFAULT_COW { \
.file = NULL, \
.fd = -1, \
.bitmap = NULL, \
.bitmap_offset = 0, \
.data_offset = 0, \
}
#define DEFAULT_UBD { \
.file = NULL, \
.serial = NULL, \
.count = 0, \
.fd = -1, \
.size = -1, \
.boot_openflags = OPEN_FLAGS, \
.openflags = OPEN_FLAGS, \
.no_cow = 0, \
.no_trim = 0, \
.shared = 0, \
.cow = DEFAULT_COW, \
.lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
}
/* Protected by ubd_lock */
static struct ubd ubd_devs[MAX_DEV] = { [0 ... MAX_DEV - 1] = DEFAULT_UBD };
static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd);
static int fake_ide_setup(char *str)
{
pr_warn("The fake_ide option has been removed\n");
return 1;
}
__setup("fake_ide", fake_ide_setup);
__uml_help(fake_ide_setup,
"fake_ide\n"
" Obsolete stub.\n\n"
);
static int parse_unit(char **ptr)
{
char *str = *ptr, *end;
int n = -1;
if(isdigit(*str)) {
n = simple_strtoul(str, &end, 0);
if(end == str)
return -1;
*ptr = end;
}
else if (('a' <= *str) && (*str <= 'z')) {
n = *str - 'a';
str++;
*ptr = str;
}
return n;
}
/* If *index_out == -1 at exit, the passed option was a general one;
* otherwise, the str pointer is used (and owned) inside ubd_devs array, so it
* should not be freed on exit.
*/
static int ubd_setup_common(char *str, int *index_out, char **error_out)
{
struct ubd *ubd_dev;
struct openflags flags = global_openflags;
char *file, *backing_file, *serial;
int n, err = 0, i;
if(index_out) *index_out = -1;
n = *str;
if(n == '='){
str++;
if(!strcmp(str, "sync")){
global_openflags = of_sync(global_openflags);
return err;
}
pr_warn("fake major not supported any more\n");
return 0;
}
n = parse_unit(&str);
if(n < 0){
*error_out = "Couldn't parse device number";
return -EINVAL;
}
if(n >= MAX_DEV){
*error_out = "Device number out of range";
return 1;
}
err = -EBUSY;
mutex_lock(&ubd_lock);
ubd_dev = &ubd_devs[n];
if(ubd_dev->file != NULL){
*error_out = "Device is already configured";
goto out;
}
if (index_out)
*index_out = n;
err = -EINVAL;
for (i = 0; i < sizeof("rscdt="); i++) {
switch (*str) {
case 'r':
flags.w = 0;
break;
case 's':
flags.s = 1;
break;
case 'd':
ubd_dev->no_cow = 1;
break;
case 'c':
ubd_dev->shared = 1;
break;
case 't':
ubd_dev->no_trim = 1;
break;
case '=':
str++;
goto break_loop;
default:
*error_out = "Expected '=' or flag letter "
"(r, s, c, t or d)";
goto out;
}
str++;
}
if (*str == '=')
*error_out = "Too many flags specified";
else
*error_out = "Missing '='";
goto out;
break_loop:
file = strsep(&str, ",:");
if (*file == '\0')
file = NULL;
backing_file = strsep(&str, ",:");
if (backing_file && *backing_file == '\0')
backing_file = NULL;
serial = strsep(&str, ",:");
if (serial && *serial == '\0')
serial = NULL;
if (backing_file && ubd_dev->no_cow) {
*error_out = "Can't specify both 'd' and a cow file";
goto out;
}
err = 0;
ubd_dev->file = file;
ubd_dev->cow.file = backing_file;
ubd_dev->serial = serial;
ubd_dev->boot_openflags = flags;
out:
mutex_unlock(&ubd_lock);
return err;
}
static int ubd_setup(char *str)
{
char *error;
int err;
err = ubd_setup_common(str, NULL, &error);
if(err)
printk(KERN_ERR "Failed to initialize device with \"%s\" : "
"%s\n", str, error);
return 1;
}
__setup("ubd", ubd_setup);
__uml_help(ubd_setup,
"ubd<n><flags>=<filename>[(:|,)<filename2>][(:|,)<serial>]\n"
" This is used to associate a device with a file in the underlying\n"
" filesystem. When specifying two filenames, the first one is the\n"
" COW name and the second is the backing file name. As separator you can\n"
" use either a ':' or a ',': the first one allows writing things like;\n"
" ubd0=~/Uml/root_cow:~/Uml/root_backing_file\n"
" while with a ',' the shell would not expand the 2nd '~'.\n"
" When using only one filename, UML will detect whether to treat it like\n"
" a COW file or a backing file. To override this detection, add the 'd'\n"
" flag:\n"
" ubd0d=BackingFile\n"
" Usually, there is a filesystem in the file, but \n"
" that's not required. Swap devices containing swap files can be\n"
" specified like this. Also, a file which doesn't contain a\n"
" filesystem can have its contents read in the virtual \n"
" machine by running 'dd' on the device. <n> must be in the range\n"
" 0 to 7. Appending an 'r' to the number will cause that device\n"
" to be mounted read-only. For example ubd1r=./ext_fs. Appending\n"
" an 's' will cause data to be written to disk on the host immediately.\n"
" 'c' will cause the device to be treated as being shared between multiple\n"
" UMLs and file locking will be turned off - this is appropriate for a\n"
" cluster filesystem and inappropriate at almost all other times.\n\n"
" 't' will disable trim/discard support on the device (enabled by default).\n\n"
" An optional device serial number can be exposed using the serial parameter\n"
" on the cmdline which is exposed as a sysfs entry. This is particularly\n"
" useful when a unique number should be given to the device. Note when\n"
" specifying a label, the filename2 must be also presented. It can be\n"
" an empty string, in which case the backing file is not used:\n"
" ubd0=File,,Serial\n"
);
static int udb_setup(char *str)
{
printk("udb%s specified on command line is almost certainly a ubd -> "
"udb TYPO\n", str);
return 1;
}
__setup("udb", udb_setup);
__uml_help(udb_setup,
"udb\n"
" This option is here solely to catch ubd -> udb typos, which can be\n"
" to impossible to catch visually unless you specifically look for\n"
" them. The only result of any option starting with 'udb' is an error\n"
" in the boot output.\n\n"
);
/* Only changed by ubd_init, which is an initcall. */
static int thread_fd = -1;
/* Function to read several request pointers at a time
* handling fractional reads if (and as) needed
*/
static int bulk_req_safe_read(
int fd,
struct io_thread_req * (*request_buffer)[],
struct io_thread_req **remainder,
int *remainder_size,
int max_recs
)
{
int n = 0;
int res = 0;
if (*remainder_size > 0) {
memmove(
(char *) request_buffer,
(char *) remainder, *remainder_size
);
n = *remainder_size;
}
res = os_read_file(
fd,
((char *) request_buffer) + *remainder_size,
sizeof(struct io_thread_req *)*max_recs
- *remainder_size
);
if (res > 0) {
n += res;
if ((n % sizeof(struct io_thread_req *)) > 0) {
			/*
			 * The read returned a length that is not a multiple
			 * of a request pointer - theoretically possible, but
			 * never observed in the wild, so the read routine
			 * must be able to handle it.
			 */
*remainder_size = n % sizeof(struct io_thread_req *);
WARN(*remainder_size > 0, "UBD IPC read returned a partial result");
memmove(
remainder,
((char *) request_buffer) +
(n/sizeof(struct io_thread_req *))*sizeof(struct io_thread_req *),
*remainder_size
);
n = n - *remainder_size;
}
} else {
n = res;
}
return n;
}
/* Called without dev->lock held, and only in interrupt context. */
static void ubd_handler(void)
{
int n;
int count;
while(1){
n = bulk_req_safe_read(
thread_fd,
irq_req_buffer,
&irq_remainder,
&irq_remainder_size,
UBD_REQ_BUFFER_SIZE
);
if (n < 0) {
if(n == -EAGAIN)
break;
printk(KERN_ERR "spurious interrupt in ubd_handler, "
"err = %d\n", -n);
return;
}
for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
struct io_thread_req *io_req = (*irq_req_buffer)[count];
if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
blk_queue_max_discard_sectors(io_req->req->q, 0);
blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
}
blk_mq_end_request(io_req->req, io_req->error);
kfree(io_req);
}
}
}
static irqreturn_t ubd_intr(int irq, void *dev)
{
ubd_handler();
return IRQ_HANDLED;
}
/* Only changed by ubd_init, which is an initcall. */
static int io_pid = -1;
static void kill_io_thread(void)
{
if(io_pid != -1)
os_kill_process(io_pid, 1);
}
__uml_exitcall(kill_io_thread);
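/*
 * Determine the size of the device: with an explicit COW setup this is the
 * size of the backing file; otherwise the single file is probed for a COW
 * header and either its own size or that of the backing file recorded in the
 * header is returned.
 */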
static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
{
char *file;
int fd;
int err;
__u32 version;
__u32 align;
char *backing_file;
time64_t mtime;
unsigned long long size;
int sector_size;
int bitmap_offset;
if (ubd_dev->file && ubd_dev->cow.file) {
file = ubd_dev->cow.file;
goto out;
}
fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0);
if (fd < 0)
return fd;
	err = read_cow_header(file_reader, &fd, &version, &backing_file,
			      &mtime, &size, &sector_size, &align, &bitmap_offset);
os_close_file(fd);
if(err == -EINVAL)
file = ubd_dev->file;
else
file = backing_file;
out:
return os_file_size(file, size_out);
}
static int read_cow_bitmap(int fd, void *buf, int offset, int len)
{
int err;
err = os_pread_file(fd, buf, len, offset);
if (err < 0)
return err;
return 0;
}
static int backing_file_mismatch(char *file, __u64 size, time64_t mtime)
{
time64_t modtime;
unsigned long long actual;
int err;
err = os_file_modtime(file, &modtime);
if (err < 0) {
printk(KERN_ERR "Failed to get modification time of backing "
"file \"%s\", err = %d\n", file, -err);
return err;
}
err = os_file_size(file, &actual);
if (err < 0) {
printk(KERN_ERR "Failed to get size of backing file \"%s\", "
"err = %d\n", file, -err);
return err;
}
if (actual != size) {
/*__u64 can be a long on AMD64 and with %lu GCC complains; so
* the typecast.*/
printk(KERN_ERR "Size mismatch (%llu vs %llu) of COW header "
"vs backing file\n", (unsigned long long) size, actual);
return -EINVAL;
}
if (modtime != mtime) {
printk(KERN_ERR "mtime mismatch (%lld vs %lld) of COW header vs "
"backing file\n", mtime, modtime);
return -EINVAL;
}
return 0;
}
static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow)
{
struct uml_stat buf1, buf2;
int err;
if (from_cmdline == NULL)
return 0;
if (!strcmp(from_cmdline, from_cow))
return 0;
err = os_stat_file(from_cmdline, &buf1);
if (err < 0) {
printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cmdline,
-err);
return 0;
}
err = os_stat_file(from_cow, &buf2);
if (err < 0) {
printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cow,
-err);
return 1;
}
if ((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino))
return 0;
printk(KERN_ERR "Backing file mismatch - \"%s\" requested, "
"\"%s\" specified in COW header of \"%s\"\n",
from_cmdline, from_cow, cow);
return 1;
}
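/*
 * Open (and, unless shared, lock) a ubd file.  If it carries a COW header,
 * the backing file recorded there is compared against the one given on the
 * command line: a different but matching (size/mtime) file causes the header
 * to be rewritten, otherwise the header's backing file is used and validated.
 * Returns the fd on success or a negative error code.
 */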
static int open_ubd_file(char *file, struct openflags *openflags, int shared,
char **backing_file_out, int *bitmap_offset_out,
unsigned long *bitmap_len_out, int *data_offset_out,
int *create_cow_out)
{
time64_t mtime;
unsigned long long size;
__u32 version, align;
char *backing_file;
int fd, err, sectorsize, asked_switch, mode = 0644;
fd = os_open_file(file, *openflags, mode);
if (fd < 0) {
if ((fd == -ENOENT) && (create_cow_out != NULL))
*create_cow_out = 1;
if (!openflags->w ||
((fd != -EROFS) && (fd != -EACCES)))
return fd;
openflags->w = 0;
fd = os_open_file(file, *openflags, mode);
if (fd < 0)
return fd;
}
if (shared)
printk(KERN_INFO "Not locking \"%s\" on the host\n", file);
else {
err = os_lock_file(fd, openflags->w);
if (err < 0) {
printk(KERN_ERR "Failed to lock '%s', err = %d\n",
file, -err);
goto out_close;
}
}
/* Successful return case! */
if (backing_file_out == NULL)
return fd;
err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime,
			       &size, &sectorsize, &align, bitmap_offset_out);
if (err && (*backing_file_out != NULL)) {
printk(KERN_ERR "Failed to read COW header from COW file "
"\"%s\", errno = %d\n", file, -err);
goto out_close;
}
if (err)
return fd;
asked_switch = path_requires_switch(*backing_file_out, backing_file,
file);
/* Allow switching only if no mismatch. */
if (asked_switch && !backing_file_mismatch(*backing_file_out, size,
mtime)) {
printk(KERN_ERR "Switching backing file to '%s'\n",
*backing_file_out);
err = write_cow_header(file, fd, *backing_file_out,
sectorsize, align, &size);
if (err) {
printk(KERN_ERR "Switch failed, errno = %d\n", -err);
goto out_close;
}
} else {
*backing_file_out = backing_file;
err = backing_file_mismatch(*backing_file_out, size, mtime);
if (err)
goto out_close;
}
cow_sizes(version, size, sectorsize, align, *bitmap_offset_out,
bitmap_len_out, data_offset_out);
return fd;
out_close:
os_close_file(fd);
return err;
}
static int create_cow_file(char *cow_file, char *backing_file,
struct openflags flags,
int sectorsize, int alignment, int *bitmap_offset_out,
unsigned long *bitmap_len_out, int *data_offset_out)
{
int err, fd;
flags.c = 1;
fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL);
if (fd < 0) {
err = fd;
printk(KERN_ERR "Open of COW file '%s' failed, errno = %d\n",
cow_file, -err);
goto out;
}
err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment,
bitmap_offset_out, bitmap_len_out,
data_offset_out);
if (!err)
return fd;
os_close_file(fd);
out:
return err;
}
static void ubd_close_dev(struct ubd *ubd_dev)
{
os_close_file(ubd_dev->fd);
if(ubd_dev->cow.file == NULL)
return;
os_close_file(ubd_dev->cow.fd);
vfree(ubd_dev->cow.bitmap);
ubd_dev->cow.bitmap = NULL;
}
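/*
 * Open the device's file, creating it as a COW file on top of the backing
 * file if necessary.  For a COW setup the bitmap is read into memory and the
 * backing file is opened read-only.  Unless trim is disabled, discard and
 * write-zeroes limits are set on the queue as well.
 */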
static int ubd_open_dev(struct ubd *ubd_dev)
{
struct openflags flags;
char **back_ptr;
int err, create_cow, *create_ptr;
int fd;
ubd_dev->openflags = ubd_dev->boot_openflags;
create_cow = 0;
create_ptr = (ubd_dev->cow.file != NULL) ? &create_cow : NULL;
back_ptr = ubd_dev->no_cow ? NULL : &ubd_dev->cow.file;
fd = open_ubd_file(ubd_dev->file, &ubd_dev->openflags, ubd_dev->shared,
back_ptr, &ubd_dev->cow.bitmap_offset,
&ubd_dev->cow.bitmap_len, &ubd_dev->cow.data_offset,
create_ptr);
if((fd == -ENOENT) && create_cow){
fd = create_cow_file(ubd_dev->file, ubd_dev->cow.file,
ubd_dev->openflags, SECTOR_SIZE, PAGE_SIZE,
&ubd_dev->cow.bitmap_offset,
&ubd_dev->cow.bitmap_len,
&ubd_dev->cow.data_offset);
if(fd >= 0){
printk(KERN_INFO "Creating \"%s\" as COW file for "
"\"%s\"\n", ubd_dev->file, ubd_dev->cow.file);
}
}
if(fd < 0){
printk("Failed to open '%s', errno = %d\n", ubd_dev->file,
-fd);
return fd;
}
ubd_dev->fd = fd;
if(ubd_dev->cow.file != NULL){
blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));
err = -ENOMEM;
ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
if(ubd_dev->cow.bitmap == NULL){
printk(KERN_ERR "Failed to vmalloc COW bitmap\n");
goto error;
}
flush_tlb_kernel_vm();
err = read_cow_bitmap(ubd_dev->fd, ubd_dev->cow.bitmap,
ubd_dev->cow.bitmap_offset,
ubd_dev->cow.bitmap_len);
if(err < 0)
goto error;
flags = ubd_dev->openflags;
flags.w = 0;
err = open_ubd_file(ubd_dev->cow.file, &flags, ubd_dev->shared, NULL,
NULL, NULL, NULL, NULL);
if(err < 0) goto error;
ubd_dev->cow.fd = err;
}
if (ubd_dev->no_trim == 0) {
ubd_dev->queue->limits.discard_granularity = SECTOR_SIZE;
blk_queue_max_discard_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
blk_queue_max_write_zeroes_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
}
blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue);
return 0;
error:
os_close_file(ubd_dev->fd);
return err;
}
static void ubd_device_release(struct device *dev)
{
struct ubd *ubd_dev = dev_get_drvdata(dev);
blk_mq_free_tag_set(&ubd_dev->tag_set);
*ubd_dev = ((struct ubd) DEFAULT_UBD);
}
static ssize_t serial_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
struct ubd *ubd_dev = disk->private_data;
if (!ubd_dev)
return 0;
return sprintf(buf, "%s", ubd_dev->serial);
}
static DEVICE_ATTR_RO(serial);
static struct attribute *ubd_attrs[] = {
&dev_attr_serial.attr,
NULL,
};
static umode_t ubd_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
return a->mode;
}
static const struct attribute_group ubd_attr_group = {
.attrs = ubd_attrs,
.is_visible = ubd_attrs_are_visible,
};
static const struct attribute_group *ubd_attr_groups[] = {
&ubd_attr_group,
NULL,
};
static int ubd_disk_register(int major, u64 size, int unit,
struct gendisk *disk)
{
disk->major = major;
disk->first_minor = unit << UBD_SHIFT;
disk->minors = 1 << UBD_SHIFT;
disk->fops = &ubd_blops;
set_capacity(disk, size / 512);
sprintf(disk->disk_name, "ubd%c", 'a' + unit);
ubd_devs[unit].pdev.id = unit;
ubd_devs[unit].pdev.name = DRIVER_NAME;
ubd_devs[unit].pdev.dev.release = ubd_device_release;
dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]);
platform_device_register(&ubd_devs[unit].pdev);
disk->private_data = &ubd_devs[unit];
disk->queue = ubd_devs[unit].queue;
return device_add_disk(&ubd_devs[unit].pdev.dev, disk, ubd_attr_groups);
}
#define ROUND_BLOCK(n) (((n) + (SECTOR_SIZE - 1)) & (-SECTOR_SIZE))
static const struct blk_mq_ops ubd_mq_ops = {
.queue_rq = ubd_queue_rq,
};
static int ubd_add(int n, char **error_out)
{
struct ubd *ubd_dev = &ubd_devs[n];
struct gendisk *disk;
int err = 0;
if(ubd_dev->file == NULL)
goto out;
err = ubd_file_size(ubd_dev, &ubd_dev->size);
if(err < 0){
*error_out = "Couldn't determine size of device's file";
goto out;
}
ubd_dev->size = ROUND_BLOCK(ubd_dev->size);
ubd_dev->tag_set.ops = &ubd_mq_ops;
ubd_dev->tag_set.queue_depth = 64;
ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ubd_dev->tag_set.driver_data = ubd_dev;
ubd_dev->tag_set.nr_hw_queues = 1;
err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
if (err)
goto out;
disk = blk_mq_alloc_disk(&ubd_dev->tag_set, ubd_dev);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_cleanup_tags;
}
ubd_dev->queue = disk->queue;
blk_queue_write_cache(ubd_dev->queue, true, false);
blk_queue_max_segments(ubd_dev->queue, MAX_SG);
blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1);
err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, disk);
if (err)
goto out_cleanup_disk;
ubd_gendisk[n] = disk;
return 0;
out_cleanup_disk:
put_disk(disk);
out_cleanup_tags:
blk_mq_free_tag_set(&ubd_dev->tag_set);
out:
return err;
}
static int ubd_config(char *str, char **error_out)
{
int n, ret;
/* This string is possibly broken up and stored, so it's only
* freed if ubd_setup_common fails, or if only general options
* were set.
*/
str = kstrdup(str, GFP_KERNEL);
if (str == NULL) {
*error_out = "Failed to allocate memory";
return -ENOMEM;
}
ret = ubd_setup_common(str, &n, error_out);
if (ret)
goto err_free;
if (n == -1) {
ret = 0;
goto err_free;
}
mutex_lock(&ubd_lock);
ret = ubd_add(n, error_out);
if (ret)
ubd_devs[n].file = NULL;
mutex_unlock(&ubd_lock);
out:
return ret;
err_free:
kfree(str);
goto out;
}
static int ubd_get_config(char *name, char *str, int size, char **error_out)
{
struct ubd *ubd_dev;
int n, len = 0;
n = parse_unit(&name);
if((n >= MAX_DEV) || (n < 0)){
*error_out = "ubd_get_config : device number out of range";
return -1;
}
ubd_dev = &ubd_devs[n];
mutex_lock(&ubd_lock);
if(ubd_dev->file == NULL){
CONFIG_CHUNK(str, size, len, "", 1);
goto out;
}
CONFIG_CHUNK(str, size, len, ubd_dev->file, 0);
if(ubd_dev->cow.file != NULL){
CONFIG_CHUNK(str, size, len, ",", 0);
CONFIG_CHUNK(str, size, len, ubd_dev->cow.file, 1);
}
else CONFIG_CHUNK(str, size, len, "", 1);
out:
mutex_unlock(&ubd_lock);
return len;
}
static int ubd_id(char **str, int *start_out, int *end_out)
{
int n;
n = parse_unit(str);
*start_out = 0;
*end_out = MAX_DEV - 1;
return n;
}
static int ubd_remove(int n, char **error_out)
{
struct gendisk *disk = ubd_gendisk[n];
struct ubd *ubd_dev;
int err = -ENODEV;
mutex_lock(&ubd_lock);
ubd_dev = &ubd_devs[n];
if(ubd_dev->file == NULL)
goto out;
	/* you cannot remove an open disk */
err = -EBUSY;
if(ubd_dev->count > 0)
goto out;
ubd_gendisk[n] = NULL;
if(disk != NULL){
del_gendisk(disk);
put_disk(disk);
}
err = 0;
platform_device_unregister(&ubd_dev->pdev);
out:
mutex_unlock(&ubd_lock);
return err;
}
/* All these are called by mconsole in process context and without
* ubd-specific locks. The structure itself is const except for .list.
*/
static struct mc_device ubd_mc = {
.list = LIST_HEAD_INIT(ubd_mc.list),
.name = "ubd",
.config = ubd_config,
.get_config = ubd_get_config,
.id = ubd_id,
.remove = ubd_remove,
};
static int __init ubd_mc_init(void)
{
mconsole_register_dev(&ubd_mc);
return 0;
}
__initcall(ubd_mc_init);
static int __init ubd0_init(void)
{
struct ubd *ubd_dev = &ubd_devs[0];
mutex_lock(&ubd_lock);
if(ubd_dev->file == NULL)
ubd_dev->file = "root_fs";
mutex_unlock(&ubd_lock);
return 0;
}
__initcall(ubd0_init);
/* Used in ubd_init, which is an initcall */
static struct platform_driver ubd_driver = {
.driver = {
.name = DRIVER_NAME,
},
};
static int __init ubd_init(void)
{
char *error;
int i, err;
if (register_blkdev(UBD_MAJOR, "ubd"))
return -1;
irq_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
sizeof(struct io_thread_req *),
GFP_KERNEL
);
irq_remainder = 0;
if (irq_req_buffer == NULL) {
printk(KERN_ERR "Failed to initialize ubd buffering\n");
return -1;
}
io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
sizeof(struct io_thread_req *),
GFP_KERNEL
);
io_remainder = 0;
if (io_req_buffer == NULL) {
printk(KERN_ERR "Failed to initialize ubd buffering\n");
return -1;
}
platform_driver_register(&ubd_driver);
mutex_lock(&ubd_lock);
for (i = 0; i < MAX_DEV; i++){
err = ubd_add(i, &error);
if(err)
printk(KERN_ERR "Failed to initialize ubd device %d :"
"%s\n", i, error);
}
mutex_unlock(&ubd_lock);
return 0;
}
late_initcall(ubd_init);
static int __init ubd_driver_init(void){
unsigned long stack;
int err;
/* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/
if(global_openflags.s){
printk(KERN_INFO "ubd: Synchronous mode\n");
		/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
		 * enough, so we still use the io thread anyway. */
}
stack = alloc_stack(0, 0);
io_pid = start_io_thread(stack + PAGE_SIZE, &thread_fd);
if(io_pid < 0){
printk(KERN_ERR
"ubd : Failed to start I/O thread (errno = %d) - "
"falling back to synchronous I/O\n", -io_pid);
io_pid = -1;
return 0;
}
err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr,
0, "ubd", ubd_devs);
if(err < 0)
printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err);
return 0;
}
device_initcall(ubd_driver_init);
static int ubd_open(struct gendisk *disk, blk_mode_t mode)
{
struct ubd *ubd_dev = disk->private_data;
int err = 0;
mutex_lock(&ubd_mutex);
if(ubd_dev->count == 0){
err = ubd_open_dev(ubd_dev);
if(err){
printk(KERN_ERR "%s: Can't open \"%s\": errno = %d\n",
disk->disk_name, ubd_dev->file, -err);
goto out;
}
}
ubd_dev->count++;
set_disk_ro(disk, !ubd_dev->openflags.w);
out:
mutex_unlock(&ubd_mutex);
return err;
}
static void ubd_release(struct gendisk *disk)
{
struct ubd *ubd_dev = disk->private_data;
mutex_lock(&ubd_mutex);
if(--ubd_dev->count == 0)
ubd_close_dev(ubd_dev);
mutex_unlock(&ubd_mutex);
}
static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
__u64 *cow_offset, unsigned long *bitmap,
__u64 bitmap_offset, unsigned long *bitmap_words,
__u64 bitmap_len)
{
__u64 sector = io_offset >> SECTOR_SHIFT;
int i, update_bitmap = 0;
for (i = 0; i < length >> SECTOR_SHIFT; i++) {
if(cow_mask != NULL)
ubd_set_bit(i, (unsigned char *) cow_mask);
if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
continue;
update_bitmap = 1;
ubd_set_bit(sector + i, (unsigned char *) bitmap);
}
if(!update_bitmap)
return;
*cow_offset = sector / (sizeof(unsigned long) * 8);
/* This takes care of the case where we're exactly at the end of the
* device, and *cow_offset + 1 is off the end. So, just back it up
* by one word. Thanks to Lynn Kerby for the fix and James McMechan
* for the original diagnosis.
*/
if (*cow_offset == (DIV_ROUND_UP(bitmap_len,
sizeof(unsigned long)) - 1))
(*cow_offset)--;
bitmap_words[0] = bitmap[*cow_offset];
bitmap_words[1] = bitmap[*cow_offset + 1];
*cow_offset *= sizeof(unsigned long);
*cow_offset += bitmap_offset;
}
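/*
 * Rough sketch of the arithmetic above, assuming 64-bit longs: a dirtied
 * sector index is first turned into a word index into the in-memory bitmap
 * (sector / 64), the two words covering that index are snapshotted into
 * bitmap_words[], and the word index is then scaled to a byte offset and
 * rebased onto bitmap_offset, so the I/O thread can pwrite() the snapshot
 * straight into the bitmap area of the COW file.
 */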
static void cowify_req(struct io_thread_req *req, struct io_desc *segment,
unsigned long offset, unsigned long *bitmap,
__u64 bitmap_offset, __u64 bitmap_len)
{
__u64 sector = offset >> SECTOR_SHIFT;
int i;
if (segment->length > (sizeof(segment->sector_mask) * 8) << SECTOR_SHIFT)
panic("Operation too long");
if (req_op(req->req) == REQ_OP_READ) {
for (i = 0; i < segment->length >> SECTOR_SHIFT; i++) {
if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
ubd_set_bit(i, (unsigned char *)
&segment->sector_mask);
}
} else {
cowify_bitmap(offset, segment->length, &segment->sector_mask,
&segment->cow_offset, bitmap, bitmap_offset,
segment->bitmap_words, bitmap_len);
}
}
static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
struct request *req)
{
struct bio_vec bvec;
struct req_iterator iter;
int i = 0;
unsigned long byte_offset = io_req->offset;
enum req_op op = req_op(req);
if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) {
io_req->io_desc[0].buffer = NULL;
io_req->io_desc[0].length = blk_rq_bytes(req);
} else {
rq_for_each_segment(bvec, req, iter) {
BUG_ON(i >= io_req->desc_cnt);
io_req->io_desc[i].buffer = bvec_virt(&bvec);
io_req->io_desc[i].length = bvec.bv_len;
i++;
}
}
if (dev->cow.file) {
for (i = 0; i < io_req->desc_cnt; i++) {
cowify_req(io_req, &io_req->io_desc[i], byte_offset,
dev->cow.bitmap, dev->cow.bitmap_offset,
dev->cow.bitmap_len);
byte_offset += io_req->io_desc[i].length;
}
}
}
static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req,
int desc_cnt)
{
struct io_thread_req *io_req;
int i;
io_req = kmalloc(sizeof(*io_req) +
(desc_cnt * sizeof(struct io_desc)),
GFP_ATOMIC);
if (!io_req)
return NULL;
io_req->req = req;
if (dev->cow.file)
io_req->fds[0] = dev->cow.fd;
else
io_req->fds[0] = dev->fd;
io_req->error = 0;
io_req->sectorsize = SECTOR_SIZE;
io_req->fds[1] = dev->fd;
io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
io_req->offsets[0] = 0;
io_req->offsets[1] = dev->cow.data_offset;
for (i = 0 ; i < desc_cnt; i++) {
io_req->io_desc[i].sector_mask = 0;
io_req->io_desc[i].cow_offset = -1;
}
return io_req;
}
static int ubd_submit_request(struct ubd *dev, struct request *req)
{
int segs = 0;
struct io_thread_req *io_req;
int ret;
enum req_op op = req_op(req);
if (op == REQ_OP_FLUSH)
segs = 0;
else if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD)
segs = 1;
else
segs = blk_rq_nr_phys_segments(req);
io_req = ubd_alloc_req(dev, req, segs);
if (!io_req)
return -ENOMEM;
io_req->desc_cnt = segs;
if (segs)
ubd_map_req(dev, io_req, req);
ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
if (ret != sizeof(io_req)) {
if (ret != -EAGAIN)
pr_err("write to io thread failed: %d\n", -ret);
kfree(io_req);
}
return ret;
}
static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct ubd *ubd_dev = hctx->queue->queuedata;
struct request *req = bd->rq;
int ret = 0, res = BLK_STS_OK;
blk_mq_start_request(req);
spin_lock_irq(&ubd_dev->lock);
switch (req_op(req)) {
case REQ_OP_FLUSH:
case REQ_OP_READ:
case REQ_OP_WRITE:
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
ret = ubd_submit_request(ubd_dev, req);
break;
default:
WARN_ON_ONCE(1);
res = BLK_STS_NOTSUPP;
}
spin_unlock_irq(&ubd_dev->lock);
if (ret < 0) {
if (ret == -ENOMEM)
res = BLK_STS_RESOURCE;
else
res = BLK_STS_DEV_RESOURCE;
}
return res;
}
static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct ubd *ubd_dev = bdev->bd_disk->private_data;
geo->heads = 128;
geo->sectors = 32;
geo->cylinders = ubd_dev->size / (128 * 32 * 512);
return 0;
}
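/*
 * The geometry reported above is synthetic: 128 heads x 32 sectors x 512
 * bytes is 2 MiB per cylinder, so for example a 1 GiB backing file shows up
 * as 512 cylinders. Tools that still care about CHS get consistent numbers;
 * the real capacity comes from set_capacity() at registration time.
 */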
static int ubd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct ubd *ubd_dev = bdev->bd_disk->private_data;
u16 ubd_id[ATA_ID_WORDS];
switch (cmd) {
struct cdrom_volctrl volume;
case HDIO_GET_IDENTITY:
memset(&ubd_id, 0, ATA_ID_WORDS * 2);
ubd_id[ATA_ID_CYLS] = ubd_dev->size / (128 * 32 * 512);
ubd_id[ATA_ID_HEADS] = 128;
ubd_id[ATA_ID_SECTORS] = 32;
if(copy_to_user((char __user *) arg, (char *) &ubd_id,
sizeof(ubd_id)))
return -EFAULT;
return 0;
case CDROMVOLREAD:
if(copy_from_user(&volume, (char __user *) arg, sizeof(volume)))
return -EFAULT;
volume.channel0 = 255;
volume.channel1 = 255;
volume.channel2 = 255;
volume.channel3 = 255;
if(copy_to_user((char __user *) arg, &volume, sizeof(volume)))
return -EFAULT;
return 0;
}
return -EINVAL;
}
static int map_error(int error_code)
{
switch (error_code) {
case 0:
return BLK_STS_OK;
case ENOSYS:
case EOPNOTSUPP:
return BLK_STS_NOTSUPP;
case ENOSPC:
return BLK_STS_NOSPC;
}
return BLK_STS_IOERR;
}
/*
* Everything from here onwards *IS NOT PART OF THE KERNEL*
*
* The following functions are part of UML hypervisor code.
* All functions from here onwards are executed as a helper
* thread and are not allowed to execute any kernel functions.
*
* Any communication must occur strictly via shared memory and IPC.
*
* Do not add printks, locks, kernel memory operations, etc - it
* will result in unpredictable behaviour and/or crashes.
*/
static int update_bitmap(struct io_thread_req *req, struct io_desc *segment)
{
int n;
if (segment->cow_offset == -1)
return map_error(0);
n = os_pwrite_file(req->fds[1], &segment->bitmap_words,
sizeof(segment->bitmap_words), segment->cow_offset);
if (n != sizeof(segment->bitmap_words))
return map_error(-n);
return map_error(0);
}
static void do_io(struct io_thread_req *req, struct io_desc *desc)
{
char *buf = NULL;
unsigned long len;
int n, nsectors, start, end, bit;
__u64 off;
/* FLUSH is really a special case, we cannot "case" it with others */
if (req_op(req->req) == REQ_OP_FLUSH) {
/* fds[0] is always either the rw image or our cow file */
req->error = map_error(-os_sync_file(req->fds[0]));
return;
}
nsectors = desc->length / req->sectorsize;
start = 0;
do {
bit = ubd_test_bit(start, (unsigned char *) &desc->sector_mask);
end = start;
while((end < nsectors) &&
(ubd_test_bit(end, (unsigned char *) &desc->sector_mask) == bit))
end++;
off = req->offset + req->offsets[bit] +
start * req->sectorsize;
len = (end - start) * req->sectorsize;
if (desc->buffer != NULL)
buf = &desc->buffer[start * req->sectorsize];
switch (req_op(req->req)) {
case REQ_OP_READ:
n = 0;
do {
buf = &buf[n];
len -= n;
n = os_pread_file(req->fds[bit], buf, len, off);
if (n < 0) {
req->error = map_error(-n);
return;
}
} while((n < len) && (n != 0));
if (n < len) memset(&buf[n], 0, len - n);
break;
case REQ_OP_WRITE:
n = os_pwrite_file(req->fds[bit], buf, len, off);
if(n != len){
req->error = map_error(-n);
return;
}
break;
case REQ_OP_DISCARD:
n = os_falloc_punch(req->fds[bit], off, len);
if (n) {
req->error = map_error(-n);
return;
}
break;
case REQ_OP_WRITE_ZEROES:
n = os_falloc_zeroes(req->fds[bit], off, len);
if (n) {
req->error = map_error(-n);
return;
}
break;
default:
WARN_ON_ONCE(1);
req->error = BLK_STS_NOTSUPP;
return;
}
start = end;
} while(start < nsectors);
req->offset += len;
req->error = update_bitmap(req, desc);
}
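/*
 * Sketch of the loop above: sector_mask records, per sector, which backing fd
 * the data lives in (the bit selects req->fds[bit] and req->offsets[bit]).
 * Consecutive sectors whose bit agrees are coalesced into one run, so a
 * request is serviced with at most one pread/pwrite per run rather than one
 * per sector.
 */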
/* Changed in start_io_thread, which is serialized by being called only
* from ubd_init, which is an initcall.
*/
int kernel_fd = -1;
/* Only changed by the io thread. XXX: currently unused. */
static int io_count;
int io_thread(void *arg)
{
int n, count, written, res;
os_fix_helper_signals();
while(1){
n = bulk_req_safe_read(
kernel_fd,
io_req_buffer,
&io_remainder,
&io_remainder_size,
UBD_REQ_BUFFER_SIZE
);
if (n <= 0) {
if (n == -EAGAIN)
ubd_read_poll(-1);
continue;
}
for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
struct io_thread_req *req = (*io_req_buffer)[count];
int i;
io_count++;
for (i = 0; !req->error && i < req->desc_cnt; i++)
do_io(req, &(req->io_desc[i]));
}
written = 0;
do {
res = os_write_file(kernel_fd,
((char *) io_req_buffer) + written,
n - written);
if (res >= 0) {
written += res;
}
if (written < n) {
ubd_write_poll(-1);
}
} while (written < n);
}
return 0;
}
| linux-master | arch/um/drivers/ubd_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <termios.h>
#include <unistd.h>
#include <netinet/in.h>
#include "chan_user.h"
#include <os.h>
#include "port.h"
#include <um_malloc.h>
struct port_chan {
int raw;
struct termios tt;
void *kernel_data;
char dev[sizeof("32768\0")];
};
static void *port_init(char *str, int device, const struct chan_opts *opts)
{
struct port_chan *data;
void *kern_data;
char *end;
int port;
if (*str != ':') {
printk(UM_KERN_ERR "port_init : channel type 'port' must "
"specify a port number\n");
return NULL;
}
str++;
port = strtoul(str, &end, 0);
if ((*end != '\0') || (end == str)) {
printk(UM_KERN_ERR "port_init : couldn't parse port '%s'\n",
str);
return NULL;
}
kern_data = port_data(port);
if (kern_data == NULL)
return NULL;
data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL);
if (data == NULL)
goto err;
*data = ((struct port_chan) { .raw = opts->raw,
.kernel_data = kern_data });
sprintf(data->dev, "%d", port);
return data;
err:
port_kern_free(kern_data);
return NULL;
}
static void port_free(void *d)
{
struct port_chan *data = d;
port_kern_free(data->kernel_data);
kfree(data);
}
static int port_open(int input, int output, int primary, void *d,
char **dev_out)
{
struct port_chan *data = d;
int fd, err;
fd = port_wait(data->kernel_data);
if ((fd >= 0) && data->raw) {
CATCH_EINTR(err = tcgetattr(fd, &data->tt));
if (err)
return err;
err = raw(fd);
if (err)
return err;
}
*dev_out = data->dev;
return fd;
}
static void port_close(int fd, void *d)
{
struct port_chan *data = d;
port_remove_dev(data->kernel_data);
os_close_file(fd);
}
const struct chan_ops port_ops = {
.type = "port",
.init = port_init,
.open = port_open,
.close = port_close,
.read = generic_read,
.write = generic_write,
.console_write = generic_console_write,
.window_size = generic_window_size,
.free = port_free,
.winch = 1,
};
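/*
 * Typical usage (an assumption about the command line, not spelled out in
 * this file): a console channel such as "con1=port:9000" ends up here;
 * port_init() parses the ":9000" part, and each connection accepted on TCP
 * port 9000 is handed to in.telnetd (or $UML_PORT_HELPER) by
 * port_connection() below.
 */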
int port_listen_fd(int port)
{
struct sockaddr_in addr;
int fd, err, arg;
fd = socket(PF_INET, SOCK_STREAM, 0);
if (fd == -1)
return -errno;
arg = 1;
if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &arg, sizeof(arg)) < 0) {
err = -errno;
goto out;
}
addr.sin_family = AF_INET;
addr.sin_port = htons(port);
addr.sin_addr.s_addr = htonl(INADDR_ANY);
if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
err = -errno;
goto out;
}
if (listen(fd, 1) < 0) {
err = -errno;
goto out;
}
err = os_set_fd_block(fd, 0);
if (err < 0)
goto out;
return fd;
out:
close(fd);
return err;
}
struct port_pre_exec_data {
int sock_fd;
int pipe_fd;
};
static void port_pre_exec(void *arg)
{
struct port_pre_exec_data *data = arg;
dup2(data->sock_fd, 0);
dup2(data->sock_fd, 1);
dup2(data->sock_fd, 2);
close(data->sock_fd);
dup2(data->pipe_fd, 3);
shutdown(3, SHUT_RD);
close(data->pipe_fd);
}
int port_connection(int fd, int *socket, int *pid_out)
{
int new, err;
char *env;
char *argv[] = { "in.telnetd", "-L",
OS_LIB_PATH "/uml/port-helper", NULL };
struct port_pre_exec_data data;
if ((env = getenv("UML_PORT_HELPER")))
argv[2] = env;
new = accept(fd, NULL, 0);
if (new < 0)
return -errno;
err = os_access(argv[2], X_OK);
if (err < 0) {
printk(UM_KERN_ERR "port_connection : error accessing port-helper "
"executable at %s: %s\n", argv[2], strerror(-err));
if (env == NULL)
printk(UM_KERN_ERR "Set UML_PORT_HELPER environment "
"variable to path to uml-utilities port-helper "
"binary\n");
goto out_close;
}
err = os_pipe(socket, 0, 0);
if (err < 0)
goto out_close;
data = ((struct port_pre_exec_data)
{ .sock_fd = new,
.pipe_fd = socket[1] });
err = run_helper(port_pre_exec, &data, argv);
if (err < 0)
goto out_shutdown;
*pid_out = err;
return new;
out_shutdown:
shutdown(socket[0], SHUT_RDWR);
close(socket[0]);
shutdown(socket[1], SHUT_RDWR);
close(socket[1]);
out_close:
close(new);
return err;
}
| linux-master | arch/um/drivers/port_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Thomas Meyer ([email protected])
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include <init.h>
#include <longjmp.h>
#include <os.h>
#define ARBITRARY_ADDR -1
#define FAILURE_PID -1
#define STAT_PATH_LEN sizeof("/proc/#######/stat\0")
#define COMM_SCANF "%*[^)])"
unsigned long os_process_pc(int pid)
{
char proc_stat[STAT_PATH_LEN], buf[256];
unsigned long pc = ARBITRARY_ADDR;
int fd, err;
sprintf(proc_stat, "/proc/%d/stat", pid);
fd = open(proc_stat, O_RDONLY, 0);
if (fd < 0) {
printk(UM_KERN_ERR "os_process_pc - couldn't open '%s', "
"errno = %d\n", proc_stat, errno);
goto out;
}
CATCH_EINTR(err = read(fd, buf, sizeof(buf)));
if (err < 0) {
printk(UM_KERN_ERR "os_process_pc - couldn't read '%s', "
"err = %d\n", proc_stat, errno);
goto out_close;
}
os_close_file(fd);
pc = ARBITRARY_ADDR;
if (sscanf(buf, "%*d " COMM_SCANF " %*c %*d %*d %*d %*d %*d %*d %*d "
"%*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d "
"%*d %*d %*d %*d %*d %lu", &pc) != 1)
printk(UM_KERN_ERR "os_process_pc - couldn't find pc in '%s'\n",
buf);
out_close:
close(fd);
out:
return pc;
}
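/*
 * The sscanf() above skips the first 29 fields of /proc/<pid>/stat (pid,
 * comm, state and 26 numeric fields) so the %lu lands on field 30, kstkeip
 * (the task's saved instruction pointer), which is what this helper reports
 * as the pc. That layout is assumed from the proc(5) format rather than
 * stated anywhere in this file.
 */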
int os_process_parent(int pid)
{
char stat[STAT_PATH_LEN];
char data[256];
int parent = FAILURE_PID, n, fd;
if (pid == -1)
return parent;
snprintf(stat, sizeof(stat), "/proc/%d/stat", pid);
fd = open(stat, O_RDONLY, 0);
if (fd < 0) {
printk(UM_KERN_ERR "Couldn't open '%s', errno = %d\n", stat,
errno);
return parent;
}
CATCH_EINTR(n = read(fd, data, sizeof(data)));
close(fd);
if (n < 0) {
printk(UM_KERN_ERR "Couldn't read '%s', errno = %d\n", stat,
errno);
return parent;
}
parent = FAILURE_PID;
n = sscanf(data, "%*d " COMM_SCANF " %*c %d", &parent);
if (n != 1)
printk(UM_KERN_ERR "Failed to scan '%s'\n", data);
return parent;
}
void os_alarm_process(int pid)
{
kill(pid, SIGALRM);
}
void os_stop_process(int pid)
{
kill(pid, SIGSTOP);
}
void os_kill_process(int pid, int reap_child)
{
kill(pid, SIGKILL);
if (reap_child)
CATCH_EINTR(waitpid(pid, NULL, __WALL));
}
/* Kill off a ptraced child by all means available. kill it normally first,
* then PTRACE_KILL it, then PTRACE_CONT it in case it's in a run state from
* which it can't exit directly.
*/
void os_kill_ptraced_process(int pid, int reap_child)
{
kill(pid, SIGKILL);
ptrace(PTRACE_KILL, pid);
ptrace(PTRACE_CONT, pid);
if (reap_child)
CATCH_EINTR(waitpid(pid, NULL, __WALL));
}
/* Don't use the glibc version, which caches the result in TLS. It misses some
* syscalls, and also breaks with clone(), which does not unshare the TLS.
*/
int os_getpid(void)
{
return syscall(__NR_getpid);
}
int os_getpgrp(void)
{
return getpgrp();
}
int os_map_memory(void *virt, int fd, unsigned long long off, unsigned long len,
int r, int w, int x)
{
void *loc;
int prot;
prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
(x ? PROT_EXEC : 0);
loc = mmap64((void *) virt, len, prot, MAP_SHARED | MAP_FIXED,
fd, off);
if (loc == MAP_FAILED)
return -errno;
return 0;
}
int os_protect_memory(void *addr, unsigned long len, int r, int w, int x)
{
int prot = ((r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
(x ? PROT_EXEC : 0));
if (mprotect(addr, len, prot) < 0)
return -errno;
return 0;
}
int os_unmap_memory(void *addr, int len)
{
int err;
err = munmap(addr, len);
if (err < 0)
return -errno;
return 0;
}
#ifndef MADV_REMOVE
#define MADV_REMOVE KERNEL_MADV_REMOVE
#endif
int os_drop_memory(void *addr, int length)
{
int err;
err = madvise(addr, length, MADV_REMOVE);
if (err < 0)
err = -errno;
return err;
}
int __init can_drop_memory(void)
{
void *addr;
int fd, ok = 0;
printk(UM_KERN_INFO "Checking host MADV_REMOVE support...");
fd = create_mem_file(UM_KERN_PAGE_SIZE);
if (fd < 0) {
printk(UM_KERN_ERR "Creating test memory file failed, "
"err = %d\n", -fd);
goto out;
}
addr = mmap64(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0);
if (addr == MAP_FAILED) {
printk(UM_KERN_ERR "Mapping test memory file failed, "
"err = %d\n", -errno);
goto out_close;
}
if (madvise(addr, UM_KERN_PAGE_SIZE, MADV_REMOVE) != 0) {
printk(UM_KERN_ERR "MADV_REMOVE failed, err = %d\n", -errno);
goto out_unmap;
}
printk(UM_KERN_CONT "OK\n");
ok = 1;
out_unmap:
munmap(addr, UM_KERN_PAGE_SIZE);
out_close:
close(fd);
out:
return ok;
}
static int os_page_mincore(void *addr)
{
char vec[2];
int ret;
ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
if (ret < 0) {
if (errno == ENOMEM || errno == EINVAL)
return 0;
else
return -errno;
}
return vec[0] & 1;
}
int os_mincore(void *addr, unsigned long len)
{
char *vec;
int ret, i;
if (len <= UM_KERN_PAGE_SIZE)
return os_page_mincore(addr);
vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
if (!vec)
return -ENOMEM;
	/* Query the whole range; vec holds one byte per page of it. */
	ret = mincore(addr, len, vec);
if (ret < 0) {
if (errno == ENOMEM || errno == EINVAL)
ret = 0;
else
ret = -errno;
goto out;
}
for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
if (!(vec[i] & 1)) {
ret = 0;
goto out;
}
}
ret = 1;
out:
free(vec);
return ret;
}
void init_new_thread_signals(void)
{
set_handler(SIGSEGV);
set_handler(SIGTRAP);
set_handler(SIGFPE);
set_handler(SIGILL);
set_handler(SIGBUS);
signal(SIGHUP, SIG_IGN);
set_handler(SIGIO);
signal(SIGWINCH, SIG_IGN);
}
| linux-master | arch/um/os-Linux/process.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <asm/unistd.h>
#include <init.h>
#include <os.h>
#include <mem_user.h>
#include <ptrace_user.h>
#include <registers.h>
#include <skas.h>
static void ptrace_child(void)
{
int ret;
	/* Calling os_getpid because some libcs cache getpid incorrectly */
int pid = os_getpid(), ppid = getppid();
int sc_result;
if (change_sig(SIGWINCH, 0) < 0 ||
ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) {
perror("ptrace");
kill(pid, SIGKILL);
}
kill(pid, SIGSTOP);
/*
* This syscall will be intercepted by the parent. Don't call more than
* once, please.
*/
sc_result = os_getpid();
if (sc_result == pid)
/* Nothing modified by the parent, we are running normally. */
ret = 1;
else if (sc_result == ppid)
/*
* Expected in check_ptrace and check_sysemu when they succeed
* in modifying the stack frame
*/
ret = 0;
else
/* Serious trouble! This could be caused by a bug in host 2.6
* SKAS3/2.6 patch before release -V6, together with a bug in
* the UML code itself.
*/
ret = 2;
exit(ret);
}
static void fatal_perror(const char *str)
{
perror(str);
exit(1);
}
static void fatal(char *fmt, ...)
{
va_list list;
va_start(list, fmt);
vfprintf(stderr, fmt, list);
va_end(list);
exit(1);
}
static void non_fatal(char *fmt, ...)
{
va_list list;
va_start(list, fmt);
vfprintf(stderr, fmt, list);
va_end(list);
}
static int start_ptraced_child(void)
{
int pid, n, status;
fflush(stdout);
pid = fork();
if (pid == 0)
ptrace_child();
else if (pid < 0)
fatal_perror("start_ptraced_child : fork failed");
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
if (n < 0)
fatal_perror("check_ptrace : waitpid failed");
if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
fatal("check_ptrace : expected SIGSTOP, got status = %d",
status);
return pid;
}
/* When testing for SYSEMU support, a broken SYSEMU implementation must only
 * disable sysemu, not panic the boot - and only the SYSEMU checks get that
 * leniency. That is why stop_ptraced_child() is called with mustexit set for
 * the plain host ptrace checks, which must work in any case, and cleared for
 * the SYSEMU checks.
 */
static int stop_ptraced_child(int pid, int exitcode, int mustexit)
{
int status, n, ret = 0;
if (ptrace(PTRACE_CONT, pid, 0, 0) < 0) {
perror("stop_ptraced_child : ptrace failed");
return -1;
}
CATCH_EINTR(n = waitpid(pid, &status, 0));
if (!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
int exit_with = WEXITSTATUS(status);
if (exit_with == 2)
non_fatal("check_ptrace : child exited with status 2. "
"\nDisabling SYSEMU support.\n");
non_fatal("check_ptrace : child exited with exitcode %d, while "
"expecting %d; status 0x%x\n", exit_with,
exitcode, status);
if (mustexit)
exit(1);
ret = -1;
}
return ret;
}
/* Changed only during early boot */
static int force_sysemu_disabled = 0;
static int __init nosysemu_cmd_param(char *str, int* add)
{
force_sysemu_disabled = 1;
return 0;
}
__uml_setup("nosysemu", nosysemu_cmd_param,
"nosysemu\n"
" Turns off syscall emulation patch for ptrace (SYSEMU).\n"
" SYSEMU is a performance-patch introduced by Laurent Vivier. It changes\n"
" behaviour of ptrace() and helps reduce host context switch rates.\n"
" To make it work, you need a kernel patch for your host, too.\n"
" See http://perso.wanadoo.fr/laurent.vivier/UML/ for further \n"
" information.\n\n");
static void __init check_sysemu(void)
{
unsigned long regs[MAX_REG_NR];
int pid, n, status, count=0;
os_info("Checking syscall emulation patch for ptrace...");
sysemu_supported = 0;
pid = start_ptraced_child();
if (ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
goto fail;
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
if (n < 0)
fatal_perror("check_sysemu : wait failed");
if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP))
fatal("check_sysemu : expected SIGTRAP, got status = %d\n",
status);
if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
fatal_perror("check_sysemu : PTRACE_GETREGS failed");
if (PT_SYSCALL_NR(regs) != __NR_getpid) {
non_fatal("check_sysemu got system call number %d, "
"expected %d...", PT_SYSCALL_NR(regs), __NR_getpid);
goto fail;
}
n = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_RET_OFFSET, os_getpid());
if (n < 0) {
non_fatal("check_sysemu : failed to modify system call "
"return");
goto fail;
}
if (stop_ptraced_child(pid, 0, 0) < 0)
goto fail_stopped;
sysemu_supported = 1;
os_info("OK\n");
set_using_sysemu(!force_sysemu_disabled);
os_info("Checking advanced syscall emulation patch for ptrace...");
pid = start_ptraced_child();
if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0,
(void *) PTRACE_O_TRACESYSGOOD) < 0))
fatal_perror("check_sysemu: PTRACE_OLDSETOPTIONS failed");
while (1) {
count++;
if (ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
goto fail;
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
if (n < 0)
fatal_perror("check_sysemu: wait failed");
if (WIFSTOPPED(status) &&
(WSTOPSIG(status) == (SIGTRAP|0x80))) {
if (!count) {
non_fatal("check_sysemu: SYSEMU_SINGLESTEP "
"doesn't singlestep");
goto fail;
}
n = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_RET_OFFSET,
os_getpid());
if (n < 0)
fatal_perror("check_sysemu : failed to modify "
"system call return");
break;
}
else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP))
count++;
else {
non_fatal("check_sysemu: expected SIGTRAP or "
"(SIGTRAP | 0x80), got status = %d\n",
status);
goto fail;
}
}
if (stop_ptraced_child(pid, 0, 0) < 0)
goto fail_stopped;
sysemu_supported = 2;
os_info("OK\n");
if (!force_sysemu_disabled)
set_using_sysemu(sysemu_supported);
return;
fail:
stop_ptraced_child(pid, 1, 0);
fail_stopped:
non_fatal("missing\n");
}
static void __init check_ptrace(void)
{
int pid, syscall, n, status;
os_info("Checking that ptrace can change system call numbers...");
pid = start_ptraced_child();
if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0,
(void *) PTRACE_O_TRACESYSGOOD) < 0))
fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed");
while (1) {
if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0)
fatal_perror("check_ptrace : ptrace failed");
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
if (n < 0)
fatal_perror("check_ptrace : wait failed");
if (!WIFSTOPPED(status) ||
(WSTOPSIG(status) != (SIGTRAP | 0x80)))
fatal("check_ptrace : expected (SIGTRAP|0x80), "
"got status = %d", status);
syscall = ptrace(PTRACE_PEEKUSER, pid, PT_SYSCALL_NR_OFFSET,
0);
if (syscall == __NR_getpid) {
n = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_NR_OFFSET,
__NR_getppid);
if (n < 0)
fatal_perror("check_ptrace : failed to modify "
"system call");
break;
}
}
stop_ptraced_child(pid, 0, 1);
os_info("OK\n");
check_sysemu();
}
extern void check_tmpexec(void);
static void __init check_coredump_limit(void)
{
struct rlimit lim;
int err = getrlimit(RLIMIT_CORE, &lim);
if (err) {
perror("Getting core dump limit");
return;
}
os_info("Core dump limits :\n\tsoft - ");
if (lim.rlim_cur == RLIM_INFINITY)
os_info("NONE\n");
else
os_info("%llu\n", (unsigned long long)lim.rlim_cur);
os_info("\thard - ");
if (lim.rlim_max == RLIM_INFINITY)
os_info("NONE\n");
else
os_info("%llu\n", (unsigned long long)lim.rlim_max);
}
void __init get_host_cpu_features(
void (*flags_helper_func)(char *line),
void (*cache_helper_func)(char *line))
{
FILE *cpuinfo;
char *line = NULL;
size_t len = 0;
int done_parsing = 0;
cpuinfo = fopen("/proc/cpuinfo", "r");
if (cpuinfo == NULL) {
os_info("Failed to get host CPU features\n");
} else {
while ((getline(&line, &len, cpuinfo)) != -1) {
if (strstr(line, "flags")) {
flags_helper_func(line);
done_parsing++;
}
if (strstr(line, "cache_alignment")) {
cache_helper_func(line);
done_parsing++;
}
free(line);
line = NULL;
if (done_parsing > 1)
break;
}
fclose(cpuinfo);
}
}
void __init os_early_checks(void)
{
int pid;
/* Print out the core dump limits early */
check_coredump_limit();
check_ptrace();
/* Need to check this early because mmapping happens before the
* kernel is running.
*/
check_tmpexec();
pid = start_ptraced_child();
if (init_pid_registers(pid))
fatal("Failed to initialize default registers");
stop_ptraced_child(pid, 1, 1);
}
int __init parse_iomem(char *str, int *add)
{
struct iomem_region *new;
struct stat64 buf;
char *file, *driver;
int fd, size;
driver = str;
file = strchr(str,',');
if (file == NULL) {
os_warn("parse_iomem : failed to parse iomem\n");
goto out;
}
*file = '\0';
file++;
fd = open(file, O_RDWR, 0);
if (fd < 0) {
perror("parse_iomem - Couldn't open io file");
goto out;
}
if (fstat64(fd, &buf) < 0) {
perror("parse_iomem - cannot stat_fd file");
goto out_close;
}
new = malloc(sizeof(*new));
if (new == NULL) {
perror("Couldn't allocate iomem_region struct");
goto out_close;
}
size = (buf.st_size + UM_KERN_PAGE_SIZE) & ~(UM_KERN_PAGE_SIZE - 1);
*new = ((struct iomem_region) { .next = iomem_regions,
.driver = driver,
.fd = fd,
.size = size,
.phys = 0,
.virt = 0 });
iomem_regions = new;
iomem_size += new->size + UM_KERN_PAGE_SIZE;
return 0;
out_close:
close(fd);
out:
return 1;
}
| linux-master | arch/um/os-Linux/start_up.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <termios.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/utsname.h>
#include <sys/random.h>
#include <init.h>
#include <os.h>
void stack_protections(unsigned long address)
{
if (mprotect((void *) address, UM_THREAD_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
panic("protecting stack failed, errno = %d", errno);
}
int raw(int fd)
{
struct termios tt;
int err;
CATCH_EINTR(err = tcgetattr(fd, &tt));
if (err < 0)
return -errno;
cfmakeraw(&tt);
CATCH_EINTR(err = tcsetattr(fd, TCSADRAIN, &tt));
if (err < 0)
return -errno;
/*
* XXX tcsetattr could have applied only some changes
* (and cfmakeraw() is a set of changes)
*/
return 0;
}
void setup_machinename(char *machine_out)
{
struct utsname host;
uname(&host);
#ifdef UML_CONFIG_UML_X86
# ifndef UML_CONFIG_64BIT
if (!strcmp(host.machine, "x86_64")) {
strcpy(machine_out, "i686");
return;
}
# else
if (!strcmp(host.machine, "i686")) {
strcpy(machine_out, "x86_64");
return;
}
# endif
#endif
strcpy(machine_out, host.machine);
}
void setup_hostinfo(char *buf, int len)
{
struct utsname host;
uname(&host);
snprintf(buf, len, "%s %s %s %s %s", host.sysname, host.nodename,
host.release, host.version, host.machine);
}
/*
* We cannot use glibc's abort(). It makes use of tgkill() which
* has no effect within UML's kernel threads.
* After that glibc would execute an invalid instruction to kill
* the calling process and UML crashes with SIGSEGV.
*/
static inline void __attribute__ ((noreturn)) uml_abort(void)
{
sigset_t sig;
fflush(NULL);
if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT))
sigprocmask(SIG_UNBLOCK, &sig, 0);
for (;;)
if (kill(getpid(), SIGABRT) < 0)
exit(127);
}
ssize_t os_getrandom(void *buf, size_t len, unsigned int flags)
{
return getrandom(buf, len, flags);
}
/*
* UML helper threads must not handle SIGWINCH/INT/TERM
*/
void os_fix_helper_signals(void)
{
signal(SIGWINCH, SIG_IGN);
signal(SIGINT, SIG_DFL);
signal(SIGTERM, SIG_DFL);
}
void os_dump_core(void)
{
int pid;
signal(SIGSEGV, SIG_DFL);
/*
* We are about to SIGTERM this entire process group to ensure that
* nothing is around to run after the kernel exits. The
* kernel wants to abort, not die through SIGTERM, so we
* ignore it here.
*/
signal(SIGTERM, SIG_IGN);
kill(0, SIGTERM);
/*
* Most of the other processes associated with this UML are
	 * likely stopped (in the T state), so give them a SIGCONT so they see
	 * the
* SIGTERM.
*/
kill(0, SIGCONT);
/*
* Now, having sent signals to everyone but us, make sure they
* die by ptrace. Processes can survive what's been done to
* them so far - the mechanism I understand is receiving a
* SIGSEGV and segfaulting immediately upon return. There is
* always a SIGSEGV pending, and (I'm guessing) signals are
* processed in numeric order so the SIGTERM (signal 15 vs
* SIGSEGV being signal 11) is never handled.
*
* Run a waitpid loop until we get some kind of error.
* Hopefully, it's ECHILD, but there's not a lot we can do if
* it's something else. Tell os_kill_ptraced_process not to
* wait for the child to report its death because there's
* nothing reasonable to do if that fails.
*/
while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0)
os_kill_ptraced_process(pid, 0);
uml_abort();
}
void um_early_printk(const char *s, unsigned int n)
{
printf("%.*s", n, s);
}
static int quiet_info;
static int __init quiet_cmd_param(char *str, int *add)
{
quiet_info = 1;
return 0;
}
__uml_setup("quiet", quiet_cmd_param,
"quiet\n"
" Turns off information messages during boot.\n\n");
void os_info(const char *fmt, ...)
{
va_list list;
if (quiet_info)
return;
va_start(list, fmt);
vfprintf(stderr, fmt, list);
va_end(list);
}
void os_warn(const char *fmt, ...)
{
va_list list;
va_start(list, fmt);
vfprintf(stderr, fmt, list);
va_end(list);
}
| linux-master | arch/um/os-Linux/util.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 - Cambridge Greys Ltd
* Copyright (C) 2011 - 2014 Cisco Systems Inc
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdlib.h>
#include <errno.h>
#include <sys/epoll.h>
#include <signal.h>
#include <string.h>
#include <irq_user.h>
#include <os.h>
#include <um_malloc.h>
/* Epoll support */
static int epollfd = -1;
#define MAX_EPOLL_EVENTS 64
static struct epoll_event epoll_events[MAX_EPOLL_EVENTS];
/* Helper to return an Epoll data pointer from an epoll event structure.
* We need to keep this one on the userspace side to keep includes separate
*/
void *os_epoll_get_data_pointer(int index)
{
return epoll_events[index].data.ptr;
}
/* Helper to compare events versus the events in the epoll structure.
* Same as above - needs to be on the userspace side
*/
int os_epoll_triggered(int index, int events)
{
return epoll_events[index].events & events;
}
/* Helper to set the event mask.
* The event mask is opaque to the kernel side, because it does not have
* access to the right includes/defines for EPOLL constants.
*/
int os_event_mask(enum um_irq_type irq_type)
{
if (irq_type == IRQ_READ)
return EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLRDHUP;
if (irq_type == IRQ_WRITE)
return EPOLLOUT;
return 0;
}
/*
* Initial Epoll Setup
*/
int os_setup_epoll(void)
{
epollfd = epoll_create(MAX_EPOLL_EVENTS);
return epollfd;
}
/*
* Helper to run the actual epoll_wait
*/
int os_waiting_for_events_epoll(void)
{
int n, err;
n = epoll_wait(epollfd,
(struct epoll_event *) &epoll_events, MAX_EPOLL_EVENTS, 0);
if (n < 0) {
err = -errno;
if (errno != EINTR)
printk(
UM_KERN_ERR "os_waiting_for_events:"
" epoll returned %d, error = %s\n", n,
strerror(errno)
);
return err;
}
return n;
}
/*
* Helper to add a fd to epoll
*/
int os_add_epoll_fd(int events, int fd, void *data)
{
struct epoll_event event;
int result;
event.data.ptr = data;
event.events = events | EPOLLET;
result = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &event);
if ((result) && (errno == EEXIST))
result = os_mod_epoll_fd(events, fd, data);
if (result)
printk("epollctl add err fd %d, %s\n", fd, strerror(errno));
return result;
}
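/*
 * Note that fds are registered edge-triggered (EPOLLET above), so an event is
 * reported once per state change and the consumer is expected to keep reading
 * or writing until EAGAIN before it can rely on another wakeup.
 * os_mod_epoll_fd() below passes the caller's mask through unchanged.
 */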
/*
* Helper to mod the fd event mask and/or data backreference
*/
int os_mod_epoll_fd(int events, int fd, void *data)
{
struct epoll_event event;
int result;
event.data.ptr = data;
event.events = events;
result = epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &event);
if (result)
printk(UM_KERN_ERR
"epollctl mod err fd %d, %s\n", fd, strerror(errno));
return result;
}
/*
* Helper to delete the epoll fd
*/
int os_del_epoll_fd(int fd)
{
struct epoll_event event;
/* This is quiet as we use this as IO ON/OFF - so it is often
* invoked on a non-existent fd
*/
return epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &event);
}
void os_set_ioignore(void)
{
signal(SIGIO, SIG_IGN);
}
void os_close_epoll_fd(void)
{
/* Needed so we do not leak an fd when rebooting */
os_close_file(epollfd);
}
| linux-master | arch/um/os-Linux/irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/vfs.h>
#include <linux/magic.h>
#include <init.h>
#include <os.h>
/*
* kasan_map_memory - maps memory from @start with a size of @len.
* The allocated memory is filled with zeroes upon success.
* @start: the start address of the memory to be mapped
* @len: the length of the memory to be mapped
*
* This function is used to map shadow memory for KASAN in uml
*/
void kasan_map_memory(void *start, size_t len)
{
if (mmap(start,
len,
PROT_READ|PROT_WRITE,
MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE,
-1,
0) == MAP_FAILED) {
os_info("Couldn't allocate shadow memory: %s\n.",
strerror(errno));
exit(1);
}
}
/* Set by make_tempfile() during early boot. */
static char *tempdir = NULL;
/* Check if dir is on tmpfs. Return 0 if yes, -1 if no or error. */
static int __init check_tmpfs(const char *dir)
{
struct statfs st;
os_info("Checking if %s is on tmpfs...", dir);
if (statfs(dir, &st) < 0) {
os_info("%s\n", strerror(errno));
} else if (st.f_type != TMPFS_MAGIC) {
os_info("no\n");
} else {
os_info("OK\n");
return 0;
}
return -1;
}
/*
* Choose the tempdir to use. We want something on tmpfs so that our memory is
* not subject to the host's vm.dirty_ratio. If a tempdir is specified in the
* environment, we use that even if it's not on tmpfs, but we warn the user.
* Otherwise, we try common tmpfs locations, and if no tmpfs directory is found
* then we fall back to /tmp.
*/
static char * __init choose_tempdir(void)
{
static const char * const vars[] = {
"TMPDIR",
"TMP",
"TEMP",
NULL
};
static const char fallback_dir[] = "/tmp";
static const char * const tmpfs_dirs[] = {
"/dev/shm",
fallback_dir,
NULL
};
int i;
const char *dir;
os_info("Checking environment variables for a tempdir...");
for (i = 0; vars[i]; i++) {
dir = getenv(vars[i]);
if ((dir != NULL) && (*dir != '\0')) {
os_info("%s\n", dir);
if (check_tmpfs(dir) >= 0)
goto done;
else
goto warn;
}
}
os_info("none found\n");
for (i = 0; tmpfs_dirs[i]; i++) {
dir = tmpfs_dirs[i];
if (check_tmpfs(dir) >= 0)
goto done;
}
dir = fallback_dir;
warn:
os_warn("Warning: tempdir %s is not on tmpfs\n", dir);
done:
/* Make a copy since getenv results may not remain valid forever. */
return strdup(dir);
}
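/*
 * Resolution order as implemented above: $TMPDIR, then $TMP, then $TEMP (with
 * a warning if the chosen directory is not tmpfs), otherwise /dev/shm and
 * finally /tmp. For example, running with TMPDIR=/dev/shm/uml would pin the
 * backing files there; that path is just an illustration, not something this
 * file requires.
 */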
/*
* Create an unlinked tempfile in a suitable tempdir. template must be the
* basename part of the template with a leading '/'.
*/
static int __init make_tempfile(const char *template)
{
char *tempname;
int fd;
if (tempdir == NULL) {
tempdir = choose_tempdir();
if (tempdir == NULL) {
os_warn("Failed to choose tempdir: %s\n",
strerror(errno));
return -1;
}
}
#ifdef O_TMPFILE
fd = open(tempdir, O_CLOEXEC | O_RDWR | O_EXCL | O_TMPFILE, 0700);
/*
* If the running system does not support O_TMPFILE flag then retry
* without it.
*/
if (fd != -1 || (errno != EINVAL && errno != EISDIR &&
errno != EOPNOTSUPP))
return fd;
#endif
tempname = malloc(strlen(tempdir) + strlen(template) + 1);
if (tempname == NULL)
return -1;
strcpy(tempname, tempdir);
strcat(tempname, template);
fd = mkstemp(tempname);
if (fd < 0) {
os_warn("open - cannot create %s: %s\n", tempname,
strerror(errno));
goto out;
}
if (unlink(tempname) < 0) {
perror("unlink");
goto close;
}
free(tempname);
return fd;
close:
close(fd);
out:
free(tempname);
return -1;
}
#define TEMPNAME_TEMPLATE "/vm_file-XXXXXX"
static int __init create_tmp_file(unsigned long long len)
{
int fd, err;
char zero;
fd = make_tempfile(TEMPNAME_TEMPLATE);
if (fd < 0)
exit(1);
/*
* Seek to len - 1 because writing a character there will
* increase the file size by one byte, to the desired length.
*/
if (lseek64(fd, len - 1, SEEK_SET) < 0) {
perror("lseek64");
exit(1);
}
zero = 0;
err = write(fd, &zero, 1);
if (err != 1) {
perror("write");
exit(1);
}
return fd;
}
int __init create_mem_file(unsigned long long len)
{
int err, fd;
fd = create_tmp_file(len);
err = os_set_exec_close(fd);
if (err < 0) {
errno = -err;
perror("exec_close");
}
return fd;
}
void __init check_tmpexec(void)
{
void *addr;
int err, fd = create_tmp_file(UM_KERN_PAGE_SIZE);
addr = mmap(NULL, UM_KERN_PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, fd, 0);
os_info("Checking PROT_EXEC mmap in %s...", tempdir);
if (addr == MAP_FAILED) {
err = errno;
os_warn("%s\n", strerror(err));
close(fd);
if (err == EPERM)
os_warn("%s must be not mounted noexec\n", tempdir);
exit(1);
}
os_info("OK\n");
munmap(addr, UM_KERN_PAGE_SIZE);
close(fd);
}
| linux-master | arch/um/os-Linux/mem.c |
/* Copyright (C) 2006 by Paolo Giarrusso - modified from glibc' execvp.c.
Original copyright notice follows:
Copyright (C) 1991,92,1995-99,2002,2004 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#include <unistd.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#ifndef TEST
#include <um_malloc.h>
#else
#include <stdio.h>
#define um_kmalloc malloc
#endif
#include <os.h>
/* Execute FILE, searching in the `PATH' environment variable if it contains
no slashes, with arguments ARGV and environment from `environ'. */
int execvp_noalloc(char *buf, const char *file, char *const argv[])
{
if (*file == '\0') {
return -ENOENT;
}
if (strchr (file, '/') != NULL) {
/* Don't search when it contains a slash. */
execv(file, argv);
} else {
int got_eacces;
size_t len, pathlen;
char *name, *p;
char *path = getenv("PATH");
if (path == NULL)
path = ":/bin:/usr/bin";
len = strlen(file) + 1;
pathlen = strlen(path);
/* Copy the file name at the top. */
name = memcpy(buf + pathlen + 1, file, len);
/* And add the slash. */
*--name = '/';
got_eacces = 0;
p = path;
do {
char *startp;
path = p;
//Let's avoid this GNU extension.
//p = strchrnul (path, ':');
p = strchr(path, ':');
if (!p)
p = strchr(path, '\0');
if (p == path)
/* Two adjacent colons, or a colon at the beginning or the end
of `PATH' means to search the current directory. */
startp = name + 1;
else
startp = memcpy(name - (p - path), path, p - path);
/* Try to execute this name. If it works, execv will not return. */
execv(startp, argv);
/*
if (errno == ENOEXEC) {
}
*/
switch (errno) {
case EACCES:
/* Record the we got a `Permission denied' error. If we end
up finding no executable we can use, we want to diagnose
that we did find one but were denied access. */
got_eacces = 1;
break;
case ENOENT:
case ESTALE:
case ENOTDIR:
/* Those errors indicate the file is missing or not executable
by us, in which case we want to just try the next path
directory. */
case ENODEV:
case ETIMEDOUT:
/* Some strange filesystems like AFS return even
stranger error numbers. They cannot reasonably mean
anything else so ignore those, too. */
case ENOEXEC:
/* We won't go searching for the shell
* if it is not executable - the Linux
				 * kernel already handles this well
				 * enough for us. */
break;
default:
/* Some other error means we found an executable file, but
something went wrong executing it; return the error to our
caller. */
return -errno;
}
} while (*p++ != '\0');
/* We tried every element and none of them worked. */
if (got_eacces)
/* At least one failure was due to permissions, so report that
error. */
return -EACCES;
}
/* Return the error from the last attempt (probably ENOENT). */
return -errno;
}
#ifdef TEST
int main(int argc, char**argv)
{
char buf[PATH_MAX];
int ret;
argc--;
if (!argc) {
os_warn("Not enough arguments\n");
return 1;
}
argv++;
	if ((ret = execvp_noalloc(buf, argv[0], argv))) {
errno = -ret;
perror("execvp_noalloc");
}
return 0;
}
#endif
| linux-master | arch/um/os-Linux/execvp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>
#include <sys/stat.h>
#include <init.h>
#include <os.h>
#define UML_DIR "~/.uml/"
#define UMID_LEN 64
/* Changed by set_umid, which is run early in boot */
static char umid[UMID_LEN] = { 0 };
/* Changed by set_uml_dir and make_uml_dir, which are run early in boot */
static char *uml_dir = UML_DIR;
static int __init make_uml_dir(void)
{
char dir[512] = { '\0' };
int len, err;
if (*uml_dir == '~') {
char *home = getenv("HOME");
err = -ENOENT;
if (home == NULL) {
printk(UM_KERN_ERR
"%s: no value in environment for $HOME\n",
__func__);
goto err;
}
strscpy(dir, home, sizeof(dir));
uml_dir++;
}
strlcat(dir, uml_dir, sizeof(dir));
len = strlen(dir);
if (len > 0 && dir[len - 1] != '/')
strlcat(dir, "/", sizeof(dir));
err = -ENOMEM;
uml_dir = malloc(strlen(dir) + 1);
if (uml_dir == NULL) {
printk(UM_KERN_ERR "%s : malloc failed, errno = %d\n",
__func__, errno);
goto err;
}
strcpy(uml_dir, dir);
if ((mkdir(uml_dir, 0777) < 0) && (errno != EEXIST)) {
printk(UM_KERN_ERR "Failed to mkdir '%s': %s\n",
uml_dir, strerror(errno));
err = -errno;
goto err_free;
}
return 0;
err_free:
free(uml_dir);
err:
uml_dir = NULL;
return err;
}
/*
* Unlinks the files contained in @dir and then removes @dir.
 * Doesn't recurse into subdirectories, so it's not quite rm -rf, but close. We
 * ignore ENOENT errors everywhere (they do happen, strangely enough - possibly
* due to races between multiple dying UML threads).
*/
static int remove_files_and_dir(char *dir)
{
DIR *directory;
struct dirent *ent;
int len;
char file[256];
int ret;
directory = opendir(dir);
if (directory == NULL) {
if (errno != ENOENT)
return -errno;
else
return 0;
}
while ((ent = readdir(directory)) != NULL) {
if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
continue;
len = strlen(dir) + strlen("/") + strlen(ent->d_name) + 1;
if (len > sizeof(file)) {
ret = -E2BIG;
goto out;
}
sprintf(file, "%s/%s", dir, ent->d_name);
if (unlink(file) < 0 && errno != ENOENT) {
ret = -errno;
goto out;
}
}
if (rmdir(dir) < 0 && errno != ENOENT) {
ret = -errno;
goto out;
}
ret = 0;
out:
closedir(directory);
return ret;
}
/*
* This says that there isn't already a user of the specified directory even if
* there are errors during the checking. This is because if these errors
* happen, the directory is unusable by the pre-existing UML, so we might as
 * well take it over. This could happen either by:
 *  - the existing UML somehow corrupting its umid directory
 *  - something other than UML sticking stuff in the directory
 *  - this boot racing with a shutdown of the other UML
* In any of these cases, the directory isn't useful for anything else.
*
* Boolean return: 1 if in use, 0 otherwise.
*/
static inline int is_umdir_used(char *dir)
{
char pid[sizeof("nnnnnnnnn")], *end, *file;
int fd, p, n, err;
size_t filelen = strlen(dir) + sizeof("/pid") + 1;
file = malloc(filelen);
if (!file)
return -ENOMEM;
snprintf(file, filelen, "%s/pid", dir);
fd = open(file, O_RDONLY);
if (fd < 0) {
fd = -errno;
if (fd != -ENOENT) {
printk(UM_KERN_ERR "is_umdir_used : couldn't open pid "
"file '%s', err = %d\n", file, -fd);
}
goto out;
}
err = 0;
n = read(fd, pid, sizeof(pid));
if (n < 0) {
printk(UM_KERN_ERR "is_umdir_used : couldn't read pid file "
"'%s', err = %d\n", file, errno);
goto out_close;
} else if (n == 0) {
printk(UM_KERN_ERR "is_umdir_used : couldn't read pid file "
"'%s', 0-byte read\n", file);
goto out_close;
}
p = strtoul(pid, &end, 0);
if (end == pid) {
printk(UM_KERN_ERR "is_umdir_used : couldn't parse pid file "
"'%s', errno = %d\n", file, errno);
goto out_close;
}
if ((kill(p, 0) == 0) || (errno != ESRCH)) {
printk(UM_KERN_ERR "umid \"%s\" is already in use by pid %d\n",
umid, p);
return 1;
}
out_close:
close(fd);
out:
free(file);
return 0;
}
/*
* Try to remove the directory @dir unless it's in use.
* Precondition: @dir exists.
* Returns 0 for success, < 0 for failure in removal or if the directory is in
* use.
*/
static int umdir_take_if_dead(char *dir)
{
int ret;
if (is_umdir_used(dir))
return -EEXIST;
ret = remove_files_and_dir(dir);
if (ret) {
printk(UM_KERN_ERR "is_umdir_used - remove_files_and_dir "
"failed with err = %d\n", ret);
}
return ret;
}
static void __init create_pid_file(void)
{
char pid[sizeof("nnnnnnnnn")], *file;
int fd, n;
n = strlen(uml_dir) + UMID_LEN + sizeof("/pid");
file = malloc(n);
if (!file)
return;
if (umid_file_name("pid", file, n))
goto out;
fd = open(file, O_RDWR | O_CREAT | O_EXCL, 0644);
if (fd < 0) {
printk(UM_KERN_ERR "Open of machine pid file \"%s\" failed: "
"%s\n", file, strerror(errno));
goto out;
}
snprintf(pid, sizeof(pid), "%d\n", getpid());
n = write(fd, pid, strlen(pid));
if (n != strlen(pid))
printk(UM_KERN_ERR "Write of pid file failed - err = %d\n",
errno);
close(fd);
out:
free(file);
}
int __init set_umid(char *name)
{
if (strlen(name) > UMID_LEN - 1)
return -E2BIG;
strscpy(umid, name, sizeof(umid));
return 0;
}
/* Changed in make_umid, which is called during early boot */
static int umid_setup = 0;
static int __init make_umid(void)
{
int fd, err;
char tmp[256];
if (umid_setup)
return 0;
make_uml_dir();
if (*umid == '\0') {
strscpy(tmp, uml_dir, sizeof(tmp));
strlcat(tmp, "XXXXXX", sizeof(tmp));
fd = mkstemp(tmp);
if (fd < 0) {
printk(UM_KERN_ERR "make_umid - mkstemp(%s) failed: "
"%s\n", tmp, strerror(errno));
err = -errno;
goto err;
}
close(fd);
set_umid(&tmp[strlen(uml_dir)]);
/*
* There's a nice tiny little race between this unlink and
* the mkdir below. It'd be nice if there were a mkstemp
* for directories.
*/
if (unlink(tmp)) {
err = -errno;
goto err;
}
}
snprintf(tmp, sizeof(tmp), "%s%s", uml_dir, umid);
err = mkdir(tmp, 0777);
if (err < 0) {
err = -errno;
if (err != -EEXIST)
goto err;
if (umdir_take_if_dead(tmp) < 0)
goto err;
err = mkdir(tmp, 0777);
}
if (err) {
err = -errno;
printk(UM_KERN_ERR "Failed to create '%s' - err = %d\n", umid,
errno);
goto err;
}
umid_setup = 1;
create_pid_file();
err = 0;
err:
return err;
}
static int __init make_umid_init(void)
{
if (!make_umid())
return 0;
/*
* If initializing with the given umid failed, then try again with
* a random one.
*/
printk(UM_KERN_ERR "Failed to initialize umid \"%s\", trying with a "
"random umid\n", umid);
*umid = '\0';
make_umid();
return 0;
}
__initcall(make_umid_init);
int __init umid_file_name(char *name, char *buf, int len)
{
int n, err;
err = make_umid();
if (err)
return err;
n = snprintf(buf, len, "%s%s/%s", uml_dir, umid, name);
if (n >= len) {
printk(UM_KERN_ERR "umid_file_name : buffer too short\n");
return -E2BIG;
}
return 0;
}
char *get_umid(void)
{
return umid;
}
static int __init set_uml_dir(char *name, int *add)
{
if (*name == '\0') {
os_warn("uml_dir can't be an empty string\n");
return 0;
}
if (name[strlen(name) - 1] == '/') {
uml_dir = name;
return 0;
}
uml_dir = malloc(strlen(name) + 2);
if (uml_dir == NULL) {
os_warn("Failed to malloc uml_dir - error = %d\n", errno);
/*
* Return 0 here because do_initcalls doesn't look at
* the return value.
*/
return 0;
}
sprintf(uml_dir, "%s/", name);
return 0;
}
__uml_setup("uml_dir=", set_uml_dir,
"uml_dir=<directory>\n"
" The location to place the pid and umid files.\n\n"
);
static void remove_umid_dir(void)
{
	char *dir;
	int err;
dir = malloc(strlen(uml_dir) + UMID_LEN + 1);
if (!dir)
return;
sprintf(dir, "%s%s", uml_dir, umid);
err = remove_files_and_dir(dir);
if (err)
os_warn("%s - remove_files_and_dir failed with err = %d\n",
__func__, err);
free(dir);
}
__uml_exitcall(remove_umid_dir);
| linux-master | arch/um/os-Linux/umid.c |
// SPDX-License-Identifier: GPL-2.0
#define __NO_FORTIFY
#include <linux/types.h>
#include <linux/module.h>
/*
* This file exports some critical string functions and compiler
* built-in functions (where calls are emitted by the compiler
* itself that we cannot avoid even in kernel code) to modules.
*
* "_user.c" code that previously used exports here such as hostfs
* really should be considered part of the 'hypervisor' and define
* its own API boundary like hostfs does now; don't add exports to
* this file for such cases.
*/
/* If it's not defined, the export is included in lib/string.c.*/
#ifdef __HAVE_ARCH_STRSTR
#undef strstr
EXPORT_SYMBOL(strstr);
#endif
#ifndef __x86_64__
#undef memcpy
extern void *memcpy(void *, const void *, size_t);
EXPORT_SYMBOL(memcpy);
extern void *memmove(void *, const void *, size_t);
EXPORT_SYMBOL(memmove);
#undef memset
extern void *memset(void *, int, size_t);
EXPORT_SYMBOL(memset);
#endif
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
/* needed for __access_ok() */
EXPORT_SYMBOL(vsyscall_ehdr);
EXPORT_SYMBOL(vsyscall_end);
#endif
#ifdef _FORTIFY_SOURCE
extern int __sprintf_chk(char *str, int flag, size_t len, const char *format);
EXPORT_SYMBOL(__sprintf_chk);
#endif
| linux-master | arch/um/os-Linux/user_syms.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/um/os-Linux/elf_aux.c
*
* Scan the ELF auxiliary vector provided by the host to extract
* information about vsyscall-page, etc.
*
* Copyright (C) 2004 Fujitsu Siemens Computers GmbH
* Author: Bodo Stroesser ([email protected])
*/
#include <elf.h>
#include <stddef.h>
#include <init.h>
#include <elf_user.h>
#include <mem_user.h>
typedef Elf32_auxv_t elf_auxv_t;
/* These are initialized very early in boot and never changed */
char * elf_aux_platform;
extern long elf_aux_hwcap;
unsigned long vsyscall_ehdr;
unsigned long vsyscall_end;
unsigned long __kernel_vsyscall;
__init void scan_elf_aux( char **envp)
{
long page_size = 0;
elf_auxv_t * auxv;
while ( *envp++ != NULL) ;
for ( auxv = (elf_auxv_t *)envp; auxv->a_type != AT_NULL; auxv++) {
switch ( auxv->a_type ) {
case AT_SYSINFO:
__kernel_vsyscall = auxv->a_un.a_val;
/* See if the page is under TASK_SIZE */
if (__kernel_vsyscall < (unsigned long) envp)
__kernel_vsyscall = 0;
break;
case AT_SYSINFO_EHDR:
vsyscall_ehdr = auxv->a_un.a_val;
/* See if the page is under TASK_SIZE */
if (vsyscall_ehdr < (unsigned long) envp)
vsyscall_ehdr = 0;
break;
case AT_HWCAP:
elf_aux_hwcap = auxv->a_un.a_val;
break;
case AT_PLATFORM:
/* elf.h removed the pointer elements from
* a_un, so we have to use a_val, which is
* all that's left.
*/
elf_aux_platform =
(char *) (long) auxv->a_un.a_val;
break;
case AT_PAGESZ:
page_size = auxv->a_un.a_val;
break;
}
}
if ( ! __kernel_vsyscall || ! vsyscall_ehdr ||
! elf_aux_hwcap || ! elf_aux_platform ||
! page_size || (vsyscall_ehdr % page_size) ) {
__kernel_vsyscall = 0;
vsyscall_ehdr = 0;
elf_aux_hwcap = 0;
elf_aux_platform = "i586";
}
else {
vsyscall_end = vsyscall_ehdr + page_size;
}
}
| linux-master | arch/um/os-Linux/elf_aux.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>
#include <linux/limits.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <kern_util.h>
#include <os.h>
#include <um_malloc.h>
struct helper_data {
void (*pre_exec)(void*);
void *pre_data;
char **argv;
int fd;
char *buf;
};
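/*
 * Entry point of the cloned helper child: run the pre_exec callback, then
 * exec the program; on failure the error code is written back over the
 * socketpair.
 */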
static int helper_child(void *arg)
{
struct helper_data *data = arg;
char **argv = data->argv;
int err, ret;
if (data->pre_exec != NULL)
(*data->pre_exec)(data->pre_data);
err = execvp_noalloc(data->buf, argv[0], argv);
/* If the exec succeeds, we don't get here */
CATCH_EINTR(ret = write(data->fd, &err, sizeof(err)));
return 0;
}
/* Returns either the pid of the child process we run or -E* on failure. */
int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
{
struct helper_data data;
unsigned long stack, sp;
int pid, fds[2], ret, n;
stack = alloc_stack(0, __cant_sleep());
if (stack == 0)
return -ENOMEM;
ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
if (ret < 0) {
ret = -errno;
printk(UM_KERN_ERR "run_helper : pipe failed, errno = %d\n",
errno);
goto out_free;
}
ret = os_set_exec_close(fds[1]);
if (ret < 0) {
printk(UM_KERN_ERR "run_helper : setting FD_CLOEXEC failed, "
"ret = %d\n", -ret);
goto out_close;
}
sp = stack + UM_KERN_PAGE_SIZE;
data.pre_exec = pre_exec;
data.pre_data = pre_data;
data.argv = argv;
data.fd = fds[1];
data.buf = __cant_sleep() ? uml_kmalloc(PATH_MAX, UM_GFP_ATOMIC) :
uml_kmalloc(PATH_MAX, UM_GFP_KERNEL);
pid = clone(helper_child, (void *) sp, CLONE_VM, &data);
if (pid < 0) {
ret = -errno;
printk(UM_KERN_ERR "run_helper : clone failed, errno = %d\n",
errno);
goto out_free2;
}
close(fds[1]);
fds[1] = -1;
/*
* Read the errno value from the child, if the exec failed, or get 0 if
* the exec succeeded because the pipe fd was set as close-on-exec.
*/
n = read(fds[0], &ret, sizeof(ret));
if (n == 0) {
ret = pid;
} else {
if (n < 0) {
n = -errno;
printk(UM_KERN_ERR "run_helper : read on pipe failed, "
"ret = %d\n", -n);
ret = n;
}
CATCH_EINTR(waitpid(pid, NULL, __WALL));
}
if (ret < 0)
printk(UM_KERN_ERR "run_helper : failed to exec %s on host: %s\n",
argv[0], strerror(-ret));
out_free2:
kfree(data.buf);
out_close:
if (fds[1] != -1)
close(fds[1]);
close(fds[0]);
out_free:
free_stack(stack, 0);
return ret;
}
int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
unsigned long *stack_out)
{
unsigned long stack, sp;
int pid, status, err;
stack = alloc_stack(0, __cant_sleep());
if (stack == 0)
return -ENOMEM;
sp = stack + UM_KERN_PAGE_SIZE;
pid = clone(proc, (void *) sp, flags, arg);
if (pid < 0) {
err = -errno;
printk(UM_KERN_ERR "run_helper_thread : clone failed, "
"errno = %d\n", errno);
return err;
}
if (stack_out == NULL) {
CATCH_EINTR(pid = waitpid(pid, &status, __WALL));
if (pid < 0) {
err = -errno;
printk(UM_KERN_ERR "run_helper_thread - wait failed, "
"errno = %d\n", errno);
pid = err;
}
if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0))
printk(UM_KERN_ERR "run_helper_thread - thread "
"returned status 0x%x\n", status);
free_stack(stack, 0);
} else
*stack_out = stack;
return pid;
}
int helper_wait(int pid)
{
int ret, status;
int wflags = __WALL;
CATCH_EINTR(ret = waitpid(pid, &status, wflags));
if (ret < 0) {
printk(UM_KERN_ERR "helper_wait : waitpid process %d failed, "
"errno = %d\n", pid, errno);
return -errno;
} else if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
printk(UM_KERN_ERR "helper_wait : process %d exited with "
"status 0x%x\n", pid, status);
return -ECHILD;
} else
return 0;
}
| linux-master | arch/um/os-Linux/helper.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Thomas Meyer ([email protected])
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/resource.h>
#include <as-layout.h>
#include <init.h>
#include <kern_util.h>
#include <os.h>
#include <um_malloc.h>
#define PGD_BOUND (4 * 1024 * 1024)
#define STACKSIZE (8 * 1024 * 1024)
#define THREAD_NAME_LEN (256)
long elf_aux_hwcap;
static void set_stklim(void)
{
struct rlimit lim;
if (getrlimit(RLIMIT_STACK, &lim) < 0) {
perror("getrlimit");
exit(1);
}
if ((lim.rlim_cur == RLIM_INFINITY) || (lim.rlim_cur > STACKSIZE)) {
lim.rlim_cur = STACKSIZE;
if (setrlimit(RLIMIT_STACK, &lim) < 0) {
perror("setrlimit");
exit(1);
}
}
}
static void last_ditch_exit(int sig)
{
uml_cleanup();
exit(1);
}
static void install_fatal_handler(int sig)
{
struct sigaction action;
/* All signals are enabled in this handler ... */
sigemptyset(&action.sa_mask);
/*
* ... including the signal being handled, plus we want the
* handler reset to the default behavior, so that if an exit
* handler is hanging for some reason, the UML will just die
* after this signal is sent a second time.
*/
action.sa_flags = SA_RESETHAND | SA_NODEFER;
action.sa_restorer = NULL;
action.sa_handler = last_ditch_exit;
if (sigaction(sig, &action, NULL) < 0) {
os_warn("failed to install handler for signal %d "
"- errno = %d\n", sig, errno);
exit(1);
}
}
#define UML_LIB_PATH ":" OS_LIB_PATH "/uml"
static void setup_env_path(void)
{
char *new_path = NULL;
char *old_path = NULL;
int path_len = 0;
old_path = getenv("PATH");
/*
* if no PATH variable is set or it has an empty value
* just use the default + /usr/lib/uml
*/
if (!old_path || (path_len = strlen(old_path)) == 0) {
if (putenv("PATH=:/bin:/usr/bin/" UML_LIB_PATH))
perror("couldn't putenv");
return;
}
/* append /usr/lib/uml to the existing path */
path_len += strlen("PATH=" UML_LIB_PATH) + 1;
new_path = malloc(path_len);
if (!new_path) {
perror("couldn't malloc to set a new PATH");
return;
}
snprintf(new_path, path_len, "PATH=%s" UML_LIB_PATH, old_path);
if (putenv(new_path)) {
perror("couldn't putenv to set a new PATH");
free(new_path);
}
}
extern void scan_elf_aux( char **envp);
int __init main(int argc, char **argv, char **envp)
{
char **new_argv;
int ret, i, err;
set_stklim();
setup_env_path();
setsid();
new_argv = malloc((argc + 1) * sizeof(char *));
if (new_argv == NULL) {
perror("Mallocing argv");
exit(1);
}
for (i = 0; i < argc; i++) {
new_argv[i] = strdup(argv[i]);
if (new_argv[i] == NULL) {
perror("Mallocing an arg");
exit(1);
}
}
new_argv[argc] = NULL;
/*
* Allow these signals to bring down a UML if all other
* methods of control fail.
*/
install_fatal_handler(SIGINT);
install_fatal_handler(SIGTERM);
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
scan_elf_aux(envp);
#endif
change_sig(SIGPIPE, 0);
ret = linux_main(argc, argv);
/*
* Disable SIGPROF - I have no idea why libc doesn't do this or turn
* off the profiling time, but UML dies with a SIGPROF just before
* exiting when profiling is active.
*/
change_sig(SIGPROF, 0);
/*
* This signal stuff used to be in the reboot case. However,
	 * sometimes a timer signal can come in when we're halting (reproducibly
* when writing out gcov information, presumably because that takes
* some time) and cause a segfault.
*/
/* stop timers and set timer signal to be ignored */
os_timer_disable();
/* disable SIGIO for the fds and set SIGIO to be ignored */
err = deactivate_all_fds();
if (err)
os_warn("deactivate_all_fds failed, errno = %d\n", -err);
/*
* Let any pending signals fire now. This ensures
* that they won't be delivered after the exec, when
* they are definitely not expected.
*/
unblock_signals();
os_info("\n");
/* Reboot */
if (ret) {
execvp(new_argv[0], new_argv);
perror("Failed to exec kernel");
ret = 1;
}
return uml_exitcode;
}
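/*
 * malloc() and friends are diverted to these wrappers at link time; once
 * kmalloc_ok is set, small requests are served by kmalloc and larger ones
 * by vmalloc, with errno set to ENOMEM on failure.
 */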
extern void *__real_malloc(int);
void *__wrap_malloc(int size)
{
void *ret;
if (!kmalloc_ok)
return __real_malloc(size);
else if (size <= UM_KERN_PAGE_SIZE)
		/* finding contiguous pages can be hard */
ret = uml_kmalloc(size, UM_GFP_KERNEL);
else ret = vmalloc(size);
/*
* glibc people insist that if malloc fails, errno should be
* set by malloc as well. So we do.
*/
if (ret == NULL)
errno = ENOMEM;
return ret;
}
void *__wrap_calloc(int n, int size)
{
void *ptr = __wrap_malloc(n * size);
if (ptr == NULL)
return NULL;
memset(ptr, 0, n * size);
return ptr;
}
extern void __real_free(void *);
extern unsigned long high_physmem;
void __wrap_free(void *ptr)
{
unsigned long addr = (unsigned long) ptr;
/*
* We need to know how the allocation happened, so it can be correctly
* freed. This is done by seeing what region of memory the pointer is
* in -
* physical memory - kmalloc/kfree
* kernel virtual memory - vmalloc/vfree
* anywhere else - malloc/free
* If kmalloc is not yet possible, then either high_physmem and/or
* end_vm are still 0 (as at startup), in which case we call free, or
* we have set them, but anyway addr has not been allocated from those
* areas. So, in both cases __real_free is called.
*
* CAN_KMALLOC is checked because it would be bad to free a buffer
* with kmalloc/vmalloc after they have been turned off during
* shutdown.
* XXX: However, we sometimes shutdown CAN_KMALLOC temporarily, so
* there is a possibility for memory leaks.
*/
if ((addr >= uml_physmem) && (addr < high_physmem)) {
if (kmalloc_ok)
kfree(ptr);
}
else if ((addr >= start_vm) && (addr < end_vm)) {
if (kmalloc_ok)
vfree(ptr);
}
else __real_free(ptr);
}
| linux-master | arch/um/os-Linux/main.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
* Copyright (C) 2015 Thomas Meyer ([email protected])
* Copyright (C) 2012-2014 Cisco Systems
* Copyright (C) 2000 - 2007 Jeff Dike (jdike{addtoit,linux.intel}.com)
*/
#include <stddef.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <time.h>
#include <sys/time.h>
#include <kern_util.h>
#include <os.h>
#include <string.h>
static timer_t event_high_res_timer = 0;
static inline long long timespec_to_ns(const struct timespec *ts)
{
return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) + ts->tv_nsec;
}
long long os_persistent_clock_emulation(void)
{
struct timespec realtime_tp;
clock_gettime(CLOCK_REALTIME, &realtime_tp);
return timespec_to_ns(&realtime_tp);
}
/**
* os_timer_create() - create an new posix (interval) timer
*/
int os_timer_create(void)
{
timer_t *t = &event_high_res_timer;
if (timer_create(CLOCK_MONOTONIC, NULL, t) == -1)
return -1;
return 0;
}
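/**
 * os_timer_set_interval() - make the timer fire periodically
 * @nsecs: timer period in nanoseconds
 */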
int os_timer_set_interval(unsigned long long nsecs)
{
struct itimerspec its;
its.it_value.tv_sec = nsecs / UM_NSEC_PER_SEC;
its.it_value.tv_nsec = nsecs % UM_NSEC_PER_SEC;
its.it_interval.tv_sec = nsecs / UM_NSEC_PER_SEC;
its.it_interval.tv_nsec = nsecs % UM_NSEC_PER_SEC;
if (timer_settime(event_high_res_timer, 0, &its, NULL) == -1)
return -errno;
return 0;
}
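/**
 * os_timer_one_shot() - fire the timer once, after @nsecs nanoseconds
 * @nsecs: delay before the timer fires, in nanoseconds
 */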
int os_timer_one_shot(unsigned long long nsecs)
{
struct itimerspec its = {
.it_value.tv_sec = nsecs / UM_NSEC_PER_SEC,
.it_value.tv_nsec = nsecs % UM_NSEC_PER_SEC,
.it_interval.tv_sec = 0,
.it_interval.tv_nsec = 0, // we cheat here
};
timer_settime(event_high_res_timer, 0, &its, NULL);
return 0;
}
/**
* os_timer_disable() - disable the posix (interval) timer
*/
void os_timer_disable(void)
{
struct itimerspec its;
memset(&its, 0, sizeof(struct itimerspec));
timer_settime(event_high_res_timer, 0, &its, NULL);
}
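/**
 * os_nsecs() - return the current CLOCK_MONOTONIC time in nanoseconds
 */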
long long os_nsecs(void)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC,&ts);
return timespec_to_ns(&ts);
}
/**
* os_idle_sleep() - sleep until interrupted
*/
void os_idle_sleep(void)
{
struct itimerspec its;
sigset_t set, old;
/* block SIGALRM while we analyze the timer state */
sigemptyset(&set);
sigaddset(&set, SIGALRM);
sigprocmask(SIG_BLOCK, &set, &old);
/* check the timer, and if it'll fire then wait for it */
timer_gettime(event_high_res_timer, &its);
if (its.it_value.tv_sec || its.it_value.tv_nsec)
sigsuspend(&old);
/* either way, restore the signal mask */
sigprocmask(SIG_UNBLOCK, &set, NULL);
}
| linux-master | arch/um/os-Linux/time.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <kern_util.h>
#include <os.h>
struct grantpt_info {
int fd;
int res;
int err;
};
static void grantpt_cb(void *arg)
{
struct grantpt_info *info = arg;
info->res = grantpt(info->fd);
info->err = errno;
}
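/*
 * Open a pty master on the host; grantpt() is run on the initial host
 * thread via initial_thread_cb(), then the slave side is unlocked.
 */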
int get_pty(void)
{
struct grantpt_info info;
int fd, err;
fd = open("/dev/ptmx", O_RDWR);
if (fd < 0) {
err = -errno;
printk(UM_KERN_ERR "get_pty : Couldn't open /dev/ptmx - "
"err = %d\n", errno);
return err;
}
info.fd = fd;
initial_thread_cb(grantpt_cb, &info);
if (info.res < 0) {
err = -info.err;
printk(UM_KERN_ERR "get_pty : Couldn't grant pty - "
"errno = %d\n", -info.err);
goto out;
}
if (unlockpt(fd) < 0) {
err = -errno;
printk(UM_KERN_ERR "get_pty : Couldn't unlock pty - "
"errno = %d\n", errno);
goto out;
}
return fd;
out:
close(fd);
return err;
}
| linux-master | arch/um/os-Linux/tty.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
* Copyright (C) 2015 Thomas Meyer ([email protected])
* Copyright (C) 2004 PathScale, Inc
* Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdlib.h>
#include <stdarg.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <strings.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <sysdep/mcontext.h>
#include <um_malloc.h>
#include <sys/ucontext.h>
#include <timetravel.h>
void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
[SIGTRAP] = relay_signal,
[SIGFPE] = relay_signal,
[SIGILL] = relay_signal,
[SIGWINCH] = winch,
[SIGBUS] = bus_handler,
[SIGSEGV] = segv_handler,
[SIGIO] = sigio_handler,
};
static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
{
struct uml_pt_regs r;
int save_errno = errno;
r.is_user = 0;
if (sig == SIGSEGV) {
/* For segfaults, we want the data from the sigcontext. */
get_regs_from_mc(&r, mc);
GET_FAULTINFO_FROM_MC(r.faultinfo, mc);
}
	/* enable signals if sig isn't an IRQ signal */
if ((sig != SIGIO) && (sig != SIGWINCH))
unblock_signals_trace();
(*sig_info[sig])(sig, si, &r);
errno = save_errno;
}
/*
* These are the asynchronous signals. SIGPROF is excluded because we want to
* be able to profile all of UML, not just the non-critical sections. If
* profiling is not thread-safe, then that is not my problem. We can disable
* profiling when SMP is enabled in that case.
*/
#define SIGIO_BIT 0
#define SIGIO_MASK (1 << SIGIO_BIT)
#define SIGALRM_BIT 1
#define SIGALRM_MASK (1 << SIGALRM_BIT)
int signals_enabled;
#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
static int signals_blocked;
#else
#define signals_blocked 0
#endif
static unsigned int signals_pending;
static unsigned int signals_active = 0;
void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
{
int enabled = signals_enabled;
if ((signals_blocked || !enabled) && (sig == SIGIO)) {
/*
* In TT_MODE_EXTERNAL, need to still call time-travel
* handlers unless signals are also blocked for the
* external time message processing. This will mark
* signals_pending by itself (only if necessary.)
*/
if (!signals_blocked && time_travel_mode == TT_MODE_EXTERNAL)
sigio_run_timetravel_handlers();
else
signals_pending |= SIGIO_MASK;
return;
}
block_signals_trace();
sig_handler_common(sig, si, mc);
um_set_signals_trace(enabled);
}
static void timer_real_alarm_handler(mcontext_t *mc)
{
struct uml_pt_regs regs;
if (mc != NULL)
		get_regs_from_mc(&regs, mc);
	else
		memset(&regs, 0, sizeof(regs));
	timer_handler(SIGALRM, NULL, &regs);
}
void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
int enabled;
enabled = signals_enabled;
if (!signals_enabled) {
signals_pending |= SIGALRM_MASK;
return;
}
block_signals_trace();
signals_active |= SIGALRM_MASK;
timer_real_alarm_handler(mc);
signals_active &= ~SIGALRM_MASK;
um_set_signals_trace(enabled);
}
void deliver_alarm(void)
{
timer_alarm_handler(SIGALRM, NULL, NULL);
}
void timer_set_signal_handler(void)
{
set_handler(SIGALRM);
}
void set_sigstack(void *sig_stack, int size)
{
stack_t stack = {
.ss_flags = 0,
.ss_sp = sig_stack,
.ss_size = size
};
if (sigaltstack(&stack, NULL) != 0)
panic("enabling signal stack failed, errno = %d\n", errno);
}
static void sigusr1_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
uml_pm_wake();
}
void register_pm_wake_signal(void)
{
set_handler(SIGUSR1);
}
static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
[SIGSEGV] = sig_handler,
[SIGBUS] = sig_handler,
[SIGILL] = sig_handler,
[SIGFPE] = sig_handler,
[SIGTRAP] = sig_handler,
[SIGIO] = sig_handler,
[SIGWINCH] = sig_handler,
[SIGALRM] = timer_alarm_handler,
[SIGUSR1] = sigusr1_handler,
};
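/*
 * Handler actually installed for the signals above: switch to the IRQ
 * stack and dispatch every signal that accumulated while doing so.
 */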
static void hard_handler(int sig, siginfo_t *si, void *p)
{
ucontext_t *uc = p;
mcontext_t *mc = &uc->uc_mcontext;
unsigned long pending = 1UL << sig;
do {
int nested, bail;
/*
* pending comes back with one bit set for each
* interrupt that arrived while setting up the stack,
* plus a bit for this interrupt, plus the zero bit is
* set if this is a nested interrupt.
* If bail is true, then we interrupted another
* handler setting up the stack. In this case, we
* have to return, and the upper handler will deal
* with this interrupt.
*/
bail = to_irq_stack(&pending);
if (bail)
return;
nested = pending & 1;
pending &= ~1;
while ((sig = ffs(pending)) != 0){
sig--;
pending &= ~(1 << sig);
(*handlers[sig])(sig, (struct siginfo *)si, mc);
}
/*
* Again, pending comes back with a mask of signals
* that arrived while tearing down the stack. If this
* is non-zero, we just go back, set up the stack
* again, and handle the new interrupts.
*/
if (!nested)
pending = from_irq_stack(nested);
} while (pending);
}
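/*
 * Install hard_handler for sig, with the IRQ signals (SIGIO, SIGWINCH,
 * SIGALRM) blocked while it runs, and make sure sig itself is unblocked.
 */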
void set_handler(int sig)
{
struct sigaction action;
int flags = SA_SIGINFO | SA_ONSTACK;
sigset_t sig_mask;
action.sa_sigaction = hard_handler;
/* block irq ones */
sigemptyset(&action.sa_mask);
sigaddset(&action.sa_mask, SIGIO);
sigaddset(&action.sa_mask, SIGWINCH);
sigaddset(&action.sa_mask, SIGALRM);
if (sig == SIGSEGV)
flags |= SA_NODEFER;
if (sigismember(&action.sa_mask, sig))
flags |= SA_RESTART; /* if it's an irq signal */
action.sa_flags = flags;
action.sa_restorer = NULL;
if (sigaction(sig, &action, NULL) < 0)
panic("sigaction failed - errno = %d\n", errno);
sigemptyset(&sig_mask);
sigaddset(&sig_mask, sig);
if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
panic("sigprocmask failed - errno = %d\n", errno);
}
void send_sigio_to_self(void)
{
kill(os_getpid(), SIGIO);
}
int change_sig(int signal, int on)
{
sigset_t sigset;
sigemptyset(&sigset);
sigaddset(&sigset, signal);
if (sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, NULL) < 0)
return -errno;
return 0;
}
void block_signals(void)
{
signals_enabled = 0;
/*
* This must return with signals disabled, so this barrier
* ensures that writes are flushed out before the return.
* This might matter if gcc figures out how to inline this and
* decides to shuffle this code into the caller.
*/
barrier();
}
void unblock_signals(void)
{
int save_pending;
if (signals_enabled == 1)
return;
signals_enabled = 1;
#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
deliver_time_travel_irqs();
#endif
/*
* We loop because the IRQ handler returns with interrupts off. So,
* interrupts may have arrived and we need to re-enable them and
* recheck signals_pending.
*/
while (1) {
/*
* Save and reset save_pending after enabling signals. This
* way, signals_pending won't be changed while we're reading it.
*
* Setting signals_enabled and reading signals_pending must
* happen in this order, so have the barrier here.
*/
barrier();
save_pending = signals_pending;
if (save_pending == 0)
return;
signals_pending = 0;
/*
* We have pending interrupts, so disable signals, as the
* handlers expect them off when they are called. They will
* be enabled again above. We need to trace this, as we're
* expected to be enabling interrupts already, but any more
* tracing that happens inside the handlers we call for the
* pending signals will mess up the tracing state.
*/
signals_enabled = 0;
um_trace_signals_off();
/*
* Deal with SIGIO first because the alarm handler might
* schedule, leaving the pending SIGIO stranded until we come
* back here.
*
* SIGIO's handler doesn't use siginfo or mcontext,
* so they can be NULL.
*/
if (save_pending & SIGIO_MASK)
sig_handler_common(SIGIO, NULL, NULL);
/* Do not reenter the handler */
if ((save_pending & SIGALRM_MASK) && (!(signals_active & SIGALRM_MASK)))
timer_real_alarm_handler(NULL);
/* Rerun the loop only if there is still pending SIGIO and not in TIMER handler */
if (!(signals_pending & SIGIO_MASK) && (signals_active & SIGALRM_MASK))
return;
/* Re-enable signals and trace that we're doing so. */
um_trace_signals_on();
signals_enabled = 1;
}
}
int um_set_signals(int enable)
{
int ret;
if (signals_enabled == enable)
return enable;
ret = signals_enabled;
if (enable)
unblock_signals();
else block_signals();
return ret;
}
int um_set_signals_trace(int enable)
{
int ret;
if (signals_enabled == enable)
return enable;
ret = signals_enabled;
if (enable)
unblock_signals_trace();
else
block_signals_trace();
return ret;
}
#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
void mark_sigio_pending(void)
{
signals_pending |= SIGIO_MASK;
}
void block_signals_hard(void)
{
if (signals_blocked)
return;
signals_blocked = 1;
barrier();
}
void unblock_signals_hard(void)
{
if (!signals_blocked)
return;
/* Must be set to 0 before we check the pending bits etc. */
signals_blocked = 0;
barrier();
if (signals_pending && signals_enabled) {
/* this is a bit inefficient, but that's not really important */
block_signals();
unblock_signals();
} else if (signals_pending & SIGIO_MASK) {
/* we need to run time-travel handlers even if not enabled */
sigio_run_timetravel_handlers();
}
}
#endif
int os_is_signal_stack(void)
{
stack_t ss;
sigaltstack(NULL, &ss);
return ss.ss_flags & SS_ONSTACK;
}
| linux-master | arch/um/os-Linux/signal.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <linux/falloc.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/un.h>
#include <sys/types.h>
#include <sys/eventfd.h>
#include <poll.h>
#include <os.h>
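/* Translate the host's struct stat64 into UML's OS-independent struct uml_stat. */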
static void copy_stat(struct uml_stat *dst, const struct stat64 *src)
{
*dst = ((struct uml_stat) {
.ust_dev = src->st_dev, /* device */
.ust_ino = src->st_ino, /* inode */
.ust_mode = src->st_mode, /* protection */
.ust_nlink = src->st_nlink, /* number of hard links */
.ust_uid = src->st_uid, /* user ID of owner */
.ust_gid = src->st_gid, /* group ID of owner */
.ust_size = src->st_size, /* total size, in bytes */
.ust_blksize = src->st_blksize, /* blocksize for filesys I/O */
.ust_blocks = src->st_blocks, /* number of blocks allocated */
.ust_atime = src->st_atime, /* time of last access */
.ust_mtime = src->st_mtime, /* time of last modification */
.ust_ctime = src->st_ctime, /* time of last change */
});
}
int os_stat_fd(const int fd, struct uml_stat *ubuf)
{
struct stat64 sbuf;
int err;
CATCH_EINTR(err = fstat64(fd, &sbuf));
if (err < 0)
return -errno;
if (ubuf != NULL)
copy_stat(ubuf, &sbuf);
return err;
}
int os_stat_file(const char *file_name, struct uml_stat *ubuf)
{
struct stat64 sbuf;
int err;
CATCH_EINTR(err = stat64(file_name, &sbuf));
if (err < 0)
return -errno;
if (ubuf != NULL)
copy_stat(ubuf, &sbuf);
return err;
}
int os_access(const char *file, int mode)
{
int amode, err;
amode = (mode & OS_ACC_R_OK ? R_OK : 0) |
(mode & OS_ACC_W_OK ? W_OK : 0) |
(mode & OS_ACC_X_OK ? X_OK : 0) |
(mode & OS_ACC_F_OK ? F_OK : 0);
err = access(file, amode);
if (err < 0)
return -errno;
return 0;
}
/* FIXME? required only by hostaudio (because it passes ioctls verbatim) */
int os_ioctl_generic(int fd, unsigned int cmd, unsigned long arg)
{
int err;
err = ioctl(fd, cmd, arg);
if (err < 0)
return -errno;
return err;
}
/* FIXME: ensure namebuf in os_get_if_name is big enough */
int os_get_ifname(int fd, char* namebuf)
{
if (ioctl(fd, SIOCGIFNAME, namebuf) < 0)
return -errno;
return 0;
}
int os_set_slip(int fd)
{
int disc, sencap;
disc = N_SLIP;
if (ioctl(fd, TIOCSETD, &disc) < 0)
return -errno;
sencap = 0;
if (ioctl(fd, SIOCSIFENCAP, &sencap) < 0)
return -errno;
return 0;
}
int os_mode_fd(int fd, int mode)
{
int err;
CATCH_EINTR(err = fchmod(fd, mode));
if (err < 0)
return -errno;
return 0;
}
int os_file_type(char *file)
{
struct uml_stat buf;
int err;
err = os_stat_file(file, &buf);
if (err < 0)
return err;
if (S_ISDIR(buf.ust_mode))
return OS_TYPE_DIR;
else if (S_ISLNK(buf.ust_mode))
return OS_TYPE_SYMLINK;
else if (S_ISCHR(buf.ust_mode))
return OS_TYPE_CHARDEV;
else if (S_ISBLK(buf.ust_mode))
return OS_TYPE_BLOCKDEV;
else if (S_ISFIFO(buf.ust_mode))
return OS_TYPE_FIFO;
else if (S_ISSOCK(buf.ust_mode))
return OS_TYPE_SOCK;
else return OS_TYPE_FILE;
}
int os_file_mode(const char *file, struct openflags *mode_out)
{
int err;
*mode_out = OPENFLAGS();
err = access(file, W_OK);
if (err && (errno != EACCES))
return -errno;
else if (!err)
*mode_out = of_write(*mode_out);
err = access(file, R_OK);
if (err && (errno != EACCES))
return -errno;
else if (!err)
*mode_out = of_read(*mode_out);
return err;
}
int os_open_file(const char *file, struct openflags flags, int mode)
{
int fd, err, f = 0;
if (flags.r && flags.w)
f = O_RDWR;
else if (flags.r)
f = O_RDONLY;
else if (flags.w)
f = O_WRONLY;
else f = 0;
if (flags.s)
f |= O_SYNC;
if (flags.c)
f |= O_CREAT;
if (flags.t)
f |= O_TRUNC;
if (flags.e)
f |= O_EXCL;
if (flags.a)
f |= O_APPEND;
fd = open64(file, f, mode);
if (fd < 0)
return -errno;
if (flags.cl && fcntl(fd, F_SETFD, 1)) {
err = -errno;
close(fd);
return err;
}
return fd;
}
int os_connect_socket(const char *name)
{
struct sockaddr_un sock;
int fd, err;
sock.sun_family = AF_UNIX;
snprintf(sock.sun_path, sizeof(sock.sun_path), "%s", name);
fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (fd < 0) {
err = -errno;
goto out;
}
err = connect(fd, (struct sockaddr *) &sock, sizeof(sock));
if (err) {
err = -errno;
goto out_close;
}
return fd;
out_close:
close(fd);
out:
return err;
}
void os_close_file(int fd)
{
close(fd);
}
int os_fsync_file(int fd)
{
if (fsync(fd) < 0)
return -errno;
return 0;
}
int os_seek_file(int fd, unsigned long long offset)
{
unsigned long long actual;
actual = lseek64(fd, offset, SEEK_SET);
if (actual != offset)
return -errno;
return 0;
}
int os_read_file(int fd, void *buf, int len)
{
int n = read(fd, buf, len);
if (n < 0)
return -errno;
return n;
}
int os_pread_file(int fd, void *buf, int len, unsigned long long offset)
{
int n = pread(fd, buf, len, offset);
if (n < 0)
return -errno;
return n;
}
int os_write_file(int fd, const void *buf, int len)
{
int n = write(fd, (void *) buf, len);
if (n < 0)
return -errno;
return n;
}
int os_sync_file(int fd)
{
int n = fdatasync(fd);
if (n < 0)
return -errno;
return n;
}
int os_pwrite_file(int fd, const void *buf, int len, unsigned long long offset)
{
int n = pwrite(fd, (void *) buf, len, offset);
if (n < 0)
return -errno;
return n;
}
int os_file_size(const char *file, unsigned long long *size_out)
{
struct uml_stat buf;
int err;
err = os_stat_file(file, &buf);
if (err < 0) {
printk(UM_KERN_ERR "Couldn't stat \"%s\" : err = %d\n", file,
-err);
return err;
}
if (S_ISBLK(buf.ust_mode)) {
int fd;
long blocks;
fd = open(file, O_RDONLY, 0);
if (fd < 0) {
err = -errno;
printk(UM_KERN_ERR "Couldn't open \"%s\", "
"errno = %d\n", file, errno);
return err;
}
if (ioctl(fd, BLKGETSIZE, &blocks) < 0) {
err = -errno;
printk(UM_KERN_ERR "Couldn't get the block size of "
"\"%s\", errno = %d\n", file, errno);
close(fd);
return err;
}
*size_out = ((long long) blocks) * 512;
close(fd);
}
else *size_out = buf.ust_size;
return 0;
}
int os_file_modtime(const char *file, long long *modtime)
{
struct uml_stat buf;
int err;
err = os_stat_file(file, &buf);
if (err < 0) {
printk(UM_KERN_ERR "Couldn't stat \"%s\" : err = %d\n", file,
-err);
return err;
}
*modtime = buf.ust_mtime;
return 0;
}
int os_set_exec_close(int fd)
{
int err;
CATCH_EINTR(err = fcntl(fd, F_SETFD, FD_CLOEXEC));
if (err < 0)
return -errno;
return err;
}
int os_pipe(int *fds, int stream, int close_on_exec)
{
int err, type = stream ? SOCK_STREAM : SOCK_DGRAM;
err = socketpair(AF_UNIX, type, 0, fds);
if (err < 0)
return -errno;
if (!close_on_exec)
return 0;
err = os_set_exec_close(fds[0]);
if (err < 0)
goto error;
err = os_set_exec_close(fds[1]);
if (err < 0)
goto error;
return 0;
error:
printk(UM_KERN_ERR "os_pipe : Setting FD_CLOEXEC failed, err = %d\n",
-err);
close(fds[1]);
close(fds[0]);
return err;
}
int os_set_fd_async(int fd)
{
int err, flags;
flags = fcntl(fd, F_GETFL);
if (flags < 0)
return -errno;
flags |= O_ASYNC | O_NONBLOCK;
if (fcntl(fd, F_SETFL, flags) < 0) {
err = -errno;
printk(UM_KERN_ERR "os_set_fd_async : failed to set O_ASYNC "
"and O_NONBLOCK on fd # %d, errno = %d\n", fd, errno);
return err;
}
if ((fcntl(fd, F_SETSIG, SIGIO) < 0) ||
(fcntl(fd, F_SETOWN, os_getpid()) < 0)) {
err = -errno;
printk(UM_KERN_ERR "os_set_fd_async : Failed to fcntl F_SETOWN "
"(or F_SETSIG) fd %d, errno = %d\n", fd, errno);
return err;
}
return 0;
}
int os_clear_fd_async(int fd)
{
int flags;
flags = fcntl(fd, F_GETFL);
if (flags < 0)
return -errno;
flags &= ~(O_ASYNC | O_NONBLOCK);
if (fcntl(fd, F_SETFL, flags) < 0)
return -errno;
return 0;
}
int os_set_fd_block(int fd, int blocking)
{
int flags;
flags = fcntl(fd, F_GETFL);
if (flags < 0)
return -errno;
if (blocking)
flags &= ~O_NONBLOCK;
else
flags |= O_NONBLOCK;
if (fcntl(fd, F_SETFL, flags) < 0)
return -errno;
return 0;
}
int os_accept_connection(int fd)
{
int new;
new = accept(fd, NULL, 0);
if (new < 0)
return -errno;
return new;
}
#ifndef SHUT_RD
#define SHUT_RD 0
#endif
#ifndef SHUT_WR
#define SHUT_WR 1
#endif
#ifndef SHUT_RDWR
#define SHUT_RDWR 2
#endif
int os_shutdown_socket(int fd, int r, int w)
{
int what, err;
if (r && w)
what = SHUT_RDWR;
else if (r)
what = SHUT_RD;
else if (w)
what = SHUT_WR;
else
return -EINVAL;
err = shutdown(fd, what);
if (err < 0)
return -errno;
return 0;
}
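/*
 * Receive a file descriptor passed over a Unix socket via SCM_RIGHTS;
 * the normal data accompanying it is the sending helper's pid.
 */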
int os_rcv_fd(int fd, int *helper_pid_out)
{
int new, n;
char buf[CMSG_SPACE(sizeof(new))];
struct msghdr msg;
struct cmsghdr *cmsg;
struct iovec iov;
msg.msg_name = NULL;
msg.msg_namelen = 0;
iov = ((struct iovec) { .iov_base = helper_pid_out,
.iov_len = sizeof(*helper_pid_out) });
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_control = buf;
msg.msg_controllen = sizeof(buf);
msg.msg_flags = 0;
n = recvmsg(fd, &msg, 0);
if (n < 0)
return -errno;
else if (n != iov.iov_len)
*helper_pid_out = -1;
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
printk(UM_KERN_ERR "rcv_fd didn't receive anything, "
"error = %d\n", errno);
return -1;
}
if ((cmsg->cmsg_level != SOL_SOCKET) ||
(cmsg->cmsg_type != SCM_RIGHTS)) {
printk(UM_KERN_ERR "rcv_fd didn't receive a descriptor\n");
return -1;
}
new = ((int *) CMSG_DATA(cmsg))[0];
return new;
}
int os_create_unix_socket(const char *file, int len, int close_on_exec)
{
struct sockaddr_un addr;
int sock, err;
sock = socket(PF_UNIX, SOCK_DGRAM, 0);
if (sock < 0)
return -errno;
if (close_on_exec) {
err = os_set_exec_close(sock);
if (err < 0)
printk(UM_KERN_ERR "create_unix_socket : "
"close_on_exec failed, err = %d", -err);
}
addr.sun_family = AF_UNIX;
snprintf(addr.sun_path, len, "%s", file);
err = bind(sock, (struct sockaddr *) &addr, sizeof(addr));
if (err < 0)
return -errno;
return sock;
}
void os_flush_stdout(void)
{
fflush(stdout);
}
int os_lock_file(int fd, int excl)
{
int type = excl ? F_WRLCK : F_RDLCK;
struct flock lock = ((struct flock) { .l_type = type,
.l_whence = SEEK_SET,
.l_start = 0,
.l_len = 0 } );
int err, save;
err = fcntl(fd, F_SETLK, &lock);
if (!err)
goto out;
save = -errno;
err = fcntl(fd, F_GETLK, &lock);
if (err) {
err = -errno;
goto out;
}
printk(UM_KERN_ERR "F_SETLK failed, file already locked by pid %d\n",
lock.l_pid);
err = save;
out:
return err;
}
unsigned os_major(unsigned long long dev)
{
return major(dev);
}
unsigned os_minor(unsigned long long dev)
{
return minor(dev);
}
unsigned long long os_makedev(unsigned major, unsigned minor)
{
return makedev(major, minor);
}
int os_falloc_punch(int fd, unsigned long long offset, int len)
{
int n = fallocate(fd, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE, offset, len);
if (n < 0)
return -errno;
return n;
}
int os_falloc_zeroes(int fd, unsigned long long offset, int len)
{
int n = fallocate(fd, FALLOC_FL_ZERO_RANGE|FALLOC_FL_KEEP_SIZE, offset, len);
if (n < 0)
return -errno;
return n;
}
int os_eventfd(unsigned int initval, int flags)
{
int fd = eventfd(initval, flags);
if (fd < 0)
return -errno;
return fd;
}
int os_sendmsg_fds(int fd, const void *buf, unsigned int len, const int *fds,
unsigned int fds_num)
{
struct iovec iov = {
.iov_base = (void *) buf,
.iov_len = len,
};
union {
char control[CMSG_SPACE(sizeof(*fds) * OS_SENDMSG_MAX_FDS)];
struct cmsghdr align;
} u;
unsigned int fds_size = sizeof(*fds) * fds_num;
struct msghdr msg = {
.msg_iov = &iov,
.msg_iovlen = 1,
.msg_control = u.control,
.msg_controllen = CMSG_SPACE(fds_size),
};
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
int err;
if (fds_num > OS_SENDMSG_MAX_FDS)
return -EINVAL;
memset(u.control, 0, sizeof(u.control));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(fds_size);
memcpy(CMSG_DATA(cmsg), fds, fds_size);
err = sendmsg(fd, &msg, 0);
if (err < 0)
return -errno;
return err;
}
int os_poll(unsigned int n, const int *fds)
{
/* currently need 2 FDs at most so avoid dynamic allocation */
struct pollfd pollfds[2] = {};
unsigned int i;
int ret;
if (n > ARRAY_SIZE(pollfds))
return -EINVAL;
for (i = 0; i < n; i++) {
pollfds[i].fd = fds[i];
pollfds[i].events = POLLIN;
}
ret = poll(pollfds, n, -1);
if (ret < 0)
return -errno;
/* Return the index of the available FD */
for (i = 0; i < n; i++) {
if (pollfds[i].revents)
return i;
}
return -EIO;
}
| linux-master | arch/um/os-Linux/file.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pty.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <kern_util.h>
#include <init.h>
#include <os.h>
#include <sigio.h>
#include <um_malloc.h>
/*
* Protected by sigio_lock(), also used by sigio_cleanup, which is an
* exitcall.
*/
static int write_sigio_pid = -1;
static unsigned long write_sigio_stack;
/*
* These arrays are initialized before the sigio thread is started, and
* the descriptors closed after it is killed. So, it can't see them change.
* On the UML side, they are changed under the sigio_lock.
*/
#define SIGIO_FDS_INIT {-1, -1}
static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;
struct pollfds {
struct pollfd *poll;
int size;
int used;
};
/*
* Protected by sigio_lock(). Used by the sigio thread, but the UML thread
* synchronizes with it.
*/
static struct pollfds current_poll;
static struct pollfds next_poll;
static struct pollfds all_sigio_fds;
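/*
 * Thread that poll()s descriptors whose SIGIO support is broken on the
 * host. When one becomes readable, it is dropped from the set and a byte
 * is written to write_sigio_fds[1] to raise SIGIO on the UML side; traffic
 * on sigio_private is a request from update_thread() to swap in a new
 * descriptor list.
 */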
static int write_sigio_thread(void *unused)
{
struct pollfds *fds, tmp;
struct pollfd *p;
int i, n, respond_fd;
char c;
os_fix_helper_signals();
	fds = &current_poll;
while (1) {
n = poll(fds->poll, fds->used, -1);
if (n < 0) {
if (errno == EINTR)
continue;
printk(UM_KERN_ERR "write_sigio_thread : poll returned "
"%d, errno = %d\n", n, errno);
}
for (i = 0; i < fds->used; i++) {
p = &fds->poll[i];
if (p->revents == 0)
continue;
if (p->fd == sigio_private[1]) {
CATCH_EINTR(n = read(sigio_private[1], &c,
sizeof(c)));
if (n != sizeof(c))
printk(UM_KERN_ERR
"write_sigio_thread : "
"read on socket failed, "
"err = %d\n", errno);
tmp = current_poll;
current_poll = next_poll;
next_poll = tmp;
respond_fd = sigio_private[1];
}
else {
respond_fd = write_sigio_fds[1];
fds->used--;
memmove(&fds->poll[i], &fds->poll[i + 1],
(fds->used - i) * sizeof(*fds->poll));
}
CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
if (n != sizeof(c))
printk(UM_KERN_ERR "write_sigio_thread : "
"write on socket failed, err = %d\n",
errno);
}
}
return 0;
}
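/* Make sure polls->poll can hold at least n entries, preserving the current ones. */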
static int need_poll(struct pollfds *polls, int n)
{
struct pollfd *new;
if (n <= polls->size)
return 0;
new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
if (new == NULL) {
printk(UM_KERN_ERR "need_poll : failed to allocate new "
"pollfds\n");
return -ENOMEM;
}
memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
kfree(polls->poll);
polls->poll = new;
polls->size = n;
return 0;
}
/*
* Must be called with sigio_lock held, because it's needed by the marked
* critical section.
*/
static void update_thread(void)
{
unsigned long flags;
int n;
char c;
flags = um_set_signals_trace(0);
CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
if (n != sizeof(c)) {
printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
errno);
goto fail;
}
CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
if (n != sizeof(c)) {
printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
errno);
goto fail;
}
um_set_signals_trace(flags);
return;
fail:
/* Critical section start */
if (write_sigio_pid != -1) {
os_kill_process(write_sigio_pid, 1);
free_stack(write_sigio_stack, 0);
}
write_sigio_pid = -1;
close(sigio_private[0]);
close(sigio_private[1]);
close(write_sigio_fds[0]);
close(write_sigio_fds[1]);
/* Critical section end */
um_set_signals_trace(flags);
}
int __add_sigio_fd(int fd)
{
struct pollfd *p;
int err, i, n;
for (i = 0; i < all_sigio_fds.used; i++) {
if (all_sigio_fds.poll[i].fd == fd)
break;
}
if (i == all_sigio_fds.used)
return -ENOSPC;
p = &all_sigio_fds.poll[i];
for (i = 0; i < current_poll.used; i++) {
if (current_poll.poll[i].fd == fd)
return 0;
}
n = current_poll.used;
err = need_poll(&next_poll, n + 1);
if (err)
return err;
memcpy(next_poll.poll, current_poll.poll,
current_poll.used * sizeof(struct pollfd));
next_poll.poll[n] = *p;
next_poll.used = n + 1;
update_thread();
return 0;
}
int add_sigio_fd(int fd)
{
int err;
sigio_lock();
err = __add_sigio_fd(fd);
sigio_unlock();
return err;
}
int __ignore_sigio_fd(int fd)
{
struct pollfd *p;
int err, i, n = 0;
/*
* This is called from exitcalls elsewhere in UML - if
* sigio_cleanup has already run, then update_thread will hang
* or fail because the thread is no longer running.
*/
if (write_sigio_pid == -1)
return -EIO;
for (i = 0; i < current_poll.used; i++) {
if (current_poll.poll[i].fd == fd)
break;
}
if (i == current_poll.used)
return -ENOENT;
err = need_poll(&next_poll, current_poll.used - 1);
if (err)
return err;
for (i = 0; i < current_poll.used; i++) {
		p = &current_poll.poll[i];
if (p->fd != fd)
next_poll.poll[n++] = *p;
}
next_poll.used = current_poll.used - 1;
update_thread();
return 0;
}
int ignore_sigio_fd(int fd)
{
int err;
sigio_lock();
err = __ignore_sigio_fd(fd);
sigio_unlock();
return err;
}
static struct pollfd *setup_initial_poll(int fd)
{
struct pollfd *p;
p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
if (p == NULL) {
printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
"poll\n");
return NULL;
}
*p = ((struct pollfd) { .fd = fd,
.events = POLLIN,
.revents = 0 });
return p;
}
static void write_sigio_workaround(void)
{
struct pollfd *p;
int err;
int l_write_sigio_fds[2];
int l_sigio_private[2];
int l_write_sigio_pid;
	/* We call this *tons* of times - and most of the time we must just return without doing anything. */
sigio_lock();
l_write_sigio_pid = write_sigio_pid;
sigio_unlock();
if (l_write_sigio_pid != -1)
return;
err = os_pipe(l_write_sigio_fds, 1, 1);
if (err < 0) {
printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
"err = %d\n", -err);
return;
}
err = os_pipe(l_sigio_private, 1, 1);
if (err < 0) {
printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
"err = %d\n", -err);
goto out_close1;
}
p = setup_initial_poll(l_sigio_private[1]);
if (!p)
goto out_close2;
sigio_lock();
/*
* Did we race? Don't try to optimize this, please, it's not so likely
* to happen, and no more than once at the boot.
*/
if (write_sigio_pid != -1)
goto out_free;
current_poll = ((struct pollfds) { .poll = p,
.used = 1,
.size = 1 });
if (write_sigio_irq(l_write_sigio_fds[0]))
goto out_clear_poll;
memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));
write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
CLONE_FILES | CLONE_VM,
&write_sigio_stack);
if (write_sigio_pid < 0)
goto out_clear;
sigio_unlock();
return;
out_clear:
write_sigio_pid = -1;
write_sigio_fds[0] = -1;
write_sigio_fds[1] = -1;
sigio_private[0] = -1;
sigio_private[1] = -1;
out_clear_poll:
current_poll = ((struct pollfds) { .poll = NULL,
.size = 0,
.used = 0 });
out_free:
sigio_unlock();
kfree(p);
out_close2:
close(l_sigio_private[0]);
close(l_sigio_private[1]);
out_close1:
close(l_write_sigio_fds[0]);
close(l_write_sigio_fds[1]);
}
void sigio_broken(int fd)
{
int err;
write_sigio_workaround();
sigio_lock();
err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
if (err) {
printk(UM_KERN_ERR "maybe_sigio_broken - failed to add pollfd "
"for descriptor %d\n", fd);
goto out;
}
all_sigio_fds.poll[all_sigio_fds.used++] =
((struct pollfd) { .fd = fd,
.events = POLLIN,
.revents = 0 });
out:
sigio_unlock();
}
/* Changed during early boot */
static int pty_output_sigio;
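/*
 * Only ttys need the workaround, and only if the host's ptys failed the
 * output SIGIO test at boot.
 */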
void maybe_sigio_broken(int fd)
{
if (!isatty(fd))
return;
if (pty_output_sigio)
return;
sigio_broken(fd);
}
static void sigio_cleanup(void)
{
if (write_sigio_pid == -1)
return;
os_kill_process(write_sigio_pid, 1);
free_stack(write_sigio_stack, 0);
write_sigio_pid = -1;
}
__uml_exitcall(sigio_cleanup);
/* Used as a flag during SIGIO testing early in boot */
static int got_sigio;
static void __init handler(int sig)
{
got_sigio = 1;
}
struct openpty_arg {
int master;
int slave;
int err;
};
static void openpty_cb(void *arg)
{
struct openpty_arg *info = arg;
info->err = 0;
if (openpty(&info->master, &info->slave, NULL, NULL, NULL))
info->err = -errno;
}
static int async_pty(int master, int slave)
{
int flags;
flags = fcntl(master, F_GETFL);
if (flags < 0)
return -errno;
if ((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
(fcntl(master, F_SETOWN, os_getpid()) < 0))
return -errno;
if ((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
return -errno;
return 0;
}
static void __init check_one_sigio(void (*proc)(int, int))
{
struct sigaction old, new;
struct openpty_arg pty = { .master = -1, .slave = -1 };
int master, slave, err;
initial_thread_cb(openpty_cb, &pty);
if (pty.err) {
printk(UM_KERN_ERR "check_one_sigio failed, errno = %d\n",
-pty.err);
return;
}
master = pty.master;
slave = pty.slave;
if ((master == -1) || (slave == -1)) {
printk(UM_KERN_ERR "check_one_sigio failed to allocate a "
"pty\n");
return;
}
	/* Not now, but complain so we know where we failed. */
err = raw(master);
if (err < 0) {
printk(UM_KERN_ERR "check_one_sigio : raw failed, errno = %d\n",
-err);
return;
}
err = async_pty(master, slave);
if (err < 0) {
printk(UM_KERN_ERR "check_one_sigio : sigio_async failed, "
"err = %d\n", -err);
return;
}
if (sigaction(SIGIO, NULL, &old) < 0) {
printk(UM_KERN_ERR "check_one_sigio : sigaction 1 failed, "
"errno = %d\n", errno);
return;
}
new = old;
new.sa_handler = handler;
if (sigaction(SIGIO, &new, NULL) < 0) {
printk(UM_KERN_ERR "check_one_sigio : sigaction 2 failed, "
"errno = %d\n", errno);
return;
}
got_sigio = 0;
(*proc)(master, slave);
close(master);
close(slave);
if (sigaction(SIGIO, &old, NULL) < 0)
printk(UM_KERN_ERR "check_one_sigio : sigaction 3 failed, "
"errno = %d\n", errno);
}
static void tty_output(int master, int slave)
{
int n;
char buf[512];
printk(UM_KERN_INFO "Checking that host ptys support output SIGIO...");
memset(buf, 0, sizeof(buf));
while (write(master, buf, sizeof(buf)) > 0) ;
if (errno != EAGAIN)
printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n",
errno);
while (((n = read(slave, buf, sizeof(buf))) > 0) &&
!({ barrier(); got_sigio; }))
;
if (got_sigio) {
printk(UM_KERN_CONT "Yes\n");
pty_output_sigio = 1;
	} else if ((n < 0) && (errno == EAGAIN))
printk(UM_KERN_CONT "No, enabling workaround\n");
else
printk(UM_KERN_CONT "tty_output : read failed, err = %d\n", n);
}
static void __init check_sigio(void)
{
if ((access("/dev/ptmx", R_OK) < 0) &&
(access("/dev/ptyp0", R_OK) < 0)) {
printk(UM_KERN_WARNING "No pseudo-terminals available - "
"skipping pty SIGIO check\n");
return;
}
check_one_sigio(tty_output);
}
/* Here because it only does the SIGIO testing for now */
void __init os_check_bugs(void)
{
check_sigio();
}
| linux-master | arch/um/os-Linux/sigio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2004 PathScale, Inc
* Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <errno.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sysdep/ptrace.h>
#include <sysdep/ptrace_user.h>
#include <registers.h>
int save_registers(int pid, struct uml_pt_regs *regs)
{
int err;
err = ptrace(PTRACE_GETREGS, pid, 0, regs->gp);
if (err < 0)
return -errno;
return 0;
}
int restore_pid_registers(int pid, struct uml_pt_regs *regs)
{
int err;
err = ptrace(PTRACE_SETREGS, pid, 0, regs->gp);
if (err < 0)
return -errno;
return 0;
}
/* This is set once at boot time and not changed thereafter */
static unsigned long exec_regs[MAX_REG_NR];
static unsigned long exec_fp_regs[FP_SIZE];
int init_pid_registers(int pid)
{
int err;
err = ptrace(PTRACE_GETREGS, pid, 0, exec_regs);
if (err < 0)
return -errno;
arch_init_registers(pid);
get_fp_registers(pid, exec_fp_regs);
return 0;
}
void get_safe_registers(unsigned long *regs, unsigned long *fp_regs)
{
memcpy(regs, exec_regs, sizeof(exec_regs));
if (fp_regs)
memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs));
}
| linux-master | arch/um/os-Linux/registers.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2001 Lennert Buytenhek ([email protected]) and
* James Leu ([email protected]).
* Copyright (C) 2001 by various other people who didn't put their name here.
*/
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include "etap.h"
#include <os.h>
#include <net_user.h>
#include <um_malloc.h>
#define MAX_PACKET ETH_MAX_PACKET
static int etap_user_init(void *data, void *dev)
{
struct ethertap_data *pri = data;
pri->dev = dev;
return 0;
}
struct addr_change {
enum { ADD_ADDR, DEL_ADDR } what;
unsigned char addr[4];
unsigned char netmask[4];
};
static void etap_change(int op, unsigned char *addr, unsigned char *netmask,
int fd)
{
struct addr_change change;
char *output;
int n;
change.what = op;
memcpy(change.addr, addr, sizeof(change.addr));
memcpy(change.netmask, netmask, sizeof(change.netmask));
CATCH_EINTR(n = write(fd, &change, sizeof(change)));
if (n != sizeof(change)) {
printk(UM_KERN_ERR "etap_change - request failed, err = %d\n",
errno);
return;
}
output = uml_kmalloc(UM_KERN_PAGE_SIZE, UM_GFP_KERNEL);
if (output == NULL)
printk(UM_KERN_ERR "etap_change : Failed to allocate output "
"buffer\n");
read_output(fd, output, UM_KERN_PAGE_SIZE);
if (output != NULL) {
printk("%s", output);
kfree(output);
}
}
static void etap_open_addr(unsigned char *addr, unsigned char *netmask,
void *arg)
{
etap_change(ADD_ADDR, addr, netmask, *((int *) arg));
}
static void etap_close_addr(unsigned char *addr, unsigned char *netmask,
void *arg)
{
etap_change(DEL_ADDR, addr, netmask, *((int *) arg));
}
struct etap_pre_exec_data {
int control_remote;
int control_me;
int data_me;
};
static void etap_pre_exec(void *arg)
{
struct etap_pre_exec_data *data = arg;
dup2(data->control_remote, 1);
close(data->data_me);
close(data->control_me);
}
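/*
 * Start the uml_net helper to set up the ethertap device, then wait for
 * its one-byte status reply on the control socket.
 */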
static int etap_tramp(char *dev, char *gate, int control_me,
int control_remote, int data_me, int data_remote)
{
struct etap_pre_exec_data pe_data;
int pid, err, n;
char version_buf[sizeof("nnnnn\0")];
char data_fd_buf[sizeof("nnnnnn\0")];
char gate_buf[sizeof("nnn.nnn.nnn.nnn\0")];
char *setup_args[] = { "uml_net", version_buf, "ethertap", dev,
data_fd_buf, gate_buf, NULL };
char *nosetup_args[] = { "uml_net", version_buf, "ethertap",
dev, data_fd_buf, NULL };
char **args, c;
sprintf(data_fd_buf, "%d", data_remote);
sprintf(version_buf, "%d", UML_NET_VERSION);
if (gate != NULL) {
strncpy(gate_buf, gate, 15);
args = setup_args;
}
else args = nosetup_args;
err = 0;
pe_data.control_remote = control_remote;
pe_data.control_me = control_me;
pe_data.data_me = data_me;
pid = run_helper(etap_pre_exec, &pe_data, args);
if (pid < 0)
err = pid;
close(data_remote);
close(control_remote);
CATCH_EINTR(n = read(control_me, &c, sizeof(c)));
if (n != sizeof(c)) {
err = -errno;
printk(UM_KERN_ERR "etap_tramp : read of status failed, "
"err = %d\n", -err);
return err;
}
if (c != 1) {
printk(UM_KERN_ERR "etap_tramp : uml_net failed\n");
err = helper_wait(pid);
}
return err;
}
static int etap_open(void *data)
{
struct ethertap_data *pri = data;
char *output;
int data_fds[2], control_fds[2], err, output_len;
err = tap_open_common(pri->dev, pri->gate_addr);
if (err)
return err;
err = socketpair(AF_UNIX, SOCK_DGRAM, 0, data_fds);
if (err) {
err = -errno;
printk(UM_KERN_ERR "etap_open - data socketpair failed - "
"err = %d\n", errno);
return err;
}
err = socketpair(AF_UNIX, SOCK_STREAM, 0, control_fds);
if (err) {
err = -errno;
printk(UM_KERN_ERR "etap_open - control socketpair failed - "
"err = %d\n", errno);
goto out_close_data;
}
err = etap_tramp(pri->dev_name, pri->gate_addr, control_fds[0],
control_fds[1], data_fds[0], data_fds[1]);
output_len = UM_KERN_PAGE_SIZE;
output = uml_kmalloc(output_len, UM_GFP_KERNEL);
read_output(control_fds[0], output, output_len);
if (output == NULL)
printk(UM_KERN_ERR "etap_open : failed to allocate output "
"buffer\n");
else {
printk("%s", output);
kfree(output);
}
if (err < 0) {
printk(UM_KERN_ERR "etap_tramp failed - err = %d\n", -err);
goto out_close_control;
}
pri->data_fd = data_fds[0];
pri->control_fd = control_fds[0];
iter_addresses(pri->dev, etap_open_addr, &pri->control_fd);
return data_fds[0];
out_close_control:
close(control_fds[0]);
close(control_fds[1]);
out_close_data:
close(data_fds[0]);
close(data_fds[1]);
return err;
}
static void etap_close(int fd, void *data)
{
struct ethertap_data *pri = data;
iter_addresses(pri->dev, etap_close_addr, &pri->control_fd);
close(fd);
if (shutdown(pri->data_fd, SHUT_RDWR) < 0)
printk(UM_KERN_ERR "etap_close - shutdown data socket failed, "
"errno = %d\n", errno);
if (shutdown(pri->control_fd, SHUT_RDWR) < 0)
printk(UM_KERN_ERR "etap_close - shutdown control socket "
"failed, errno = %d\n", errno);
close(pri->data_fd);
pri->data_fd = -1;
close(pri->control_fd);
pri->control_fd = -1;
}
static void etap_add_addr(unsigned char *addr, unsigned char *netmask,
void *data)
{
struct ethertap_data *pri = data;
tap_check_ips(pri->gate_addr, addr);
if (pri->control_fd == -1)
return;
etap_open_addr(addr, netmask, &pri->control_fd);
}
static void etap_del_addr(unsigned char *addr, unsigned char *netmask,
void *data)
{
struct ethertap_data *pri = data;
if (pri->control_fd == -1)
return;
etap_close_addr(addr, netmask, &pri->control_fd);
}
const struct net_user_info ethertap_user_info = {
.init = etap_user_init,
.open = etap_open,
.close = etap_close,
.remove = NULL,
.add_address = etap_add_addr,
.delete_address = etap_del_addr,
.mtu = ETH_MAX_PACKET,
.max_packet = ETH_MAX_PACKET + ETH_HEADER_ETHERTAP,
};
| linux-master | arch/um/os-Linux/drivers/ethertap_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 Lennert Buytenhek ([email protected]) and
* James Leu ([email protected]).
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2001 by various other people who didn't put their name here.
*/
#include <linux/init.h>
#include <linux/netdevice.h>
#include "etap.h"
#include <net_kern.h>
struct ethertap_init {
char *dev_name;
char *gate_addr;
};
static void etap_init(struct net_device *dev, void *data)
{
struct uml_net_private *pri;
struct ethertap_data *epri;
struct ethertap_init *init = data;
pri = netdev_priv(dev);
epri = (struct ethertap_data *) pri->user;
epri->dev_name = init->dev_name;
epri->gate_addr = init->gate_addr;
epri->data_fd = -1;
epri->control_fd = -1;
epri->dev = dev;
printk(KERN_INFO "ethertap backend - %s", epri->dev_name);
if (epri->gate_addr != NULL)
printk(KERN_CONT ", IP = %s", epri->gate_addr);
printk(KERN_CONT "\n");
}
static int etap_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
int len;
len = net_recvfrom(fd, skb_mac_header(skb),
skb->dev->mtu + 2 + ETH_HEADER_ETHERTAP);
if (len <= 0)
return(len);
skb_pull(skb, 2);
len -= 2;
return len;
}
static int etap_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
skb_push(skb, 2);
return net_send(fd, skb->data, skb->len);
}
const struct net_kern_info ethertap_kern_info = {
.init = etap_init,
.protocol = eth_protocol,
.read = etap_read,
.write = etap_write,
};
int ethertap_setup(char *str, char **mac_out, void *data)
{
struct ethertap_init *init = data;
*init = ((struct ethertap_init)
{ .dev_name = NULL,
.gate_addr = NULL });
if (tap_setup_common(str, "ethertap", &init->dev_name, mac_out,
&init->gate_addr))
return 0;
if (init->dev_name == NULL) {
printk(KERN_ERR "ethertap_setup : Missing tap device name\n");
return 0;
}
return 1;
}
static struct transport ethertap_transport = {
.list = LIST_HEAD_INIT(ethertap_transport.list),
.name = "ethertap",
.setup = ethertap_setup,
	.user = &ethertap_user_info,
	.kern = &ethertap_kern_info,
.private_size = sizeof(struct ethertap_data),
.setup_size = sizeof(struct ethertap_init),
};
static int register_ethertap(void)
{
	register_transport(&ethertap_transport);
return 0;
}
late_initcall(register_ethertap);
| linux-master | arch/um/os-Linux/drivers/ethertap_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <linux/if_tun.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <kern_util.h>
#include <os.h>
#include "tuntap.h"
static int tuntap_user_init(void *data, void *dev)
{
struct tuntap_data *pri = data;
pri->dev = dev;
return 0;
}
static void tuntap_add_addr(unsigned char *addr, unsigned char *netmask,
void *data)
{
struct tuntap_data *pri = data;
tap_check_ips(pri->gate_addr, addr);
if ((pri->fd == -1) || pri->fixed_config)
return;
open_addr(addr, netmask, pri->dev_name);
}
static void tuntap_del_addr(unsigned char *addr, unsigned char *netmask,
void *data)
{
struct tuntap_data *pri = data;
if ((pri->fd == -1) || pri->fixed_config)
return;
close_addr(addr, netmask, pri->dev_name);
}
struct tuntap_pre_exec_data {
int stdout_fd;
int close_me;
};
static void tuntap_pre_exec(void *arg)
{
struct tuntap_pre_exec_data *data = arg;
dup2(data->stdout_fd, 1);
close(data->close_me);
}
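/*
 * Run the uml_net helper to bring the TUN/TAP interface up and receive
 * the tap file descriptor back over the Unix socket via SCM_RIGHTS.
 */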
static int tuntap_open_tramp(char *gate, int *fd_out, int me, int remote,
char *buffer, int buffer_len, int *used_out)
{
struct tuntap_pre_exec_data data;
char version_buf[sizeof("nnnnn\0")];
char *argv[] = { "uml_net", version_buf, "tuntap", "up", gate,
NULL };
char buf[CMSG_SPACE(sizeof(*fd_out))];
struct msghdr msg;
struct cmsghdr *cmsg;
struct iovec iov;
int pid, n, err;
sprintf(version_buf, "%d", UML_NET_VERSION);
data.stdout_fd = remote;
data.close_me = me;
pid = run_helper(tuntap_pre_exec, &data, argv);
if (pid < 0)
return pid;
close(remote);
msg.msg_name = NULL;
msg.msg_namelen = 0;
if (buffer != NULL) {
iov = ((struct iovec) { buffer, buffer_len });
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
}
else {
msg.msg_iov = NULL;
msg.msg_iovlen = 0;
}
msg.msg_control = buf;
msg.msg_controllen = sizeof(buf);
msg.msg_flags = 0;
n = recvmsg(me, &msg, 0);
*used_out = n;
if (n < 0) {
err = -errno;
printk(UM_KERN_ERR "tuntap_open_tramp : recvmsg failed - "
"errno = %d\n", errno);
return err;
}
helper_wait(pid);
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
printk(UM_KERN_ERR "tuntap_open_tramp : didn't receive a "
"message\n");
return -EINVAL;
}
if ((cmsg->cmsg_level != SOL_SOCKET) ||
(cmsg->cmsg_type != SCM_RIGHTS)) {
printk(UM_KERN_ERR "tuntap_open_tramp : didn't receive a "
"descriptor\n");
return -EINVAL;
}
*fd_out = ((int *) CMSG_DATA(cmsg))[0];
os_set_exec_close(*fd_out);
return 0;
}
static int tuntap_open(void *data)
{
struct ifreq ifr;
struct tuntap_data *pri = data;
char *output, *buffer;
int err, fds[2], len, used;
err = tap_open_common(pri->dev, pri->gate_addr);
if (err < 0)
return err;
if (pri->fixed_config) {
pri->fd = os_open_file("/dev/net/tun",
of_cloexec(of_rdwr(OPENFLAGS())), 0);
if (pri->fd < 0) {
printk(UM_KERN_ERR "Failed to open /dev/net/tun, "
"err = %d\n", -pri->fd);
return pri->fd;
}
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
strscpy(ifr.ifr_name, pri->dev_name, sizeof(ifr.ifr_name));
if (ioctl(pri->fd, TUNSETIFF, &ifr) < 0) {
err = -errno;
printk(UM_KERN_ERR "TUNSETIFF failed, errno = %d\n",
errno);
close(pri->fd);
return err;
}
}
else {
err = socketpair(AF_UNIX, SOCK_DGRAM, 0, fds);
if (err) {
err = -errno;
printk(UM_KERN_ERR "tuntap_open : socketpair failed - "
"errno = %d\n", errno);
return err;
}
buffer = get_output_buffer(&len);
if (buffer != NULL)
len--;
used = 0;
err = tuntap_open_tramp(pri->gate_addr, &pri->fd, fds[0],
fds[1], buffer, len, &used);
output = buffer;
if (err < 0) {
printk("%s", output);
free_output_buffer(buffer);
printk(UM_KERN_ERR "tuntap_open_tramp failed - "
"err = %d\n", -err);
return err;
}
pri->dev_name = uml_strdup(buffer);
output += IFNAMSIZ;
printk("%s", output);
free_output_buffer(buffer);
close(fds[0]);
iter_addresses(pri->dev, open_addr, pri->dev_name);
}
return pri->fd;
}
static void tuntap_close(int fd, void *data)
{
struct tuntap_data *pri = data;
if (!pri->fixed_config)
iter_addresses(pri->dev, close_addr, pri->dev_name);
close(fd);
pri->fd = -1;
}
const struct net_user_info tuntap_user_info = {
.init = tuntap_user_init,
.open = tuntap_open,
.close = tuntap_close,
.remove = NULL,
.add_address = tuntap_add_addr,
.delete_address = tuntap_del_addr,
.mtu = ETH_MAX_PACKET,
.max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER,
};
| linux-master | arch/um/os-Linux/drivers/tuntap_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <asm/errno.h>
#include <net_kern.h>
#include "tuntap.h"
struct tuntap_init {
char *dev_name;
char *gate_addr;
};
static void tuntap_init(struct net_device *dev, void *data)
{
struct uml_net_private *pri;
struct tuntap_data *tpri;
struct tuntap_init *init = data;
pri = netdev_priv(dev);
tpri = (struct tuntap_data *) pri->user;
tpri->dev_name = init->dev_name;
tpri->fixed_config = (init->dev_name != NULL);
tpri->gate_addr = init->gate_addr;
tpri->fd = -1;
tpri->dev = dev;
printk(KERN_INFO "TUN/TAP backend - ");
if (tpri->gate_addr != NULL)
printk(KERN_CONT "IP = %s", tpri->gate_addr);
printk(KERN_CONT "\n");
}
static int tuntap_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return net_read(fd, skb_mac_header(skb),
skb->dev->mtu + ETH_HEADER_OTHER);
}
static int tuntap_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return net_write(fd, skb->data, skb->len);
}
const struct net_kern_info tuntap_kern_info = {
.init = tuntap_init,
.protocol = eth_protocol,
.read = tuntap_read,
.write = tuntap_write,
};
int tuntap_setup(char *str, char **mac_out, void *data)
{
struct tuntap_init *init = data;
*init = ((struct tuntap_init)
{ .dev_name = NULL,
.gate_addr = NULL });
if (tap_setup_common(str, "tuntap", &init->dev_name, mac_out,
&init->gate_addr))
return 0;
return 1;
}
static struct transport tuntap_transport = {
.list = LIST_HEAD_INIT(tuntap_transport.list),
.name = "tuntap",
.setup = tuntap_setup,
.user = &tuntap_user_info,
.kern = &tuntap_kern_info,
.private_size = sizeof(struct tuntap_data),
.setup_size = sizeof(struct tuntap_init),
};
static int register_tuntap(void)
{
register_transport(&tuntap_transport);
return 0;
}
late_initcall(register_tuntap);
| linux-master | arch/um/os-Linux/drivers/tuntap_kern.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Thomas Meyer ([email protected])
* Copyright (C) 2002- 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include <as-layout.h>
#include <init.h>
#include <kern_util.h>
#include <mem.h>
#include <os.h>
#include <ptrace_user.h>
#include <registers.h>
#include <skas.h>
#include <sysdep/stub.h>
#include <linux/threads.h>
int is_skas_winch(int pid, int fd, void *data)
{
return pid == getpgrp();
}
static const char *ptrace_reg_name(int idx)
{
#define R(n) case HOST_##n: return #n
switch (idx) {
#ifdef __x86_64__
R(BX);
R(CX);
R(DI);
R(SI);
R(DX);
R(BP);
R(AX);
R(R8);
R(R9);
R(R10);
R(R11);
R(R12);
R(R13);
R(R14);
R(R15);
R(ORIG_AX);
R(CS);
R(SS);
R(EFLAGS);
#elif defined(__i386__)
R(IP);
R(SP);
R(EFLAGS);
R(AX);
R(BX);
R(CX);
R(DX);
R(SI);
R(DI);
R(BP);
R(CS);
R(SS);
R(DS);
R(FS);
R(ES);
R(GS);
R(ORIG_AX);
#endif
}
return "";
}
static int ptrace_dump_regs(int pid)
{
unsigned long regs[MAX_REG_NR];
int i;
if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
return -errno;
printk(UM_KERN_ERR "Stub registers -\n");
for (i = 0; i < ARRAY_SIZE(regs); i++) {
const char *regname = ptrace_reg_name(i);
printk(UM_KERN_ERR "\t%s\t(%2d): %lx\n", regname, i, regs[i]);
}
return 0;
}
/*
* Signals that are OK to receive in the stub - we'll just continue it.
* SIGWINCH will happen when UML is inside a detached screen.
*/
#define STUB_SIG_MASK ((1 << SIGALRM) | (1 << SIGWINCH))
/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)
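/*
 * Wait for the stub process to stop with SIGTRAP, continuing it through
 * any benign signals (SIGALRM, SIGWINCH). Stopping with anything else
 * means the stub is broken, so dump its registers and give up.
 */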
void wait_stub_done(int pid)
{
int n, status, err;
while (1) {
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
if ((n < 0) || !WIFSTOPPED(status))
goto bad_wait;
if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
break;
err = ptrace(PTRACE_CONT, pid, 0, 0);
if (err) {
printk(UM_KERN_ERR "%s : continue failed, errno = %d\n",
__func__, errno);
fatal_sigsegv();
}
}
if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
return;
bad_wait:
err = ptrace_dump_regs(pid);
if (err)
printk(UM_KERN_ERR "Failed to get registers from stub, errno = %d\n",
-err);
printk(UM_KERN_ERR "%s : failed to wait for SIGTRAP, pid = %d, n = %d, errno = %d, status = 0x%x\n",
__func__, pid, n, errno, status);
fatal_sigsegv();
}
extern unsigned long current_stub_stack(void);
static void get_skas_faultinfo(int pid, struct faultinfo *fi, unsigned long *aux_fp_regs)
{
int err;
err = get_fp_registers(pid, aux_fp_regs);
if (err < 0) {
		printk(UM_KERN_ERR "get_fp_registers returned %d\n",
err);
fatal_sigsegv();
}
err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
if (err) {
printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
"errno = %d\n", pid, errno);
fatal_sigsegv();
}
wait_stub_done(pid);
/*
* faultinfo is prepared by the stub_segv_handler at start of
* the stub stack page. We just have to copy it.
*/
memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
err = put_fp_registers(pid, aux_fp_regs);
if (err < 0) {
printk(UM_KERN_ERR "put_fp_registers returned %d\n",
err);
fatal_sigsegv();
}
}
static void handle_segv(int pid, struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
{
	get_skas_faultinfo(pid, &regs->faultinfo, aux_fp_regs);
segv(regs->faultinfo, 0, 1, NULL);
}
/*
 * To use the same value of using_sysemu as the caller, have the caller
 * pass that value in (as local_using_sysemu).
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
int local_using_sysemu)
{
int err, status;
if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
fatal_sigsegv();
if (!local_using_sysemu)
{
err = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_NR_OFFSET,
__NR_getpid);
if (err < 0) {
printk(UM_KERN_ERR "%s - nullifying syscall failed, errno = %d\n",
__func__, errno);
fatal_sigsegv();
}
err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
if (err < 0) {
printk(UM_KERN_ERR "%s - continuing to end of syscall failed, errno = %d\n",
__func__, errno);
fatal_sigsegv();
}
CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
if ((err < 0) || !WIFSTOPPED(status) ||
(WSTOPSIG(status) != SIGTRAP + 0x80)) {
err = ptrace_dump_regs(pid);
if (err)
printk(UM_KERN_ERR "Failed to get registers from process, errno = %d\n",
-err);
printk(UM_KERN_ERR "%s - failed to wait at end of syscall, errno = %d, status = %d\n",
__func__, errno, status);
fatal_sigsegv();
}
}
handle_syscall(regs);
}
extern char __syscall_stub_start[];
/**
* userspace_tramp() - userspace trampoline
* @stack: pointer to the new userspace stack page, can be NULL, if? FIXME:
*
* The userspace trampoline is used to setup a new userspace process in start_userspace() after it was clone()'ed.
* This function will run on a temporary stack page.
 * It ptrace()'es itself, then two pages are mapped into the userspace
 * address space:
* - STUB_CODE (with EXEC), which contains the skas stub code
* - STUB_DATA (with R/W), which contains a data page that is used to transfer certain data between the UML userspace process and the UML kernel.
* Also for the userspace process a SIGSEGV handler is installed to catch pagefaults in the userspace process.
* And last the process stops itself to give control to the UML kernel for this userspace process.
*
 * Return: Always zero; on failure the new userspace process is terminated with a non-zero exit() call instead.
*/
static int userspace_tramp(void *stack)
{
void *addr;
int fd;
unsigned long long offset;
ptrace(PTRACE_TRACEME, 0, 0, 0);
signal(SIGTERM, SIG_DFL);
signal(SIGWINCH, SIG_IGN);
fd = phys_mapping(uml_to_phys(__syscall_stub_start), &offset);
addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
if (addr == MAP_FAILED) {
printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, errno = %d\n",
STUB_CODE, errno);
exit(1);
}
if (stack != NULL) {
fd = phys_mapping(uml_to_phys(stack), &offset);
addr = mmap((void *) STUB_DATA,
STUB_DATA_PAGES * UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_SHARED, fd, offset);
if (addr == MAP_FAILED) {
printk(UM_KERN_ERR "mapping segfault stack at 0x%lx failed, errno = %d\n",
STUB_DATA, errno);
exit(1);
}
}
if (stack != NULL) {
struct sigaction sa;
unsigned long v = STUB_CODE +
(unsigned long) stub_segv_handler -
(unsigned long) __syscall_stub_start;
set_sigstack((void *) STUB_DATA, STUB_DATA_PAGES * UM_KERN_PAGE_SIZE);
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO;
sa.sa_sigaction = (void *) v;
sa.sa_restorer = NULL;
if (sigaction(SIGSEGV, &sa, NULL) < 0) {
printk(UM_KERN_ERR "%s - setting SIGSEGV handler failed - errno = %d\n",
__func__, errno);
exit(1);
}
}
kill(os_getpid(), SIGSTOP);
return 0;
}
int userspace_pid[NR_CPUS];
int kill_userspace_mm[NR_CPUS];
/**
* start_userspace() - prepare a new userspace process
* @stub_stack: pointer to the stub stack. Can be NULL, if? FIXME:
*
 * Sets up a new temporary stack page that is used while userspace_tramp()
 * runs, then clones the kernel process into a new userspace process that
 * shares only file descriptors (CLONE_FILES) with it.
*
* Return: When positive: the process id of the new userspace process,
* when negative: an error number.
* FIXME: can PIDs become negative?!
*/
int start_userspace(unsigned long stub_stack)
{
void *stack;
unsigned long sp;
int pid, status, n, flags, err;
/* setup a temporary stack page */
stack = mmap(NULL, UM_KERN_PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (stack == MAP_FAILED) {
err = -errno;
printk(UM_KERN_ERR "%s : mmap failed, errno = %d\n",
__func__, errno);
return err;
}
/* set stack pointer to the end of the stack page, so it can grow downwards */
sp = (unsigned long)stack + UM_KERN_PAGE_SIZE;
flags = CLONE_FILES | SIGCHLD;
/* clone into new userspace process */
pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
if (pid < 0) {
err = -errno;
printk(UM_KERN_ERR "%s : clone failed, errno = %d\n",
__func__, errno);
return err;
}
do {
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
if (n < 0) {
err = -errno;
printk(UM_KERN_ERR "%s : wait failed, errno = %d\n",
__func__, errno);
goto out_kill;
}
} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGALRM));
if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
err = -EINVAL;
printk(UM_KERN_ERR "%s : expected SIGSTOP, got status = %d\n",
__func__, status);
goto out_kill;
}
if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
(void *) PTRACE_O_TRACESYSGOOD) < 0) {
err = -errno;
printk(UM_KERN_ERR "%s : PTRACE_OLDSETOPTIONS failed, errno = %d\n",
__func__, errno);
goto out_kill;
}
if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
err = -errno;
printk(UM_KERN_ERR "%s : munmap failed, errno = %d\n",
__func__, errno);
goto out_kill;
}
return pid;
out_kill:
os_kill_ptraced_process(pid, 1);
return err;
}
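/*
 * Core of the userspace execution loop: load the task's registers into
 * the stub process, let it run under ptrace, wait for it to stop, copy
 * the registers back, and dispatch on the signal that stopped it (page
 * fault, syscall trap, timer, IO, ...).
 */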
void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
{
int err, status, op, pid = userspace_pid[0];
/* To prevent races if using_sysemu changes under us.*/
int local_using_sysemu;
siginfo_t si;
/* Handle any immediate reschedules or signals */
interrupt_end();
while (1) {
if (kill_userspace_mm[0])
fatal_sigsegv();
/*
* This can legitimately fail if the process loads a
* bogus value into a segment register. It will
* segfault and PTRACE_GETREGS will read that value
* out of the process. However, PTRACE_SETREGS will
* fail. In this case, there is nothing to do but
* just kill the process.
*/
if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) {
printk(UM_KERN_ERR "%s - ptrace set regs failed, errno = %d\n",
__func__, errno);
fatal_sigsegv();
}
if (put_fp_registers(pid, regs->fp)) {
printk(UM_KERN_ERR "%s - ptrace set fp regs failed, errno = %d\n",
__func__, errno);
fatal_sigsegv();
}
/* Now we set local_using_sysemu to be used for one loop */
local_using_sysemu = get_using_sysemu();
op = SELECT_PTRACE_OPERATION(local_using_sysemu,
singlestepping(NULL));
if (ptrace(op, pid, 0, 0)) {
printk(UM_KERN_ERR "%s - ptrace continue failed, op = %d, errno = %d\n",
__func__, op, errno);
fatal_sigsegv();
}
CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
if (err < 0) {
printk(UM_KERN_ERR "%s - wait failed, errno = %d\n",
__func__, errno);
fatal_sigsegv();
}
regs->is_user = 1;
if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
printk(UM_KERN_ERR "%s - PTRACE_GETREGS failed, errno = %d\n",
__func__, errno);
fatal_sigsegv();
}
if (get_fp_registers(pid, regs->fp)) {
printk(UM_KERN_ERR "%s - get_fp_registers failed, errno = %d\n",
__func__, errno);
fatal_sigsegv();
}
UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */
if (WIFSTOPPED(status)) {
int sig = WSTOPSIG(status);
			/* These signal handlers need the si argument.
			 * The SIGIO and SIGALRM handlers, which constitute
			 * the majority of invocations, do not use it.
*/
switch (sig) {
case SIGSEGV:
case SIGTRAP:
case SIGILL:
case SIGBUS:
case SIGFPE:
case SIGWINCH:
ptrace(PTRACE_GETSIGINFO, pid, 0, (struct siginfo *)&si);
break;
}
switch (sig) {
case SIGSEGV:
if (PTRACE_FULL_FAULTINFO) {
get_skas_faultinfo(pid,
					&regs->faultinfo, aux_fp_regs);
(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
regs);
}
else handle_segv(pid, regs, aux_fp_regs);
break;
case SIGTRAP + 0x80:
handle_trap(pid, regs, local_using_sysemu);
break;
case SIGTRAP:
relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
break;
case SIGALRM:
break;
case SIGIO:
case SIGILL:
case SIGBUS:
case SIGFPE:
case SIGWINCH:
block_signals_trace();
(*sig_info[sig])(sig, (struct siginfo *)&si, regs);
unblock_signals_trace();
break;
default:
printk(UM_KERN_ERR "%s - child stopped with signal %d\n",
__func__, sig);
fatal_sigsegv();
}
pid = userspace_pid[0];
interrupt_end();
/* Avoid -ERESTARTSYS handling in host */
if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
PT_SYSCALL_NR(regs->gp) = -1;
}
}
}
static unsigned long thread_regs[MAX_REG_NR];
static unsigned long thread_fp_regs[FP_SIZE];
static int __init init_thread_regs(void)
{
get_safe_registers(thread_regs, thread_fp_regs);
/* Set parent's instruction pointer to start of clone-stub */
thread_regs[REGS_IP_INDEX] = STUB_CODE +
(unsigned long) stub_clone_handler -
(unsigned long) __syscall_stub_start;
thread_regs[REGS_SP_INDEX] = STUB_DATA + STUB_DATA_PAGES * UM_KERN_PAGE_SIZE -
sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
return 0;
}
__initcall(init_thread_regs);
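/*
 * Create the host process for a new address space: load the clone-stub
 * register state into the existing stub process, continue it so that it
 * clone()s itself, then read the new child's pid (or an error) back from
 * the parent's stub data page and check the child's status as well.
 */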
int copy_context_skas0(unsigned long new_stack, int pid)
{
int err;
unsigned long current_stack = current_stub_stack();
struct stub_data *data = (struct stub_data *) current_stack;
struct stub_data *child_data = (struct stub_data *) new_stack;
unsigned long long new_offset;
int new_fd = phys_mapping(uml_to_phys((void *)new_stack), &new_offset);
/*
* prepare offset and fd of child's stack as argument for parent's
* and child's mmap2 calls
*/
*data = ((struct stub_data) {
.offset = MMAP_OFFSET(new_offset),
.fd = new_fd,
.parent_err = -ESRCH,
.child_err = 0,
});
*child_data = ((struct stub_data) {
.child_err = -ESRCH,
});
err = ptrace_setregs(pid, thread_regs);
if (err < 0) {
err = -errno;
printk(UM_KERN_ERR "%s : PTRACE_SETREGS failed, pid = %d, errno = %d\n",
__func__, pid, -err);
return err;
}
err = put_fp_registers(pid, thread_fp_regs);
if (err < 0) {
printk(UM_KERN_ERR "%s : put_fp_registers failed, pid = %d, err = %d\n",
__func__, pid, err);
return err;
}
/*
* Wait, until parent has finished its work: read child's pid from
* parent's stack, and check, if bad result.
*/
err = ptrace(PTRACE_CONT, pid, 0, 0);
if (err) {
err = -errno;
printk(UM_KERN_ERR "Failed to continue new process, pid = %d, errno = %d\n",
pid, errno);
return err;
}
wait_stub_done(pid);
pid = data->parent_err;
if (pid < 0) {
printk(UM_KERN_ERR "%s - stub-parent reports error %d\n",
__func__, -pid);
return pid;
}
/*
* Wait, until child has finished too: read child's result from
* child's stack and check it.
*/
wait_stub_done(pid);
if (child_data->child_err != STUB_DATA) {
printk(UM_KERN_ERR "%s - stub-child %d reports error %ld\n",
__func__, pid, data->child_err);
err = data->child_err;
goto out_kill;
}
if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
(void *)PTRACE_O_TRACESYSGOOD) < 0) {
err = -errno;
printk(UM_KERN_ERR "%s : PTRACE_OLDSETOPTIONS failed, errno = %d\n",
__func__, errno);
goto out_kill;
}
return pid;
out_kill:
os_kill_ptraced_process(pid, 1);
return err;
}
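/*
 * Prepare a jump buffer so that the first switch to this thread starts
 * executing handler() near the top of the given kernel stack.
 */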
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
(*buf)[0].JB_IP = (unsigned long) handler;
(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
sizeof(void *);
}
#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3
void switch_threads(jmp_buf *me, jmp_buf *you)
{
if (UML_SETJMP(me) == 0)
UML_LONGJMP(you, 1);
}
static jmp_buf initial_jmpbuf;
/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
int n;
set_handler(SIGWINCH);
/*
* Can't use UML_SETJMP or UML_LONGJMP here because they save
* and restore signals, with the possible side-effect of
* trying to handle any signals which came when they were
* blocked, which can't be done on this stack.
* Signals must be blocked when jumping back here and restored
* after returning to the jumper.
*/
n = setjmp(initial_jmpbuf);
switch (n) {
case INIT_JMP_NEW_THREAD:
(*switch_buf)[0].JB_IP = (unsigned long) uml_finishsetup;
(*switch_buf)[0].JB_SP = (unsigned long) stack +
UM_THREAD_SIZE - sizeof(void *);
break;
case INIT_JMP_CALLBACK:
(*cb_proc)(cb_arg);
longjmp(*cb_back, 1);
break;
case INIT_JMP_HALT:
kmalloc_ok = 0;
return 0;
case INIT_JMP_REBOOT:
kmalloc_ok = 0;
return 1;
default:
printk(UM_KERN_ERR "Bad sigsetjmp return in %s - %d\n",
__func__, n);
fatal_sigsegv();
}
longjmp(*switch_buf, 1);
/* unreachable */
printk(UM_KERN_ERR "impossible long jump!");
fatal_sigsegv();
return 0;
}
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
jmp_buf here;
cb_proc = proc;
cb_arg = arg;
cb_back = &here;
block_signals_trace();
if (UML_SETJMP(&here) == 0)
UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
unblock_signals_trace();
cb_proc = NULL;
cb_arg = NULL;
cb_back = NULL;
}
void halt_skas(void)
{
block_signals_trace();
UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}
static bool noreboot;
static int __init noreboot_cmd_param(char *str, int *add)
{
noreboot = true;
return 0;
}
__uml_setup("noreboot", noreboot_cmd_param,
"noreboot\n"
" Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n"
" This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n"
" crashes in CI\n");
void reboot_skas(void)
{
block_signals_trace();
UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
}
void __switch_mm(struct mm_id *mm_idp)
{
userspace_pid[0] = mm_idp->u.pid;
kill_userspace_mm[0] = mm_idp->kill;
}
| linux-master | arch/um/os-Linux/skas/process.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <stddef.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <init.h>
#include <as-layout.h>
#include <mm_id.h>
#include <os.h>
#include <ptrace_user.h>
#include <registers.h>
#include <skas.h>
#include <sysdep/ptrace.h>
#include <sysdep/stub.h>
extern char batch_syscall_stub[], __syscall_stub_start[];
extern void wait_stub_done(int pid);
static inline unsigned long *check_init_stack(struct mm_id * mm_idp,
unsigned long *stack)
{
if (stack == NULL) {
stack = (unsigned long *) mm_idp->stack + 2;
*stack = 0;
}
return stack;
}
static unsigned long syscall_regs[MAX_REG_NR];
static int __init init_syscall_regs(void)
{
get_safe_registers(syscall_regs, NULL);
syscall_regs[REGS_IP_INDEX] = STUB_CODE +
((unsigned long) batch_syscall_stub -
(unsigned long) __syscall_stub_start);
syscall_regs[REGS_SP_INDEX] = STUB_DATA;
return 0;
}
__initcall(init_syscall_regs);
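/*
 * Flush the syscall batch accumulated on the stub's data page: point the
 * stub at batch_syscall_stub, continue it, and afterwards read the return
 * value plus the offset of the first failing record (if any) back from
 * the start of that page.
 */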
static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
{
int n, i;
long ret, offset;
unsigned long * data;
unsigned long * syscall;
int err, pid = mm_idp->u.pid;
n = ptrace_setregs(pid, syscall_regs);
if (n < 0) {
printk(UM_KERN_ERR "Registers - \n");
for (i = 0; i < MAX_REG_NR; i++)
printk(UM_KERN_ERR "\t%d\t0x%lx\n", i, syscall_regs[i]);
panic("%s : PTRACE_SETREGS failed, errno = %d\n",
__func__, -n);
}
err = ptrace(PTRACE_CONT, pid, 0, 0);
if (err)
panic("Failed to continue stub, pid = %d, errno = %d\n", pid,
errno);
wait_stub_done(pid);
/*
* When the stub stops, we find the following values on the
* beginning of the stack:
* (long )return_value
	 * (long )offset to failed syscall-data (0, if no error)
*/
ret = *((unsigned long *) mm_idp->stack);
offset = *((unsigned long *) mm_idp->stack + 1);
if (offset) {
data = (unsigned long *)(mm_idp->stack + offset - STUB_DATA);
printk(UM_KERN_ERR "%s : ret = %ld, offset = %ld, data = %p\n",
__func__, ret, offset, data);
syscall = (unsigned long *)((unsigned long)data + data[0]);
printk(UM_KERN_ERR "%s: syscall %ld failed, return value = 0x%lx, expected return value = 0x%lx\n",
__func__, syscall[0], ret, syscall[7]);
printk(UM_KERN_ERR " syscall parameters: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
syscall[1], syscall[2], syscall[3],
syscall[4], syscall[5], syscall[6]);
for (n = 1; n < data[0]/sizeof(long); n++) {
if (n == 1)
printk(UM_KERN_ERR " additional syscall data:");
if (n % 4 == 1)
printk("\n" UM_KERN_ERR " ");
printk(" 0x%lx", data[n]);
}
if (n > 1)
printk("\n");
}
else ret = 0;
*addr = check_init_stack(mm_idp, NULL);
return ret;
}
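/*
 * Append one syscall to the batch on the stub's data page. Roughly, each
 * record is: a length word covering any extra data stored earlier by
 * syscall_stub_data(), that data, the syscall number, its six arguments
 * and the expected return value; a zero length word terminates the
 * batch. The batch is flushed via do_syscall_stub() once "done" is set
 * or the page is nearly full.
 */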
long run_syscall_stub(struct mm_id * mm_idp, int syscall,
unsigned long *args, long expected, void **addr,
int done)
{
unsigned long *stack = check_init_stack(mm_idp, *addr);
*stack += sizeof(long);
stack += *stack / sizeof(long);
*stack++ = syscall;
*stack++ = args[0];
*stack++ = args[1];
*stack++ = args[2];
*stack++ = args[3];
*stack++ = args[4];
*stack++ = args[5];
*stack++ = expected;
*stack = 0;
if (!done && ((((unsigned long) stack) & ~UM_KERN_PAGE_MASK) <
UM_KERN_PAGE_SIZE - 10 * sizeof(long))) {
*addr = stack;
return 0;
}
return do_syscall_stub(mm_idp, addr);
}
long syscall_stub_data(struct mm_id * mm_idp,
unsigned long *data, int data_count,
void **addr, void **stub_addr)
{
unsigned long *stack;
int ret = 0;
/*
* If *addr still is uninitialized, it *must* contain NULL.
* Thus in this case do_syscall_stub correctly won't be called.
*/
if ((((unsigned long) *addr) & ~UM_KERN_PAGE_MASK) >=
UM_KERN_PAGE_SIZE - (10 + data_count) * sizeof(long)) {
ret = do_syscall_stub(mm_idp, addr);
/* in case of error, don't overwrite data on stack */
if (ret)
return ret;
}
stack = check_init_stack(mm_idp, *addr);
*addr = stack;
*stack = data_count * sizeof(long);
memcpy(stack + 1, data, data_count * sizeof(long));
*stub_addr = (void *)(((unsigned long)(stack + 1) &
~UM_KERN_PAGE_MASK) + STUB_DATA);
return 0;
}
int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len, int prot,
int phys_fd, unsigned long long offset, int done, void **data)
{
int ret;
unsigned long args[] = { virt, len, prot,
MAP_SHARED | MAP_FIXED, phys_fd,
MMAP_OFFSET(offset) };
ret = run_syscall_stub(mm_idp, STUB_MMAP_NR, args, virt,
data, done);
return ret;
}
int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
int done, void **data)
{
int ret;
unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0,
0 };
ret = run_syscall_stub(mm_idp, __NR_munmap, args, 0,
data, done);
return ret;
}
int protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
unsigned int prot, int done, void **data)
{
int ret;
unsigned long args[] = { addr, len, prot, 0, 0, 0 };
ret = run_syscall_stub(mm_idp, __NR_mprotect, args, 0,
data, done);
return ret;
}
| linux-master | arch/um/os-Linux/skas/mem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Richard Weinberger <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/init.h>
#include <os.h>
static void early_console_write(struct console *con, const char *s, unsigned int n)
{
um_early_printk(s, n);
}
static struct console early_console_dev = {
.name = "earlycon",
.write = early_console_write,
.flags = CON_BOOT,
.index = -1,
};
static int __init setup_early_printk(char *buf)
{
if (!early_console) {
early_console = &early_console_dev;
register_console(&early_console_dev);
}
return 0;
}
early_param("earlyprintk", setup_early_printk);
| linux-master | arch/um/kernel/early_printk.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
* Copyright (C) 2015 Thomas Meyer ([email protected])
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright 2003 PathScale, Inc.
*/
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/resume_user_mode.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <registers.h>
#include <linux/time-internal.h>
#include <linux/elfcore.h>
/*
* This is a per-cpu array. A processor only modifies its entry and it only
* cares about its entry, so it's OK if another processor is modifying its
* entry.
*/
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
static inline int external_pid(void)
{
/* FIXME: Need to look up userspace_pid by cpu */
return userspace_pid[0];
}
int pid_to_processor_id(int pid)
{
int i;
for (i = 0; i < ncpus; i++) {
if (cpu_tasks[i].pid == pid)
return i;
}
return -1;
}
void free_stack(unsigned long stack, int order)
{
free_pages(stack, order);
}
unsigned long alloc_stack(int order, int atomic)
{
unsigned long page;
gfp_t flags = GFP_KERNEL;
if (atomic)
flags = GFP_ATOMIC;
page = __get_free_pages(flags, order);
return page;
}
static inline void set_current(struct task_struct *task)
{
cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
{ external_pid(), task });
}
extern void arch_switch_to(struct task_struct *to);
void *__switch_to(struct task_struct *from, struct task_struct *to)
{
to->thread.prev_sched = from;
set_current(to);
switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
arch_switch_to(current);
return current->thread.prev_sched;
}
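/*
 * Run any work that must happen before returning to userspace:
 * reschedule if needed, deliver pending signals and handle
 * notify-resume work.
 */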
void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;
if (need_resched())
schedule();
if (test_thread_flag(TIF_SIGPENDING) ||
test_thread_flag(TIF_NOTIFY_SIGNAL))
do_signal(regs);
if (test_thread_flag(TIF_NOTIFY_RESUME))
resume_user_mode_work(regs);
}
int get_current_pid(void)
{
return task_pid_nr(current);
}
/*
* This is called magically, by its address being stuffed in a jmp_buf
* and being longjmp-d to.
*/
void new_thread_handler(void)
{
int (*fn)(void *), n;
void *arg;
if (current->thread.prev_sched != NULL)
schedule_tail(current->thread.prev_sched);
current->thread.prev_sched = NULL;
fn = current->thread.request.u.thread.proc;
arg = current->thread.request.u.thread.arg;
/*
* callback returns only if the kernel thread execs a process
*/
n = fn(arg);
	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
force_flush_all();
schedule_tail(current->thread.prev_sched);
/*
* XXX: if interrupt_end() calls schedule, this call to
* arch_switch_to isn't needed. We could want to apply this to
* improve performance. -bb
*/
arch_switch_to(current);
current->thread.prev_sched = NULL;
	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
int copy_thread(struct task_struct * p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long sp = args->stack;
unsigned long tls = args->tls;
void (*handler)(void);
int ret = 0;
p->thread = (struct thread_struct) INIT_THREAD;
if (!args->fn) {
memcpy(&p->thread.regs.regs, current_pt_regs(),
sizeof(p->thread.regs.regs));
PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
if (sp != 0)
REGS_SP(p->thread.regs.regs.gp) = sp;
handler = fork_handler;
		arch_copy_thread(&current->thread.arch, &p->thread.arch);
} else {
get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
p->thread.request.u.thread.proc = args->fn;
p->thread.request.u.thread.arg = args->fn_arg;
handler = new_thread_handler;
}
new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
if (!args->fn) {
clear_flushed_tls(p);
/*
* Set a new TLS for the child thread?
*/
if (clone_flags & CLONE_SETTLS)
ret = arch_set_tls(p, tls);
}
return ret;
}
void initial_thread_cb(void (*proc)(void *), void *arg)
{
int save_kmalloc_ok = kmalloc_ok;
kmalloc_ok = 0;
initial_thread_cb_skas(proc, arg);
kmalloc_ok = save_kmalloc_ok;
}
void um_idle_sleep(void)
{
if (time_travel_mode != TT_MODE_OFF)
time_travel_sleep();
else
os_idle_sleep();
}
void arch_cpu_idle(void)
{
cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
um_idle_sleep();
}
int __cant_sleep(void) {
return in_atomic() || irqs_disabled() || in_interrupt();
/* Is in_interrupt() really needed? */
}
int user_context(unsigned long sp)
{
unsigned long stack;
stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
return stack != (unsigned long) current_thread_info();
}
extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
void do_uml_exitcalls(void)
{
exitcall_t *call;
call = &__uml_exitcall_end;
while (--call >= &__uml_exitcall_begin)
(*call)();
}
char *uml_strdup(const char *string)
{
return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);
int copy_to_user_proc(void __user *to, void *from, int size)
{
return copy_to_user(to, from, size);
}
int copy_from_user_proc(void *to, void __user *from, int size)
{
return copy_from_user(to, from, size);
}
int clear_user_proc(void __user *buf, int size)
{
return clear_user(buf, size);
}
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;
void set_using_sysemu(int value)
{
if (value > sysemu_supported)
return;
atomic_set(&using_sysemu, value);
}
int get_using_sysemu(void)
{
return atomic_read(&using_sysemu);
}
static int sysemu_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%d\n", get_using_sysemu());
return 0;
}
static int sysemu_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, sysemu_proc_show, NULL);
}
static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
char tmp[2];
if (copy_from_user(tmp, buf, 1))
return -EFAULT;
if (tmp[0] >= '0' && tmp[0] <= '2')
set_using_sysemu(tmp[0] - '0');
/* We use the first char, but pretend to write everything */
return count;
}
static const struct proc_ops sysemu_proc_ops = {
.proc_open = sysemu_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = sysemu_proc_write,
};
int __init make_proc_sysemu(void)
{
struct proc_dir_entry *ent;
if (!sysemu_supported)
return 0;
ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);
if (ent == NULL)
{
printk(KERN_WARNING "Failed to register /proc/sysemu\n");
return 0;
}
return 0;
}
late_initcall(make_proc_sysemu);
int singlestepping(void * t)
{
struct task_struct *task = t ? t : current;
if (!test_thread_flag(TIF_SINGLESTEP))
return 0;
if (task->thread.singlestep_syscall)
return 1;
return 2;
}
/*
* Only x86 and x86_64 have an arch_align_stack().
* All other arches have "#define arch_align_stack(x) (x)"
* in their asm/exec.h
* As this is included in UML from asm-um/system-generic.h,
* we can use it to behave as the subarch does.
*/
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_u32_below(8192);
return sp & ~0xf;
}
#endif
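/*
 * Walk the task's kernel stack word by word, skip everything up to and
 * including the scheduler functions, and report the first kernel text
 * address found above them as the wait channel.
 */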
unsigned long __get_wchan(struct task_struct *p)
{
unsigned long stack_page, sp, ip;
bool seen_sched = 0;
stack_page = (unsigned long) task_stack_page(p);
/* Bail if the process has no kernel stack for some reason */
if (stack_page == 0)
return 0;
sp = p->thread.switch_buf->JB_SP;
/*
* Bail if the stack pointer is below the bottom of the kernel
* stack for some reason
*/
if (sp < stack_page)
return 0;
while (sp < stack_page + THREAD_SIZE) {
ip = *((unsigned long *) sp);
if (in_sched_functions(ip))
/* Ignore everything until we're above the scheduler */
seen_sched = 1;
else if (kernel_text_address(ip) && seen_sched)
return ip;
sp += sizeof(unsigned long);
}
return 0;
}
int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
int cpu = current_thread_info()->cpu;
return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}
| linux-master | arch/um/kernel/process.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2013 Richard Weinberger <[email protected]>
*/
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/sysrq.h>
#include <asm/stacktrace.h>
#include <os.h>
static void _print_addr(void *data, unsigned long address, int reliable)
{
const char *loglvl = data;
printk("%s [<%08lx>] %s%pS\n", loglvl, address, reliable ? "" : "? ",
(void *)address);
}
static const struct stacktrace_ops stackops = {
.address = _print_addr
};
void show_stack(struct task_struct *task, unsigned long *stack,
const char *loglvl)
{
struct pt_regs *segv_regs = current->thread.segv_regs;
int i;
if (!segv_regs && os_is_signal_stack()) {
pr_err("Received SIGSEGV in SIGSEGV handler,"
" aborting stack trace!\n");
return;
}
if (!stack)
stack = get_stack_pointer(task, segv_regs);
printk("%sStack:\n", loglvl);
for (i = 0; i < 3 * STACKSLOTS_PER_LINE; i++) {
if (kstack_end(stack))
break;
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
pr_cont("\n");
pr_cont(" %08lx", READ_ONCE_NOCHECK(*stack));
stack++;
}
printk("%sCall Trace:\n", loglvl);
dump_trace(current, &stackops, (void *)loglvl);
}
| linux-master | arch/um/kernel/sysrq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 - Cambridge Greys Ltd
* Copyright (C) 2011 - 2014 Cisco Systems Inc
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*/
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
#include <linux/time-internal.h>
/* When epoll triggers we do not know why it did so
* we can also have different IRQs for read and write.
* This is why we keep a small irq_reg array for each fd -
* one entry per IRQ type
*/
struct irq_reg {
void *id;
int irq;
/* it's cheaper to store this than to query it */
int events;
bool active;
bool pending;
bool wakeup;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
bool pending_on_resume;
void (*timetravel_handler)(int, int, void *,
struct time_travel_event *);
struct time_travel_event event;
#endif
};
struct irq_entry {
struct list_head list;
int fd;
struct irq_reg reg[NUM_IRQ_TYPES];
bool suspended;
bool sigio_workaround;
};
static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;
static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
/*
* irq->active guards against reentry
* irq->pending accumulates pending requests
* if pending is raised the irq_handler is re-run
* until pending is cleared
*/
if (irq->active) {
irq->active = false;
do {
irq->pending = false;
do_IRQ(irq->irq, regs);
} while (irq->pending);
irq->active = true;
} else {
irq->pending = true;
}
}
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static void irq_event_handler(struct time_travel_event *ev)
{
struct irq_reg *reg = container_of(ev, struct irq_reg, event);
/* do nothing if suspended - just to cause a wakeup */
if (irqs_suspended)
return;
generic_handle_irq(reg->irq);
}
static bool irq_do_timetravel_handler(struct irq_entry *entry,
enum um_irq_type t)
{
struct irq_reg *reg = &entry->reg[t];
if (!reg->timetravel_handler)
return false;
/*
* Handle all messages - we might get multiple even while
* interrupts are already suspended, due to suspend order
* etc. Note that time_travel_add_irq_event() will not add
* an event twice, if it's pending already "first wins".
*/
	reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);
if (!reg->event.pending)
return false;
if (irqs_suspended)
reg->pending_on_resume = true;
return true;
}
#else
static bool irq_do_timetravel_handler(struct irq_entry *entry,
enum um_irq_type t)
{
return false;
}
#endif
static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
struct uml_pt_regs *regs,
bool timetravel_handlers_only)
{
struct irq_reg *reg = &entry->reg[t];
if (!reg->events)
return;
if (os_epoll_triggered(idx, reg->events) <= 0)
return;
if (irq_do_timetravel_handler(entry, t))
return;
/*
* If we're called to only run time-travel handlers then don't
* actually proceed but mark sigio as pending (if applicable).
* For suspend/resume, timetravel_handlers_only may be true
* despite time-travel not being configured and used.
*/
if (timetravel_handlers_only) {
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
mark_sigio_pending();
#endif
return;
}
irq_io_loop(reg, regs);
}
static void _sigio_handler(struct uml_pt_regs *regs,
bool timetravel_handlers_only)
{
struct irq_entry *irq_entry;
int n, i;
if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
return;
while (1) {
		/* This is now lockless - epoll keeps back-references to the irqs
* which have trigger it so there is no need to walk the irq
* list and lock it every time. We avoid locking by turning off
* IO for a specific fd by executing os_del_epoll_fd(fd) before
* we do any changes to the actual data structures
*/
n = os_waiting_for_events_epoll();
if (n <= 0) {
if (n == -EINTR)
continue;
else
break;
}
for (i = 0; i < n ; i++) {
enum um_irq_type t;
irq_entry = os_epoll_get_data_pointer(i);
for (t = 0; t < NUM_IRQ_TYPES; t++)
sigio_reg_handler(i, irq_entry, t, regs,
timetravel_handlers_only);
}
}
if (!timetravel_handlers_only)
free_irqs();
}
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
_sigio_handler(regs, irqs_suspended);
}
static struct irq_entry *get_irq_entry_by_fd(int fd)
{
struct irq_entry *walk;
lockdep_assert_held(&irq_lock);
list_for_each_entry(walk, &active_fds, list) {
if (walk->fd == fd)
return walk;
}
return NULL;
}
static void free_irq_entry(struct irq_entry *to_free, bool remove)
{
if (!to_free)
return;
if (remove)
os_del_epoll_fd(to_free->fd);
list_del(&to_free->list);
kfree(to_free);
}
static bool update_irq_entry(struct irq_entry *entry)
{
enum um_irq_type i;
int events = 0;
for (i = 0; i < NUM_IRQ_TYPES; i++)
events |= entry->reg[i].events;
if (events) {
/* will modify (instead of add) if needed */
os_add_epoll_fd(events, entry->fd, entry);
return true;
}
os_del_epoll_fd(entry->fd);
return false;
}
static void update_or_free_irq_entry(struct irq_entry *entry)
{
if (!update_irq_entry(entry))
free_irq_entry(entry, false);
}
static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
void (*timetravel_handler)(int, int, void *,
struct time_travel_event *))
{
struct irq_entry *irq_entry;
int err, events = os_event_mask(type);
unsigned long flags;
err = os_set_fd_async(fd);
if (err < 0)
goto out;
spin_lock_irqsave(&irq_lock, flags);
irq_entry = get_irq_entry_by_fd(fd);
if (irq_entry) {
/* cannot register the same FD twice with the same type */
if (WARN_ON(irq_entry->reg[type].events)) {
err = -EALREADY;
goto out_unlock;
}
/* temporarily disable to avoid IRQ-side locking */
os_del_epoll_fd(fd);
} else {
irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
if (!irq_entry) {
err = -ENOMEM;
goto out_unlock;
}
irq_entry->fd = fd;
list_add_tail(&irq_entry->list, &active_fds);
maybe_sigio_broken(fd);
}
irq_entry->reg[type].id = dev_id;
irq_entry->reg[type].irq = irq;
irq_entry->reg[type].active = true;
irq_entry->reg[type].events = events;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
if (um_irq_timetravel_handler_used()) {
irq_entry->reg[type].timetravel_handler = timetravel_handler;
irq_entry->reg[type].event.fn = irq_event_handler;
}
#endif
WARN_ON(!update_irq_entry(irq_entry));
spin_unlock_irqrestore(&irq_lock, flags);
return 0;
out_unlock:
spin_unlock_irqrestore(&irq_lock, flags);
out:
return err;
}
/*
* Remove the entry or entries for a specific FD, if you
* don't want to remove all the possible entries then use
* um_free_irq() or deactivate_fd() instead.
*/
void free_irq_by_fd(int fd)
{
struct irq_entry *to_free;
unsigned long flags;
spin_lock_irqsave(&irq_lock, flags);
to_free = get_irq_entry_by_fd(fd);
free_irq_entry(to_free, true);
spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);
static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
struct irq_entry *entry;
unsigned long flags;
spin_lock_irqsave(&irq_lock, flags);
list_for_each_entry(entry, &active_fds, list) {
enum um_irq_type i;
for (i = 0; i < NUM_IRQ_TYPES; i++) {
struct irq_reg *reg = &entry->reg[i];
if (!reg->events)
continue;
if (reg->irq != irq)
continue;
if (reg->id != dev)
continue;
os_del_epoll_fd(entry->fd);
reg->events = 0;
update_or_free_irq_entry(entry);
goto out;
}
}
out:
spin_unlock_irqrestore(&irq_lock, flags);
}
void deactivate_fd(int fd, int irqnum)
{
struct irq_entry *entry;
unsigned long flags;
enum um_irq_type i;
os_del_epoll_fd(fd);
spin_lock_irqsave(&irq_lock, flags);
entry = get_irq_entry_by_fd(fd);
if (!entry)
goto out;
for (i = 0; i < NUM_IRQ_TYPES; i++) {
if (!entry->reg[i].events)
continue;
if (entry->reg[i].irq == irqnum)
entry->reg[i].events = 0;
}
update_or_free_irq_entry(entry);
out:
spin_unlock_irqrestore(&irq_lock, flags);
ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);
/*
* Called just before shutdown in order to provide a clean exec
* environment in case the system is rebooting. No locking because
* that would cause a pointless shutdown hang if something hadn't
* released the lock.
*/
int deactivate_all_fds(void)
{
struct irq_entry *entry;
/* Stop IO. The IRQ loop has no lock so this is our
* only way of making sure we are safe to dispose
* of all IRQ handlers
*/
os_set_ioignore();
/* we can no longer call kfree() here so just deactivate */
list_for_each_entry(entry, &active_fds, list)
os_del_epoll_fd(entry->fd);
os_close_epoll_fd();
return 0;
}
/*
* do_IRQ handles all normal device IRQs (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
irq_enter();
generic_handle_irq(irq);
irq_exit();
set_irq_regs(old_regs);
return 1;
}
void um_free_irq(int irq, void *dev)
{
if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
"freeing invalid irq %d", irq))
return;
free_irq_by_irq_and_dev(irq, dev);
free_irq(irq, dev);
clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);
static int
_um_request_irq(int irq, int fd, enum um_irq_type type,
irq_handler_t handler, unsigned long irqflags,
const char *devname, void *dev_id,
void (*timetravel_handler)(int, int, void *,
struct time_travel_event *))
{
int err;
if (irq == UM_IRQ_ALLOC) {
int i;
for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
if (!test_and_set_bit(i, irqs_allocated)) {
irq = i;
break;
}
}
}
if (irq < 0)
return -ENOSPC;
if (fd != -1) {
err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
if (err)
goto error;
}
err = request_irq(irq, handler, irqflags, devname, dev_id);
if (err < 0)
goto error;
return irq;
error:
clear_bit(irq, irqs_allocated);
return err;
}
int um_request_irq(int irq, int fd, enum um_irq_type type,
irq_handler_t handler, unsigned long irqflags,
const char *devname, void *dev_id)
{
return _um_request_irq(irq, fd, type, handler, irqflags,
devname, dev_id, NULL);
}
EXPORT_SYMBOL(um_request_irq);
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
irq_handler_t handler, unsigned long irqflags,
const char *devname, void *dev_id,
void (*timetravel_handler)(int, int, void *,
struct time_travel_event *))
{
return _um_request_irq(irq, fd, type, handler, irqflags,
devname, dev_id, timetravel_handler);
}
EXPORT_SYMBOL(um_request_irq_tt);
void sigio_run_timetravel_handlers(void)
{
_sigio_handler(NULL, true);
}
#endif
#ifdef CONFIG_PM_SLEEP
void um_irqs_suspend(void)
{
struct irq_entry *entry;
unsigned long flags;
irqs_suspended = true;
spin_lock_irqsave(&irq_lock, flags);
list_for_each_entry(entry, &active_fds, list) {
enum um_irq_type t;
bool clear = true;
for (t = 0; t < NUM_IRQ_TYPES; t++) {
if (!entry->reg[t].events)
continue;
/*
* For the SIGIO_WRITE_IRQ, which is used to handle the
* SIGIO workaround thread, we need special handling:
* enable wake for it itself, but below we tell it about
* any FDs that should be suspended.
*/
if (entry->reg[t].wakeup ||
entry->reg[t].irq == SIGIO_WRITE_IRQ
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
|| entry->reg[t].timetravel_handler
#endif
) {
clear = false;
break;
}
}
if (clear) {
entry->suspended = true;
os_clear_fd_async(entry->fd);
entry->sigio_workaround =
!__ignore_sigio_fd(entry->fd);
}
}
spin_unlock_irqrestore(&irq_lock, flags);
}
void um_irqs_resume(void)
{
struct irq_entry *entry;
unsigned long flags;
local_irq_save(flags);
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
/*
* We don't need to lock anything here since we're in resume
* and nothing else is running, but have disabled IRQs so we
* don't try anything else with the interrupt list from there.
*/
list_for_each_entry(entry, &active_fds, list) {
enum um_irq_type t;
for (t = 0; t < NUM_IRQ_TYPES; t++) {
struct irq_reg *reg = &entry->reg[t];
if (reg->pending_on_resume) {
irq_enter();
generic_handle_irq(reg->irq);
irq_exit();
reg->pending_on_resume = false;
}
}
}
#endif
spin_lock(&irq_lock);
list_for_each_entry(entry, &active_fds, list) {
if (entry->suspended) {
int err = os_set_fd_async(entry->fd);
WARN(err < 0, "os_set_fd_async returned %d\n", err);
entry->suspended = false;
if (entry->sigio_workaround) {
err = __add_sigio_fd(entry->fd);
				WARN(err < 0, "__add_sigio_fd returned %d\n", err);
}
}
}
spin_unlock_irqrestore(&irq_lock, flags);
irqs_suspended = false;
send_sigio_to_self();
}
static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct irq_entry *entry;
unsigned long flags;
spin_lock_irqsave(&irq_lock, flags);
list_for_each_entry(entry, &active_fds, list) {
enum um_irq_type t;
for (t = 0; t < NUM_IRQ_TYPES; t++) {
if (!entry->reg[t].events)
continue;
if (entry->reg[t].irq != d->irq)
continue;
entry->reg[t].wakeup = on;
goto unlock;
}
}
unlock:
spin_unlock_irqrestore(&irq_lock, flags);
return 0;
}
#else
#define normal_irq_set_wake NULL
#endif
/*
* irq_chip must define at least enable/disable and ack when
* the edge handler is used.
*/
static void dummy(struct irq_data *d)
{
}
/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
.name = "SIGIO",
.irq_disable = dummy,
.irq_enable = dummy,
.irq_ack = dummy,
.irq_mask = dummy,
.irq_unmask = dummy,
.irq_set_wake = normal_irq_set_wake,
};
static struct irq_chip alarm_irq_type = {
.name = "SIGALRM",
.irq_disable = dummy,
.irq_enable = dummy,
.irq_ack = dummy,
.irq_mask = dummy,
.irq_unmask = dummy,
};
void __init init_IRQ(void)
{
int i;
irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);
for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
/* Initialize EPOLL Loop */
os_setup_epoll();
}
/*
* IRQ stack entry and exit:
*
* Unlike i386, UML doesn't receive IRQs on the normal kernel stack
* and switch over to the IRQ stack after some preparation. We use
* sigaltstack to receive signals on a separate stack from the start.
* These two functions make sure the rest of the kernel won't be too
* upset by being on a different stack. The IRQ stack has a
* thread_info structure at the bottom so that current et al continue
* to work.
*
* to_irq_stack copies the current task's thread_info to the IRQ stack
* thread_info and sets the tasks's stack to point to the IRQ stack.
*
* from_irq_stack copies the thread_info struct back (flags may have
* been modified) and resets the task's stack pointer.
*
* Tricky bits -
*
* What happens when two signals race each other? UML doesn't block
* signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
* could arrive while a previous one is still setting up the
* thread_info.
*
* There are three cases -
* The first interrupt on the stack - sets up the thread_info and
* handles the interrupt
* A nested interrupt interrupting the copying of the thread_info -
* can't handle the interrupt, as the stack is in an unknown state
* A nested interrupt not interrupting the copying of the
* thread_info - doesn't do any setup, just handles the interrupt
*
* The first job is to figure out whether we interrupted stack setup.
* This is done by xchging the signal mask with thread_info->pending.
* If the value that comes back is zero, then there is no setup in
* progress, and the interrupt can be handled. If the value is
* non-zero, then there is stack setup in progress. In order to have
* the interrupt handled, we leave our signal in the mask, and it will
* be handled by the upper handler after it has set up the stack.
*
* Next is to figure out whether we are the outer handler or a nested
* one. As part of setting up the stack, thread_info->real_thread is
* set to non-NULL (and is reset to NULL on exit). This is the
* nesting indicator. If it is non-NULL, then the stack is already
* set up and the handler can run.
*/
static unsigned long pending_mask;
unsigned long to_irq_stack(unsigned long *mask_out)
{
struct thread_info *ti;
unsigned long mask, old;
int nested;
mask = xchg(&pending_mask, *mask_out);
if (mask != 0) {
/*
* If any interrupts come in at this point, we want to
* make sure that their bits aren't lost by our
* putting our bit in. So, this loop accumulates bits
* until xchg returns the same value that we put in.
* When that happens, there were no new interrupts,
* and pending_mask contains a bit for each interrupt
* that came in.
*/
old = *mask_out;
do {
old |= mask;
mask = xchg(&pending_mask, old);
} while (mask != old);
return 1;
}
ti = current_thread_info();
nested = (ti->real_thread != NULL);
if (!nested) {
struct task_struct *task;
struct thread_info *tti;
task = cpu_tasks[ti->cpu].task;
tti = task_thread_info(task);
*ti = *tti;
ti->real_thread = tti;
task->stack = ti;
}
mask = xchg(&pending_mask, 0);
*mask_out |= mask | nested;
return 0;
}
unsigned long from_irq_stack(int nested)
{
struct thread_info *ti, *to;
unsigned long mask;
ti = current_thread_info();
pending_mask = 1;
to = ti->real_thread;
current->stack = to;
ti->real_thread = NULL;
*to = *ti;
mask = xchg(&pending_mask, 0);
return mask & ~1;
}
| linux-master | arch/um/kernel/irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ptrace.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <mem_user.h>
#include <registers.h>
#include <skas.h>
#include <os.h>
void flush_thread(void)
{
void *data = NULL;
int ret;
	arch_flush_thread(&current->thread.arch);
	ret = unmap(&current->mm->context.id, 0, TASK_SIZE, 1, &data);
if (ret) {
printk(KERN_ERR "%s - clearing address space failed, err = %d\n",
__func__, ret);
force_sig(SIGKILL);
}
get_safe_registers(current_pt_regs()->regs.gp,
current_pt_regs()->regs.fp);
	__switch_mm(&current->mm->context.id);
}
void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
{
PT_REGS_IP(regs) = eip;
PT_REGS_SP(regs) = esp;
clear_thread_flag(TIF_SINGLESTEP);
#ifdef SUBARCH_EXECVE1
SUBARCH_EXECVE1(regs->regs);
#endif
}
EXPORT_SYMBOL(start_thread);
| linux-master | arch/um/kernel/exec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Richard Weinberger <[email protected]>
*/
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <os.h>
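/*
 * Only allow nofault kernel reads from addresses outside the first page
 * and from ranges that the host reports as resident via os_mincore().
 */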
bool copy_from_kernel_nofault_allowed(const void *src, size_t size)
{
void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
if ((unsigned long)src < PAGE_SIZE || size <= 0)
return false;
if (os_mincore(psrc, size + src - psrc) <= 0)
return false;
return true;
}
| linux-master | arch/um/kernel/maccess.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <mem_user.h>
#include <os.h>
static int physmem_fd = -1;
/* Changed during early boot */
unsigned long high_physmem;
EXPORT_SYMBOL(high_physmem);
extern unsigned long long physmem_size;
void __init mem_total_pages(unsigned long physmem, unsigned long iomem,
unsigned long highmem)
{
unsigned long phys_pages, highmem_pages;
unsigned long iomem_pages, total_pages;
phys_pages = physmem >> PAGE_SHIFT;
iomem_pages = iomem >> PAGE_SHIFT;
highmem_pages = highmem >> PAGE_SHIFT;
total_pages = phys_pages + iomem_pages + highmem_pages;
max_mapnr = total_pages;
}
void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
int r, int w, int x)
{
__u64 offset;
int fd, err;
fd = phys_mapping(phys, &offset);
err = os_map_memory((void *) virt, fd, offset, len, r, w, x);
if (err) {
if (err == -ENOMEM)
printk(KERN_ERR "try increasing the host's "
"/proc/sys/vm/max_map_count to <physical "
"memory size>/4096\n");
panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
"err = %d\n", virt, fd, offset, len, r, w, x, err);
}
}
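/*
 * Worked example (added sketch): with the default 64 MiB of "physical"
 * memory and 4 KiB pages, the worst case is 64 MiB / 4096 = 16384
 * separate host mappings, which is the <physical memory size>/4096
 * figure the max_map_count hint above refers to.
 */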
/**
* setup_physmem() - Setup physical memory for UML
* @start: Start address of the physical kernel memory,
* i.e start address of the executable image.
* @reserve_end: end address of the physical kernel memory.
* @len: Length of total physical memory that should be mapped/made
* available, in bytes.
* @highmem: Number of highmem bytes that should be mapped/made available.
*
* Creates an unlinked temporary file of size (len + highmem) and memory maps
* it on the last executable image address (uml_reserved).
*
* The offset is needed as the length of the total physical memory
* (len + highmem) includes the size of the memory used by the executable image,
* but the mapped-to address is the last address of the executable image
* (uml_reserved == end address of executable image).
*
* The memory-mapped contents of the temporary file are used as backing memory
* of all user space processes/kernel tasks.
*/
void __init setup_physmem(unsigned long start, unsigned long reserve_end,
unsigned long len, unsigned long long highmem)
{
unsigned long reserve = reserve_end - start;
long map_size = len - reserve;
int err;
if (map_size <= 0) {
os_warn("Too little physical memory! Needed=%lu, given=%lu\n",
reserve, len);
exit(1);
}
physmem_fd = create_mem_file(len + highmem);
err = os_map_memory((void *) reserve_end, physmem_fd, reserve,
map_size, 1, 1, 1);
if (err < 0) {
os_warn("setup_physmem - mapping %ld bytes of memory at 0x%p "
"failed - errno = %d\n", map_size,
(void *) reserve_end, err);
exit(1);
}
/*
* Special kludge - This page will be mapped in to userspace processes
* from physmem_fd, so it needs to be written out there.
*/
os_seek_file(physmem_fd, __pa(__syscall_stub_start));
os_write_file(physmem_fd, __syscall_stub_start, PAGE_SIZE);
os_fsync_file(physmem_fd);
memblock_add(__pa(start), len + highmem);
memblock_reserve(__pa(start), reserve);
min_low_pfn = PFN_UP(__pa(reserve_end));
max_low_pfn = min_low_pfn + (map_size >> PAGE_SHIFT);
}
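/*
 * Illustrative layout (added sketch, the addresses are assumed): with
 * start == 0x60000000, reserve_end == 0x60400000 and len == 64 MiB,
 * reserve is 4 MiB and map_size is 60 MiB, so the backing file is
 * mapped at virtual address 0x60400000 starting at file offset 4 MiB.
 * Physical offsets and file offsets then coincide, which is what
 * phys_mapping() below relies on.
 */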
int phys_mapping(unsigned long phys, unsigned long long *offset_out)
{
int fd = -1;
if (phys < physmem_size) {
fd = physmem_fd;
*offset_out = phys;
}
else if (phys < __pa(end_iomem)) {
struct iomem_region *region = iomem_regions;
while (region != NULL) {
if ((phys >= region->phys) &&
(phys < region->phys + region->size)) {
fd = region->fd;
*offset_out = phys - region->phys;
break;
}
region = region->next;
}
}
else if (phys < __pa(end_iomem) + highmem) {
fd = physmem_fd;
*offset_out = phys - iomem_size;
}
return fd;
}
EXPORT_SYMBOL(phys_mapping);
static int __init uml_mem_setup(char *line, int *add)
{
char *retptr;
physmem_size = memparse(line,&retptr);
return 0;
}
__uml_setup("mem=", uml_mem_setup,
"mem=<Amount of desired ram>\n"
" This controls how much \"physical\" memory the kernel allocates\n"
" for the system. The size is specified as a number followed by\n"
" one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
" This is not related to the amount of memory in the host. It can\n"
" be more, and the excess, if it's ever used, will just be swapped out.\n"
" Example: mem=64M\n\n"
);
extern int __init parse_iomem(char *str, int *add);
__uml_setup("iomem=", parse_iomem,
"iomem=<name>,<file>\n"
" Configure <file> as an IO memory region named <name>.\n\n"
);
/*
* This list is constructed in parse_iomem and addresses filled in
* setup_iomem, both of which run during early boot. Afterwards, it's
* unchanged.
*/
struct iomem_region *iomem_regions;
/* Initialized in parse_iomem and unchanged thereafter */
int iomem_size;
unsigned long find_iomem(char *driver, unsigned long *len_out)
{
struct iomem_region *region = iomem_regions;
while (region != NULL) {
if (!strcmp(region->driver, driver)) {
*len_out = region->size;
return region->virt;
}
region = region->next;
}
return 0;
}
EXPORT_SYMBOL(find_iomem);
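/*
 * Usage sketch (added example; the "flash" region name and the error
 * handling are assumptions, not taken from this file): a driver whose
 * region was registered with "iomem=flash,<file>" could locate it as
 * shown below.
 */
#if 0
static int example_claim_flash_region(void)
{
unsigned long len;
unsigned long base = find_iomem("flash", &len);
if (base == 0)
return -ENODEV;
/* base .. base + len is the virtual mapping set up by setup_iomem() */
return 0;
}
#endif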
static int setup_iomem(void)
{
struct iomem_region *region = iomem_regions;
unsigned long iomem_start = high_physmem + PAGE_SIZE;
int err;
while (region != NULL) {
err = os_map_memory((void *) iomem_start, region->fd, 0,
region->size, 1, 1, 0);
if (err)
printk(KERN_ERR "Mapping iomem region for driver '%s' "
"failed, errno = %d\n", region->driver, -err);
else {
region->virt = iomem_start;
region->phys = __pa(region->virt);
}
iomem_start += region->size + PAGE_SIZE;
region = region->next;
}
return 0;
}
__initcall(setup_iomem);
| linux-master | arch/um/kernel/physmem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/audit.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
void user_enable_single_step(struct task_struct *child)
{
set_tsk_thread_flag(child, TIF_SINGLESTEP);
child->thread.singlestep_syscall = 0;
#ifdef SUBARCH_SET_SINGLESTEPPING
SUBARCH_SET_SINGLESTEPPING(child, 1);
#endif
}
void user_disable_single_step(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
child->thread.singlestep_syscall = 0;
#ifdef SUBARCH_SET_SINGLESTEPPING
SUBARCH_SET_SINGLESTEPPING(child, 0);
#endif
}
/*
* Called by kernel/ptrace.c when detaching..
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
extern int peek_user(struct task_struct * child, long addr, long data);
extern int poke_user(struct task_struct * child, long addr, long data);
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int i, ret;
unsigned long __user *p = (void __user *)data;
void __user *vp = p;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
ret = peek_user(child, addr, data);
break;
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR:
ret = poke_user(child, addr, data);
break;
case PTRACE_SYSEMU:
case PTRACE_SYSEMU_SINGLESTEP:
ret = -EIO;
break;
#ifdef PTRACE_GETREGS
case PTRACE_GETREGS: { /* Get all gp regs from the child. */
if (!access_ok(p, MAX_REG_OFFSET)) {
ret = -EIO;
break;
}
for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
__put_user(getreg(child, i), p);
p++;
}
ret = 0;
break;
}
#endif
#ifdef PTRACE_SETREGS
case PTRACE_SETREGS: { /* Set all gp regs in the child. */
unsigned long tmp = 0;
if (!access_ok(p, MAX_REG_OFFSET)) {
ret = -EIO;
break;
}
for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
__get_user(tmp, p);
putreg(child, i, tmp);
p++;
}
ret = 0;
break;
}
#endif
case PTRACE_GET_THREAD_AREA:
ret = ptrace_get_thread_area(child, addr, vp);
break;
case PTRACE_SET_THREAD_AREA:
ret = ptrace_set_thread_area(child, addr, vp);
break;
default:
ret = ptrace_request(child, request, addr, data);
if (ret == -EIO)
ret = subarch_ptrace(child, request, addr, data);
break;
}
return ret;
}
static void send_sigtrap(struct uml_pt_regs *regs, int error_code)
{
/* Send us the fake SIGTRAP */
force_sig_fault(SIGTRAP, TRAP_BRKPT,
/* User-mode eip? */
UPT_IS_USER(regs) ? (void __user *) UPT_IP(regs) : NULL);
}
/*
* XXX Check TIF_SINGLESTEP for singlestepping check and
* PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check
*/
int syscall_trace_enter(struct pt_regs *regs)
{
audit_syscall_entry(UPT_SYSCALL_NR(&regs->regs),
UPT_SYSCALL_ARG1(&regs->regs),
UPT_SYSCALL_ARG2(&regs->regs),
UPT_SYSCALL_ARG3(&regs->regs),
UPT_SYSCALL_ARG4(&regs->regs));
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return 0;
return ptrace_report_syscall_entry(regs);
}
void syscall_trace_leave(struct pt_regs *regs)
{
int ptraced = current->ptrace;
audit_syscall_exit(regs);
/* Fake a debug trap */
if (test_thread_flag(TIF_SINGLESTEP))
send_sigtrap(&regs->regs, 0);
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
ptrace_report_syscall_exit(regs, 0);
/* force do_signal() --> is_syscall() */
if (ptraced & PT_PTRACED)
set_thread_flag(TIF_SIGPENDING);
}
| linux-master | arch/um/kernel/ptrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Intel Corporation
* Author: Johannes Berg <[email protected]>
*/
#include <asm/iomap.h>
#include <asm-generic/pci_iomap.h>
void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
unsigned int nr)
{
return NULL;
}
| linux-master | arch/um/kernel/ioport.c |
#include <sysdep/kernel-offsets.h>
| linux-master | arch/um/kernel/asm-offsets.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
#include <linux/sched/task.h>
#ifdef CONFIG_KASAN
int kasan_um_is_ready;
void kasan_init(void)
{
/*
* kasan_map_memory will map all of the required address space and
* the host machine will allocate physical memory as necessary.
*/
kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE);
init_task.kasan_depth = 0;
kasan_um_is_ready = true;
}
static void (*kasan_init_ptr)(void)
__section(".kasan_init") __used
= kasan_init;
#endif
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);
/*
* Initialized during boot, and readonly for initializing page tables
* afterwards
*/
pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
EXPORT_SYMBOL(highmem);
int kmalloc_ok = 0;
/* Used during early boot */
static unsigned long brk_end;
void __init mem_init(void)
{
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
/* Map in the area just after the brk now that kmalloc is about
* to be turned on.
*/
brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
memblock_free((void *)brk_end, uml_reserved - brk_end);
uml_reserved = brk_end;
/* this will put all low memory onto the freelists */
memblock_free_all();
max_low_pfn = totalram_pages();
max_pfn = max_low_pfn;
kmalloc_ok = 1;
}
/*
* Create a page table and place a pointer to it in a middle page
* directory entry.
*/
static void __init one_page_table_init(pmd_t *pmd)
{
if (pmd_none(*pmd)) {
pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
set_pmd(pmd, __pmd(_KERNPG_TABLE +
(unsigned long) __pa(pte)));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
}
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pmd_table)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
BUG_ON(pmd_table != pmd_offset(pud, 0));
#endif
}
static void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
int i, j;
unsigned long vaddr;
vaddr = start;
i = pgd_index(vaddr);
j = pmd_index(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
p4d = p4d_offset(pgd, vaddr);
pud = pud_offset(p4d, vaddr);
if (pud_none(*pud))
one_md_table_init(pud);
pmd = pmd_offset(pud, vaddr);
for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
one_page_table_init(pmd);
vaddr += PMD_SIZE;
}
j = 0;
}
}
static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
long size = FIXADDR_USER_END - FIXADDR_USER_START;
pte_t *pte;
phys_t p;
unsigned long v, vaddr = FIXADDR_USER_START;
if (!size)
return;
fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
if (!v)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, size, PAGE_SIZE);
memcpy((void *) v , (void *) FIXADDR_USER_START, size);
p = __pa(v);
for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
p += PAGE_SIZE) {
pte = virt_to_kpte(vaddr);
pte_set_val(*pte, p, PAGE_READONLY);
}
#endif
}
void __init paging_init(void)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
unsigned long vaddr;
empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
if (!empty_zero_page)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
free_area_init(max_zone_pfn);
/*
* Fixed mappings, only the page table structure has to be
* created - mappings will be set by set_fixmap():
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);
fixaddr_user_init();
}
/*
* This can't do anything because nothing in the kernel image can be freed
* since it's not in kernel physical memory.
*/
void free_initmem(void)
{
}
/* Allocate and free page tables. */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
if (pgd) {
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
memcpy(pgd + USER_PTRS_PER_PGD,
swapper_pg_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return pgd;
}
void *uml_kmalloc(int size, int flags)
{
return kmalloc(size, flags);
}
static const pgprot_t protection_map[16] = {
[VM_NONE] = PAGE_NONE,
[VM_READ] = PAGE_READONLY,
[VM_WRITE] = PAGE_COPY,
[VM_WRITE | VM_READ] = PAGE_COPY,
[VM_EXEC] = PAGE_READONLY,
[VM_EXEC | VM_READ] = PAGE_READONLY,
[VM_EXEC | VM_WRITE] = PAGE_COPY,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
[VM_SHARED] = PAGE_NONE,
[VM_SHARED | VM_READ] = PAGE_READONLY,
[VM_SHARED | VM_WRITE] = PAGE_SHARED,
[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
[VM_SHARED | VM_EXEC] = PAGE_READONLY,
[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT
| linux-master | arch/um/kernel/mem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <asm/errno.h>
#include <init.h>
#include <kern.h>
#include <os.h>
/* Changed by set_umid_arg */
static int umid_inited;
static int __init set_umid_arg(char *name, int *add)
{
int err;
if (umid_inited) {
os_warn("umid already set\n");
return 0;
}
*add = 0;
err = set_umid(name);
if (err == -EEXIST)
os_warn("umid '%s' already in use\n", name);
else if (!err)
umid_inited = 1;
return 0;
}
__uml_setup("umid=", set_umid_arg,
"umid=<name>\n"
" This is used to assign a unique identity to this UML machine and\n"
" is used for naming the pid file and management console socket.\n\n"
);
| linux-master | arch/um/kernel/umid.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <asm/types.h>
#include <init.h>
#include <os.h>
#include "um_arch.h"
/* Changed by uml_initrd_setup, which is a setup */
static char *initrd __initdata = NULL;
int __init read_initrd(void)
{
unsigned long long size;
void *area;
if (!initrd)
return 0;
area = uml_load_file(initrd, &size);
if (!area)
return 0;
initrd_start = (unsigned long) area;
initrd_end = initrd_start + size;
return 0;
}
static int __init uml_initrd_setup(char *line, int *add)
{
initrd = line;
return 0;
}
__uml_setup("initrd=", uml_initrd_setup,
"initrd=<initrd image>\n"
" This is used to boot UML from an initrd image. The argument is the\n"
" name of the file containing the image.\n\n"
);
| linux-master | arch/um/kernel/initrd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/module.h>
extern void mcount(void);
EXPORT_SYMBOL(mcount);
| linux-master | arch/um/kernel/gprof_syms.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/uaccess.h>
/*
* If read and write race, the read will still atomically read a valid
* value.
*/
int uml_exitcode = 0;
static int exitcode_proc_show(struct seq_file *m, void *v)
{
int val;
/*
* Save uml_exitcode in a local so that we don't need to guarantee
* that sprintf accesses it atomically.
*/
val = uml_exitcode;
seq_printf(m, "%d\n", val);
return 0;
}
static int exitcode_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, exitcode_proc_show, NULL);
}
static ssize_t exitcode_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
size_t size;
int tmp;
size = min(count, sizeof(buf));
if (copy_from_user(buf, buffer, size))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0);
if ((*end != '\0') && !isspace(*end))
return -EINVAL;
uml_exitcode = tmp;
return count;
}
static const struct proc_ops exitcode_proc_ops = {
.proc_open = exitcode_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = exitcode_proc_write,
};
static int make_proc_exitcode(void)
{
struct proc_dir_entry *ent;
ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_ops);
if (ent == NULL) {
printk(KERN_WARNING "make_proc_exitcode : Failed to register "
"/proc/exitcode\n");
return 0;
}
return 0;
}
__initcall(make_proc_exitcode);
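/*
 * Usage sketch (added example): inside the guest,
 *
 *   echo 32 > /proc/exitcode
 *
 * stores 32 in uml_exitcode, so the UML host process should exit with
 * status 32 when the instance later halts.
 */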
| linux-master | arch/um/kernel/exitcode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
static void kill_off_processes(void)
{
struct task_struct *p;
int pid;
read_lock(&tasklist_lock);
for_each_process(p) {
struct task_struct *t;
t = find_lock_task_mm(p);
if (!t)
continue;
pid = t->mm->context.id.u.pid;
task_unlock(t);
os_kill_ptraced_process(pid, 1);
}
read_unlock(&tasklist_lock);
}
void uml_cleanup(void)
{
kmalloc_ok = 0;
do_uml_exitcalls();
kill_off_processes();
}
void machine_restart(char * __unused)
{
uml_cleanup();
reboot_skas();
}
void machine_power_off(void)
{
uml_cleanup();
halt_skas();
}
void machine_halt(void)
{
machine_power_off();
}
| linux-master | arch/um/kernel/reboot.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/memblock.h>
#include <os.h>
#include "um_arch.h"
static int __init __uml_load_file(const char *filename, void *buf, int size)
{
int fd, n;
fd = os_open_file(filename, of_read(OPENFLAGS()), 0);
if (fd < 0) {
printk(KERN_ERR "Opening '%s' failed - err = %d\n", filename,
-fd);
return -1;
}
n = os_read_file(fd, buf, size);
if (n != size) {
printk(KERN_ERR "Read of %d bytes from '%s' failed, "
"err = %d\n", size,
filename, -n);
os_close_file(fd);
return -1;
}
os_close_file(fd);
return 0;
}
void *uml_load_file(const char *filename, unsigned long long *size)
{
void *area;
int err;
*size = 0;
if (!filename)
return NULL;
err = os_file_size(filename, size);
if (err)
return NULL;
if (*size == 0) {
printk(KERN_ERR "\"%s\" is empty\n", filename);
return NULL;
}
area = memblock_alloc(*size, SMP_CACHE_BYTES);
if (!area)
panic("%s: Failed to allocate %llu bytes\n", __func__, *size);
if (__uml_load_file(filename, area, *size)) {
memblock_free(area, *size);
return NULL;
}
return area;
}
| linux-master | arch/um/kernel/load_file.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/module.h>
#include <os.h>
EXPORT_SYMBOL(um_set_signals);
EXPORT_SYMBOL(signals_enabled);
EXPORT_SYMBOL(os_stat_fd);
EXPORT_SYMBOL(os_stat_file);
EXPORT_SYMBOL(os_access);
EXPORT_SYMBOL(os_set_exec_close);
EXPORT_SYMBOL(os_getpid);
EXPORT_SYMBOL(os_open_file);
EXPORT_SYMBOL(os_read_file);
EXPORT_SYMBOL(os_write_file);
EXPORT_SYMBOL(os_seek_file);
EXPORT_SYMBOL(os_lock_file);
EXPORT_SYMBOL(os_ioctl_generic);
EXPORT_SYMBOL(os_pipe);
EXPORT_SYMBOL(os_file_type);
EXPORT_SYMBOL(os_file_mode);
EXPORT_SYMBOL(os_file_size);
EXPORT_SYMBOL(os_flush_stdout);
EXPORT_SYMBOL(os_close_file);
EXPORT_SYMBOL(os_set_fd_async);
EXPORT_SYMBOL(os_set_fd_block);
EXPORT_SYMBOL(helper_wait);
EXPORT_SYMBOL(os_shutdown_socket);
EXPORT_SYMBOL(os_create_unix_socket);
EXPORT_SYMBOL(os_connect_socket);
EXPORT_SYMBOL(os_accept_connection);
EXPORT_SYMBOL(os_rcv_fd);
EXPORT_SYMBOL(run_helper);
EXPORT_SYMBOL(os_major);
EXPORT_SYMBOL(os_minor);
EXPORT_SYMBOL(os_makedev);
EXPORT_SYMBOL(os_eventfd);
EXPORT_SYMBOL(os_sendmsg_fds);
EXPORT_SYMBOL(add_sigio_fd);
EXPORT_SYMBOL(ignore_sigio_fd);
EXPORT_SYMBOL(sigio_broken);
EXPORT_SYMBOL(syscall);
| linux-master | arch/um/kernel/ksyms.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <asm/current.h>
#include <asm/tlbflush.h>
#include <arch.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
/*
* Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
* segv().
*/
int handle_page_fault(unsigned long address, unsigned long ip,
int is_write, int is_user, int *code_out)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
pmd_t *pmd;
pte_t *pte;
int err = -EFAULT;
unsigned int flags = FAULT_FLAG_DEFAULT;
*code_out = SEGV_MAPERR;
/*
* If the fault was with pagefaults disabled, don't take the fault, just
* fail.
*/
if (faulthandler_disabled())
goto out_nosemaphore;
if (is_user)
flags |= FAULT_FLAG_USER;
retry:
mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto out;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto out;
if (is_user && !ARCH_IS_STACKGROW(address))
goto out;
vma = expand_stack(mm, address);
if (!vma)
goto out_nosemaphore;
good_area:
*code_out = SEGV_ACCERR;
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto out;
flags |= FAULT_FLAG_WRITE;
} else {
/* Don't require VM_READ|VM_EXEC for write faults! */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto out;
}
do {
vm_fault_t fault;
fault = handle_mm_fault(vma, address, flags, NULL);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
goto out_nosemaphore;
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED)
return 0;
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
} else if (fault & VM_FAULT_SIGSEGV) {
goto out;
} else if (fault & VM_FAULT_SIGBUS) {
err = -EACCES;
goto out;
}
BUG();
}
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
goto retry;
}
pmd = pmd_off(mm, address);
pte = pte_offset_kernel(pmd, address);
} while (!pte_present(*pte));
err = 0;
/*
* The below warning was added in place of
* pte_mkyoung(); if (is_write) pte_mkdirty();
* If it's triggered, we'd see normally a hang here (a clean pte is
* marked read-only to emulate the dirty bit).
* However, the generic code can mark a PTE writable but clean on a
* concurrent read fault, triggering this harmlessly. So comment it out.
*/
#if 0
WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
flush_tlb_page(vma, address);
out:
mmap_read_unlock(mm);
out_nosemaphore:
return err;
out_of_memory:
/*
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
mmap_read_unlock(mm);
if (!is_user)
goto out_nosemaphore;
pagefault_out_of_memory();
return 0;
}
static void show_segv_info(struct uml_pt_regs *regs)
{
struct task_struct *tsk = current;
struct faultinfo *fi = UPT_FAULTINFO(regs);
if (!unhandled_signal(tsk, SIGSEGV))
return;
if (!printk_ratelimit())
return;
printk("%s%s[%d]: segfault at %lx ip %px sp %px error %x",
task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
(void *)UPT_IP(regs), (void *)UPT_SP(regs),
fi->error_code);
print_vma_addr(KERN_CONT " in ", UPT_IP(regs));
printk(KERN_CONT "\n");
}
static void bad_segv(struct faultinfo fi, unsigned long ip)
{
current->thread.arch.faultinfo = fi;
force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *) FAULT_ADDRESS(fi));
}
void fatal_sigsegv(void)
{
force_fatal_sig(SIGSEGV);
do_signal(&current->thread.regs);
/*
* This is to tell gcc that we're not returning - do_signal
* can, in general, return, but in this case, it's not, since
* we just got a fatal SIGSEGV queued.
*/
os_dump_core();
}
/**
* segv_handler() - the SIGSEGV handler
* @sig: the signal number
* @unused_si: the signal info struct; unused in this handler
* @regs: the ptrace register information
*
* The handler first extracts the faultinfo from the UML ptrace regs struct.
* If the userfault did not happen in a UML userspace process, bad_segv is called.
* Otherwise the signal happened in a cloned userspace process, so handle it.
*/
void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
struct faultinfo * fi = UPT_FAULTINFO(regs);
if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) {
show_segv_info(regs);
bad_segv(*fi, UPT_IP(regs));
return;
}
segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
}
/*
* We give a *copy* of the faultinfo in the regs to segv.
* This must be done, since nesting SEGVs could overwrite
* the info in the regs. A pointer to the info then would
* give us bad data!
*/
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
struct uml_pt_regs *regs)
{
jmp_buf *catcher;
int si_code;
int err;
int is_write = FAULT_WRITE(fi);
unsigned long address = FAULT_ADDRESS(fi);
if (!is_user && regs)
current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
if (!is_user && (address >= start_vm) && (address < end_vm)) {
flush_tlb_kernel_vm();
goto out;
}
else if (current->mm == NULL) {
show_regs(container_of(regs, struct pt_regs, regs));
panic("Segfault with no mm");
}
else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) {
show_regs(container_of(regs, struct pt_regs, regs));
panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx",
address, ip);
}
if (SEGV_IS_FIXABLE(&fi))
err = handle_page_fault(address, ip, is_write, is_user,
&si_code);
else {
err = -EFAULT;
/*
* A thread accessed NULL, we get a fault, but CR2 is invalid.
* This code is used in __do_copy_from_user() of TT mode.
* XXX tt mode is gone, so maybe this isn't needed any more
*/
address = 0;
}
catcher = current->thread.fault_catcher;
if (!err)
goto out;
else if (catcher != NULL) {
current->thread.fault_addr = (void *) address;
UML_LONGJMP(catcher, 1);
}
else if (current->thread.fault_addr != NULL)
panic("fault_addr set but no fault catcher");
else if (!is_user && arch_fixup(ip, regs))
goto out;
if (!is_user) {
show_regs(container_of(regs, struct pt_regs, regs));
panic("Kernel mode fault at addr 0x%lx, ip 0x%lx",
address, ip);
}
show_segv_info(regs);
if (err == -EACCES) {
current->thread.arch.faultinfo = fi;
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
} else {
BUG_ON(err != -EFAULT);
current->thread.arch.faultinfo = fi;
force_sig_fault(SIGSEGV, si_code, (void __user *) address);
}
out:
if (regs)
current->thread.segv_regs = NULL;
return 0;
}
void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
int code, err;
if (!UPT_IS_USER(regs)) {
if (sig == SIGBUS)
printk(KERN_ERR "Bus error - the host /dev/shm or /tmp "
"mount likely just ran out of space\n");
panic("Kernel mode signal %d", sig);
}
arch_examine_signal(sig, regs);
/* Is the signal layout for the signal known?
* Signal data must be scrubbed to prevent information leaks.
*/
code = si->si_code;
err = si->si_errno;
if ((err == 0) && (siginfo_layout(sig, code) == SIL_FAULT)) {
struct faultinfo *fi = UPT_FAULTINFO(regs);
current->thread.arch.faultinfo = *fi;
force_sig_fault(sig, code, (void __user *)FAULT_ADDRESS(*fi));
} else {
printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d) with errno %d\n",
sig, code, err);
force_sig(sig);
}
}
void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
if (current->thread.fault_catcher != NULL)
UML_LONGJMP(current->thread.fault_catcher, 1);
else
relay_signal(sig, si, regs);
}
void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
do_IRQ(WINCH_IRQ, regs);
}
| linux-master | arch/um/kernel/trap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
* Copyright (C) 2015 Thomas Meyer ([email protected])
* Copyright (C) 2012-2014 Cisco Systems
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2019 Intel Corporation
*/
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <asm/irq.h>
#include <asm/param.h>
#include <kern_util.h>
#include <os.h>
#include <linux/time-internal.h>
#include <linux/um_timetravel.h>
#include <shared/init.h>
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
enum time_travel_mode time_travel_mode;
EXPORT_SYMBOL_GPL(time_travel_mode);
static bool time_travel_start_set;
static unsigned long long time_travel_start;
static unsigned long long time_travel_time;
static LIST_HEAD(time_travel_events);
static LIST_HEAD(time_travel_irqs);
static unsigned long long time_travel_timer_interval;
static unsigned long long time_travel_next_event;
static struct time_travel_event time_travel_timer_event;
static int time_travel_ext_fd = -1;
static unsigned int time_travel_ext_waiting;
static bool time_travel_ext_prev_request_valid;
static unsigned long long time_travel_ext_prev_request;
static bool time_travel_ext_free_until_valid;
static unsigned long long time_travel_ext_free_until;
static void time_travel_set_time(unsigned long long ns)
{
if (unlikely(ns < time_travel_time))
panic("time-travel: time goes backwards %lld -> %lld\n",
time_travel_time, ns);
else if (unlikely(ns >= S64_MAX))
panic("The system was going to sleep forever, aborting");
time_travel_time = ns;
}
enum time_travel_message_handling {
TTMH_IDLE,
TTMH_POLL,
TTMH_READ,
};
static void time_travel_handle_message(struct um_timetravel_msg *msg,
enum time_travel_message_handling mode)
{
struct um_timetravel_msg resp = {
.op = UM_TIMETRAVEL_ACK,
};
int ret;
/*
* We can't unlock here, but interrupt signals with a timetravel_handler
* (see um_request_irq_tt) get to the timetravel_handler anyway.
*/
if (mode != TTMH_READ) {
BUG_ON(mode == TTMH_IDLE && !irqs_disabled());
while (os_poll(1, &time_travel_ext_fd) != 0) {
/* nothing */
}
}
ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
if (ret == 0)
panic("time-travel external link is broken\n");
if (ret != sizeof(*msg))
panic("invalid time-travel message - %d bytes\n", ret);
switch (msg->op) {
default:
WARN_ONCE(1, "time-travel: unexpected message %lld\n",
(unsigned long long)msg->op);
break;
case UM_TIMETRAVEL_ACK:
return;
case UM_TIMETRAVEL_RUN:
time_travel_set_time(msg->time);
break;
case UM_TIMETRAVEL_FREE_UNTIL:
time_travel_ext_free_until_valid = true;
time_travel_ext_free_until = msg->time;
break;
}
resp.seq = msg->seq;
os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
}
static u64 time_travel_ext_req(u32 op, u64 time)
{
static int seq;
int mseq = ++seq;
struct um_timetravel_msg msg = {
.op = op,
.time = time,
.seq = mseq,
};
/*
* We need to block even the timetravel handlers of SIGIO here and
* only restore their use when we got the ACK - otherwise we may
* (will) get interrupted by that, try to queue the IRQ for future
* processing and thus send another request while we're still waiting
* for an ACK, but the peer doesn't know we got interrupted and will
* send the ACKs in the same order as the message, but we'd need to
* see them in the opposite order ...
*
* This wouldn't matter *too* much, but some ACKs carry the
* current time (for UM_TIMETRAVEL_GET) and getting another
* ACK without a time would confuse us a lot!
*
* The sequence number assignment that happens here lets us
* debug such message handling issues more easily.
*/
block_signals_hard();
os_write_file(time_travel_ext_fd, &msg, sizeof(msg));
while (msg.op != UM_TIMETRAVEL_ACK)
time_travel_handle_message(&msg, TTMH_READ);
if (msg.seq != mseq)
panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
msg.op, msg.seq, mseq, msg.time);
if (op == UM_TIMETRAVEL_GET)
time_travel_set_time(msg.time);
unblock_signals_hard();
return msg.time;
}
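/*
 * Illustrative exchange (added sketch): a UM_TIMETRAVEL_GET request and
 * its reply look roughly like
 *
 *   -> { .op = UM_TIMETRAVEL_GET, .seq = N, .time = -1 }
 *   <- { .op = UM_TIMETRAVEL_ACK, .seq = N, .time = <controller time> }
 *
 * and the seq check above catches an ACK that belongs to an earlier,
 * interrupted request.
 */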
void __time_travel_wait_readable(int fd)
{
int fds[2] = { fd, time_travel_ext_fd };
int ret;
if (time_travel_mode != TT_MODE_EXTERNAL)
return;
while ((ret = os_poll(2, fds))) {
struct um_timetravel_msg msg;
if (ret == 1)
time_travel_handle_message(&msg, TTMH_READ);
}
}
EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
static void time_travel_ext_update_request(unsigned long long time)
{
if (time_travel_mode != TT_MODE_EXTERNAL)
return;
/* asked for exactly this time previously */
if (time_travel_ext_prev_request_valid &&
time == time_travel_ext_prev_request)
return;
/*
* if we're running and are allowed to run past the request
* then we don't need to update it either
*/
if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
time < time_travel_ext_free_until)
return;
time_travel_ext_prev_request = time;
time_travel_ext_prev_request_valid = true;
time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
}
void __time_travel_propagate_time(void)
{
static unsigned long long last_propagated;
if (last_propagated == time_travel_time)
return;
time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
last_propagated = time_travel_time;
}
EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
/* returns true if we must do a wait to the simtime device */
static bool time_travel_ext_request(unsigned long long time)
{
/*
* If we received an external sync point ("free until") then we
* don't have to request/wait for anything until then, unless
* we're already waiting.
*/
if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
time < time_travel_ext_free_until)
return false;
time_travel_ext_update_request(time);
return true;
}
static void time_travel_ext_wait(bool idle)
{
struct um_timetravel_msg msg = {
.op = UM_TIMETRAVEL_ACK,
};
time_travel_ext_prev_request_valid = false;
time_travel_ext_free_until_valid = false;
time_travel_ext_waiting++;
time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);
/*
* Here we are deep in the idle loop, so we have to break out of the
* kernel abstraction in a sense and implement this in terms of the
* UML system waiting on the VQ interrupt while sleeping, when we get
* the signal it'll call time_travel_ext_vq_notify_done() completing the
* call.
*/
while (msg.op != UM_TIMETRAVEL_RUN)
time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);
time_travel_ext_waiting--;
/* we might request more stuff while polling - reset when we run */
time_travel_ext_prev_request_valid = false;
}
static void time_travel_ext_get_time(void)
{
time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
}
static void __time_travel_update_time(unsigned long long ns, bool idle)
{
if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
time_travel_ext_wait(idle);
else
time_travel_set_time(ns);
}
static struct time_travel_event *time_travel_first_event(void)
{
return list_first_entry_or_null(&time_travel_events,
struct time_travel_event,
list);
}
static void __time_travel_add_event(struct time_travel_event *e,
unsigned long long time)
{
struct time_travel_event *tmp;
bool inserted = false;
unsigned long flags;
if (e->pending)
return;
e->pending = true;
e->time = time;
local_irq_save(flags);
list_for_each_entry(tmp, &time_travel_events, list) {
/*
* Add the new entry before one with higher time,
* or if they're equal and both on stack, because
* in that case we need to unwind the stack in the
* right order, and the later event (timer sleep
* or such) must be dequeued first.
*/
if ((tmp->time > e->time) ||
(tmp->time == e->time && tmp->onstack && e->onstack)) {
list_add_tail(&e->list, &tmp->list);
inserted = true;
break;
}
}
if (!inserted)
list_add_tail(&e->list, &time_travel_events);
tmp = time_travel_first_event();
time_travel_ext_update_request(tmp->time);
time_travel_next_event = tmp->time;
local_irq_restore(flags);
}
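/*
 * Ordering sketch (added example, times assumed): with events queued at
 * t=10 and t=30, adding one at t=20 yields [10, 20, 30]; adding another
 * on-stack event at t=30 places it before the existing on-stack t=30
 * entry, so nested on-stack waits unwind in LIFO order as the comment
 * above requires.
 */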
static void time_travel_add_event(struct time_travel_event *e,
unsigned long long time)
{
if (WARN_ON(!e->fn))
return;
__time_travel_add_event(e, time);
}
void time_travel_add_event_rel(struct time_travel_event *e,
unsigned long long delay_ns)
{
time_travel_add_event(e, time_travel_time + delay_ns);
}
void time_travel_periodic_timer(struct time_travel_event *e)
{
time_travel_add_event(&time_travel_timer_event,
time_travel_time + time_travel_timer_interval);
deliver_alarm();
}
void deliver_time_travel_irqs(void)
{
struct time_travel_event *e;
unsigned long flags;
/*
* Don't do anything for most cases. Note that because here we have
* to disable IRQs (and re-enable later) we'll actually recurse at
* the end of the function, so this is strictly necessary.
*/
if (likely(list_empty(&time_travel_irqs)))
return;
local_irq_save(flags);
irq_enter();
while ((e = list_first_entry_or_null(&time_travel_irqs,
struct time_travel_event,
list))) {
list_del(&e->list);
e->pending = false;
e->fn(e);
}
irq_exit();
local_irq_restore(flags);
}
static void time_travel_deliver_event(struct time_travel_event *e)
{
if (e == &time_travel_timer_event) {
/*
* deliver_alarm() does the irq_enter/irq_exit
* by itself, so must handle it specially here
*/
e->fn(e);
} else if (irqs_disabled()) {
list_add_tail(&e->list, &time_travel_irqs);
/*
* set pending again, it was set to false when the
* event was deleted from the original list, but
* now it's still pending until we deliver the IRQ.
*/
e->pending = true;
} else {
unsigned long flags;
local_irq_save(flags);
irq_enter();
e->fn(e);
irq_exit();
local_irq_restore(flags);
}
}
bool time_travel_del_event(struct time_travel_event *e)
{
unsigned long flags;
if (!e->pending)
return false;
local_irq_save(flags);
list_del(&e->list);
e->pending = false;
local_irq_restore(flags);
return true;
}
static void time_travel_update_time(unsigned long long next, bool idle)
{
struct time_travel_event ne = {
.onstack = true,
};
struct time_travel_event *e;
bool finished = idle;
/* add it without a handler - we deal with that specifically below */
__time_travel_add_event(&ne, next);
do {
e = time_travel_first_event();
BUG_ON(!e);
__time_travel_update_time(e->time, idle);
/* new events may have been inserted while we were waiting */
if (e == time_travel_first_event()) {
BUG_ON(!time_travel_del_event(e));
BUG_ON(time_travel_time != e->time);
if (e == &ne) {
finished = true;
} else {
if (e->onstack)
panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
time_travel_time, e->time, e);
time_travel_deliver_event(e);
}
}
e = time_travel_first_event();
if (e)
time_travel_ext_update_request(e->time);
} while (ne.pending && !finished);
time_travel_del_event(&ne);
}
void time_travel_ndelay(unsigned long nsec)
{
time_travel_update_time(time_travel_time + nsec, false);
}
EXPORT_SYMBOL(time_travel_ndelay);
void time_travel_add_irq_event(struct time_travel_event *e)
{
BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);
time_travel_ext_get_time();
/*
* We could model interrupt latency here, for now just
* don't have any latency at all and request the exact
* same time (again) to run the interrupt...
*/
time_travel_add_event(e, time_travel_time);
}
EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
static void time_travel_oneshot_timer(struct time_travel_event *e)
{
deliver_alarm();
}
void time_travel_sleep(void)
{
/*
* Wait "forever" (using S64_MAX because there are some potential
* wrapping issues, especially with the current TT_MODE_EXTERNAL
* controller application).
*/
unsigned long long next = S64_MAX;
if (time_travel_mode == TT_MODE_BASIC)
os_timer_disable();
time_travel_update_time(next, true);
if (time_travel_mode == TT_MODE_BASIC &&
time_travel_timer_event.pending) {
if (time_travel_timer_event.fn == time_travel_periodic_timer) {
/*
* This is somewhat wrong - we should get the first
* one sooner like the os_timer_one_shot() below...
*/
os_timer_set_interval(time_travel_timer_interval);
} else {
os_timer_one_shot(time_travel_timer_event.time - next);
}
}
}
static void time_travel_handle_real_alarm(void)
{
time_travel_set_time(time_travel_next_event);
time_travel_del_event(&time_travel_timer_event);
if (time_travel_timer_event.fn == time_travel_periodic_timer)
time_travel_add_event(&time_travel_timer_event,
time_travel_time +
time_travel_timer_interval);
}
static void time_travel_set_interval(unsigned long long interval)
{
time_travel_timer_interval = interval;
}
static int time_travel_connect_external(const char *socket)
{
const char *sep;
unsigned long long id = (unsigned long long)-1;
int rc;
if ((sep = strchr(socket, ':'))) {
char buf[25] = {};
if (sep - socket > sizeof(buf) - 1)
goto invalid_number;
memcpy(buf, socket, sep - socket);
if (kstrtoull(buf, 0, &id)) {
invalid_number:
panic("time-travel: invalid external ID in string '%s'\n",
socket);
return -EINVAL;
}
socket = sep + 1;
}
rc = os_connect_socket(socket);
if (rc < 0) {
panic("time-travel: failed to connect to external socket %s\n",
socket);
return rc;
}
time_travel_ext_fd = rc;
time_travel_ext_req(UM_TIMETRAVEL_START, id);
return 1;
}
static void time_travel_set_start(void)
{
if (time_travel_start_set)
return;
switch (time_travel_mode) {
case TT_MODE_EXTERNAL:
time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
/* controller gave us the *current* time, so adjust by that */
time_travel_ext_get_time();
time_travel_start -= time_travel_time;
break;
case TT_MODE_INFCPU:
case TT_MODE_BASIC:
if (!time_travel_start_set)
time_travel_start = os_persistent_clock_emulation();
break;
case TT_MODE_OFF:
/* we just read the host clock with os_persistent_clock_emulation() */
break;
}
time_travel_start_set = true;
}
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
#define time_travel_start_set 0
#define time_travel_start 0
#define time_travel_time 0
#define time_travel_ext_waiting 0
static inline void time_travel_update_time(unsigned long long ns, bool retearly)
{
}
static inline void time_travel_handle_real_alarm(void)
{
}
static void time_travel_set_interval(unsigned long long interval)
{
}
static inline void time_travel_set_start(void)
{
}
/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);
/* these are empty macros so the struct/fn need not exist */
#define time_travel_add_event(e, time) do { } while (0)
/* externally not usable - redefine here so we can */
#undef time_travel_del_event
#define time_travel_del_event(e) do { } while (0)
#endif
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
unsigned long flags;
/*
* In basic time-travel mode we still get real interrupts
* (signals) but since we don't read time from the OS, we
* must update the simulated time here to the expiry when
* we get a signal.
* This is not the case in inf-cpu mode, since there we
* never get any real signals from the OS.
*/
if (time_travel_mode == TT_MODE_BASIC)
time_travel_handle_real_alarm();
local_irq_save(flags);
do_IRQ(TIMER_IRQ, regs);
local_irq_restore(flags);
}
static int itimer_shutdown(struct clock_event_device *evt)
{
if (time_travel_mode != TT_MODE_OFF)
time_travel_del_event(&time_travel_timer_event);
if (time_travel_mode != TT_MODE_INFCPU &&
time_travel_mode != TT_MODE_EXTERNAL)
os_timer_disable();
return 0;
}
static int itimer_set_periodic(struct clock_event_device *evt)
{
unsigned long long interval = NSEC_PER_SEC / HZ;
if (time_travel_mode != TT_MODE_OFF) {
time_travel_del_event(&time_travel_timer_event);
time_travel_set_event_fn(&time_travel_timer_event,
time_travel_periodic_timer);
time_travel_set_interval(interval);
time_travel_add_event(&time_travel_timer_event,
time_travel_time + interval);
}
if (time_travel_mode != TT_MODE_INFCPU &&
time_travel_mode != TT_MODE_EXTERNAL)
os_timer_set_interval(interval);
return 0;
}
static int itimer_next_event(unsigned long delta,
struct clock_event_device *evt)
{
delta += 1;
if (time_travel_mode != TT_MODE_OFF) {
time_travel_del_event(&time_travel_timer_event);
time_travel_set_event_fn(&time_travel_timer_event,
time_travel_oneshot_timer);
time_travel_add_event(&time_travel_timer_event,
time_travel_time + delta);
}
if (time_travel_mode != TT_MODE_INFCPU &&
time_travel_mode != TT_MODE_EXTERNAL)
return os_timer_one_shot(delta);
return 0;
}
static int itimer_one_shot(struct clock_event_device *evt)
{
return itimer_next_event(0, evt);
}
static struct clock_event_device timer_clockevent = {
.name = "posix-timer",
.rating = 250,
.cpumask = cpu_possible_mask,
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = itimer_shutdown,
.set_state_periodic = itimer_set_periodic,
.set_state_oneshot = itimer_one_shot,
.set_next_event = itimer_next_event,
.shift = 0,
.max_delta_ns = 0xffffffff,
.max_delta_ticks = 0xffffffff,
.min_delta_ns = TIMER_MIN_DELTA,
.min_delta_ticks = TIMER_MIN_DELTA, // microsecond resolution should be enough for anyone
.irq = 0,
.mult = 1,
};
static irqreturn_t um_timer(int irq, void *dev)
{
if (get_current()->mm != NULL)
{
/* userspace - relay signal, results in correct userspace timers */
os_alarm_process(get_current()->mm->context.id.u.pid);
}
(*timer_clockevent.event_handler)(&timer_clockevent);
return IRQ_HANDLED;
}
static u64 timer_read(struct clocksource *cs)
{
if (time_travel_mode != TT_MODE_OFF) {
/*
* We make reading the timer cost a bit so that we don't get
* stuck in loops that expect time to move more than the
* exact requested sleep amount, e.g. python's socket server,
* see https://bugs.python.org/issue37026.
*
* However, don't do that when we're in interrupt or such as
* then we might recurse into our own processing, and get to
* even more waiting, and that's not good - it messes up the
* "what do I do next" and onstack event we use to know when
* to return from time_travel_update_time().
*/
if (!irqs_disabled() && !in_interrupt() && !in_softirq() &&
!time_travel_ext_waiting)
time_travel_update_time(time_travel_time +
TIMER_MULTIPLIER,
false);
return time_travel_time / TIMER_MULTIPLIER;
}
return os_nsecs() / TIMER_MULTIPLIER;
}
static struct clocksource timer_clocksource = {
.name = "timer",
.rating = 300,
.read = timer_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init um_timer_setup(void)
{
int err;
err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
if (err != 0)
printk(KERN_ERR "register_timer : request_irq failed - "
"errno = %d\n", -err);
err = os_timer_create();
if (err != 0) {
printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
return;
}
err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
if (err) {
printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
return;
}
clockevents_register_device(&timer_clockevent);
}
void read_persistent_clock64(struct timespec64 *ts)
{
long long nsecs;
time_travel_set_start();
if (time_travel_mode != TT_MODE_OFF)
nsecs = time_travel_start + time_travel_time;
else
nsecs = os_persistent_clock_emulation();
set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
nsecs % NSEC_PER_SEC);
}
void __init time_init(void)
{
timer_set_signal_handler();
late_time_init = um_timer_setup;
}
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
unsigned long calibrate_delay_is_known(void)
{
if (time_travel_mode == TT_MODE_INFCPU ||
time_travel_mode == TT_MODE_EXTERNAL)
return 1;
return 0;
}
int setup_time_travel(char *str)
{
if (strcmp(str, "=inf-cpu") == 0) {
time_travel_mode = TT_MODE_INFCPU;
timer_clockevent.name = "time-travel-timer-infcpu";
timer_clocksource.name = "time-travel-clock";
return 1;
}
if (strncmp(str, "=ext:", 5) == 0) {
time_travel_mode = TT_MODE_EXTERNAL;
timer_clockevent.name = "time-travel-timer-external";
timer_clocksource.name = "time-travel-clock-external";
return time_travel_connect_external(str + 5);
}
if (!*str) {
time_travel_mode = TT_MODE_BASIC;
timer_clockevent.name = "time-travel-timer";
timer_clocksource.name = "time-travel-clock";
return 1;
}
return -EINVAL;
}
__setup("time-travel", setup_time_travel);
__uml_help(setup_time_travel,
"time-travel\n"
"This option just enables basic time travel mode, in which the clock/timers\n"
"inside the UML instance skip forward when there's nothing to do, rather than\n"
"waiting for real time to elapse. However, instance CPU speed is limited by\n"
"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
"clock (but quicker when there's nothing to do).\n"
"\n"
"time-travel=inf-cpu\n"
"This enables time travel mode with infinite processing power, in which there\n"
"are no wall clock timers, and any CPU processing happens - as seen from the\n"
"guest - instantly. This can be useful for accurate simulation regardless of\n"
"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
"\n"
"time-travel=ext:[ID:]/path/to/socket\n"
"This enables time travel mode similar to =inf-cpu, except the system will\n"
"use the given socket to coordinate with a central scheduler, in order to\n"
"have more than one system simultaneously be on simulated time. The virtio\n"
"driver code in UML knows about this so you can also simulate networks and\n"
"devices using it, assuming the device has the right capabilities.\n"
"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
int setup_time_travel_start(char *str)
{
int err;
err = kstrtoull(str, 0, &time_travel_start);
if (err)
return err;
time_travel_start_set = 1;
return 1;
}
__setup("time-travel-start", setup_time_travel_start);
__uml_help(setup_time_travel_start,
"time-travel-start=<seconds>\n"
"Configure the UML instance's wall clock to start at this value rather than\n"
"the host's wall clock at the time of UML boot.\n");
#endif
| linux-master | arch/um/kernel/time.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/panic_notifier.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kmsg_dump.h>
#include <linux/suspend.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <as-layout.h>
#include <arch.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
#include "um_arch.h"
#define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty0"
/* Changed in add_arg and setup_arch, which run before SMP is started */
static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
static void __init add_arg(char *arg)
{
if (strlen(command_line) + strlen(arg) + 1 > COMMAND_LINE_SIZE) {
os_warn("add_arg: Too many command line arguments!\n");
exit(1);
}
if (strlen(command_line) > 0)
strcat(command_line, " ");
strcat(command_line, arg);
}
/*
* These fields are initialized at boot time and not changed.
* XXX This structure is used only in the non-SMP case. Maybe this
* should be moved to smp.c.
*/
struct cpuinfo_um boot_cpu_data = {
.loops_per_jiffy = 0,
.ipi_pipe = { -1, -1 },
.cache_alignment = L1_CACHE_BYTES,
.x86_capability = { 0 }
};
EXPORT_SYMBOL(boot_cpu_data);
union thread_union cpu0_irqstack
__section(".data..init_irqstack") =
{ .thread_info = INIT_THREAD_INFO(init_task) };
/* Changed in setup_arch, which is called in early boot */
static char host_info[(__NEW_UTS_LEN + 1) * 5];
static int show_cpuinfo(struct seq_file *m, void *v)
{
int i = 0;
seq_printf(m, "processor\t: %d\n", i);
seq_printf(m, "vendor_id\t: User Mode Linux\n");
seq_printf(m, "model name\t: UML\n");
seq_printf(m, "mode\t\t: skas\n");
seq_printf(m, "host\t\t: %s\n", host_info);
seq_printf(m, "fpu\t\t: %s\n", cpu_has(&boot_cpu_data, X86_FEATURE_FPU) ? "yes" : "no");
seq_printf(m, "flags\t\t:");
for (i = 0; i < 32*NCAPINTS; i++)
if (cpu_has(&boot_cpu_data, i) && (x86_cap_flags[i] != NULL))
seq_printf(m, " %s", x86_cap_flags[i]);
seq_printf(m, "\n");
seq_printf(m, "cache_alignment\t: %d\n", boot_cpu_data.cache_alignment);
seq_printf(m, "bogomips\t: %lu.%02lu\n",
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100);
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < nr_cpu_ids ? &boot_cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
/* Set in linux_main */
unsigned long uml_physmem;
EXPORT_SYMBOL(uml_physmem);
unsigned long uml_reserved; /* Also modified in mem_init */
unsigned long start_vm;
unsigned long end_vm;
/* Set in uml_ncpus_setup */
int ncpus = 1;
/* Set in early boot */
static int have_root __initdata;
static int have_console __initdata;
/* Set in uml_mem_setup and modified in linux_main */
long long physmem_size = 64 * 1024 * 1024;
EXPORT_SYMBOL(physmem_size);
static const char *usage_string =
"User Mode Linux v%s\n"
" available at http://user-mode-linux.sourceforge.net/\n\n";
static int __init uml_version_setup(char *line, int *add)
{
/* Explicitly use printf() to show version in stdout */
printf("%s\n", init_utsname()->release);
exit(0);
return 0;
}
__uml_setup("--version", uml_version_setup,
"--version\n"
" Prints the version number of the kernel.\n\n"
);
static int __init uml_root_setup(char *line, int *add)
{
have_root = 1;
return 0;
}
__uml_setup("root=", uml_root_setup,
"root=<file containing the root fs>\n"
" This is actually used by the generic kernel in exactly the same\n"
" way as in any other kernel. If you configure a number of block\n"
" devices and want to boot off something other than ubd0, you \n"
" would use something like:\n"
" root=/dev/ubd5\n\n"
);
static int __init no_skas_debug_setup(char *line, int *add)
{
os_warn("'debug' is not necessary to gdb UML in skas mode - run\n");
os_warn("'gdb linux'\n");
return 0;
}
__uml_setup("debug", no_skas_debug_setup,
"debug\n"
" this flag is not needed to run gdb on UML in skas mode\n\n"
);
static int __init uml_console_setup(char *line, int *add)
{
have_console = 1;
return 0;
}
__uml_setup("console=", uml_console_setup,
"console=<preferred console>\n"
" Specify the preferred console output driver\n\n"
);
static int __init Usage(char *line, int *add)
{
const char **p;
printf(usage_string, init_utsname()->release);
p = &__uml_help_start;
/* Explicitly use printf() to show help in stdout */
while (p < &__uml_help_end) {
printf("%s", *p);
p++;
}
exit(0);
return 0;
}
__uml_setup("--help", Usage,
"--help\n"
" Prints this message.\n\n"
);
static void __init uml_checksetup(char *line, int *add)
{
struct uml_param *p;
p = &__uml_setup_start;
while (p < &__uml_setup_end) {
size_t n;
n = strlen(p->str);
if (!strncmp(line, p->str, n) && p->setup_func(line + n, add))
return;
p++;
}
}
static void __init uml_postsetup(void)
{
initcall_t *p;
p = &__uml_postsetup_start;
while (p < &__uml_postsetup_end) {
(*p)();
p++;
}
return;
}
static int panic_exit(struct notifier_block *self, unsigned long unused1,
void *unused2)
{
kmsg_dump(KMSG_DUMP_PANIC);
bust_spinlocks(1);
bust_spinlocks(0);
uml_exitcode = 1;
os_dump_core();
return NOTIFY_DONE;
}
static struct notifier_block panic_exit_notifier = {
.notifier_call = panic_exit,
.priority = INT_MAX - 1, /* run as 2nd notifier, won't return */
};
void uml_finishsetup(void)
{
atomic_notifier_chain_register(&panic_notifier_list,
&panic_exit_notifier);
uml_postsetup();
new_thread_handler();
}
/* Set during early boot */
unsigned long stub_start;
unsigned long task_size;
EXPORT_SYMBOL(task_size);
unsigned long host_task_size;
unsigned long brk_start;
unsigned long end_iomem;
EXPORT_SYMBOL(end_iomem);
#define MIN_VMALLOC (32 * 1024 * 1024)
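/* Set each CPU capability bit whose flag name occurs in the given flags line. */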
static void parse_host_cpu_flags(char *line)
{
int i;
for (i = 0; i < 32*NCAPINTS; i++) {
if ((x86_cap_flags[i] != NULL) && strstr(line, x86_cap_flags[i]))
set_cpu_cap(&boot_cpu_data, i);
}
}
static void parse_cache_line(char *line)
{
unsigned long res;
char *to_parse = strstr(line, ":");
if (to_parse) {
to_parse++;
while (*to_parse != 0 && isspace(*to_parse)) {
to_parse++;
}
if (kstrtoul(to_parse, 10, &res) == 0 && is_power_of_2(res))
boot_cpu_data.cache_alignment = res;
else
boot_cpu_data.cache_alignment = L1_CACHE_BYTES;
}
}
int __init linux_main(int argc, char **argv)
{
unsigned long avail, diff;
unsigned long virtmem_size, max_physmem;
unsigned long stack;
unsigned int i;
int add;
for (i = 1; i < argc; i++) {
if ((i == 1) && (argv[i][0] == ' '))
continue;
add = 1;
uml_checksetup(argv[i], &add);
if (add)
add_arg(argv[i]);
}
if (have_root == 0)
add_arg(DEFAULT_COMMAND_LINE_ROOT);
if (have_console == 0)
add_arg(DEFAULT_COMMAND_LINE_CONSOLE);
host_task_size = os_get_top_address();
/* reserve a few pages for the stubs (taking care of data alignment) */
/* align the data portion */
BUILD_BUG_ON(!is_power_of_2(STUB_DATA_PAGES));
stub_start = (host_task_size - 1) & ~(STUB_DATA_PAGES * PAGE_SIZE - 1);
/* another page for the code portion */
stub_start -= PAGE_SIZE;
host_task_size = stub_start;
/*
* TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
* out
*/
task_size = host_task_size & PGDIR_MASK;
/* OS sanity checks that need to happen before the kernel runs */
os_early_checks();
get_host_cpu_features(parse_host_cpu_flags, parse_cache_line);
brk_start = (unsigned long) sbrk(0);
/*
* Increase physical memory size for exec-shield users
* so they actually get what they asked for. This should
* add zero for non-exec shield users
*/
diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
if (diff > 1024 * 1024) {
os_info("Adding %ld bytes to physical memory to account for "
"exec-shield gap\n", diff);
physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
}
uml_physmem = (unsigned long) __binary_start & PAGE_MASK;
/* Reserve up to 4M after the current brk */
uml_reserved = ROUND_4M(brk_start) + (1 << 22);
setup_machinename(init_utsname()->machine);
highmem = 0;
iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC;
/*
* Zones have to begin on a 1 << MAX_ORDER page boundary,
* so this makes sure that's true for highmem
*/
max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
if (physmem_size + iomem_size > max_physmem) {
highmem = physmem_size + iomem_size - max_physmem;
physmem_size -= highmem;
}
high_physmem = uml_physmem + physmem_size;
end_iomem = high_physmem + iomem_size;
high_memory = (void *) end_iomem;
start_vm = VMALLOC_START;
virtmem_size = physmem_size;
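/*
 * argv sits near the top of the host stack, so rounding it down to a
 * 1 MiB boundary gives an estimate of how much address space is left
 * above start_vm for the kernel's virtual mappings.
 */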
stack = (unsigned long) argv;
stack &= ~(1024 * 1024 - 1);
avail = stack - start_vm;
if (physmem_size > avail)
virtmem_size = avail;
end_vm = start_vm + virtmem_size;
if (virtmem_size < physmem_size)
os_info("Kernel virtual memory size shrunk to %lu bytes\n",
virtmem_size);
os_flush_stdout();
return start_uml();
}
int __init __weak read_initrd(void)
{
return 0;
}
void __init setup_arch(char **cmdline_p)
{
u8 rng_seed[32];
stack_protections((unsigned long) &init_thread_info);
setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
mem_total_pages(physmem_size, iomem_size, highmem);
uml_dtb_init();
read_initrd();
paging_init();
strscpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
setup_hostinfo(host_info, sizeof host_info);
if (os_getrandom(rng_seed, sizeof(rng_seed), 0) == sizeof(rng_seed)) {
add_bootloader_randomness(rng_seed, sizeof(rng_seed));
memzero_explicit(rng_seed, sizeof(rng_seed));
}
}
void __init arch_cpu_finalize_init(void)
{
arch_check_bugs();
os_check_bugs();
}
void apply_seal_endbr(s32 *start, s32 *end)
{
}
void apply_retpolines(s32 *start, s32 *end)
{
}
void apply_returns(s32 *start, s32 *end)
{
}
void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
s32 *start_cfi, s32 *end_cfi)
{
}
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
}
void *text_poke(void *addr, const void *opcode, size_t len)
{
/*
* In UML, the only reference to this function is in
* apply_relocate_add(), which shouldn't ever actually call this
* because UML doesn't have live patching.
*/
WARN_ON(1);
return memcpy(addr, opcode, len);
}
void text_poke_sync(void)
{
}
void uml_pm_wake(void)
{
pm_system_wakeup();
}
#ifdef CONFIG_PM_SLEEP
static int um_suspend_valid(suspend_state_t state)
{
return state == PM_SUSPEND_MEM;
}
static int um_suspend_prepare(void)
{
um_irqs_suspend();
return 0;
}
static int um_suspend_enter(suspend_state_t state)
{
if (WARN_ON(state != PM_SUSPEND_MEM))
return -EINVAL;
/*
* This is identical to the idle sleep, but we've just
* (during suspend) turned off all interrupt sources
* except for the ones we want, so now we can only wake
* up on something we actually want to wake up on. All
* timing has also been suspended.
*/
um_idle_sleep();
return 0;
}
static void um_suspend_finish(void)
{
um_irqs_resume();
}
const struct platform_suspend_ops um_suspend_ops = {
.valid = um_suspend_valid,
.prepare = um_suspend_prepare,
.enter = um_suspend_enter,
.finish = um_suspend_finish,
};
static int init_pm_wake_signal(void)
{
/*
* In external time-travel mode we can't use signals to wake up
* since that would mess with the scheduling. We'll have to do
* some additional work to support wakeup on virtio devices or
* similar, perhaps implementing a fake RTC controller that can
* trigger wakeup (and request the appropriate scheduling from
* the external scheduler when going to suspend.)
*/
if (time_travel_mode != TT_MODE_EXTERNAL)
register_pm_wake_signal();
suspend_set_ops(&um_suspend_ops);
return 0;
}
late_initcall(init_pm_wake_signal);
#endif
| linux-master | arch/um/kernel/um_arch.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2013 Richard Weinberger <[email protected]>
* Copyright (C) 2014 Google Inc., Author: Daniel Walter <[email protected]>
*/
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
void dump_trace(struct task_struct *tsk,
const struct stacktrace_ops *ops,
void *data)
{
int reliable = 0;
unsigned long *sp, bp, addr;
struct pt_regs *segv_regs = tsk->thread.segv_regs;
struct stack_frame *frame;
bp = get_frame_pointer(tsk, segv_regs);
sp = get_stack_pointer(tsk, segv_regs);
frame = (struct stack_frame *)bp;
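/*
 * Scan the stack one word at a time.  A kernel text address is only
 * reported as reliable when its slot lies immediately above the saved
 * frame pointer (i.e. it is a real return address); in that case the
 * frame chain is advanced to the next frame.
 */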
while (((long) sp & (THREAD_SIZE-1)) != 0) {
addr = READ_ONCE_NOCHECK(*sp);
if (__kernel_text_address(addr)) {
reliable = 0;
if ((unsigned long) sp == bp + sizeof(long)) {
frame = frame ? frame->next_frame : NULL;
bp = (unsigned long)frame;
reliable = 1;
}
ops->address(data, addr, reliable);
}
sp++;
}
}
static void save_addr(void *data, unsigned long address, int reliable)
{
struct stack_trace *trace = data;
if (!reliable)
return;
if (trace->nr_entries >= trace->max_entries)
return;
trace->entries[trace->nr_entries++] = address;
}
static const struct stacktrace_ops dump_ops = {
.address = save_addr
};
static void __save_stack_trace(struct task_struct *tsk, struct stack_trace *trace)
{
dump_trace(tsk, &dump_ops, trace);
}
void save_stack_trace(struct stack_trace *trace)
{
__save_stack_trace(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
__save_stack_trace(tsk, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
| linux-master | arch/um/kernel/stacktrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/ftrace.h>
#include <asm/siginfo.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <frame_kern.h>
#include <kern_util.h>
#include <os.h>
EXPORT_SYMBOL(block_signals);
EXPORT_SYMBOL(unblock_signals);
void block_signals_trace(void)
{
block_signals();
if (current_thread_info())
trace_hardirqs_off();
}
void unblock_signals_trace(void)
{
if (current_thread_info())
trace_hardirqs_on();
unblock_signals();
}
void um_trace_signals_on(void)
{
if (current_thread_info())
trace_hardirqs_on();
}
void um_trace_signals_off(void)
{
if (current_thread_info())
trace_hardirqs_off();
}
/*
* OK, we're invoking a handler
*/
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int singlestep = 0;
unsigned long sp;
int err;
if (test_thread_flag(TIF_SINGLESTEP) && (current->ptrace & PT_PTRACED))
singlestep = 1;
/* Did we come from a system call? */
if (PT_REGS_SYSCALL_NR(regs) >= 0) {
/* If so, check system call restarting.. */
switch (PT_REGS_SYSCALL_RET(regs)) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
PT_REGS_SYSCALL_RET(regs) = -EINTR;
break;
case -ERESTARTSYS:
if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
PT_REGS_SYSCALL_RET(regs) = -EINTR;
break;
}
fallthrough;
case -ERESTARTNOINTR:
PT_REGS_RESTART_SYSCALL(regs);
PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs);
break;
}
}
sp = PT_REGS_SP(regs);
if ((ksig->ka.sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0))
sp = current->sas_ss_sp + current->sas_ss_size;
#ifdef CONFIG_ARCH_HAS_SC_SIGNALS
if (!(ksig->ka.sa.sa_flags & SA_SIGINFO))
err = setup_signal_stack_sc(sp, ksig, regs, oldset);
else
#endif
err = setup_signal_stack_si(sp, ksig, regs, oldset);
signal_setup_done(err, ksig, singlestep);
}
void do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
int handled_sig = 0;
while (get_signal(&ksig)) {
handled_sig = 1;
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
}
/* Did we come from a system call? */
if (!handled_sig && (PT_REGS_SYSCALL_NR(regs) >= 0)) {
/* Restart the system call - no handlers present */
switch (PT_REGS_SYSCALL_RET(regs)) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs);
PT_REGS_RESTART_SYSCALL(regs);
break;
case -ERESTART_RESTARTBLOCK:
PT_REGS_ORIG_SYSCALL(regs) = __NR_restart_syscall;
PT_REGS_RESTART_SYSCALL(regs);
break;
}
}
/*
* This closes a way to execute a system call on the host. If
* you set a breakpoint on a system call instruction and singlestep
* from it, the tracing thread used to PTRACE_SINGLESTEP the process
* rather than PTRACE_SYSCALL it, allowing the system call to execute
* on the host. The tracing thread will check this flag and
* PTRACE_SYSCALL if necessary.
*/
if (test_thread_flag(TIF_SINGLESTEP))
current->thread.singlestep_syscall =
is_syscall(PT_REGS_IP(&current->thread.regs));
/*
* if there's no signal to deliver, we just put the saved sigmask
* back
*/
if (!handled_sig)
restore_saved_sigmask();
}
| linux-master | arch/um/kernel/signal.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kmsg_dump.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/string.h>
#include <shared/init.h>
#include <shared/kern.h>
#include <os.h>
static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
{
static struct kmsg_dump_iter iter;
static DEFINE_SPINLOCK(lock);
static char line[1024];
struct console *con;
unsigned long flags;
size_t len = 0;
int cookie;
/*
* If no consoles are available to output crash information, dump
* the kmsg buffer to stdout.
*/
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
/*
* The ttynull console and disabled consoles are ignored
* since they cannot output. All other consoles are
* expected to output the crash information.
*/
if (strcmp(con->name, "ttynull") != 0 &&
(console_srcu_read_flags(con) & CON_ENABLED)) {
break;
}
}
console_srcu_read_unlock(cookie);
if (con)
return;
if (!spin_trylock_irqsave(&lock, flags))
return;
kmsg_dump_rewind(&iter);
printf("kmsg_dump:\n");
while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len)) {
line[len] = '\0';
printf("%s", line);
}
spin_unlock_irqrestore(&lock, flags);
}
static struct kmsg_dumper kmsg_dumper = {
.dump = kmsg_dumper_stdout
};
int __init kmsg_dumper_stdout_init(void)
{
return kmsg_dump_register(&kmsg_dumper);
}
__uml_postsetup(kmsg_dumper_stdout_init);
| linux-master | arch/um/kernel/kmsg_dump.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
*/
#include <linux/interrupt.h>
#include <irq_kern.h>
#include <os.h>
#include <sigio.h>
/* Protected by sigio_lock() called from write_sigio_workaround */
static int sigio_irq_fd = -1;
static irqreturn_t sigio_interrupt(int irq, void *data)
{
char c;
os_read_file(sigio_irq_fd, &c, sizeof(c));
return IRQ_HANDLED;
}
int write_sigio_irq(int fd)
{
int err;
err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt,
0, "write sigio", NULL);
if (err < 0) {
printk(KERN_ERR "write_sigio_irq : um_request_irq failed, "
"err = %d\n", err);
return -1;
}
sigio_irq_fd = fd;
return 0;
}
/* These are called from os-Linux/sigio.c to protect its pollfds arrays. */
static DEFINE_MUTEX(sigio_mutex);
void sigio_lock(void)
{
mutex_lock(&sigio_mutex);
}
void sigio_unlock(void)
{
mutex_unlock(&sigio_mutex);
}
| linux-master | arch/um/kernel/sigio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>
struct host_vm_change {
struct host_vm_op {
enum { NONE, MMAP, MUNMAP, MPROTECT } type;
union {
struct {
unsigned long addr;
unsigned long len;
unsigned int prot;
int fd;
__u64 offset;
} mmap;
struct {
unsigned long addr;
unsigned long len;
} munmap;
struct {
unsigned long addr;
unsigned long len;
unsigned int prot;
} mprotect;
} u;
} ops[1];
int userspace;
int index;
struct mm_struct *mm;
void *data;
int force;
};
#define INIT_HVC(mm, force, userspace) \
((struct host_vm_change) \
{ .ops = { { .type = NONE } }, \
.mm = mm, \
.data = NULL, \
.userspace = userspace, \
.index = 0, \
.force = force })
static void report_enomem(void)
{
printk(KERN_ERR "UML ran out of memory on the host side! "
"This can happen due to a memory limitation or "
"vm.max_map_count has been reached.\n");
}
static int do_ops(struct host_vm_change *hvc, int end,
int finished)
{
struct host_vm_op *op;
int i, ret = 0;
for (i = 0; i < end && !ret; i++) {
op = &hvc->ops[i];
switch (op->type) {
case MMAP:
if (hvc->userspace)
ret = map(&hvc->mm->context.id, op->u.mmap.addr,
op->u.mmap.len, op->u.mmap.prot,
op->u.mmap.fd,
op->u.mmap.offset, finished,
&hvc->data);
else
map_memory(op->u.mmap.addr, op->u.mmap.offset,
op->u.mmap.len, 1, 1, 1);
break;
case MUNMAP:
if (hvc->userspace)
ret = unmap(&hvc->mm->context.id,
op->u.munmap.addr,
op->u.munmap.len, finished,
&hvc->data);
else
ret = os_unmap_memory(
(void *) op->u.munmap.addr,
op->u.munmap.len);
break;
case MPROTECT:
if (hvc->userspace)
ret = protect(&hvc->mm->context.id,
op->u.mprotect.addr,
op->u.mprotect.len,
op->u.mprotect.prot,
finished, &hvc->data);
else
ret = os_protect_memory(
(void *) op->u.mprotect.addr,
op->u.mprotect.len,
1, 1, 1);
break;
default:
printk(KERN_ERR "Unknown op type %d in do_ops\n",
op->type);
BUG();
break;
}
}
if (ret == -ENOMEM)
report_enomem();
return ret;
}
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
unsigned int prot, struct host_vm_change *hvc)
{
__u64 offset;
struct host_vm_op *last;
int fd = -1, ret = 0;
if (hvc->userspace)
fd = phys_mapping(phys, &offset);
else
offset = phys;
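/*
 * If this mapping is contiguous, both virtually and in the backing
 * object, with the previously queued mmap op and shares its fd and
 * protection, extend that op instead of queueing a new one.
 */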
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MMAP) &&
(last->u.mmap.addr + last->u.mmap.len == virt) &&
(last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
(last->u.mmap.offset + last->u.mmap.len == offset)) {
last->u.mmap.len += len;
return 0;
}
}
if (hvc->index == ARRAY_SIZE(hvc->ops)) {
ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
hvc->index = 0;
}
hvc->ops[hvc->index++] = ((struct host_vm_op)
{ .type = MMAP,
.u = { .mmap = { .addr = virt,
.len = len,
.prot = prot,
.fd = fd,
.offset = offset }
} });
return ret;
}
static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_change *hvc)
{
struct host_vm_op *last;
int ret = 0;
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MUNMAP) &&
(last->u.munmap.addr + last->u.mmap.len == addr)) {
last->u.munmap.len += len;
return 0;
}
}
if (hvc->index == ARRAY_SIZE(hvc->ops)) {
ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
hvc->index = 0;
}
hvc->ops[hvc->index++] = ((struct host_vm_op)
{ .type = MUNMAP,
.u = { .munmap = { .addr = addr,
.len = len } } });
return ret;
}
static int add_mprotect(unsigned long addr, unsigned long len,
unsigned int prot, struct host_vm_change *hvc)
{
struct host_vm_op *last;
int ret = 0;
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MPROTECT) &&
(last->u.mprotect.addr + last->u.mprotect.len == addr) &&
(last->u.mprotect.prot == prot)) {
last->u.mprotect.len += len;
return 0;
}
}
if (hvc->index == ARRAY_SIZE(hvc->ops)) {
ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
hvc->index = 0;
}
hvc->ops[hvc->index++] = ((struct host_vm_op)
{ .type = MPROTECT,
.u = { .mprotect = { .addr = addr,
.len = len,
.prot = prot } } });
return ret;
}
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end,
struct host_vm_change *hvc)
{
pte_t *pte;
int r, w, x, prot, ret = 0;
pte = pte_offset_kernel(pmd, addr);
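/*
 * Build the host protection from the PTE, dropping all access while the
 * page is not yet young and write access while it is not yet dirty, so
 * that the resulting faults let accessed/dirty state be tracked.
 */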
do {
r = pte_read(*pte);
w = pte_write(*pte);
x = pte_exec(*pte);
if (!pte_young(*pte)) {
r = 0;
w = 0;
} else if (!pte_dirty(*pte))
w = 0;
prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
(x ? UM_PROT_EXEC : 0));
if (hvc->force || pte_newpage(*pte)) {
if (pte_present(*pte)) {
if (pte_newpage(*pte))
ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
PAGE_SIZE, prot, hvc);
} else
ret = add_munmap(addr, PAGE_SIZE, hvc);
} else if (pte_newprot(*pte))
ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
*pte = pte_mkuptodate(*pte);
} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
return ret;
}
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end,
struct host_vm_change *hvc)
{
pmd_t *pmd;
unsigned long next;
int ret = 0;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (!pmd_present(*pmd)) {
if (hvc->force || pmd_newpage(*pmd)) {
ret = add_munmap(addr, next - addr, hvc);
pmd_mkuptodate(*pmd);
}
}
else ret = update_pte_range(pmd, addr, next, hvc);
} while (pmd++, addr = next, ((addr < end) && !ret));
return ret;
}
static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end,
struct host_vm_change *hvc)
{
pud_t *pud;
unsigned long next;
int ret = 0;
pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (!pud_present(*pud)) {
if (hvc->force || pud_newpage(*pud)) {
ret = add_munmap(addr, next - addr, hvc);
pud_mkuptodate(*pud);
}
}
else ret = update_pmd_range(pud, addr, next, hvc);
} while (pud++, addr = next, ((addr < end) && !ret));
return ret;
}
static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
unsigned long end,
struct host_vm_change *hvc)
{
p4d_t *p4d;
unsigned long next;
int ret = 0;
p4d = p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);
if (!p4d_present(*p4d)) {
if (hvc->force || p4d_newpage(*p4d)) {
ret = add_munmap(addr, next - addr, hvc);
p4d_mkuptodate(*p4d);
}
} else
ret = update_pud_range(p4d, addr, next, hvc);
} while (p4d++, addr = next, ((addr < end) && !ret));
return ret;
}
static void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
pgd_t *pgd;
struct host_vm_change hvc;
unsigned long addr = start_addr, next;
int ret = 0, userspace = 1;
hvc = INIT_HVC(mm, force, userspace);
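/*
 * Walk the page tables for [start_addr, end_addr) and queue host
 * mmap/munmap/mprotect operations for entries that changed; whatever is
 * still queued at the end is flushed by the final do_ops() call.
 */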
pgd = pgd_offset(mm, addr);
do {
next = pgd_addr_end(addr, end_addr);
if (!pgd_present(*pgd)) {
if (force || pgd_newpage(*pgd)) {
ret = add_munmap(addr, next - addr, &hvc);
pgd_mkuptodate(*pgd);
}
} else
ret = update_p4d_range(pgd, addr, next, &hvc);
} while (pgd++, addr = next, ((addr < end_addr) && !ret));
if (!ret)
ret = do_ops(&hvc, hvc.index, 1);
/* This is not an else because ret is modified above */
if (ret) {
struct mm_id *mm_idp = &current->mm->context.id;
printk(KERN_ERR "fix_range_common: failed, killing current "
"process: %d\n", task_tgid_vnr(current));
mm_idp->kill = 1;
}
}
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
struct mm_struct *mm;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long addr, last;
int updated = 0, err = 0, force = 0, userspace = 0;
struct host_vm_change hvc;
mm = &init_mm;
hvc = INIT_HVC(mm, force, userspace);
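/*
 * Walk init_mm's page tables over the kernel range: unmap anything marked
 * as a new page, re-map PTEs that are still present, and panic on failure
 * since this is kernel address space.
 */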
for (addr = start; addr < end;) {
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd)) {
last = ADD_ROUND(addr, PGDIR_SIZE);
if (last > end)
last = end;
if (pgd_newpage(*pgd)) {
updated = 1;
err = add_munmap(addr, last - addr, &hvc);
if (err < 0)
panic("munmap failed, errno = %d\n",
-err);
}
addr = last;
continue;
}
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d)) {
last = ADD_ROUND(addr, P4D_SIZE);
if (last > end)
last = end;
if (p4d_newpage(*p4d)) {
updated = 1;
err = add_munmap(addr, last - addr, &hvc);
if (err < 0)
panic("munmap failed, errno = %d\n",
-err);
}
addr = last;
continue;
}
pud = pud_offset(p4d, addr);
if (!pud_present(*pud)) {
last = ADD_ROUND(addr, PUD_SIZE);
if (last > end)
last = end;
if (pud_newpage(*pud)) {
updated = 1;
err = add_munmap(addr, last - addr, &hvc);
if (err < 0)
panic("munmap failed, errno = %d\n",
-err);
}
addr = last;
continue;
}
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd)) {
last = ADD_ROUND(addr, PMD_SIZE);
if (last > end)
last = end;
if (pmd_newpage(*pmd)) {
updated = 1;
err = add_munmap(addr, last - addr, &hvc);
if (err < 0)
panic("munmap failed, errno = %d\n",
-err);
}
addr = last;
continue;
}
pte = pte_offset_kernel(pmd, addr);
if (!pte_present(*pte) || pte_newpage(*pte)) {
updated = 1;
err = add_munmap(addr, PAGE_SIZE, &hvc);
if (err < 0)
panic("munmap failed, errno = %d\n",
-err);
if (pte_present(*pte))
err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
PAGE_SIZE, 0, &hvc);
}
else if (pte_newprot(*pte)) {
updated = 1;
err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);
}
addr += PAGE_SIZE;
}
if (!err)
err = do_ops(&hvc, hvc.index, 1);
if (err < 0)
panic("flush_tlb_kernel failed, errno = %d\n", err);
return updated;
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
struct mm_struct *mm = vma->vm_mm;
void *flush = NULL;
int r, w, x, prot, err = 0;
struct mm_id *mm_id;
address &= PAGE_MASK;
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto kill;
p4d = p4d_offset(pgd, address);
if (!p4d_present(*p4d))
goto kill;
pud = pud_offset(p4d, address);
if (!pud_present(*pud))
goto kill;
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
goto kill;
pte = pte_offset_kernel(pmd, address);
r = pte_read(*pte);
w = pte_write(*pte);
x = pte_exec(*pte);
if (!pte_young(*pte)) {
r = 0;
w = 0;
} else if (!pte_dirty(*pte)) {
w = 0;
}
mm_id = &mm->context.id;
prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
(x ? UM_PROT_EXEC : 0));
if (pte_newpage(*pte)) {
if (pte_present(*pte)) {
unsigned long long offset;
int fd;
fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
1, &flush);
}
else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
}
else if (pte_newprot(*pte))
err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
if (err) {
if (err == -ENOMEM)
report_enomem();
goto kill;
}
*pte = pte_mkuptodate(*pte);
return;
kill:
printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
force_sig(SIGKILL);
}
void flush_tlb_all(void)
{
/*
* Don't bother flushing if this address space is about to be
* destroyed.
*/
if (atomic_read(&current->mm->mm_users) == 0)
return;
flush_tlb_mm(current->mm);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
flush_tlb_kernel_range_common(start, end);
}
void flush_tlb_kernel_vm(void)
{
flush_tlb_kernel_range_common(start_vm, end_vm);
}
void __flush_tlb_one(unsigned long addr)
{
flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
/*
* Don't bother flushing if this address space is about to be
* destroyed.
*/
if (atomic_read(&mm->mm_users) == 0)
return;
fix_range_common(mm, start_addr, end_addr, force);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
if (vma->vm_mm == NULL)
flush_tlb_kernel_range_common(start, end);
else fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
fix_range(mm, start, end, 0);
}
void flush_tlb_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, 0);
for_each_vma(vmi, vma)
fix_range(mm, vma->vm_start, vma->vm_end, 0);
}
void force_flush_all(void)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, 0);
mmap_read_lock(mm);
for_each_vma(vmi, vma)
fix_range(mm, vma->vm_start, vma->vm_end, 1);
mmap_read_unlock(mm);
}
| linux-master | arch/um/kernel/tlb.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/init.h>
#include <linux/of_fdt.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <init.h>
#include "um_arch.h"
static char *dtb __initdata;
void uml_dtb_init(void)
{
long long size;
void *area;
area = uml_load_file(dtb, &size);
if (!area)
return;
if (!early_init_dt_scan(area)) {
pr_err("invalid DTB %s\n", dtb);
memblock_free(area, size);
return;
}
early_init_fdt_scan_reserved_mem();
unflatten_device_tree();
}
static int __init uml_dtb_setup(char *line, int *add)
{
dtb = line;
return 0;
}
__uml_setup("dtb=", uml_dtb_setup,
"dtb=<file>\n"
" Boot the kernel with the devicetree blob from the specified file.\n"
);
| linux-master | arch/um/kernel/dtb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/task.h>
#include <as-layout.h>
#include <kern.h>
#include <os.h>
#include <skas.h>
extern void start_kernel(void);
static int __init start_kernel_proc(void *unused)
{
int pid;
block_signals_trace();
pid = os_getpid();
cpu_tasks[0].pid = pid;
cpu_tasks[0].task = current;
start_kernel();
return 0;
}
extern int userspace_pid[];
extern char cpu0_irqstack[];
int __init start_uml(void)
{
stack_protections((unsigned long) &cpu0_irqstack);
set_sigstack(cpu0_irqstack, THREAD_SIZE);
init_new_thread_signals();
init_task.thread.request.u.thread.proc = start_kernel_proc;
init_task.thread.request.u.thread.arg = NULL;
return start_idle_thread(task_stack_page(&init_task),
&init_task.thread.switch_buf);
}
unsigned long current_stub_stack(void)
{
if (current->mm == NULL)
return 0;
return current->mm->context.id.stack;
}
| linux-master | arch/um/kernel/skas/process.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seccomp.h>
#include <kern_util.h>
#include <sysdep/ptrace.h>
#include <sysdep/ptrace_user.h>
#include <sysdep/syscalls.h>
#include <linux/time-internal.h>
#include <asm/unistd.h>
void handle_syscall(struct uml_pt_regs *r)
{
struct pt_regs *regs = container_of(r, struct pt_regs, regs);
int syscall;
/*
* If we have infinite CPU resources, then make every syscall also a
* preemption point, since we don't have any other preemption in this
* case, and kernel threads would basically never run until userspace
* went to sleep, even if said userspace interacts with the kernel in
* various ways.
*/
if (time_travel_mode == TT_MODE_INFCPU ||
time_travel_mode == TT_MODE_EXTERNAL)
schedule();
/* Initialize the syscall number and default return value. */
UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
if (syscall_trace_enter(regs))
goto out;
/* Do the seccomp check after ptrace; failures should be fast. */
if (secure_computing() == -1)
goto out;
syscall = UPT_SYSCALL_NR(r);
if (syscall >= 0 && syscall < __NR_syscalls)
PT_REGS_SET_SYSCALL_RETURN(regs,
EXECUTE_SYSCALL(syscall, regs));
out:
syscall_trace_leave(regs);
}
| linux-master | arch/um/kernel/skas/syscall.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Thomas Meyer ([email protected])
* Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <signal.h>
#include <sched.h>
#include <asm/unistd.h>
#include <sys/time.h>
#include <as-layout.h>
#include <ptrace_user.h>
#include <stub-data.h>
#include <sysdep/stub.h>
/*
* This is in a separate file because it needs to be compiled with any
* extraneous gcc flags (-pg, -fprofile-arcs, -ftest-coverage) disabled
*
* Use UM_KERN_PAGE_SIZE instead of PAGE_SIZE because that calls getpagesize
* on some systems.
*/
void __attribute__ ((__section__ (".__syscall_stub")))
stub_clone_handler(void)
{
struct stub_data *data = get_stub_data();
long err;
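/*
 * clone() returns twice: the parent sees the child pid (or a negative
 * error), records it and traps back to the host, while the child sees 0,
 * marks itself traced and remaps its stack.
 */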
err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD,
(unsigned long)data +
STUB_DATA_PAGES * UM_KERN_PAGE_SIZE / 2);
if (err) {
data->parent_err = err;
goto done;
}
err = stub_syscall4(__NR_ptrace, PTRACE_TRACEME, 0, 0, 0);
if (err) {
data->child_err = err;
goto done;
}
remap_stack_and_trap();
done:
trap_myself();
}
| linux-master | arch/um/kernel/skas/clone.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/page.h>
#include <kern_util.h>
#include <asm/futex.h>
#include <os.h>
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
if (mm == NULL)
return NULL;
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
return NULL;
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d))
return NULL;
pud = pud_offset(p4d, addr);
if (!pud_present(*pud))
return NULL;
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd))
return NULL;
return pte_offset_kernel(pmd, addr);
}
static pte_t *maybe_map(unsigned long virt, int is_write)
{
pte_t *pte = virt_to_pte(current->mm, virt);
int err, dummy_code;
if ((pte == NULL) || !pte_present(*pte) ||
(is_write && !pte_write(*pte))) {
err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
if (err)
return NULL;
pte = virt_to_pte(current->mm, virt);
}
if (!pte_present(*pte))
pte = NULL;
return pte;
}
static int do_op_one_page(unsigned long addr, int len, int is_write,
int (*op)(unsigned long addr, int len, void *arg), void *arg)
{
struct page *page;
pte_t *pte;
int n;
pte = maybe_map(addr, is_write);
if (pte == NULL)
return -1;
page = pte_page(*pte);
#ifdef CONFIG_64BIT
pagefault_disable();
addr = (unsigned long) page_address(page) +
(addr & ~PAGE_MASK);
#else
addr = (unsigned long) kmap_atomic(page) +
(addr & ~PAGE_MASK);
#endif
n = (*op)(addr, len, arg);
#ifdef CONFIG_64BIT
pagefault_enable();
#else
kunmap_atomic((void *)addr);
#endif
return n;
}
static long buffer_op(unsigned long addr, int len, int is_write,
int (*op)(unsigned long, int, void *), void *arg)
{
long size, remain, n;
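/*
 * Apply op() to the leading partial page, then whole pages, then the
 * trailing partial page.  A negative return from do_op_one_page() (a
 * fault) makes buffer_op() return the number of unprocessed bytes; a
 * positive return (the op finished early) makes it return 0.
 */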
size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
remain = len;
n = do_op_one_page(addr, size, is_write, op, arg);
if (n != 0) {
remain = (n < 0 ? remain : 0);
goto out;
}
addr += size;
remain -= size;
if (remain == 0)
goto out;
while (addr < ((addr + remain) & PAGE_MASK)) {
n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
if (n != 0) {
remain = (n < 0 ? remain : 0);
goto out;
}
addr += PAGE_SIZE;
remain -= PAGE_SIZE;
}
if (remain == 0)
goto out;
n = do_op_one_page(addr, remain, is_write, op, arg);
if (n != 0) {
remain = (n < 0 ? remain : 0);
goto out;
}
return 0;
out:
return remain;
}
static int copy_chunk_from_user(unsigned long from, int len, void *arg)
{
unsigned long *to_ptr = arg, to = *to_ptr;
memcpy((void *) to, (void *) from, len);
*to_ptr += len;
return 0;
}
unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
}
EXPORT_SYMBOL(raw_copy_from_user);
static int copy_chunk_to_user(unsigned long to, int len, void *arg)
{
unsigned long *from_ptr = arg, from = *from_ptr;
memcpy((void *) to, (void *) from, len);
*from_ptr += len;
return 0;
}
unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from);
}
EXPORT_SYMBOL(raw_copy_to_user);
static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
{
char **to_ptr = arg, *to = *to_ptr;
int n;
strncpy(to, (void *) from, len);
n = strnlen(to, len);
*to_ptr += n;
if (n < len)
return 1;
return 0;
}
long strncpy_from_user(char *dst, const char __user *src, long count)
{
long n;
char *ptr = dst;
if (!access_ok(src, 1))
return -EFAULT;
n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user,
&ptr);
if (n != 0)
return -EFAULT;
return strnlen(dst, count);
}
EXPORT_SYMBOL(strncpy_from_user);
static int clear_chunk(unsigned long addr, int len, void *unused)
{
memset((void *) addr, 0, len);
return 0;
}
unsigned long __clear_user(void __user *mem, unsigned long len)
{
return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL);
}
EXPORT_SYMBOL(__clear_user);
static int strnlen_chunk(unsigned long str, int len, void *arg)
{
int *len_ptr = arg, n;
n = strnlen((void *) str, len);
*len_ptr += n;
if (n < len)
return 1;
return 0;
}
long strnlen_user(const char __user *str, long len)
{
int count = 0, n;
if (!access_ok(str, 1))
return -EFAULT;
n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
if (n == 0)
return count + 1;
return 0;
}
EXPORT_SYMBOL(strnlen_user);
/**
* arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
* argument and comparison of the previous
* futex value with another constant.
*
* @encoded_op: encoded operation to execute
* @uaddr: pointer to user space address
*
* Return:
* 0 - On success
* -EFAULT - User access resulted in a page fault
* -EAGAIN - Atomic operation was unable to complete due to contention
* -ENOSYS - Operation not supported
*/
int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
int oldval, ret;
struct page *page;
unsigned long addr = (unsigned long) uaddr;
pte_t *pte;
ret = -EFAULT;
if (!access_ok(uaddr, sizeof(*uaddr)))
return -EFAULT;
preempt_disable();
pte = maybe_map(addr, 1);
if (pte == NULL)
goto out_inuser;
page = pte_page(*pte);
#ifdef CONFIG_64BIT
pagefault_disable();
addr = (unsigned long) page_address(page) +
(((unsigned long) addr) & ~PAGE_MASK);
#else
addr = (unsigned long) kmap_atomic(page) +
((unsigned long) addr & ~PAGE_MASK);
#endif
uaddr = (u32 *) addr;
oldval = *uaddr;
ret = 0;
switch (op) {
case FUTEX_OP_SET:
*uaddr = oparg;
break;
case FUTEX_OP_ADD:
*uaddr += oparg;
break;
case FUTEX_OP_OR:
*uaddr |= oparg;
break;
case FUTEX_OP_ANDN:
*uaddr &= ~oparg;
break;
case FUTEX_OP_XOR:
*uaddr ^= oparg;
break;
default:
ret = -ENOSYS;
}
#ifdef CONFIG_64BIT
pagefault_enable();
#else
kunmap_atomic((void *)addr);
#endif
out_inuser:
preempt_enable();
if (ret == 0)
*oval = oldval;
return ret;
}
EXPORT_SYMBOL(arch_futex_atomic_op_inuser);
/**
* futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
* uaddr with newval if the current value is
* oldval.
* @uval: pointer to store content of @uaddr
* @uaddr: pointer to user space address
* @oldval: old value
* @newval: new value to store to @uaddr
*
* Return:
* 0 - On success
* -EFAULT - User access resulted in a page fault
* -EAGAIN - Atomic operation was unable to complete due to contention
*/
int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
struct page *page;
pte_t *pte;
int ret = -EFAULT;
if (!access_ok(uaddr, sizeof(*uaddr)))
return -EFAULT;
preempt_disable();
pte = maybe_map((unsigned long) uaddr, 1);
if (pte == NULL)
goto out_inatomic;
page = pte_page(*pte);
#ifdef CONFIG_64BIT
pagefault_disable();
uaddr = page_address(page) + (((unsigned long) uaddr) & ~PAGE_MASK);
#else
uaddr = kmap_atomic(page) + ((unsigned long) uaddr & ~PAGE_MASK);
#endif
*uval = *uaddr;
ret = cmpxchg(uaddr, oldval, newval);
#ifdef CONFIG_64BIT
pagefault_enable();
#else
kunmap_atomic(uaddr);
#endif
ret = 0;
out_inatomic:
preempt_enable();
return ret;
}
EXPORT_SYMBOL(futex_atomic_cmpxchg_inatomic);
| linux-master | arch/um/kernel/skas/uaccess.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Thomas Meyer ([email protected])
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <os.h>
#include <skas.h>
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
struct mm_context *from_mm = NULL;
struct mm_context *to_mm = &mm->context;
unsigned long stack = 0;
int ret = -ENOMEM;
stack = __get_free_pages(GFP_KERNEL | __GFP_ZERO, ilog2(STUB_DATA_PAGES));
if (stack == 0)
goto out;
to_mm->id.stack = stack;
if (current->mm != NULL && current->mm != &init_mm)
from_mm = &current->mm->context;
block_signals_trace();
if (from_mm)
to_mm->id.u.pid = copy_context_skas0(stack,
from_mm->id.u.pid);
else to_mm->id.u.pid = start_userspace(stack);
unblock_signals_trace();
if (to_mm->id.u.pid < 0) {
ret = to_mm->id.u.pid;
goto out_free;
}
ret = init_new_ldt(to_mm, from_mm);
if (ret < 0) {
printk(KERN_ERR "init_new_context_skas - init_ldt"
" failed, errno = %d\n", ret);
goto out_free;
}
return 0;
out_free:
if (to_mm->id.stack != 0)
free_pages(to_mm->id.stack, ilog2(STUB_DATA_PAGES));
out:
return ret;
}
void destroy_context(struct mm_struct *mm)
{
struct mm_context *mmu = &mm->context;
/*
* If init_new_context wasn't called, this will be
* zero, resulting in a kill(0), which will result in the
* whole UML suddenly dying. Also, cover negative and
* 1 cases, since they shouldn't happen either.
*/
if (mmu->id.u.pid < 2) {
printk(KERN_ERR "corrupt mm_context - pid = %d\n",
mmu->id.u.pid);
return;
}
os_kill_ptraced_process(mmu->id.u.pid, 1);
free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES));
free_ldt(mmu);
}
| linux-master | arch/um/kernel/skas/mmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* loongson-specific suspend support
*
* Author: Huacai Chen <[email protected]>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <asm/loongarch.h>
#include <asm/loongson.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/tlbflush.h>
u64 loongarch_suspend_addr;
struct saved_registers {
u32 ecfg;
u32 euen;
u64 pgd;
u64 kpgd;
u32 pwctl0;
u32 pwctl1;
};
static struct saved_registers saved_regs;
void loongarch_common_suspend(void)
{
save_counter();
saved_regs.pgd = csr_read64(LOONGARCH_CSR_PGDL);
saved_regs.kpgd = csr_read64(LOONGARCH_CSR_PGDH);
saved_regs.pwctl0 = csr_read32(LOONGARCH_CSR_PWCTL0);
saved_regs.pwctl1 = csr_read32(LOONGARCH_CSR_PWCTL1);
saved_regs.ecfg = csr_read32(LOONGARCH_CSR_ECFG);
saved_regs.euen = csr_read32(LOONGARCH_CSR_EUEN);
loongarch_suspend_addr = loongson_sysconf.suspend_addr;
}
void loongarch_common_resume(void)
{
sync_counter();
local_flush_tlb_all();
csr_write64(per_cpu_offset(0), PERCPU_BASE_KS);
csr_write64(eentry, LOONGARCH_CSR_EENTRY);
csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
csr_write64(saved_regs.pgd, LOONGARCH_CSR_PGDL);
csr_write64(saved_regs.kpgd, LOONGARCH_CSR_PGDH);
csr_write32(saved_regs.pwctl0, LOONGARCH_CSR_PWCTL0);
csr_write32(saved_regs.pwctl1, LOONGARCH_CSR_PWCTL1);
csr_write32(saved_regs.ecfg, LOONGARCH_CSR_ECFG);
csr_write32(saved_regs.euen, LOONGARCH_CSR_EUEN);
}
int loongarch_acpi_suspend(void)
{
enable_gpe_wakeup();
enable_pci_wakeup();
loongarch_common_suspend();
/* processor specific suspend */
loongarch_suspend_enter();
loongarch_common_resume();
return 0;
}
| linux-master | arch/loongarch/power/suspend.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author: Huacai Chen <[email protected]>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <asm/bootinfo.h>
#include <asm/loongson.h>
void enable_gpe_wakeup(void)
{
if (acpi_disabled)
return;
if (acpi_gbl_reduced_hardware)
return;
acpi_enable_all_wakeup_gpes();
}
void enable_pci_wakeup(void)
{
if (acpi_disabled)
return;
if (acpi_gbl_reduced_hardware)
return;
acpi_write_bit_register(ACPI_BITREG_PCIEXP_WAKE_STATUS, 1);
if (acpi_gbl_FADT.flags & ACPI_FADT_PCI_EXPRESS_WAKE)
acpi_write_bit_register(ACPI_BITREG_PCIEXP_WAKE_DISABLE, 0);
}
static int __init loongson3_acpi_suspend_init(void)
{
#ifdef CONFIG_ACPI
acpi_status status;
uint64_t suspend_addr = 0;
if (acpi_disabled || acpi_gbl_reduced_hardware)
return 0;
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
status = acpi_evaluate_integer(NULL, "\\SADR", NULL, &suspend_addr);
if (ACPI_FAILURE(status) || !suspend_addr) {
pr_err("ACPI S3 is not support!\n");
return -1;
}
loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr));
#endif
return 0;
}
device_initcall(loongson3_acpi_suspend_init);
| linux-master | arch/loongarch/power/platform.c |
// SPDX-License-Identifier: GPL-2.0
#include <asm/fpu.h>
#include <asm/loongson.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <linux/suspend.h>
static u32 saved_crmd;
static u32 saved_prmd;
static u32 saved_euen;
static u32 saved_ecfg;
static u64 saved_pcpu_base;
struct pt_regs saved_regs;
void save_processor_state(void)
{
saved_crmd = csr_read32(LOONGARCH_CSR_CRMD);
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
saved_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
saved_pcpu_base = csr_read64(PERCPU_BASE_KS);
if (is_fpu_owner())
save_fp(current);
}
void restore_processor_state(void)
{
csr_write32(saved_crmd, LOONGARCH_CSR_CRMD);
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);
csr_write32(saved_ecfg, LOONGARCH_CSR_ECFG);
csr_write64(saved_pcpu_base, PERCPU_BASE_KS);
if (is_fpu_owner())
restore_fp(current);
}
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
extern int swsusp_asm_suspend(void);
int swsusp_arch_suspend(void)
{
enable_pci_wakeup();
return swsusp_asm_suspend();
}
extern int swsusp_asm_resume(void);
int swsusp_arch_resume(void)
{
/* Avoid TLB mismatch during and after kernel resume */
local_flush_tlb_all();
return swsusp_asm_resume();
}
| linux-master | arch/loongarch/power/hibernate.c |
// SPDX-License-Identifier: GPL-2.0
/*
* crc32.c - CRC32 and CRC32C using LoongArch crc* instructions
*
* Module based on mips/crypto/crc32-mips.c
*
* Copyright (C) 2014 Linaro Ltd <[email protected]>
* Copyright (C) 2018 MIPS Tech, LLC
* Copyright (C) 2020-2023 Loongson Technology Corporation Limited
*/
#include <linux/module.h>
#include <crypto/internal/hash.h>
#include <asm/cpu-features.h>
#include <asm/unaligned.h>
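/*
 * Emit a single crc.w.<size>.w (or crcc.w.<size>.w) instruction that
 * folds <value> (byte/half/word/double wide) into the running CRC.
 */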
#define _CRC32(crc, value, size, type) \
do { \
__asm__ __volatile__( \
#type ".w." #size ".w" " %0, %1, %0\n\t"\
: "+r" (crc) \
: "r" (value) \
: "memory"); \
} while (0)
#define CRC32(crc, value, size) _CRC32(crc, value, size, crc)
#define CRC32C(crc, value, size) _CRC32(crc, value, size, crcc)
static u32 crc32_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
{
u32 crc = crc_;
while (len >= sizeof(u64)) {
u64 value = get_unaligned_le64(p);
CRC32(crc, value, d);
p += sizeof(u64);
len -= sizeof(u64);
}
if (len & sizeof(u32)) {
u32 value = get_unaligned_le32(p);
CRC32(crc, value, w);
p += sizeof(u32);
len -= sizeof(u32);
}
if (len & sizeof(u16)) {
u16 value = get_unaligned_le16(p);
CRC32(crc, value, h);
p += sizeof(u16);
}
if (len & sizeof(u8)) {
u8 value = *p++;
CRC32(crc, value, b);
}
return crc;
}
static u32 crc32c_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
{
u32 crc = crc_;
while (len >= sizeof(u64)) {
u64 value = get_unaligned_le64(p);
CRC32C(crc, value, d);
p += sizeof(u64);
len -= sizeof(u64);
}
if (len & sizeof(u32)) {
u32 value = get_unaligned_le32(p);
CRC32C(crc, value, w);
p += sizeof(u32);
len -= sizeof(u32);
}
if (len & sizeof(u16)) {
u16 value = get_unaligned_le16(p);
CRC32C(crc, value, h);
p += sizeof(u16);
}
if (len & sizeof(u8)) {
u8 value = *p++;
CRC32C(crc, value, b);
}
return crc;
}
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
struct chksum_ctx {
u32 key;
};
struct chksum_desc_ctx {
u32 crc;
};
static int chksum_init(struct shash_desc *desc)
{
struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = mctx->key;
return 0;
}
/*
* Setting the seed allows arbitrary accumulators and flexible XOR policy
* If your algorithm starts with ~0, then XOR with ~0 before you set the seed.
*/
static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
{
struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
if (keylen != sizeof(mctx->key))
return -EINVAL;
mctx->key = get_unaligned_le32(key);
return 0;
}
static int chksum_update(struct shash_desc *desc, const u8 *data, unsigned int length)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = crc32_loongarch_hw(ctx->crc, data, length);
return 0;
}
static int chksumc_update(struct shash_desc *desc, const u8 *data, unsigned int length)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = crc32c_loongarch_hw(ctx->crc, data, length);
return 0;
}
static int chksum_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
put_unaligned_le32(ctx->crc, out);
return 0;
}
static int chksumc_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
put_unaligned_le32(~ctx->crc, out);
return 0;
}
static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
{
put_unaligned_le32(crc32_loongarch_hw(crc, data, len), out);
return 0;
}
static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
{
put_unaligned_le32(~crc32c_loongarch_hw(crc, data, len), out);
return 0;
}
static int chksum_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksum_finup(ctx->crc, data, len, out);
}
static int chksumc_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksumc_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
{
struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
return __chksum_finup(mctx->key, data, length, out);
}
static int chksumc_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
{
struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
return __chksumc_finup(mctx->key, data, length, out);
}
static int chksum_cra_init(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = 0;
return 0;
}
static int chksumc_cra_init(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = ~0;
return 0;
}
static struct shash_alg crc32_alg = {
.digestsize = CHKSUM_DIGEST_SIZE,
.setkey = chksum_setkey,
.init = chksum_init,
.update = chksum_update,
.final = chksum_final,
.finup = chksum_finup,
.digest = chksum_digest,
.descsize = sizeof(struct chksum_desc_ctx),
.base = {
.cra_name = "crc32",
.cra_driver_name = "crc32-loongarch",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 0,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
.cra_init = chksum_cra_init,
}
};
static struct shash_alg crc32c_alg = {
.digestsize = CHKSUM_DIGEST_SIZE,
.setkey = chksum_setkey,
.init = chksum_init,
.update = chksumc_update,
.final = chksumc_final,
.finup = chksumc_finup,
.digest = chksumc_digest,
.descsize = sizeof(struct chksum_desc_ctx),
.base = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-loongarch",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 0,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
.cra_init = chksumc_cra_init,
}
};
static int __init crc32_mod_init(void)
{
int err;
if (!cpu_has(CPU_FEATURE_CRC32))
return 0;
err = crypto_register_shash(&crc32_alg);
if (err)
return err;
err = crypto_register_shash(&crc32c_alg);
if (err)
return err;
return 0;
}
static void __exit crc32_mod_exit(void)
{
if (!cpu_has(CPU_FEATURE_CRC32))
return;
crypto_unregister_shash(&crc32_alg);
crypto_unregister_shash(&crc32c_alg);
}
module_init(crc32_mod_init);
module_exit(crc32_mod_exit);
MODULE_AUTHOR("Min Zhou <[email protected]>");
MODULE_AUTHOR("Huacai Chen <[email protected]>");
MODULE_DESCRIPTION("CRC32 and CRC32C using LoongArch crc* instructions");
MODULE_LICENSE("GPL v2");
| linux-master | arch/loongarch/crypto/crc32-loongarch.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <asm/cacheflush.h>
#include <asm/loongson.h>
#define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00
#define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06
#define PCI_DEVICE_ID_LOONGSON_DC2 0x7a36
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val)
{
struct pci_bus *bus_tmp = pci_find_bus(domain, bus);
if (bus_tmp)
return bus_tmp->ops->read(bus_tmp, devfn, reg, len, val);
return -EINVAL;
}
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 val)
{
struct pci_bus *bus_tmp = pci_find_bus(domain, bus);
if (bus_tmp)
return bus_tmp->ops->write(bus_tmp, devfn, reg, len, val);
return -EINVAL;
}
phys_addr_t mcfg_addr_init(int node)
{
return (((u64)node << 44) | MCFG_EXT_PCICFG_BASE);
}
static int __init pcibios_init(void)
{
unsigned int lsize;
/*
* Set PCI cacheline size to that of the last level in the
* cache hierarchy.
*/
lsize = cpu_last_level_cache_line_size();
BUG_ON(!lsize);
pci_dfl_cache_line_size = lsize >> 2;
pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
return 0;
}
subsys_initcall(pcibios_init);
int pcibios_device_add(struct pci_dev *dev)
{
int id;
struct irq_domain *dom;
id = pci_domain_nr(dev->bus);
dom = irq_find_matching_fwnode(get_pch_msi_handle(id), DOMAIN_BUS_PCI_MSI);
dev_set_msi_domain(&dev->dev, dom);
return 0;
}
int pcibios_alloc_irq(struct pci_dev *dev)
{
if (acpi_disabled)
return 0;
if (pci_dev_msi_enabled(dev))
return 0;
return acpi_pci_irq_enable(dev);
}
static void pci_fixup_vgadev(struct pci_dev *pdev)
{
struct pci_dev *devp = NULL;
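/* Prefer a non-Loongson VGA device as the default boot display when one exists. */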
while ((devp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, devp))) {
if (devp->vendor != PCI_VENDOR_ID_LOONGSON) {
vga_set_default_device(devp);
dev_info(&pdev->dev,
"Overriding boot device as %X:%X\n",
devp->vendor, devp->device);
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC1, pci_fixup_vgadev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC2, pci_fixup_vgadev);
| linux-master | arch/loongarch/pci/pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <asm/pci.h>
#include <asm/numa.h>
#include <asm/loongson.h>
struct pci_root_info {
struct acpi_pci_root_info common;
struct pci_config_window *cfg;
};
void pcibios_add_bus(struct pci_bus *bus)
{
acpi_pci_add_bus(bus);
}
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct acpi_device *adev = NULL;
struct device *bus_dev = &bridge->bus->dev;
struct pci_config_window *cfg = bridge->bus->sysdata;
if (!acpi_disabled)
adev = to_acpi_device(cfg->parent);
ACPI_COMPANION_SET(&bridge->dev, adev);
set_dev_node(bus_dev, pa_to_nid(cfg->res.start));
return 0;
}
int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{
struct pci_config_window *cfg = bus->sysdata;
struct acpi_device *adev = to_acpi_device(cfg->parent);
struct acpi_pci_root *root = acpi_driver_data(adev);
return root->segment;
}
static void acpi_release_root_info(struct acpi_pci_root_info *ci)
{
struct pci_root_info *info;
info = container_of(ci, struct pci_root_info, common);
pci_ecam_free(info->cfg);
kfree(ci->ops);
kfree(info);
}
static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci)
{
int status;
struct resource_entry *entry, *tmp;
struct acpi_device *device = ci->bridge;
status = acpi_pci_probe_root_resources(ci);
if (status > 0) {
resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
if (entry->res->flags & IORESOURCE_MEM) {
entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40);
entry->res->start |= entry->offset;
entry->res->end |= entry->offset;
}
}
return status;
}
resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
dev_dbg(&device->dev,
"host bridge window %pR (ignored)\n", entry->res);
resource_list_destroy_entry(entry);
}
return 0;
}
/*
* Create a PCI config space window
* - reserve mem region
* - alloc struct pci_config_window with space for all mappings
* - ioremap the config space
*/
static struct pci_config_window *arch_pci_ecam_create(struct device *dev,
struct resource *cfgres, struct resource *busr, const struct pci_ecam_ops *ops)
{
int bsz, bus_range, err;
struct resource *conflict;
struct pci_config_window *cfg;
if (busr->start > busr->end)
return ERR_PTR(-EINVAL);
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return ERR_PTR(-ENOMEM);
cfg->parent = dev;
cfg->ops = ops;
cfg->busr.start = busr->start;
cfg->busr.end = busr->end;
cfg->busr.flags = IORESOURCE_BUS;
bus_range = resource_size(cfgres) >> ops->bus_shift;
bsz = 1 << ops->bus_shift;
cfg->res.start = cfgres->start;
cfg->res.end = cfgres->end;
cfg->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
cfg->res.name = "PCI ECAM";
conflict = request_resource_conflict(&iomem_resource, &cfg->res);
if (conflict) {
err = -EBUSY;
dev_err(dev, "can't claim ECAM area %pR: address conflict with %s %pR\n",
&cfg->res, conflict->name, conflict);
goto err_exit;
}
cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz);
if (!cfg->win)
goto err_exit_iomap;
if (ops->init) {
err = ops->init(cfg);
if (err)
goto err_exit;
}
dev_info(dev, "ECAM at %pR for %pR\n", &cfg->res, &cfg->busr);
return cfg;
err_exit_iomap:
err = -ENOMEM;
dev_err(dev, "ECAM ioremap failed\n");
err_exit:
pci_ecam_free(cfg);
return ERR_PTR(err);
}
/*
* Lookup the bus range for the domain in MCFG, and set up config space
* mapping.
*/
static struct pci_config_window *
pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
{
int ret, bus_shift;
u16 seg = root->segment;
struct device *dev = &root->device->dev;
struct resource cfgres;
struct resource *bus_res = &root->secondary;
struct pci_config_window *cfg;
const struct pci_ecam_ops *ecam_ops;
ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
if (ret < 0) {
dev_err(dev, "%04x:%pR ECAM region not found, use default value\n", seg, bus_res);
ecam_ops = &loongson_pci_ecam_ops;
root->mcfg_addr = mcfg_addr_init(0);
}
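/*
 * ECAM ops using the standard 20-bit bus shift can map the MCFG window
 * as-is; otherwise the config window is derived from mcfg_addr and the
 * bus range before being mapped.
 */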
bus_shift = ecam_ops->bus_shift ? : 20;
if (bus_shift == 20)
cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
else {
cfgres.start = root->mcfg_addr + (bus_res->start << bus_shift);
cfgres.end = cfgres.start + (resource_size(bus_res) << bus_shift) - 1;
cfgres.end |= BIT(28) + (((PCI_CFG_SPACE_EXP_SIZE - 1) & 0xf00) << 16);
cfgres.flags = IORESOURCE_MEM;
cfg = arch_pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
}
if (IS_ERR(cfg)) {
dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, PTR_ERR(cfg));
return NULL;
}
return cfg;
}
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
struct pci_bus *bus;
struct pci_root_info *info;
struct acpi_pci_root_ops *root_ops;
int domain = root->segment;
int busnum = root->secondary.start;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
pr_warn("pci_bus %04x:%02x: ignored (out of memory)\n", domain, busnum);
return NULL;
}
root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
if (!root_ops) {
kfree(info);
return NULL;
}
info->cfg = pci_acpi_setup_ecam_mapping(root);
if (!info->cfg) {
kfree(info);
kfree(root_ops);
return NULL;
}
root_ops->release_info = acpi_release_root_info;
root_ops->prepare_resources = acpi_prepare_root_resources;
root_ops->pci_ops = (struct pci_ops *)&info->cfg->ops->pci_ops;
bus = pci_find_bus(domain, busnum);
if (bus) {
memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window));
kfree(info);
} else {
struct pci_bus *child;
bus = acpi_pci_root_create(root, root_ops,
&info->common, info->cfg);
if (!bus) {
kfree(info);
kfree(root_ops);
return NULL;
}
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
}
return bus;
}
| linux-master | arch/loongarch/pci/acpi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* BPF JIT compiler for LoongArch
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include "bpf_jit.h"
#define REG_TCC LOONGARCH_GPR_A6
#define TCC_SAVED LOONGARCH_GPR_S5
#define SAVE_RA BIT(0)
#define SAVE_TCC BIT(1)
static const int regmap[] = {
/* return value from in-kernel function, and exit value for eBPF program */
[BPF_REG_0] = LOONGARCH_GPR_A5,
/* arguments from eBPF program to in-kernel function */
[BPF_REG_1] = LOONGARCH_GPR_A0,
[BPF_REG_2] = LOONGARCH_GPR_A1,
[BPF_REG_3] = LOONGARCH_GPR_A2,
[BPF_REG_4] = LOONGARCH_GPR_A3,
[BPF_REG_5] = LOONGARCH_GPR_A4,
/* callee saved registers that in-kernel function will preserve */
[BPF_REG_6] = LOONGARCH_GPR_S0,
[BPF_REG_7] = LOONGARCH_GPR_S1,
[BPF_REG_8] = LOONGARCH_GPR_S2,
[BPF_REG_9] = LOONGARCH_GPR_S3,
/* read-only frame pointer to access stack */
[BPF_REG_FP] = LOONGARCH_GPR_S4,
/* temporary register for blinding constants */
[BPF_REG_AX] = LOONGARCH_GPR_T0,
};
static void mark_call(struct jit_ctx *ctx)
{
ctx->flags |= SAVE_RA;
}
static void mark_tail_call(struct jit_ctx *ctx)
{
ctx->flags |= SAVE_TCC;
}
static bool seen_call(struct jit_ctx *ctx)
{
return (ctx->flags & SAVE_RA);
}
static bool seen_tail_call(struct jit_ctx *ctx)
{
return (ctx->flags & SAVE_TCC);
}
static u8 tail_call_reg(struct jit_ctx *ctx)
{
if (seen_call(ctx))
return TCC_SAVED;
return REG_TCC;
}
/*
* eBPF prog stack layout:
*
* high
* original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
* | $ra |
* +-------------------------+
* | $fp |
* +-------------------------+
* | $s0 |
* +-------------------------+
* | $s1 |
* +-------------------------+
* | $s2 |
* +-------------------------+
* | $s3 |
* +-------------------------+
* | $s4 |
* +-------------------------+
* | $s5 |
* +-------------------------+ <--BPF_REG_FP
* | prog->aux->stack_depth |
* | (optional) |
* current $sp -------------> +-------------------------+
* low
*/
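/*
 * Worked example of the frame-size arithmetic used by build_prologue()
 * below (illustrative only; the stack_depth value is hypothetical):
 * with prog->aux->stack_depth = 24, bpf_stack_adjust = round_up(24, 16)
 * = 32, the eight saved registers take 8 * sizeof(long) = 64 bytes
 * (already 16-byte aligned), so the final frame is 96 bytes and
 * BPF_REG_FP ends up pointing 32 bytes above the new $sp.
 */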
static void build_prologue(struct jit_ctx *ctx)
{
int stack_adjust = 0, store_offset, bpf_stack_adjust;
bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
/* To store ra, fp, s0, s1, s2, s3, s4 and s5. */
stack_adjust += sizeof(long) * 8;
stack_adjust = round_up(stack_adjust, 16);
stack_adjust += bpf_stack_adjust;
/*
* First instruction initializes the tail call count (TCC).
* On tail call we skip this instruction, and the TCC is
* passed in REG_TCC from the caller.
*/
emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);
store_offset = stack_adjust - sizeof(long);
emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset);
store_offset -= sizeof(long);
emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset);
store_offset -= sizeof(long);
emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset);
store_offset -= sizeof(long);
emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset);
store_offset -= sizeof(long);
emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset);
store_offset -= sizeof(long);
emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset);
store_offset -= sizeof(long);
emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset);
store_offset -= sizeof(long);
emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);
emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);
if (bpf_stack_adjust)
emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);
/*
* Program contains calls and tail calls, so REG_TCC needs
* to be saved across calls.
*/
if (seen_tail_call(ctx) && seen_call(ctx))
move_reg(ctx, TCC_SAVED, REG_TCC);
ctx->stack_size = stack_adjust;
}
static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
{
int stack_adjust = ctx->stack_size;
int load_offset;
load_offset = stack_adjust - sizeof(long);
emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset);
load_offset -= sizeof(long);
emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset);
load_offset -= sizeof(long);
emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset);
load_offset -= sizeof(long);
emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset);
load_offset -= sizeof(long);
emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset);
load_offset -= sizeof(long);
emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset);
load_offset -= sizeof(long);
emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset);
load_offset -= sizeof(long);
emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);
emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);
if (!is_tail_call) {
/* Set return value */
move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]);
/* Return to the caller */
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
} else {
/*
* Call the next bpf prog and skip the first instruction
* of TCC initialization.
*/
emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1);
}
}
static void build_epilogue(struct jit_ctx *ctx)
{
__build_epilogue(ctx, false);
}
bool bpf_jit_supports_kfunc_call(void)
{
return true;
}
/* initialized on the first pass of build_body() */
static int out_offset = -1;
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
int off;
u8 tcc = tail_call_reg(ctx);
u8 a1 = LOONGARCH_GPR_A1;
u8 a2 = LOONGARCH_GPR_A2;
u8 t1 = LOONGARCH_GPR_T1;
u8 t2 = LOONGARCH_GPR_T2;
u8 t3 = LOONGARCH_GPR_T3;
const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
/*
* a0: &ctx
* a1: &array
* a2: index
*
* if (index >= array->map.max_entries)
* goto out;
*/
off = offsetof(struct bpf_array, map.max_entries);
emit_insn(ctx, ldwu, t1, a1, off);
/* bgeu $a2, $t1, jmp_offset */
if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0)
goto toofar;
/*
* if (--TCC < 0)
* goto out;
*/
emit_insn(ctx, addid, REG_TCC, tcc, -1);
if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
goto toofar;
/*
* prog = array->ptrs[index];
* if (!prog)
* goto out;
*/
emit_insn(ctx, alsld, t2, a2, a1, 2);
off = offsetof(struct bpf_array, ptrs);
emit_insn(ctx, ldd, t2, t2, off);
/* beq $t2, $zero, jmp_offset */
if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
goto toofar;
/* goto *(prog->bpf_func + 4); */
off = offsetof(struct bpf_prog, bpf_func);
emit_insn(ctx, ldd, t3, t2, off);
__build_epilogue(ctx, true);
/* out: */
if (out_offset == -1)
out_offset = cur_offset;
if (cur_offset != out_offset) {
pr_err_once("tail_call out_offset = %d, expected %d!\n",
cur_offset, out_offset);
return -1;
}
return 0;
toofar:
pr_info_once("tail_call: jump too far\n");
return -1;
#undef cur_offset
#undef jmp_offset
}
static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
const u8 t1 = LOONGARCH_GPR_T1;
const u8 t2 = LOONGARCH_GPR_T2;
const u8 t3 = LOONGARCH_GPR_T3;
const u8 r0 = regmap[BPF_REG_0];
const u8 src = regmap[insn->src_reg];
const u8 dst = regmap[insn->dst_reg];
const s16 off = insn->off;
const s32 imm = insn->imm;
const bool isdw = BPF_SIZE(insn->code) == BPF_DW;
move_imm(ctx, t1, off, false);
emit_insn(ctx, addd, t1, dst, t1);
move_reg(ctx, t3, src);
switch (imm) {
/* lock *(size *)(dst + off) <op>= src */
case BPF_ADD:
if (isdw)
emit_insn(ctx, amaddd, t2, t1, src);
else
emit_insn(ctx, amaddw, t2, t1, src);
break;
case BPF_AND:
if (isdw)
emit_insn(ctx, amandd, t2, t1, src);
else
emit_insn(ctx, amandw, t2, t1, src);
break;
case BPF_OR:
if (isdw)
emit_insn(ctx, amord, t2, t1, src);
else
emit_insn(ctx, amorw, t2, t1, src);
break;
case BPF_XOR:
if (isdw)
emit_insn(ctx, amxord, t2, t1, src);
else
emit_insn(ctx, amxorw, t2, t1, src);
break;
/* src = atomic_fetch_<op>(dst + off, src) */
case BPF_ADD | BPF_FETCH:
if (isdw) {
emit_insn(ctx, amaddd, src, t1, t3);
} else {
emit_insn(ctx, amaddw, src, t1, t3);
emit_zext_32(ctx, src, true);
}
break;
case BPF_AND | BPF_FETCH:
if (isdw) {
emit_insn(ctx, amandd, src, t1, t3);
} else {
emit_insn(ctx, amandw, src, t1, t3);
emit_zext_32(ctx, src, true);
}
break;
case BPF_OR | BPF_FETCH:
if (isdw) {
emit_insn(ctx, amord, src, t1, t3);
} else {
emit_insn(ctx, amorw, src, t1, t3);
emit_zext_32(ctx, src, true);
}
break;
case BPF_XOR | BPF_FETCH:
if (isdw) {
emit_insn(ctx, amxord, src, t1, t3);
} else {
emit_insn(ctx, amxorw, src, t1, t3);
emit_zext_32(ctx, src, true);
}
break;
/* src = atomic_xchg(dst + off, src); */
case BPF_XCHG:
if (isdw) {
emit_insn(ctx, amswapd, src, t1, t3);
} else {
emit_insn(ctx, amswapw, src, t1, t3);
emit_zext_32(ctx, src, true);
}
break;
/* r0 = atomic_cmpxchg(dst + off, r0, src); */
case BPF_CMPXCHG:
move_reg(ctx, t2, r0);
if (isdw) {
emit_insn(ctx, lld, r0, t1, 0);
emit_insn(ctx, bne, t2, r0, 4);
move_reg(ctx, t3, src);
emit_insn(ctx, scd, t3, t1, 0);
emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4);
} else {
emit_insn(ctx, llw, r0, t1, 0);
emit_zext_32(ctx, t2, true);
emit_zext_32(ctx, r0, true);
emit_insn(ctx, bne, t2, r0, 4);
move_reg(ctx, t3, src);
emit_insn(ctx, scw, t3, t1, 0);
emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6);
emit_zext_32(ctx, r0, true);
}
break;
}
}
static bool is_signed_bpf_cond(u8 cond)
{
return cond == BPF_JSGT || cond == BPF_JSLT ||
cond == BPF_JSGE || cond == BPF_JSLE;
}
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
regs->regs[dst_reg] = 0;
regs->csr_era = (unsigned long)&ex->fixup - offset;
return true;
}
/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
struct jit_ctx *ctx,
int dst_reg)
{
unsigned long pc;
off_t offset;
struct exception_table_entry *ex;
if (!ctx->image || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
return 0;
if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
return -EINVAL;
ex = &ctx->prog->aux->extable[ctx->num_exentries];
pc = (unsigned long)&ctx->image[ctx->idx - 1];
offset = pc - (long)&ex->insn;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
return -ERANGE;
ex->insn = offset;
/*
* Since the extable follows the program, the fixup offset is always
* negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
* to keep things simple, and put the destination register in the upper
* bits. We don't need to worry about buildtime or runtime sort
* modifying the upper bits because the table is already sorted, and
* isn't part of the main exception table.
*/
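/*
 * Illustrative packing example (not part of the original code): with
 * dst_reg = 5 and a fixup offset of 0x40, the masks above give
 * FIELD_PREP(BPF_FIXUP_REG_MASK, 5) | FIELD_PREP(BPF_FIXUP_OFFSET_MASK, 0x40)
 * = (5 << 27) | 0x40 = 0x28000040, which ex_handler_bpf() later unpacks
 * with FIELD_GET().
 */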
offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
return -ERANGE;
ex->type = EX_TYPE_BPF;
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ctx->num_exentries++;
return 0;
}
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
{
u8 tm = -1;
u64 func_addr;
bool func_addr_fixed;
int i = insn - ctx->prog->insnsi;
int ret, jmp_offset;
const u8 code = insn->code;
const u8 cond = BPF_OP(code);
const u8 t1 = LOONGARCH_GPR_T1;
const u8 t2 = LOONGARCH_GPR_T2;
const u8 src = regmap[insn->src_reg];
const u8 dst = regmap[insn->dst_reg];
const s16 off = insn->off;
const s32 imm = insn->imm;
const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
move_reg(ctx, dst, src);
emit_zext_32(ctx, dst, is32);
break;
/* dst = imm */
case BPF_ALU | BPF_MOV | BPF_K:
case BPF_ALU64 | BPF_MOV | BPF_K:
move_imm(ctx, dst, imm, is32);
break;
/* dst = dst + src */
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_X:
emit_insn(ctx, addd, dst, dst, src);
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst + imm */
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_ADD | BPF_K:
if (is_signed_imm12(imm)) {
emit_insn(ctx, addid, dst, dst, imm);
} else {
move_imm(ctx, t1, imm, is32);
emit_insn(ctx, addd, dst, dst, t1);
}
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst - src */
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_X:
emit_insn(ctx, subd, dst, dst, src);
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst - imm */
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_K:
if (is_signed_imm12(-imm)) {
emit_insn(ctx, addid, dst, dst, -imm);
} else {
move_imm(ctx, t1, imm, is32);
emit_insn(ctx, subd, dst, dst, t1);
}
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst * src */
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_X:
emit_insn(ctx, muld, dst, dst, src);
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst * imm */
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU64 | BPF_MUL | BPF_K:
move_imm(ctx, t1, imm, is32);
emit_insn(ctx, muld, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst / src */
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
emit_zext_32(ctx, dst, is32);
move_reg(ctx, t1, src);
emit_zext_32(ctx, t1, is32);
emit_insn(ctx, divdu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst / imm */
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_K:
move_imm(ctx, t1, imm, is32);
emit_zext_32(ctx, dst, is32);
emit_insn(ctx, divdu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst % src */
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
emit_zext_32(ctx, dst, is32);
move_reg(ctx, t1, src);
emit_zext_32(ctx, t1, is32);
emit_insn(ctx, moddu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst % imm */
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_K:
move_imm(ctx, t1, imm, is32);
emit_zext_32(ctx, dst, is32);
emit_insn(ctx, moddu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
break;
/* dst = -dst */
case BPF_ALU | BPF_NEG:
case BPF_ALU64 | BPF_NEG:
move_imm(ctx, t1, imm, is32);
emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst);
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst & src */
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_AND | BPF_X:
emit_insn(ctx, and, dst, dst, src);
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst & imm */
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_K:
if (is_unsigned_imm12(imm)) {
emit_insn(ctx, andi, dst, dst, imm);
} else {
move_imm(ctx, t1, imm, is32);
emit_insn(ctx, and, dst, dst, t1);
}
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst | src */
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
emit_insn(ctx, or, dst, dst, src);
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst | imm */
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU64 | BPF_OR | BPF_K:
if (is_unsigned_imm12(imm)) {
emit_insn(ctx, ori, dst, dst, imm);
} else {
move_imm(ctx, t1, imm, is32);
emit_insn(ctx, or, dst, dst, t1);
}
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst ^ src */
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
emit_insn(ctx, xor, dst, dst, src);
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst ^ imm */
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU64 | BPF_XOR | BPF_K:
if (is_unsigned_imm12(imm)) {
emit_insn(ctx, xori, dst, dst, imm);
} else {
move_imm(ctx, t1, imm, is32);
emit_insn(ctx, xor, dst, dst, t1);
}
emit_zext_32(ctx, dst, is32);
break;
/* dst = dst << src (logical) */
case BPF_ALU | BPF_LSH | BPF_X:
emit_insn(ctx, sllw, dst, dst, src);
emit_zext_32(ctx, dst, is32);
break;
case BPF_ALU64 | BPF_LSH | BPF_X:
emit_insn(ctx, slld, dst, dst, src);
break;
/* dst = dst << imm (logical) */
case BPF_ALU | BPF_LSH | BPF_K:
emit_insn(ctx, slliw, dst, dst, imm);
emit_zext_32(ctx, dst, is32);
break;
case BPF_ALU64 | BPF_LSH | BPF_K:
emit_insn(ctx, sllid, dst, dst, imm);
break;
/* dst = dst >> src (logical) */
case BPF_ALU | BPF_RSH | BPF_X:
emit_insn(ctx, srlw, dst, dst, src);
emit_zext_32(ctx, dst, is32);
break;
case BPF_ALU64 | BPF_RSH | BPF_X:
emit_insn(ctx, srld, dst, dst, src);
break;
/* dst = dst >> imm (logical) */
case BPF_ALU | BPF_RSH | BPF_K:
emit_insn(ctx, srliw, dst, dst, imm);
emit_zext_32(ctx, dst, is32);
break;
case BPF_ALU64 | BPF_RSH | BPF_K:
emit_insn(ctx, srlid, dst, dst, imm);
break;
/* dst = dst >> src (arithmetic) */
case BPF_ALU | BPF_ARSH | BPF_X:
emit_insn(ctx, sraw, dst, dst, src);
emit_zext_32(ctx, dst, is32);
break;
case BPF_ALU64 | BPF_ARSH | BPF_X:
emit_insn(ctx, srad, dst, dst, src);
break;
/* dst = dst >> imm (arithmetic) */
case BPF_ALU | BPF_ARSH | BPF_K:
emit_insn(ctx, sraiw, dst, dst, imm);
emit_zext_32(ctx, dst, is32);
break;
case BPF_ALU64 | BPF_ARSH | BPF_K:
emit_insn(ctx, sraid, dst, dst, imm);
break;
/* dst = BSWAP##imm(dst) */
case BPF_ALU | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16:
/* zero-extend 16 bits into 64 bits */
emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
break;
case 32:
/* zero-extend 32 bits into 64 bits */
emit_zext_32(ctx, dst, is32);
break;
case 64:
/* do nothing */
break;
}
break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
switch (imm) {
case 16:
emit_insn(ctx, revb2h, dst, dst);
/* zero-extend 16 bits into 64 bits */
emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
break;
case 32:
emit_insn(ctx, revb2w, dst, dst);
/* zero-extend 32 bits into 64 bits */
emit_zext_32(ctx, dst, is32);
break;
case 64:
emit_insn(ctx, revbd, dst, dst);
break;
}
break;
/* PC += off if dst cond src */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_X:
case BPF_JMP32 | BPF_JGE | BPF_X:
case BPF_JMP32 | BPF_JLT | BPF_X:
case BPF_JMP32 | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JSGT | BPF_X:
case BPF_JMP32 | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JSLT | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_X:
jmp_offset = bpf2la_offset(i, off, ctx);
move_reg(ctx, t1, dst);
move_reg(ctx, t2, src);
if (is_signed_bpf_cond(BPF_OP(code))) {
emit_sext_32(ctx, t1, is32);
emit_sext_32(ctx, t2, is32);
} else {
emit_zext_32(ctx, t1, is32);
emit_zext_32(ctx, t2, is32);
}
if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0)
goto toofar;
break;
/* PC += off if dst cond imm */
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_K:
case BPF_JMP32 | BPF_JGE | BPF_K:
case BPF_JMP32 | BPF_JLT | BPF_K:
case BPF_JMP32 | BPF_JLE | BPF_K:
case BPF_JMP32 | BPF_JSGT | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_K:
jmp_offset = bpf2la_offset(i, off, ctx);
if (imm) {
move_imm(ctx, t1, imm, false);
tm = t1;
} else {
/* If imm is 0, simply use zero register. */
tm = LOONGARCH_GPR_ZERO;
}
move_reg(ctx, t2, dst);
if (is_signed_bpf_cond(BPF_OP(code))) {
emit_sext_32(ctx, tm, is32);
emit_sext_32(ctx, t2, is32);
} else {
emit_zext_32(ctx, tm, is32);
emit_zext_32(ctx, t2, is32);
}
if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
goto toofar;
break;
/* PC += off if dst & src */
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X:
jmp_offset = bpf2la_offset(i, off, ctx);
emit_insn(ctx, and, t1, dst, src);
emit_zext_32(ctx, t1, is32);
if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
goto toofar;
break;
/* PC += off if dst & imm */
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_K:
jmp_offset = bpf2la_offset(i, off, ctx);
move_imm(ctx, t1, imm, is32);
emit_insn(ctx, and, t1, dst, t1);
emit_zext_32(ctx, t1, is32);
if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
goto toofar;
break;
/* PC += off */
case BPF_JMP | BPF_JA:
jmp_offset = bpf2la_offset(i, off, ctx);
if (emit_uncond_jmp(ctx, jmp_offset) < 0)
goto toofar;
break;
/* function call */
case BPF_JMP | BPF_CALL:
mark_call(ctx);
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
&func_addr, &func_addr_fixed);
if (ret < 0)
return ret;
move_addr(ctx, t1, func_addr);
emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
break;
/* tail call */
case BPF_JMP | BPF_TAIL_CALL:
mark_tail_call(ctx);
if (emit_bpf_tail_call(ctx) < 0)
return -EINVAL;
break;
/* function return */
case BPF_JMP | BPF_EXIT:
emit_sext_32(ctx, regmap[BPF_REG_0], true);
if (i == ctx->prog->len - 1)
break;
jmp_offset = epilogue_offset(ctx);
if (emit_uncond_jmp(ctx, jmp_offset) < 0)
goto toofar;
break;
/* dst = imm64 */
case BPF_LD | BPF_IMM | BPF_DW:
move_imm(ctx, dst, imm64, is32);
return 1;
/* dst = *(size *)(src + off) */
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW:
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
switch (BPF_SIZE(code)) {
case BPF_B:
if (is_signed_imm12(off)) {
emit_insn(ctx, ldbu, dst, src, off);
} else {
move_imm(ctx, t1, off, is32);
emit_insn(ctx, ldxbu, dst, src, t1);
}
break;
case BPF_H:
if (is_signed_imm12(off)) {
emit_insn(ctx, ldhu, dst, src, off);
} else {
move_imm(ctx, t1, off, is32);
emit_insn(ctx, ldxhu, dst, src, t1);
}
break;
case BPF_W:
if (is_signed_imm12(off)) {
emit_insn(ctx, ldwu, dst, src, off);
} else if (is_signed_imm14(off)) {
emit_insn(ctx, ldptrw, dst, src, off);
} else {
move_imm(ctx, t1, off, is32);
emit_insn(ctx, ldxwu, dst, src, t1);
}
break;
case BPF_DW:
if (is_signed_imm12(off)) {
emit_insn(ctx, ldd, dst, src, off);
} else if (is_signed_imm14(off)) {
emit_insn(ctx, ldptrd, dst, src, off);
} else {
move_imm(ctx, t1, off, is32);
emit_insn(ctx, ldxd, dst, src, t1);
}
break;
}
ret = add_exception_handler(insn, ctx, dst);
if (ret)
return ret;
break;
/* *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_DW:
switch (BPF_SIZE(code)) {
case BPF_B:
move_imm(ctx, t1, imm, is32);
if (is_signed_imm12(off)) {
emit_insn(ctx, stb, t1, dst, off);
} else {
move_imm(ctx, t2, off, is32);
emit_insn(ctx, stxb, t1, dst, t2);
}
break;
case BPF_H:
move_imm(ctx, t1, imm, is32);
if (is_signed_imm12(off)) {
emit_insn(ctx, sth, t1, dst, off);
} else {
move_imm(ctx, t2, off, is32);
emit_insn(ctx, stxh, t1, dst, t2);
}
break;
case BPF_W:
move_imm(ctx, t1, imm, is32);
if (is_signed_imm12(off)) {
emit_insn(ctx, stw, t1, dst, off);
} else if (is_signed_imm14(off)) {
emit_insn(ctx, stptrw, t1, dst, off);
} else {
move_imm(ctx, t2, off, is32);
emit_insn(ctx, stxw, t1, dst, t2);
}
break;
case BPF_DW:
move_imm(ctx, t1, imm, is32);
if (is_signed_imm12(off)) {
emit_insn(ctx, std, t1, dst, off);
} else if (is_signed_imm14(off)) {
emit_insn(ctx, stptrd, t1, dst, off);
} else {
move_imm(ctx, t2, off, is32);
emit_insn(ctx, stxd, t1, dst, t2);
}
break;
}
break;
/* *(size *)(dst + off) = src */
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_W:
case BPF_STX | BPF_MEM | BPF_DW:
switch (BPF_SIZE(code)) {
case BPF_B:
if (is_signed_imm12(off)) {
emit_insn(ctx, stb, src, dst, off);
} else {
move_imm(ctx, t1, off, is32);
emit_insn(ctx, stxb, src, dst, t1);
}
break;
case BPF_H:
if (is_signed_imm12(off)) {
emit_insn(ctx, sth, src, dst, off);
} else {
move_imm(ctx, t1, off, is32);
emit_insn(ctx, stxh, src, dst, t1);
}
break;
case BPF_W:
if (is_signed_imm12(off)) {
emit_insn(ctx, stw, src, dst, off);
} else if (is_signed_imm14(off)) {
emit_insn(ctx, stptrw, src, dst, off);
} else {
move_imm(ctx, t1, off, is32);
emit_insn(ctx, stxw, src, dst, t1);
}
break;
case BPF_DW:
if (is_signed_imm12(off)) {
emit_insn(ctx, std, src, dst, off);
} else if (is_signed_imm14(off)) {
emit_insn(ctx, stptrd, src, dst, off);
} else {
move_imm(ctx, t1, off, is32);
emit_insn(ctx, stxd, src, dst, t1);
}
break;
}
break;
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
emit_atomic(insn, ctx);
break;
/* Speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
default:
pr_err("bpf_jit: unknown opcode %02x\n", code);
return -EINVAL;
}
return 0;
toofar:
pr_info_once("bpf_jit: opcode %02x, jump too far\n", code);
return -E2BIG;
}
static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
int i;
const struct bpf_prog *prog = ctx->prog;
for (i = 0; i < prog->len; i++) {
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;
if (ctx->image == NULL)
ctx->offset[i] = ctx->idx;
ret = build_insn(insn, ctx, extra_pass);
if (ret > 0) {
i++;
if (ctx->image == NULL)
ctx->offset[i] = ctx->idx;
continue;
}
if (ret)
return ret;
}
if (ctx->image == NULL)
ctx->offset[i] = ctx->idx;
return 0;
}
/* Fill space with break instructions */
static void jit_fill_hole(void *area, unsigned int size)
{
u32 *ptr;
/* We are guaranteed to have aligned memory */
for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
*ptr++ = INSN_BREAK;
}
static int validate_code(struct jit_ctx *ctx)
{
int i;
union loongarch_instruction insn;
for (i = 0; i < ctx->idx; i++) {
insn = ctx->image[i];
/* Check INSN_BREAK */
if (insn.word == INSN_BREAK)
return -1;
}
if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
return -1;
return 0;
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
bool tmp_blinded = false, extra_pass = false;
u8 *image_ptr;
int image_size, prog_size, extable_size;
struct jit_ctx ctx;
struct jit_data *jit_data;
struct bpf_binary_header *header;
struct bpf_prog *tmp, *orig_prog = prog;
/*
* If BPF JIT was not enabled then we must fall back to
* the interpreter.
*/
if (!prog->jit_requested)
return orig_prog;
tmp = bpf_jit_blind_constants(prog);
/*
* If blinding was requested and we failed during blinding,
* we must fall back to the interpreter. Otherwise, we save
* the new JITed code.
*/
if (IS_ERR(tmp))
return orig_prog;
if (tmp != prog) {
tmp_blinded = true;
prog = tmp;
}
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
if (!jit_data) {
prog = orig_prog;
goto out;
}
prog->aux->jit_data = jit_data;
}
if (jit_data->ctx.offset) {
ctx = jit_data->ctx;
image_ptr = jit_data->image;
header = jit_data->header;
extra_pass = true;
prog_size = sizeof(u32) * ctx.idx;
goto skip_init_ctx;
}
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
goto out_offset;
}
/* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
build_prologue(&ctx);
if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
goto out_offset;
}
ctx.epilogue_offset = ctx.idx;
build_epilogue(&ctx);
extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry);
/*
 * Now we know the actual image size.
 * As each LoongArch instruction is 32 bits long, we translate the
 * number of JITed instructions into the size required to store the
 * JITed code.
 */
prog_size = sizeof(u32) * ctx.idx;
image_size = prog_size + extable_size;
/* Now we know the size of the structure to make */
header = bpf_jit_binary_alloc(image_size, &image_ptr,
sizeof(u32), jit_fill_hole);
if (header == NULL) {
prog = orig_prog;
goto out_offset;
}
/* 2. Now, the actual pass to generate final JIT code */
ctx.image = (union loongarch_instruction *)image_ptr;
if (extable_size)
prog->aux->extable = (void *)image_ptr + prog_size;
skip_init_ctx:
ctx.idx = 0;
ctx.num_exentries = 0;
build_prologue(&ctx);
if (build_body(&ctx, extra_pass)) {
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_offset;
}
build_epilogue(&ctx);
/* 3. Extra pass to validate JITed code */
if (validate_code(&ctx)) {
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_offset;
}
/* And we're done */
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
/* Update the icache */
flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
if (!prog->is_func || extra_pass) {
if (extra_pass && ctx.idx != jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d != %d\n",
ctx.idx, jit_data->ctx.idx);
bpf_jit_binary_free(header);
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
goto out_offset;
}
bpf_jit_binary_lock_ro(header);
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;
jit_data->header = header;
}
prog->jited = 1;
prog->jited_len = prog_size;
prog->bpf_func = (void *)ctx.image;
if (!prog->is_func || extra_pass) {
int i;
/* offset[prog->len] is the size of program */
for (i = 0; i <= prog->len; i++)
ctx.offset[i] *= LOONGARCH_INSN_SIZE;
bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_offset:
kvfree(ctx.offset);
kfree(jit_data);
prog->aux->jit_data = NULL;
}
out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);
out_offset = -1;
return prog;
}
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
return true;
}
| linux-master | arch/loongarch/net/bpf_jit.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Fast user context implementation of getcpu()
*/
#include <asm/vdso.h>
#include <linux/getcpu.h>
static __always_inline int read_cpu_id(void)
{
int cpu_id;
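/*
 * Descriptive note (added for clarity): rdtime.d returns the stable
 * counter value in rd and the counter ID in rj; using $zero as rd
 * discards the counter value, so %0 receives only the counter ID,
 * which is used here as the CPU number.
 */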
__asm__ __volatile__(
" rdtime.d $zero, %0\n"
: "=r" (cpu_id)
:
: "memory");
return cpu_id;
}
static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void)
{
return (struct vdso_pcpu_data *)(get_vdso_data() + VVAR_LOONGARCH_PAGES_START * PAGE_SIZE);
}
extern
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused);
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
{
int cpu_id;
const struct vdso_pcpu_data *data;
cpu_id = read_cpu_id();
if (cpu)
*cpu = cpu_id;
if (node) {
data = get_pcpu_data();
*node = data[cpu_id].node;
}
return 0;
}
| linux-master | arch/loongarch/vdso/vgetcpu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* LoongArch userspace implementations of gettimeofday() and similar.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/types.h>
extern
int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
return __cvdso_clock_gettime(clock, ts);
}
extern
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
return __cvdso_gettimeofday(tv, tz);
}
extern
int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
{
return __cvdso_clock_getres(clock_id, res);
}
| linux-master | arch/loongarch/vdso/vgettimeofday.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2019-2020 Arm Ltd.
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kernel.h>
#include <net/checksum.h>
static u64 accumulate(u64 sum, u64 data)
{
sum += data;
if (sum < data)
sum += 1;
return sum;
}
/*
* We over-read the buffer and this makes KASAN unhappy. Instead, disable
* instrumentation and call kasan explicitly.
*/
unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
{
unsigned int offset, shift, sum;
const u64 *ptr;
u64 data, sum64 = 0;
if (unlikely(len == 0))
return 0;
offset = (unsigned long)buff & 7;
/*
* This is to all intents and purposes safe, since rounding down cannot
* result in a different page or cache line being accessed, and @buff
* should absolutely not be pointing to anything read-sensitive. We do,
* however, have to be careful not to piss off KASAN, which means using
* unchecked reads to accommodate the head and tail, for which we'll
* compensate with an explicit check up-front.
*/
kasan_check_read(buff, len);
ptr = (u64 *)(buff - offset);
len = len + offset - 8;
/*
* Head: zero out any excess leading bytes. Shifting back by the same
* amount should be at least as fast as any other way of handling the
* odd/even alignment, and means we can ignore it until the very end.
*/
shift = offset * 8;
data = *ptr++;
data = (data >> shift) << shift;
/*
* Body: straightforward aligned loads from here on (the paired loads
* underlying the quadword type still only need dword alignment). The
* main loop strictly excludes the tail, so the second loop will always
* run at least once.
*/
while (unlikely(len > 64)) {
__uint128_t tmp1, tmp2, tmp3, tmp4;
tmp1 = *(__uint128_t *)ptr;
tmp2 = *(__uint128_t *)(ptr + 2);
tmp3 = *(__uint128_t *)(ptr + 4);
tmp4 = *(__uint128_t *)(ptr + 6);
len -= 64;
ptr += 8;
/*
 * The "don't dump the carry flag into a GPR" idiom: adding each quadword
 * to its halves-swapped self sums both 64-bit halves, with the carry out
 * of the low half folded into the high half, entirely within the 128-bit
 * type, so the carry flag never has to be read into a general register.
 */
tmp1 += (tmp1 >> 64) | (tmp1 << 64);
tmp2 += (tmp2 >> 64) | (tmp2 << 64);
tmp3 += (tmp3 >> 64) | (tmp3 << 64);
tmp4 += (tmp4 >> 64) | (tmp4 << 64);
tmp1 = ((tmp1 >> 64) << 64) | (tmp2 >> 64);
tmp1 += (tmp1 >> 64) | (tmp1 << 64);
tmp3 = ((tmp3 >> 64) << 64) | (tmp4 >> 64);
tmp3 += (tmp3 >> 64) | (tmp3 << 64);
tmp1 = ((tmp1 >> 64) << 64) | (tmp3 >> 64);
tmp1 += (tmp1 >> 64) | (tmp1 << 64);
tmp1 = ((tmp1 >> 64) << 64) | sum64;
tmp1 += (tmp1 >> 64) | (tmp1 << 64);
sum64 = tmp1 >> 64;
}
while (len > 8) {
__uint128_t tmp;
sum64 = accumulate(sum64, data);
tmp = *(__uint128_t *)ptr;
len -= 16;
ptr += 2;
data = tmp >> 64;
sum64 = accumulate(sum64, tmp);
}
if (len > 0) {
sum64 = accumulate(sum64, data);
data = *ptr;
len -= 8;
}
/*
* Tail: zero any over-read bytes similarly to the head, again
* preserving odd/even alignment.
*/
shift = len * -8;
data = (data << shift) >> shift;
sum64 = accumulate(sum64, data);
/* Finally, folding */
sum64 += (sum64 >> 32) | (sum64 << 32);
sum = sum64 >> 32;
sum += (sum >> 16) | (sum << 16);
if (offset & 1)
return (u16)swab32(sum);
return sum >> 16;
}
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, __u8 proto, __wsum csum)
{
__uint128_t src, dst;
u64 sum = (__force u64)csum;
src = *(const __uint128_t *)saddr->s6_addr;
dst = *(const __uint128_t *)daddr->s6_addr;
sum += (__force u32)htonl(len);
sum += (u32)proto << 24;
src += (src >> 64) | (src << 64);
dst += (dst >> 64) | (dst << 64);
sum = accumulate(sum, src >> 64);
sum = accumulate(sum, dst >> 64);
sum += ((sum >> 32) | (sum << 32));
return csum_fold((__force __wsum)(sum >> 32));
}
EXPORT_SYMBOL(csum_ipv6_magic);
| linux-master | arch/loongarch/lib/csum.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* LoongArch SIMD XOR operations
*
* Copyright (C) 2023 WANG Xuerui <[email protected]>
*/
#include "xor_simd.h"
/*
* Process one cache line (64 bytes) per loop iteration. This assumes that
* future popular LoongArch cores will have performance characteristics
* similar to the current models.
*/
#define LINE_WIDTH 64
#ifdef CONFIG_CPU_HAS_LSX
#define LD(reg, base, offset) \
"vld $vr" #reg ", %[" #base "], " #offset "\n\t"
#define ST(reg, base, offset) \
"vst $vr" #reg ", %[" #base "], " #offset "\n\t"
#define XOR(dj, k) "vxor.v $vr" #dj ", $vr" #dj ", $vr" #k "\n\t"
#define LD_INOUT_LINE(base) \
LD(0, base, 0) \
LD(1, base, 16) \
LD(2, base, 32) \
LD(3, base, 48)
#define LD_AND_XOR_LINE(base) \
LD(4, base, 0) \
LD(5, base, 16) \
LD(6, base, 32) \
LD(7, base, 48) \
XOR(0, 4) \
XOR(1, 5) \
XOR(2, 6) \
XOR(3, 7)
#define ST_LINE(base) \
ST(0, base, 0) \
ST(1, base, 16) \
ST(2, base, 32) \
ST(3, base, 48)
#define XOR_FUNC_NAME(nr) __xor_lsx_##nr
#include "xor_template.c"
#undef LD
#undef ST
#undef XOR
#undef LD_INOUT_LINE
#undef LD_AND_XOR_LINE
#undef ST_LINE
#undef XOR_FUNC_NAME
#endif /* CONFIG_CPU_HAS_LSX */
#ifdef CONFIG_CPU_HAS_LASX
#define LD(reg, base, offset) \
"xvld $xr" #reg ", %[" #base "], " #offset "\n\t"
#define ST(reg, base, offset) \
"xvst $xr" #reg ", %[" #base "], " #offset "\n\t"
#define XOR(dj, k) "xvxor.v $xr" #dj ", $xr" #dj ", $xr" #k "\n\t"
#define LD_INOUT_LINE(base) \
LD(0, base, 0) \
LD(1, base, 32)
#define LD_AND_XOR_LINE(base) \
LD(2, base, 0) \
LD(3, base, 32) \
XOR(0, 2) \
XOR(1, 3)
#define ST_LINE(base) \
ST(0, base, 0) \
ST(1, base, 32)
#define XOR_FUNC_NAME(nr) __xor_lasx_##nr
#include "xor_template.c"
#undef LD
#undef ST
#undef XOR
#undef LD_INOUT_LINE
#undef LD_AND_XOR_LINE
#undef ST_LINE
#undef XOR_FUNC_NAME
#endif /* CONFIG_CPU_HAS_LASX */
| linux-master | arch/loongarch/lib/xor_simd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <asm/processor.h>
void __delay(unsigned long cycles)
{
u64 t0 = get_cycles();
while ((unsigned long)(get_cycles() - t0) < cycles)
cpu_relax();
}
EXPORT_SYMBOL(__delay);
/*
* Division by multiplication: you don't have to worry about
* loss of precision.
*
* Use only for very small delays ( < 1 msec). Should probably use a
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplication gets optimized away if the delay is
* a constant)
*/
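/*
 * Worked example (illustrative, not from the original source): 0x10c7 is
 * approximately 2^32 / 10^6, so in __udelay() below
 *   cycles = (us * 0x10c7 * HZ * lpj_fine) >> 32
 *          ~= us * HZ * lpj_fine / 10^6
 * i.e. microseconds scaled by cycles per microsecond, assuming
 * HZ * lpj_fine gives cycles per second (which matches __delay()
 * counting get_cycles()); the >> 32 cancels the 2^32 factor carried
 * by the constant.
 */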
void __udelay(unsigned long us)
{
__delay((us * 0x000010c7ull * HZ * lpj_fine) >> 32);
}
EXPORT_SYMBOL(__udelay);
void __ndelay(unsigned long ns)
{
__delay((ns * 0x00000005ull * HZ * lpj_fine) >> 32);
}
EXPORT_SYMBOL(__ndelay);
| linux-master | arch/loongarch/lib/delay.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2023 WANG Xuerui <[email protected]>
*
* Template for XOR operations, instantiated in xor_simd.c.
*
* Expected preprocessor definitions:
*
* - LINE_WIDTH
* - XOR_FUNC_NAME(nr)
* - LD_INOUT_LINE(buf)
* - LD_AND_XOR_LINE(buf)
* - ST_LINE(buf)
*/
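/*
 * Minimal illustrative instantiation (a sketch, not part of this file;
 * the __xor_scalar name is hypothetical): a user supplies the expected
 * macros before including this template, e.g.
 *
 *   #define LINE_WIDTH 64
 *   #define XOR_FUNC_NAME(nr) __xor_scalar_##nr
 *   #define LD_INOUT_LINE(buf)    ...load one line from buf...
 *   #define LD_AND_XOR_LINE(buf)  ...load a line from buf and XOR it in...
 *   #define ST_LINE(buf)          ...store the accumulated line to buf...
 *   #include "xor_template.c"
 *
 * xor_simd.c above does exactly this for the LSX and LASX flavors.
 */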
void XOR_FUNC_NAME(2)(unsigned long bytes,
unsigned long * __restrict v1,
const unsigned long * __restrict v2)
{
unsigned long lines = bytes / LINE_WIDTH;
do {
__asm__ __volatile__ (
LD_INOUT_LINE(v1)
LD_AND_XOR_LINE(v2)
ST_LINE(v1)
: : [v1] "r"(v1), [v2] "r"(v2) : "memory"
);
v1 += LINE_WIDTH / sizeof(unsigned long);
v2 += LINE_WIDTH / sizeof(unsigned long);
} while (--lines > 0);
}
void XOR_FUNC_NAME(3)(unsigned long bytes,
unsigned long * __restrict v1,
const unsigned long * __restrict v2,
const unsigned long * __restrict v3)
{
unsigned long lines = bytes / LINE_WIDTH;
do {
__asm__ __volatile__ (
LD_INOUT_LINE(v1)
LD_AND_XOR_LINE(v2)
LD_AND_XOR_LINE(v3)
ST_LINE(v1)
: : [v1] "r"(v1), [v2] "r"(v2), [v3] "r"(v3) : "memory"
);
v1 += LINE_WIDTH / sizeof(unsigned long);
v2 += LINE_WIDTH / sizeof(unsigned long);
v3 += LINE_WIDTH / sizeof(unsigned long);
} while (--lines > 0);
}
void XOR_FUNC_NAME(4)(unsigned long bytes,
unsigned long * __restrict v1,
const unsigned long * __restrict v2,
const unsigned long * __restrict v3,
const unsigned long * __restrict v4)
{
unsigned long lines = bytes / LINE_WIDTH;
do {
__asm__ __volatile__ (
LD_INOUT_LINE(v1)
LD_AND_XOR_LINE(v2)
LD_AND_XOR_LINE(v3)
LD_AND_XOR_LINE(v4)
ST_LINE(v1)
: : [v1] "r"(v1), [v2] "r"(v2), [v3] "r"(v3), [v4] "r"(v4)
: "memory"
);
v1 += LINE_WIDTH / sizeof(unsigned long);
v2 += LINE_WIDTH / sizeof(unsigned long);
v3 += LINE_WIDTH / sizeof(unsigned long);
v4 += LINE_WIDTH / sizeof(unsigned long);
} while (--lines > 0);
}
void XOR_FUNC_NAME(5)(unsigned long bytes,
unsigned long * __restrict v1,
const unsigned long * __restrict v2,
const unsigned long * __restrict v3,
const unsigned long * __restrict v4,
const unsigned long * __restrict v5)
{
unsigned long lines = bytes / LINE_WIDTH;
do {
__asm__ __volatile__ (
LD_INOUT_LINE(v1)
LD_AND_XOR_LINE(v2)
LD_AND_XOR_LINE(v3)
LD_AND_XOR_LINE(v4)
LD_AND_XOR_LINE(v5)
ST_LINE(v1)
: : [v1] "r"(v1), [v2] "r"(v2), [v3] "r"(v3), [v4] "r"(v4),
[v5] "r"(v5) : "memory"
);
v1 += LINE_WIDTH / sizeof(unsigned long);
v2 += LINE_WIDTH / sizeof(unsigned long);
v3 += LINE_WIDTH / sizeof(unsigned long);
v4 += LINE_WIDTH / sizeof(unsigned long);
v5 += LINE_WIDTH / sizeof(unsigned long);
} while (--lines > 0);
}
| linux-master | arch/loongarch/lib/xor_template.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* LoongArch SIMD XOR operations
*
* Copyright (C) 2023 WANG Xuerui <[email protected]>
*/
#include <linux/export.h>
#include <linux/sched.h>
#include <asm/fpu.h>
#include <asm/xor_simd.h>
#include "xor_simd.h"
#define MAKE_XOR_GLUE_2(flavor) \
void xor_##flavor##_2(unsigned long bytes, unsigned long * __restrict p1, \
const unsigned long * __restrict p2) \
{ \
kernel_fpu_begin(); \
__xor_##flavor##_2(bytes, p1, p2); \
kernel_fpu_end(); \
} \
EXPORT_SYMBOL_GPL(xor_##flavor##_2)
#define MAKE_XOR_GLUE_3(flavor) \
void xor_##flavor##_3(unsigned long bytes, unsigned long * __restrict p1, \
const unsigned long * __restrict p2, \
const unsigned long * __restrict p3) \
{ \
kernel_fpu_begin(); \
__xor_##flavor##_3(bytes, p1, p2, p3); \
kernel_fpu_end(); \
} \
EXPORT_SYMBOL_GPL(xor_##flavor##_3)
#define MAKE_XOR_GLUE_4(flavor) \
void xor_##flavor##_4(unsigned long bytes, unsigned long * __restrict p1, \
const unsigned long * __restrict p2, \
const unsigned long * __restrict p3, \
const unsigned long * __restrict p4) \
{ \
kernel_fpu_begin(); \
__xor_##flavor##_4(bytes, p1, p2, p3, p4); \
kernel_fpu_end(); \
} \
EXPORT_SYMBOL_GPL(xor_##flavor##_4)
#define MAKE_XOR_GLUE_5(flavor) \
void xor_##flavor##_5(unsigned long bytes, unsigned long * __restrict p1, \
const unsigned long * __restrict p2, \
const unsigned long * __restrict p3, \
const unsigned long * __restrict p4, \
const unsigned long * __restrict p5) \
{ \
kernel_fpu_begin(); \
__xor_##flavor##_5(bytes, p1, p2, p3, p4, p5); \
kernel_fpu_end(); \
} \
EXPORT_SYMBOL_GPL(xor_##flavor##_5)
#define MAKE_XOR_GLUES(flavor) \
MAKE_XOR_GLUE_2(flavor); \
MAKE_XOR_GLUE_3(flavor); \
MAKE_XOR_GLUE_4(flavor); \
MAKE_XOR_GLUE_5(flavor)
#ifdef CONFIG_CPU_HAS_LSX
MAKE_XOR_GLUES(lsx);
#endif
#ifdef CONFIG_CPU_HAS_LASX
MAKE_XOR_GLUES(lasx);
#endif
| linux-master | arch/loongarch/lib/xor_simd_glue.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
* Copyright (C) 1999 by Silicon Graphics, Inc.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
void dump_tlb_regs(void)
{
const int field = 2 * sizeof(unsigned long);
pr_info("Index : 0x%0x\n", read_csr_tlbidx());
pr_info("PageSize : 0x%0x\n", read_csr_pagesize());
pr_info("EntryHi : 0x%0*lx\n", field, read_csr_entryhi());
pr_info("EntryLo0 : 0x%0*lx\n", field, read_csr_entrylo0());
pr_info("EntryLo1 : 0x%0*lx\n", field, read_csr_entrylo1());
}
static void dump_tlb(int first, int last)
{
unsigned long s_entryhi, entryhi, asid;
unsigned long long entrylo0, entrylo1, pa;
unsigned int index;
unsigned int s_index, s_asid;
unsigned int pagesize, c0, c1, i;
unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
int pwidth = 16;
int vwidth = 16;
int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
s_entryhi = read_csr_entryhi();
s_index = read_csr_tlbidx();
s_asid = read_csr_asid();
for (i = first; i <= last; i++) {
write_csr_index(i);
tlb_read();
pagesize = read_csr_pagesize();
entryhi = read_csr_entryhi();
entrylo0 = read_csr_entrylo0();
entrylo1 = read_csr_entrylo1();
index = read_csr_tlbidx();
asid = read_csr_asid();
/* EHINV bit marks entire entry as invalid */
if (index & CSR_TLBIDX_EHINV)
continue;
/*
* ASID takes effect in absence of G (global) bit.
*/
if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
asid != s_asid)
continue;
/*
* Only print entries in use
*/
pr_info("Index: %4d pgsize=0x%x ", i, (1 << pagesize));
c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
pr_cont("va=0x%0*lx asid=0x%0*lx",
vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask);
/* NR/NX are in awkward places, so mask them off separately */
pa = entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX);
pa = pa & PAGE_MASK;
pr_cont("\n\t[");
pr_cont("nr=%d nx=%d ",
(entrylo0 & ENTRYLO_NR) ? 1 : 0,
(entrylo0 & ENTRYLO_NX) ? 1 : 0);
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld] [",
pwidth, pa, c0,
(entrylo0 & ENTRYLO_D) ? 1 : 0,
(entrylo0 & ENTRYLO_V) ? 1 : 0,
(entrylo0 & ENTRYLO_G) ? 1 : 0,
(entrylo0 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT);
/* NR/NX are in awkward places, so mask them off separately */
pa = entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX);
pa = pa & PAGE_MASK;
pr_cont("nr=%d nx=%d ",
(entrylo1 & ENTRYLO_NR) ? 1 : 0,
(entrylo1 & ENTRYLO_NX) ? 1 : 0);
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n",
pwidth, pa, c1,
(entrylo1 & ENTRYLO_D) ? 1 : 0,
(entrylo1 & ENTRYLO_V) ? 1 : 0,
(entrylo1 & ENTRYLO_G) ? 1 : 0,
(entrylo1 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT);
}
pr_info("\n");
write_csr_entryhi(s_entryhi);
write_csr_tlbidx(s_index);
write_csr_asid(s_asid);
}
void dump_tlb_all(void)
{
dump_tlb(0, current_cpu_data.tlbsize - 1);
}
| linux-master | arch/loongarch/lib/dump_tlb.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/error-injection.h>
#include <linux/kprobes.h>
void override_function_with_return(struct pt_regs *regs)
{
instruction_pointer_set(regs, regs->regs[1]);
}
NOKPROBE_SYMBOL(override_function_with_return);
| linux-master | arch/loongarch/lib/error-inject.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
p4d = p4d_alloc(mm, pgd, addr);
pud = pud_alloc(mm, p4d, addr);
if (pud)
pte = (pte_t *)pmd_alloc(mm, pud, addr);
return pte;
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd = NULL;
pgd = pgd_offset(mm, addr);
if (pgd_present(*pgd)) {
p4d = p4d_offset(pgd, addr);
if (p4d_present(*p4d)) {
pud = pud_offset(p4d, addr);
if (pud_present(*pud))
pmd = pmd_offset(pud, addr);
}
}
return (pte_t *) pmd;
}
int pmd_huge(pmd_t pmd)
{
return (pmd_val(pmd) & _PAGE_HUGE) != 0;
}
int pud_huge(pud_t pud)
{
return (pud_val(pud) & _PAGE_HUGE) != 0;
}
uint64_t pmd_to_entrylo(unsigned long pmd_val)
{
uint64_t val;
/* PMD as PTE. Must be huge page */
if (!pmd_huge(__pmd(pmd_val)))
panic("%s", __func__);
val = pmd_val ^ _PAGE_HUGE;
val |= ((val & _PAGE_HGLOBAL) >>
(_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
return val;
}
| linux-master | arch/loongarch/mm/hugetlbpage.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm-generic/sections.h>
static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d) (0)
#else
#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
#endif
#ifdef __PAGETABLE_PMD_FOLDED
#define __pud_none(early, pud) (0)
#else
#define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \
(__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
#endif
#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))
#define __pte_none(early, pte) (early ? pte_none(pte) : \
((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))
bool kasan_early_stage = true;
void *kasan_mem_to_shadow(const void *addr)
{
if (!kasan_arch_is_ready()) {
return (void *)(kasan_early_shadow_page);
} else {
unsigned long maddr = (unsigned long)addr;
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
unsigned long offset = 0;
maddr &= XRANGE_SHADOW_MASK;
switch (xrange) {
case XKPRANGE_CC_SEG:
offset = XKPRANGE_CC_SHADOW_OFFSET;
break;
case XKPRANGE_UC_SEG:
offset = XKPRANGE_UC_SHADOW_OFFSET;
break;
case XKVRANGE_VC_SEG:
offset = XKVRANGE_VC_SHADOW_OFFSET;
break;
default:
WARN_ON(1);
return NULL;
}
return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
}
}
const void *kasan_shadow_to_mem(const void *shadow_addr)
{
unsigned long addr = (unsigned long)shadow_addr;
if (unlikely(addr > KASAN_SHADOW_END) ||
unlikely(addr < KASAN_SHADOW_START)) {
WARN_ON(1);
return NULL;
}
if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
else {
WARN_ON(1);
return NULL;
}
}
/*
* Alloc memory for shadow memory page table.
*/
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
if (!p)
panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
__func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));
return __pa(p);
}
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
if (__pmd_none(early, READ_ONCE(*pmdp))) {
phys_addr_t pte_phys = early ?
__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
if (!early)
memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
}
return pte_offset_kernel(pmdp, addr);
}
static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
if (__pud_none(early, READ_ONCE(*pudp))) {
phys_addr_t pmd_phys = early ?
__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
if (!early)
memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
}
return pmd_offset(pudp, addr);
}
static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
if (__p4d_none(early, READ_ONCE(*p4dp))) {
phys_addr_t pud_phys = early ?
__pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
if (!early)
memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
}
return pud_offset(p4dp, addr);
}
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
do {
phys_addr_t page_phys = early ?
__pa_symbol(kasan_early_shadow_page)
: kasan_alloc_zeroed_page(node);
next = addr + PAGE_SIZE;
set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
} while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
}
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
do {
next = pmd_addr_end(addr, end);
kasan_pte_populate(pmdp, addr, next, node, early);
} while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
}
static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);
do {
next = pud_addr_end(addr, end);
kasan_pmd_populate(pudp, addr, next, node, early);
} while (pudp++, addr = next, addr != end);
}
static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
p4d_t *p4dp = p4d_offset(pgdp, addr);
do {
next = p4d_addr_end(addr, end);
kasan_pud_populate(p4dp, addr, next, node, early);
} while (p4dp++, addr = next, addr != end);
}
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
int node, bool early)
{
unsigned long next;
pgd_t *pgdp;
pgdp = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
kasan_p4d_populate(pgdp, addr, next, node, early);
} while (pgdp++, addr = next, addr != end);
}
/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
int node)
{
kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}
asmlinkage void __init kasan_early_init(void)
{
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
}
static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
{
WRITE_ONCE(*pgdp, pgdval);
}
static void __init clear_pgds(unsigned long start, unsigned long end)
{
/*
* Remove references to kasan page tables from
* swapper_pg_dir. pgd_clear() can't be used
* here because it's a nop on 2- and 3-level page table setups
*/
for (; start < end; start += PGDIR_SIZE)
kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}
void __init kasan_init(void)
{
u64 i;
phys_addr_t pa_start, pa_end;
/*
 * The PGD entries were populated with invalid_pmd_table or
 * invalid_pud_table in pagetable_init(), depending on how many page
 * table levels are in use, so the pgd entries covering the KASAN
 * shadow range are non-zero and must be cleared first: otherwise
 * pgd_none() would be false and the populate pass below would never
 * create any new pgd entries.
 */
memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
local_flush_tlb_all();
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
/* Maps everything to a single page of zeroes */
kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
kasan_mem_to_shadow((void *)KFENCE_AREA_END));
kasan_early_stage = false;
/* Populate the linear mapping */
for_each_mem_range(i, &pa_start, &pa_end) {
void *start = (void *)phys_to_virt(pa_start);
void *end = (void *)phys_to_virt(pa_end);
if (start >= end)
break;
kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
(unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
}
/* Populate modules mapping */
kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
(unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
/*
* KAsan may reuse the contents of kasan_early_shadow_pte directly, so we
* should make sure that it maps the zero page read-only.
*/
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_early_shadow_pte[i],
pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
local_flush_tlb_all();
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
pr_info("KernelAddressSanitizer initialized.\n");
}
| linux-master | arch/loongarch/mm/kasan_init.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/uaccess.h>
#include <linux/kernel.h>
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
/* highest bit set means kernel space */
return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1);
}
| linux-master | arch/loongarch/mm/maccess.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/export.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#define SHM_ALIGN_MASK (SHMLBA - 1)
#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK) \
+ (((pgoff) << PAGE_SHIFT) & SHM_ALIGN_MASK))
enum mmap_allocation_direction {UP, DOWN};
static unsigned long arch_get_unmapped_area_common(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags, enum mmap_allocation_direction dir)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long addr = addr0;
int do_color_align;
struct vm_unmapped_area_info info;
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
if (flags & MAP_FIXED) {
/* Even MAP_FIXED mappings must reside within TASK_SIZE */
if (TASK_SIZE - len < addr)
return -EINVAL;
/*
* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK))
return -EINVAL;
return addr;
}
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
/* requesting a specific address */
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
info.length = len;
info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
if (dir == DOWN) {
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
addr = vm_unmapped_area(&info);
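/* vm_unmapped_area() returns a page-aligned address on success, a negative error value otherwise */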
if (!(addr & ~PAGE_MASK))
return addr;
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
}
info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
return vm_unmapped_area(&info);
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, UP);
}
/*
* There is no need to export this but sched.h declares the function as
* extern so making it static here results in an error.
*/
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, DOWN);
}
int __virt_addr_valid(volatile void *kaddr)
{
unsigned long vaddr = (unsigned long)kaddr;
if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
return 0;
return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
/*
* You really shouldn't be using read() or write() on /dev/mem. This might go
* away in the future.
*/
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
/*
* Check whether addr is covered by a memory region without the
* MEMBLOCK_NOMAP attribute, and whether that region covers the
* entire range. In theory, this could lead to false negatives
* if the range is covered by distinct but adjacent memory regions
* that only differ in other attributes. However, few of such
* attributes have been defined, and it is debatable whether it
* follows that /dev/mem read() calls should be able to traverse
* such boundaries.
*/
return memblock_is_region_memory(addr, size) && memblock_is_map_memory(addr);
}
/*
* Do not allow /dev/mem mappings beyond the supported physical range.
*/
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
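/* Reject mappings whose end extends beyond the CPU's physical address bits */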
return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0)));
}
| linux-master | arch/loongarch/mm/mmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mmzone.h>
#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
void *vfrom, *vto;
vto = kmap_atomic(to);
vfrom = kmap_atomic(from);
copy_page(vto, vfrom);
kunmap_atomic(vfrom);
kunmap_atomic(vto);
/* Make sure this page is cleared on other CPUs too before using it */
smp_wmb();
}
int __ref page_is_ram(unsigned long pfn)
{
unsigned long addr = PFN_PHYS(pfn);
return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init(max_zone_pfns);
}
void __init mem_init(void)
{
max_mapnr = max_low_pfn;
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
memblock_free_all();
}
#endif /* !CONFIG_NUMA */
void __ref free_initmem(void)
{
free_initmem_default(POISON_FREE_INITMEM);
}
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
ret = __add_pages(nid, start_pfn, nr_pages, params);
if (ret)
pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
__func__, ret);
return ret;
}
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct page *page = pfn_to_page(start_pfn);
/* With altmap the first mapped page is offset from @start */
if (altmap)
page += vmem_altmap_offset(altmap);
__remove_pages(start_pfn, nr_pages, altmap);
}
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
return pa_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
unsigned long addr, unsigned long next)
{
pmd_t entry;
entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);
pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
set_pmd_at(&init_mm, addr, pmd, entry);
}
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
unsigned long addr, unsigned long next)
{
int huge = pmd_val(*pmd) & _PAGE_HUGE;
if (huge)
vmemmap_verify((pte_t *)pmd, node, addr, next);
return huge;
}
int __meminit vmemmap_populate(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap)
{
#if CONFIG_PGTABLE_LEVELS == 2
return vmemmap_populate_basepages(start, end, node, NULL);
#else
return vmemmap_populate_hugepages(start, end, node, NULL);
#endif
}
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
{
}
#endif
#endif
pte_t * __init populate_kernel_pte(unsigned long addr)
{
pgd_t *pgd = pgd_offset_k(addr);
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud;
pmd_t *pmd;
if (p4d_none(*p4d)) {
pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pud)
panic("%s: Failed to allocate memory\n", __func__);
p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
pud_init(pud);
#endif
}
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pmd)
panic("%s: Failed to allocate memory\n", __func__);
pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_init(pmd);
#endif
}
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd)) {
pte_t *pte;
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate memory\n", __func__);
pmd_populate_kernel(&init_mm, pmd, pte);
}
return pte_offset_kernel(pmd, addr);
}
void __init __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
unsigned long addr = __fix_to_virt(idx);
pte_t *ptep;
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
ptep = populate_kernel_pte(addr);
if (!pte_none(*ptep)) {
pte_ERROR(*ptep);
return;
}
if (pgprot_val(flags))
set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
else {
pte_clear(&init_mm, addr, ptep);
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
}
/*
* Align swapper_pg_dir to 64K, which allows its address to be loaded
* with a single LUI instruction in the TLB handlers. If we used
* __aligned(64K), its size would get rounded up to the alignment
* size, and waste space. So we place it in its own section and align
* it in the linker script.
*/
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
| linux-master | arch/loongarch/mm/init.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <asm/asm-extable.h>
#include <asm/branch.h>
static inline unsigned long
get_ex_fixup(const struct exception_table_entry *ex)
{
return ((unsigned long)&ex->fixup + ex->fixup);
}
static inline void regs_set_gpr(struct pt_regs *regs,
unsigned int offset, unsigned long val)
{
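/* offset is a byte offset into pt_regs; 0 denotes the hard-wired zero register and is skipped */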
if (offset && offset <= MAX_REG_OFFSET)
*(unsigned long *)((unsigned long)regs + offset) = val;
}
static bool ex_handler_fixup(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
regs->csr_era = get_ex_fixup(ex);
return true;
}
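/*
 * For EX_TYPE_UACCESS_ERR_ZERO entries: write -EFAULT into the register
 * encoded in EX_DATA_REG_ERR, clear the register encoded in
 * EX_DATA_REG_ZERO, then continue at the fixup address.
 */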
static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
regs->csr_era = get_ex_fixup(ex);
return true;
}
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *ex;
ex = search_exception_tables(exception_era(regs));
if (!ex)
return false;
switch (ex->type) {
case EX_TYPE_FIXUP:
return ex_handler_fixup(ex, regs);
case EX_TYPE_UACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
}
BUG();
}
| linux-master | arch/loongarch/mm/extable.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <asm/io.h>
#include <asm-generic/early_ioremap.h>
void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
{
return ((void __iomem *)TO_CACHE(phys_addr));
}
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}
void *early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
return early_memremap(phys_addr, size);
}
void *early_memremap_prot(resource_size_t phys_addr, unsigned long size,
unsigned long prot_val)
{
return early_memremap(phys_addr, size);
}
| linux-master | arch/loongarch/mm/ioremap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
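/*
 * dmw_virt_to_page() converts a direct-mapped-window (DMW) kernel address
 * via the linear pfn mapping, while tlb_virt_to_page() below looks up the
 * pte for a TLB-mapped kernel address.
 */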
struct page *dmw_virt_to_page(unsigned long kaddr)
{
return pfn_to_page(virt_to_pfn(kaddr));
}
EXPORT_SYMBOL_GPL(dmw_virt_to_page);
struct page *tlb_virt_to_page(unsigned long kaddr)
{
return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
}
EXPORT_SYMBOL_GPL(tlb_virt_to_page);
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *init, *ret = NULL;
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
if (ptdesc) {
ret = (pgd_t *)ptdesc_address(ptdesc);
init = pgd_offset(&init_mm, 0UL);
pgd_init(ret);
memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return ret;
}
EXPORT_SYMBOL_GPL(pgd_alloc);
void pgd_init(void *addr)
{
unsigned long *p, *end;
unsigned long entry;
#if !defined(__PAGETABLE_PUD_FOLDED)
entry = (unsigned long)invalid_pud_table;
#elif !defined(__PAGETABLE_PMD_FOLDED)
entry = (unsigned long)invalid_pmd_table;
#else
entry = (unsigned long)invalid_pte_table;
#endif
p = (unsigned long *)addr;
end = p + PTRS_PER_PGD;
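/* Fill the table eight entries per iteration with the invalid lower-level table */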
do {
p[0] = entry;
p[1] = entry;
p[2] = entry;
p[3] = entry;
p[4] = entry;
p += 8;
p[-3] = entry;
p[-2] = entry;
p[-1] = entry;
} while (p != end);
}
EXPORT_SYMBOL_GPL(pgd_init);
#ifndef __PAGETABLE_PMD_FOLDED
void pmd_init(void *addr)
{
unsigned long *p, *end;
unsigned long pagetable = (unsigned long)invalid_pte_table;
p = (unsigned long *)addr;
end = p + PTRS_PER_PMD;
do {
p[0] = pagetable;
p[1] = pagetable;
p[2] = pagetable;
p[3] = pagetable;
p[4] = pagetable;
p += 8;
p[-3] = pagetable;
p[-2] = pagetable;
p[-1] = pagetable;
} while (p != end);
}
EXPORT_SYMBOL_GPL(pmd_init);
#endif
#ifndef __PAGETABLE_PUD_FOLDED
void pud_init(void *addr)
{
unsigned long *p, *end;
unsigned long pagetable = (unsigned long)invalid_pmd_table;
p = (unsigned long *)addr;
end = p + PTRS_PER_PUD;
do {
p[0] = pagetable;
p[1] = pagetable;
p[2] = pagetable;
p[3] = pagetable;
p[4] = pagetable;
p += 8;
p[-3] = pagetable;
p[-2] = pagetable;
p[-1] = pagetable;
} while (p != end);
}
EXPORT_SYMBOL_GPL(pud_init);
#endif
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;
pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
return pmd;
}
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
flush_tlb_all();
}
void __init pagetable_init(void)
{
/* Initialize the entire pgd. */
pgd_init(swapper_pg_dir);
pgd_init(invalid_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_init(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_init(invalid_pmd_table);
#endif
}
| linux-master | arch/loongarch/mm/pgtable.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle ([email protected])
* Copyright (C) 2007 MIPS Technologies, Inc.
*/
#include <linux/cacheinfo.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/loongarch.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>
void cache_error_setup(void)
{
extern char __weak except_vec_cex;
set_merr_handler(0x0, &except_vec_cex, 0x80);
}
/*
* LoongArch maintains ICache/DCache coherency in hardware, so we just
* need an "ibar" here to avoid an instruction hazard.
*/
void local_flush_icache_range(unsigned long start, unsigned long end)
{
asm volatile ("\tibar 0\n"::);
}
EXPORT_SYMBOL(local_flush_icache_range);
static void flush_cache_leaf(unsigned int leaf)
{
int i, j, nr_nodes;
uint64_t addr = CSR_DMW0_BASE;
struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf;
nr_nodes = cache_private(cdesc) ? 1 : loongson_sysconf.nr_nodes;
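/*
 * Index-walk every way of every set of this leaf, then repeat on each
 * node's address space when the cache is shared between nodes.
 */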
do {
for (i = 0; i < cdesc->sets; i++) {
for (j = 0; j < cdesc->ways; j++) {
flush_cache_line(leaf, addr);
addr++;
}
addr -= cdesc->ways;
addr += cdesc->linesz;
}
addr += (1ULL << NODE_ADDRSPACE_SHIFT);
} while (--nr_nodes > 0);
}
asmlinkage __visible void __flush_cache_all(void)
{
int leaf;
struct cache_desc *cdesc = current_cpu_data.cache_leaves;
unsigned int cache_present = current_cpu_data.cache_leaves_present;
leaf = cache_present - 1;
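/* An inclusive last-level cache covers all lower levels, so flushing it alone is enough */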
if (cache_inclusive(cdesc + leaf)) {
flush_cache_leaf(leaf);
return;
}
for (leaf = 0; leaf < cache_present; leaf++)
flush_cache_leaf(leaf);
}
#define L1IUPRE (1 << 0)
#define L1IUUNIFY (1 << 1)
#define L1DPRE (1 << 2)
#define LXIUPRE (1 << 0)
#define LXIUUNIFY (1 << 1)
#define LXIUPRIV (1 << 2)
#define LXIUINCL (1 << 3)
#define LXDPRE (1 << 4)
#define LXDPRIV (1 << 5)
#define LXDINCL (1 << 6)
#define populate_cache_properties(cfg0, cdesc, level, leaf) \
do { \
unsigned int cfg1; \
\
cfg1 = read_cpucfg(LOONGARCH_CPUCFG17 + leaf); \
if (level == 1) { \
cdesc->flags |= CACHE_PRIVATE; \
} else { \
if (cfg0 & LXIUPRIV) \
cdesc->flags |= CACHE_PRIVATE; \
if (cfg0 & LXIUINCL) \
cdesc->flags |= CACHE_INCLUSIVE; \
} \
cdesc->level = level; \
cdesc->flags |= CACHE_PRESENT; \
cdesc->ways = ((cfg1 & CPUCFG_CACHE_WAYS_M) >> CPUCFG_CACHE_WAYS) + 1; \
cdesc->sets = 1 << ((cfg1 & CPUCFG_CACHE_SETS_M) >> CPUCFG_CACHE_SETS); \
cdesc->linesz = 1 << ((cfg1 & CPUCFG_CACHE_LSIZE_M) >> CPUCFG_CACHE_LSIZE); \
cdesc++; leaf++; \
} while (0)
void cpu_cache_init(void)
{
unsigned int leaf = 0, level = 1;
unsigned int config = read_cpucfg(LOONGARCH_CPUCFG16);
struct cache_desc *cdesc = current_cpu_data.cache_leaves;
if (config & L1IUPRE) {
if (config & L1IUUNIFY)
cdesc->type = CACHE_TYPE_UNIFIED;
else
cdesc->type = CACHE_TYPE_INST;
populate_cache_properties(config, cdesc, level, leaf);
}
if (config & L1DPRE) {
cdesc->type = CACHE_TYPE_DATA;
populate_cache_properties(config, cdesc, level, leaf);
}
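/* Skip the three L1 presence bits, then parse the per-level L2+ bits (7 bits each) */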
config = config >> 3;
for (level = 2; level <= CACHE_LEVEL_MAX; level++) {
if (!config)
break;
if (config & LXIUPRE) {
if (config & LXIUUNIFY)
cdesc->type = CACHE_TYPE_UNIFIED;
else
cdesc->type = CACHE_TYPE_INST;
populate_cache_properties(config, cdesc, level, leaf);
}
if (config & LXDPRE) {
cdesc->type = CACHE_TYPE_DATA;
populate_cache_properties(config, cdesc, level, leaf);
}
config = config >> 7;
}
BUG_ON(leaf > CACHE_LEAVES_MAX);
current_cpu_data.cache_leaves_present = leaf;
current_cpu_data.options |= LOONGARCH_CPU_PREFETCH;
}
static const pgprot_t protection_map[16] = {
[VM_NONE] = __pgprot(_CACHE_CC | _PAGE_USER |
_PAGE_PROTNONE | _PAGE_NO_EXEC |
_PAGE_NO_READ),
[VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT |
_PAGE_NO_EXEC),
[VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT |
_PAGE_NO_EXEC),
[VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT |
_PAGE_NO_EXEC),
[VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT),
[VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT),
[VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT),
[VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT),
[VM_SHARED] = __pgprot(_CACHE_CC | _PAGE_USER |
_PAGE_PROTNONE | _PAGE_NO_EXEC |
_PAGE_NO_READ),
[VM_SHARED | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT |
_PAGE_NO_EXEC),
[VM_SHARED | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT |
_PAGE_NO_EXEC | _PAGE_WRITE),
[VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT |
_PAGE_NO_EXEC | _PAGE_WRITE),
[VM_SHARED | VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT),
[VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT),
[VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT |
_PAGE_WRITE),
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
_PAGE_USER | _PAGE_PRESENT |
_PAGE_WRITE)
};
DECLARE_VM_GET_PAGE_PROT
| linux-master | arch/loongarch/mm/cache.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1995 - 2000 by Ralf Baechle
*/
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/kfence.h>
#include <asm/branch.h>
#include <asm/exception.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
int show_unhandled_signals = 1;
static void __kprobes no_context(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
const int field = sizeof(unsigned long) * 2;
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
if (kfence_handle_page_fault(address, write, regs))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
pr_alert("CPU %d Unable to handle kernel paging request at "
"virtual address %0*lx, era == %0*lx, ra == %0*lx\n",
raw_smp_processor_id(), field, address, field, regs->csr_era,
field, regs->regs[1]);
die("Oops", regs);
}
static void __kprobes do_out_of_memory(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
/*
* We ran out of memory, call the OOM killer, and return to userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
if (!user_mode(regs)) {
no_context(regs, write, address);
return;
}
pagefault_out_of_memory();
}
static void __kprobes do_sigbus(struct pt_regs *regs,
unsigned long write, unsigned long address, int si_code)
{
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
no_context(regs, write, address);
return;
}
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
current->thread.csr_badvaddr = address;
current->thread.trap_nr = read_csr_excode();
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}
static void __kprobes do_sigsegv(struct pt_regs *regs,
unsigned long write, unsigned long address, int si_code)
{
const int field = sizeof(unsigned long) * 2;
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
no_context(regs, write, address);
return;
}
/* User mode accesses just cause a SIGSEGV */
current->thread.csr_badvaddr = address;
if (!write)
current->thread.error_code = 1;
else
current->thread.error_code = 2;
current->thread.trap_nr = read_csr_excode();
if (show_unhandled_signals &&
unhandled_signal(current, SIGSEGV) && __ratelimit(&ratelimit_state)) {
pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
current->comm,
write ? "write access to" : "read access from",
field, address);
pr_info("era = %0*lx in", field,
(unsigned long) regs->csr_era);
print_vma_addr(KERN_CONT " ", regs->csr_era);
pr_cont("\n");
pr_info("ra = %0*lx in", field,
(unsigned long) regs->regs[1]);
print_vma_addr(KERN_CONT " ", regs->regs[1]);
pr_cont("\n");
}
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
static void __kprobes __do_page_fault(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
int si_code = SEGV_MAPERR;
unsigned int flags = FAULT_FLAG_DEFAULT;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
struct vm_area_struct *vma = NULL;
vm_fault_t fault;
if (kprobe_page_fault(regs, current->thread.trap_nr))
return;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
if (address & __UA_LIMIT) {
if (!user_mode(regs))
no_context(regs, write, address);
else
do_sigsegv(regs, write, address, si_code);
return;
}
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (faulthandler_disabled() || !mm) {
do_sigsegv(regs, write, address, si_code);
return;
}
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma))
goto bad_area_nosemaphore;
goto good_area;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore:
do_sigsegv(regs, write, address, si_code);
return;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
si_code = SEGV_ACCERR;
if (write) {
flags |= FAULT_FLAG_WRITE;
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
goto bad_area;
if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(vma, address, flags, regs);
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
no_context(regs, write, address);
return;
}
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED)
return;
if (unlikely(fault & VM_FAULT_RETRY)) {
flags |= FAULT_FLAG_TRIED;
/*
* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
goto retry;
}
if (unlikely(fault & VM_FAULT_ERROR)) {
mmap_read_unlock(mm);
if (fault & VM_FAULT_OOM) {
do_out_of_memory(regs, write, address);
return;
} else if (fault & VM_FAULT_SIGSEGV) {
do_sigsegv(regs, write, address, si_code);
return;
} else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
do_sigbus(regs, write, address, si_code);
return;
}
BUG();
}
mmap_read_unlock(mm);
}
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
irqentry_state_t state = irqentry_enter(regs);
/* Enable interrupt if enabled in parent context */
if (likely(regs->csr_prmd & CSR_PRMD_PIE))
local_irq_enable();
__do_page_fault(regs, write, address);
local_irq_disable();
irqentry_exit(regs, state);
}
| linux-master | arch/loongarch/mm/fault.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
void local_flush_tlb_all(void)
{
invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_all);
void local_flush_tlb_user(void)
{
invtlb_all(INVTLB_CURRENT_GFALSE, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_user);
void local_flush_tlb_kernel(void)
{
invtlb_all(INVTLB_CURRENT_GTRUE, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_kernel);
/*
* All entries common to a mm share an asid. To effectively flush
* these entries, we just bump the asid.
*/
void local_flush_tlb_mm(struct mm_struct *mm)
{
int cpu;
preempt_disable();
cpu = smp_processor_id();
if (asid_valid(mm, cpu))
drop_mmu_context(mm, cpu);
else
cpumask_clear_cpu(cpu, mm_cpumask(mm));
preempt_enable();
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
int cpu = smp_processor_id();
if (asid_valid(mm, cpu)) {
unsigned long size, flags;
local_irq_save(flags);
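/* TLB entries cover even/odd page pairs, so flush in two-page steps */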
start = round_down(start, PAGE_SIZE << 1);
end = round_up(end, PAGE_SIZE << 1);
size = (end - start) >> (PAGE_SHIFT + 1);
if (size <= (current_cpu_data.tlbsizestlbsets ?
current_cpu_data.tlbsize / 8 :
current_cpu_data.tlbsize / 2)) {
int asid = cpu_asid(cpu, mm);
while (start < end) {
invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, start);
start += (PAGE_SIZE << 1);
}
} else {
drop_mmu_context(mm, cpu);
}
local_irq_restore(flags);
} else {
cpumask_clear_cpu(cpu, mm_cpumask(mm));
}
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long size, flags;
local_irq_save(flags);
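/* Convert the range to two-page units, as each TLB entry maps a page pair */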
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;
if (size <= (current_cpu_data.tlbsizestlbsets ?
current_cpu_data.tlbsize / 8 :
current_cpu_data.tlbsize / 2)) {
start &= (PAGE_MASK << 1);
end += ((PAGE_SIZE << 1) - 1);
end &= (PAGE_MASK << 1);
while (start < end) {
invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, start);
start += (PAGE_SIZE << 1);
}
} else {
local_flush_tlb_kernel();
}
local_irq_restore(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
int cpu = smp_processor_id();
if (asid_valid(vma->vm_mm, cpu)) {
int newpid;
newpid = cpu_asid(cpu, vma->vm_mm);
page &= (PAGE_MASK << 1);
invtlb(INVTLB_ADDR_GFALSE_AND_ASID, newpid, page);
} else {
cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm));
}
}
/*
* This one is only used for pages with the global bit set so we don't care
* much about the ASID.
*/
void local_flush_tlb_one(unsigned long page)
{
page &= (PAGE_MASK << 1);
invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, page);
}
static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
#ifdef CONFIG_HUGETLB_PAGE
int idx;
unsigned long lo;
unsigned long flags;
local_irq_save(flags);
address &= (PAGE_MASK << 1);
write_csr_entryhi(address);
tlb_probe();
idx = read_csr_tlbidx();
write_csr_pagesize(PS_HUGE_SIZE);
lo = pmd_to_entrylo(pte_val(*ptep));
write_csr_entrylo0(lo);
write_csr_entrylo1(lo + (HPAGE_SIZE >> 1));
if (idx < 0)
tlb_write_random();
else
tlb_write_indexed();
write_csr_pagesize(PS_DEFAULT_SIZE);
local_irq_restore(flags);
#endif
}
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
int idx;
unsigned long flags;
if (cpu_has_ptw)
return;
/*
* Handle the debugger faulting in pages on behalf of the debuggee.
*/
if (current->active_mm != vma->vm_mm)
return;
if (pte_val(*ptep) & _PAGE_HUGE) {
__update_hugetlb(vma, address, ptep);
return;
}
local_irq_save(flags);
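/*
 * A TLB entry covers an adjacent even/odd page pair (entrylo0/entrylo1),
 * so step back to the even pte of the pair before loading both halves.
 */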
if ((unsigned long)ptep & sizeof(pte_t))
ptep--;
address &= (PAGE_MASK << 1);
write_csr_entryhi(address);
tlb_probe();
idx = read_csr_tlbidx();
write_csr_pagesize(PS_DEFAULT_SIZE);
write_csr_entrylo0(pte_val(*ptep++));
write_csr_entrylo1(pte_val(*ptep));
if (idx < 0)
tlb_write_random();
else
tlb_write_indexed();
local_irq_restore(flags);
}
static void setup_ptwalker(void)
{
unsigned long pwctl0, pwctl1;
unsigned long pgd_i = 0, pgd_w = 0;
unsigned long pud_i = 0, pud_w = 0;
unsigned long pmd_i = 0, pmd_w = 0;
unsigned long pte_i = 0, pte_w = 0;
pgd_i = PGDIR_SHIFT;
pgd_w = PAGE_SHIFT - 3;
#if CONFIG_PGTABLE_LEVELS > 3
pud_i = PUD_SHIFT;
pud_w = PAGE_SHIFT - 3;
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_i = PMD_SHIFT;
pmd_w = PAGE_SHIFT - 3;
#endif
pte_i = PAGE_SHIFT;
pte_w = PAGE_SHIFT - 3;
pwctl0 = pte_i | pte_w << 5 | pmd_i << 10 | pmd_w << 15 | pud_i << 20 | pud_w << 25;
pwctl1 = pgd_i | pgd_w << 6;
if (cpu_has_ptw)
pwctl1 |= CSR_PWCTL1_PTW;
csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0);
csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1);
csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID);
}
static void output_pgtable_bits_defines(void)
{
#define pr_define(fmt, ...) \
pr_debug("#define " fmt, ##__VA_ARGS__)
pr_debug("#include <asm/asm.h>\n");
pr_debug("#include <asm/regdef.h>\n");
pr_debug("\n");
pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
pr_debug("\n");
}
#ifdef CONFIG_NUMA
unsigned long pcpu_handlers[NR_CPUS];
#endif
extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
static void setup_tlb_handler(int cpu)
{
setup_ptwalker();
local_flush_tlb_all();
/* The tlb handlers are generated only once */
if (cpu == 0) {
memcpy((void *)tlbrentry, handle_tlb_refill, 0x80);
local_flush_icache_range(tlbrentry, tlbrentry + 0x80);
if (!cpu_has_ptw) {
set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE);
set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE);
set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE);
set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE);
} else {
set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load_ptw, VECSIZE);
set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load_ptw, VECSIZE);
set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store_ptw, VECSIZE);
set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify_ptw, VECSIZE);
}
set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
}
#ifdef CONFIG_NUMA
else {
void *addr;
struct page *page;
const int vec_sz = sizeof(exception_handlers);
if (pcpu_handlers[cpu])
return;
page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz));
if (!page)
return;
addr = page_address(page);
pcpu_handlers[cpu] = (unsigned long)addr;
memcpy((void *)addr, (void *)eentry, vec_sz);
local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz);
csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
}
#endif
}
void tlb_init(int cpu)
{
write_csr_pagesize(PS_DEFAULT_SIZE);
write_csr_stlbpgsize(PS_DEFAULT_SIZE);
write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE);
setup_tlb_handler(cpu);
output_pgtable_bits_defines();
}
| linux-master | arch/loongarch/mm/tlb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/syscall.h>
#include <asm-generic/sections.h>
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* As the `call _mcount` site follows the LoongArch psABI, the ra-save and
* stack-allocation operations can be found before this insn.
*/
static int ftrace_get_parent_ra_addr(unsigned long insn_addr, int *ra_off)
{
int limit = 32;
union loongarch_instruction *insn;
insn = (union loongarch_instruction *)insn_addr;
do {
insn--;
limit--;
if (is_ra_save_ins(insn))
*ra_off = -((1 << 12) - insn->reg2i12_format.immediate);
} while (!is_stack_alloc_ins(insn) && limit);
if (!limit)
return -EINVAL;
return 0;
}
void prepare_ftrace_return(unsigned long self_addr,
unsigned long callsite_sp, unsigned long old)
{
int ra_off;
unsigned long return_hooker = (unsigned long)&return_to_handler;
if (unlikely(ftrace_graph_is_dead()))
return;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
if (ftrace_get_parent_ra_addr(self_addr, &ra_off))
goto out;
if (!function_graph_enter(old, self_addr, 0, NULL))
*(unsigned long *)(callsite_sp + ra_off) = return_hooker;
return;
out:
ftrace_graph_stop();
WARN_ON(1);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
| linux-master | arch/loongarch/kernel/ftrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Based on arch/arm64/kernel/ftrace.c
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <asm/inst.h>
#include <asm/module.h>
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
{
u32 replaced;
if (validate) {
if (larch_insn_read((void *)pc, &replaced))
return -EFAULT;
if (replaced != old)
return -EINVAL;
}
if (larch_insn_patch_text((void *)pc, new))
return -EPERM;
return 0;
}
#ifdef CONFIG_MODULES
static bool reachable_by_bl(unsigned long addr, unsigned long pc)
{
long offset = (long)addr - (long)pc;
return offset >= -SZ_128M && offset < SZ_128M;
}
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
struct plt_entry *plt = mod->arch.ftrace_trampolines;
if (addr == FTRACE_ADDR)
return &plt[FTRACE_PLT_IDX];
if (addr == FTRACE_REGS_ADDR &&
IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
return &plt[FTRACE_REGS_PLT_IDX];
return NULL;
}
/*
* Find the address the callsite must branch to in order to reach '*addr'.
*
* Due to the limited range of the 'bl' instruction, modules may be placed too
* away to branch directly and we must use a PLT.
*
* Returns true when '*addr' contains a reachable target address, or has been
* modified to contain a PLT address. Returns false otherwise.
*/
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
unsigned long pc = rec->ip + LOONGARCH_INSN_SIZE;
struct plt_entry *plt;
/*
* If a custom trampoline is unreachable, rely on the ftrace_regs_caller
* trampoline which knows how to indirectly reach that trampoline through
* ops->direct_call.
*/
if (*addr != FTRACE_ADDR && *addr != FTRACE_REGS_ADDR && !reachable_by_bl(*addr, pc))
*addr = FTRACE_REGS_ADDR;
/*
* When the target is within range of the 'bl' instruction, use 'addr'
* as-is and branch to that directly.
*/
if (reachable_by_bl(*addr, pc))
return true;
/*
* 'mod' is only set at module load time, but if we end up
* dealing with an out-of-range condition, we can assume it
* is due to a module being loaded far away from the kernel.
*
* NOTE: __module_text_address() must be called with preemption
* disabled, but we can rely on ftrace_lock to ensure that 'mod'
* retains its validity throughout the remainder of this code.
*/
if (!mod) {
preempt_disable();
mod = __module_text_address(pc);
preempt_enable();
}
if (WARN_ON(!mod))
return false;
plt = get_ftrace_plt(mod, *addr);
if (!plt) {
pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
return false;
}
*addr = (unsigned long)plt;
return true;
}
#else /* !CONFIG_MODULES */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
return true;
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
u32 old, new;
unsigned long pc;
pc = rec->ip + LOONGARCH_INSN_SIZE;
if (!ftrace_find_callable_addr(rec, NULL, &addr))
return -EINVAL;
if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
return -EINVAL;
new = larch_insn_gen_bl(pc, addr);
old = larch_insn_gen_bl(pc, old_addr);
return ftrace_modify_code(pc, old, new, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
u32 new;
unsigned long pc;
pc = (unsigned long)&ftrace_call;
new = larch_insn_gen_bl(pc, (unsigned long)func);
return ftrace_modify_code(pc, 0, new, false);
}
/*
* The compiler has inserted 2 NOPs before the regular function prologue.
* T series registers are available and safe because of LoongArch's psABI.
*
* At runtime, we can replace nop with bl to enable ftrace call and replace bl
* with nop to disable ftrace call. The bl requires us to save the original RA
* value, so it saves RA at t0 here.
*
* Details are:
*
* | Compiled | Disabled | Enabled |
* +------------+------------------------+------------------------+
* | nop | move t0, ra | move t0, ra |
* | nop | nop | bl ftrace_caller |
* | func_body | func_body | func_body |
*
* The RA value will be recovered by ftrace_regs_entry, and restored into RA
* before returning to the regular function prologue. When a function is not
* being traced, the "move t0, ra" is not harmful.
*/
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
u32 old, new;
unsigned long pc;
pc = rec->ip;
old = larch_insn_gen_nop();
new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);
return ftrace_modify_code(pc, old, new, true);
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
u32 old, new;
unsigned long pc;
pc = rec->ip + LOONGARCH_INSN_SIZE;
if (!ftrace_find_callable_addr(rec, NULL, &addr))
return -EINVAL;
old = larch_insn_gen_nop();
new = larch_insn_gen_bl(pc, addr);
return ftrace_modify_code(pc, old, new, true);
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
u32 old, new;
unsigned long pc;
pc = rec->ip + LOONGARCH_INSN_SIZE;
if (!ftrace_find_callable_addr(rec, NULL, &addr))
return -EINVAL;
new = larch_insn_gen_nop();
old = larch_insn_gen_bl(pc, addr);
return ftrace_modify_code(pc, old, new, true);
}
void arch_ftrace_update_code(int command)
{
command |= FTRACE_MAY_SLEEP;
ftrace_modify_all_code(command);
}
int __init ftrace_dyn_arch_init(void)
{
return 0;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
{
unsigned long old;
unsigned long return_hooker = (unsigned long)&return_to_handler;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
old = *parent;
if (!function_graph_enter(old, self_addr, 0, parent))
*parent = return_hooker;
}
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct pt_regs *regs = &fregs->regs;
unsigned long *parent = (unsigned long *)&regs->regs[1];
prepare_ftrace_return(ip, (unsigned long *)parent);
}
#else
static int ftrace_modify_graph_caller(bool enable)
{
u32 branch, nop;
unsigned long pc, func;
extern void ftrace_graph_call(void);
pc = (unsigned long)&ftrace_graph_call;
func = (unsigned long)&ftrace_graph_caller;
nop = larch_insn_gen_nop();
branch = larch_insn_gen_b(pc, func);
if (enable)
return ftrace_modify_code(pc, nop, branch, true);
else
return ftrace_modify_code(pc, branch, nop, true);
}
int ftrace_enable_ftrace_graph_caller(void)
{
return ftrace_modify_graph_caller(true);
}
int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KPROBES_ON_FTRACE
/* Ftrace callback handler for kprobes -- called with preempt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
int bit;
struct pt_regs *regs;
struct kprobe *p;
struct kprobe_ctlblk *kcb;
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
goto out;
regs = ftrace_get_regs(fregs);
if (!regs)
goto out;
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
unsigned long orig_ip = instruction_pointer(regs);
instruction_pointer_set(regs, ip);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (!p->pre_handler || !p->pre_handler(p, regs)) {
/*
* Emulate singlestep (and also recover regs->csr_era)
* as if there is a nop
*/
instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
instruction_pointer_set(regs, orig_ip);
}
/*
* If pre_handler returns !0, it changes regs->csr_era. We have to
* skip emulating post_handler.
*/
__this_cpu_write(current_kprobe, NULL);
}
out:
ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
p->ainsn.insn = NULL;
return 0;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */
| linux-master | arch/loongarch/kernel/ftrace_dyn.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/cpumask.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <asm/inst.h>
#include <asm/loongson.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/unwind.h>
extern const int unwind_hint_ade;
extern const int unwind_hint_ale;
extern const int unwind_hint_bp;
extern const int unwind_hint_fpe;
extern const int unwind_hint_fpu;
extern const int unwind_hint_lsx;
extern const int unwind_hint_lasx;
extern const int unwind_hint_lbt;
extern const int unwind_hint_ri;
extern const int unwind_hint_watch;
extern unsigned long eentry;
#ifdef CONFIG_NUMA
extern unsigned long pcpu_handlers[NR_CPUS];
#endif
static inline bool scan_handlers(unsigned long entry_offset)
{
int idx, offset;
if (entry_offset >= EXCCODE_INT_START * VECSIZE)
return false;
idx = entry_offset / VECSIZE;
offset = entry_offset % VECSIZE;
switch (idx) {
case EXCCODE_ADE:
return offset == unwind_hint_ade;
case EXCCODE_ALE:
return offset == unwind_hint_ale;
case EXCCODE_BP:
return offset == unwind_hint_bp;
case EXCCODE_FPE:
return offset == unwind_hint_fpe;
case EXCCODE_FPDIS:
return offset == unwind_hint_fpu;
case EXCCODE_LSXDIS:
return offset == unwind_hint_lsx;
case EXCCODE_LASXDIS:
return offset == unwind_hint_lasx;
case EXCCODE_BTDIS:
return offset == unwind_hint_lbt;
case EXCCODE_INE:
return offset == unwind_hint_ri;
case EXCCODE_WATCH:
return offset == unwind_hint_watch;
default:
return false;
}
}
static inline bool fix_exception(unsigned long pc)
{
#ifdef CONFIG_NUMA
int cpu;
for_each_possible_cpu(cpu) {
if (!pcpu_handlers[cpu])
continue;
if (scan_handlers(pc - pcpu_handlers[cpu]))
return true;
}
#endif
return scan_handlers(pc - eentry);
}
/*
* When we meet ftrace_regs_entry, reset the first flag as if tracing had
* just started. Prologue analysis will stop soon because the PC is at the
* function entry.
*/
static inline bool fix_ftrace(unsigned long pc)
{
#ifdef CONFIG_DYNAMIC_FTRACE
return pc == (unsigned long)ftrace_call + LOONGARCH_INSN_SIZE;
#else
return false;
#endif
}
static inline bool unwind_state_fixup(struct unwind_state *state)
{
if (!fix_exception(state->pc) && !fix_ftrace(state->pc))
return false;
state->reset = true;
return true;
}
/*
* A LoongArch function prologue looks like this:
* [instructions not using stack variables]
* addi.d sp, sp, -imm
* st.d xx, sp, offset <- save callee-saved regs, and
* st.d yy, sp, offset save ra if the function is non-leaf.
* [other instructions]
*/
static bool unwind_by_prologue(struct unwind_state *state)
{
long frame_ra = -1;
unsigned long frame_size = 0;
unsigned long size, offset, pc;
struct pt_regs *regs;
struct stack_info *info = &state->stack_info;
union loongarch_instruction *ip, *ip_end;
if (state->sp >= info->end || state->sp < info->begin)
return false;
if (state->reset) {
regs = (struct pt_regs *)state->sp;
state->first = true;
state->reset = false;
state->pc = regs->csr_era;
state->ra = regs->regs[1];
state->sp = regs->regs[3];
return true;
}
/*
* When first is not set, the PC is a return address in the previous frame.
* We need to adjust its value in case it overflows into the next symbol.
*/
pc = state->pc - (state->first ? 0 : LOONGARCH_INSN_SIZE);
if (!kallsyms_lookup_size_offset(pc, &size, &offset))
return false;
ip = (union loongarch_instruction *)(pc - offset);
ip_end = (union loongarch_instruction *)pc;
while (ip < ip_end) {
if (is_stack_alloc_ins(ip)) {
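/* addi.d sp, sp, -imm: the 12-bit immediate is negative, recover the positive frame size */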
frame_size = (1 << 12) - ip->reg2i12_format.immediate;
ip++;
break;
}
ip++;
}
/*
* Can't find a stack allocation action, so the PC may be in a leaf
* function. That is only reasonable when first is true; otherwise the
* prologue analysis is broken.
*/
if (!frame_size) {
if (state->first)
goto first;
return false;
}
while (ip < ip_end) {
if (is_ra_save_ins(ip)) {
frame_ra = ip->reg2i12_format.immediate;
break;
}
if (is_branch_ins(ip))
break;
ip++;
}
/* Can't find a save-$ra action; the PC may be in a leaf function, too. */
if (frame_ra < 0) {
if (state->first) {
state->sp = state->sp + frame_size;
goto first;
}
return false;
}
state->pc = *(unsigned long *)(state->sp + frame_ra);
state->sp = state->sp + frame_size;
goto out;
first:
state->pc = state->ra;
out:
state->first = false;
return unwind_state_fixup(state) || __kernel_text_address(state->pc);
}
static bool next_frame(struct unwind_state *state)
{
unsigned long pc;
struct pt_regs *regs;
struct stack_info *info = &state->stack_info;
if (unwind_done(state))
return false;
do {
if (unwind_by_prologue(state)) {
state->pc = unwind_graph_addr(state, state->pc, state->sp);
return true;
}
if (info->type == STACK_TYPE_IRQ && info->end == state->sp) {
regs = (struct pt_regs *)info->next_sp;
pc = regs->csr_era;
if (user_mode(regs) || !__kernel_text_address(pc))
goto out;
state->first = true;
state->pc = pc;
state->ra = regs->regs[1];
state->sp = regs->regs[3];
get_stack_info(state->sp, state->task, info);
return true;
}
state->sp = info->next_sp;
} while (!get_stack_info(state->sp, state->task, info));
out:
state->error = true;
return false;
}
unsigned long unwind_get_return_address(struct unwind_state *state)
{
return __unwind_get_return_address(state);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
void unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs)
{
__unwind_start(state, task, regs);
state->type = UNWINDER_PROLOGUE;
state->first = true;
/*
* The current PC is not a kernel text address, so we cannot look up its
* symbol and prologue analysis would be broken. Luckily, we can fall
* back to default_next_frame().
*/
if (!__kernel_text_address(state->pc)) {
state->type = UNWINDER_GUESS;
if (!unwind_done(state))
unwind_next_frame(state);
}
}
EXPORT_SYMBOL_GPL(unwind_start);
bool unwind_next_frame(struct unwind_state *state)
{
return state->type == UNWINDER_PROLOGUE ?
next_frame(state) : default_next_frame(state);
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
| linux-master | arch/loongarch/kernel/unwind_prologue.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author: Huacai Chen <[email protected]>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
* Copyright (C) 2005, 2006 by Ralf Baechle ([email protected])
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004 Thiemo Seufer
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/loongarch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/unwind.h>
#include <asm/vdso.h>
#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
/*
* Idle related variables and functions
*/
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
unsigned long crmd;
unsigned long prmd;
unsigned long euen;
/* New thread loses kernel privileges. */
crmd = regs->csr_crmd & ~(PLV_MASK);
crmd |= PLV_USER;
regs->csr_crmd = crmd;
prmd = regs->csr_prmd & ~(PLV_MASK);
prmd |= PLV_USER;
regs->csr_prmd = prmd;
euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
regs->csr_euen = euen;
lose_fpu(0);
lose_lbt(0);
clear_thread_flag(TIF_LSX_CTX_LIVE);
clear_thread_flag(TIF_LASX_CTX_LIVE);
clear_thread_flag(TIF_LBT_CTX_LIVE);
clear_used_math();
regs->csr_era = pc;
regs->regs[3] = sp;
}
void flush_thread(void)
{
flush_ptrace_hw_breakpoint(current);
}
void exit_thread(struct task_struct *tsk)
{
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
/*
* Save any process state which is live in hardware registers to the
* parent context prior to duplication. This prevents the new child
* state becoming stale if the parent is preempted before copy_thread()
* gets a chance to save the parent's live hardware registers to the
* child context.
*/
preempt_disable();
if (is_fpu_owner()) {
if (is_lasx_enabled())
save_lasx(current);
else if (is_lsx_enabled())
save_lsx(current);
else
save_fp(current);
}
preempt_enable();
if (!used_math())
memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
else
memcpy(dst, src, offsetof(struct task_struct, thread.lbt.scr0));
#ifdef CONFIG_CPU_HAS_LBT
memcpy(&dst->thread.lbt, &src->thread.lbt, sizeof(struct loongarch_lbt));
#endif
return 0;
}
/*
* Copy architecture-specific thread state
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long childksp;
unsigned long tls = args->tls;
unsigned long usp = args->stack;
unsigned long clone_flags = args->flags;
struct pt_regs *childregs, *regs = current_pt_regs();
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
/* set up new TSS. */
childregs = (struct pt_regs *) childksp - 1;
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
p->thread.sched_cfa = 0;
p->thread.csr_euen = 0;
p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
if (unlikely(args->fn)) {
/* kernel thread */
p->thread.reg03 = childksp;
p->thread.reg23 = (unsigned long)args->fn;
p->thread.reg24 = (unsigned long)args->fn_arg;
p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
memset(childregs, 0, sizeof(struct pt_regs));
childregs->csr_euen = p->thread.csr_euen;
childregs->csr_crmd = p->thread.csr_crmd;
childregs->csr_prmd = p->thread.csr_prmd;
childregs->csr_ecfg = p->thread.csr_ecfg;
goto out;
}
/* user thread */
*childregs = *regs;
childregs->regs[4] = 0; /* Child gets zero as return value */
if (usp)
childregs->regs[3] = usp;
p->thread.reg03 = (unsigned long) childregs;
p->thread.reg01 = (unsigned long) ret_from_fork;
p->thread.sched_ra = (unsigned long) ret_from_fork;
/*
* New tasks lose permission to use the fpu. This accelerates context
* switching for most programs since they don't use the fpu.
*/
childregs->csr_euen = 0;
if (clone_flags & CLONE_SETTLS)
childregs->regs[2] = tls;
out:
ptrace_hw_copy_thread(p);
clear_tsk_thread_flag(p, TIF_USEDFPU);
clear_tsk_thread_flag(p, TIF_USEDSIMD);
clear_tsk_thread_flag(p, TIF_USEDLBT);
clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
clear_tsk_thread_flag(p, TIF_LBT_CTX_LIVE);
return 0;
}
unsigned long __get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
struct unwind_state state;
if (!try_get_task_stack(task))
return 0;
for (unwind_start(&state, task, NULL);
!unwind_done(&state); unwind_next_frame(&state)) {
pc = unwind_get_return_address(&state);
if (!pc)
break;
if (in_sched_functions(pc))
continue;
break;
}
put_task_stack(task);
return pc;
}
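/*
 * Check whether @stack lies on this CPU's IRQ stack. On a match, fill @info
 * with the stack bounds and the previous stack pointer saved at its top.
 */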
bool in_irq_stack(unsigned long stack, struct stack_info *info)
{
unsigned long nextsp;
unsigned long begin = (unsigned long)this_cpu_read(irq_stack);
unsigned long end = begin + IRQ_STACK_START;
if (stack < begin || stack >= end)
return false;
nextsp = *(unsigned long *)end;
if (nextsp & (SZREG - 1))
return false;
info->begin = begin;
info->end = end;
info->next_sp = nextsp;
info->type = STACK_TYPE_IRQ;
return true;
}
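/* Check whether @stack lies within @task's kernel stack and describe it in @info. */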
bool in_task_stack(unsigned long stack, struct task_struct *task,
struct stack_info *info)
{
unsigned long begin = (unsigned long)task_stack_page(task);
unsigned long end = begin + THREAD_SIZE;
if (stack < begin || stack >= end)
return false;
info->begin = begin;
info->end = end;
info->next_sp = 0;
info->type = STACK_TYPE_TASK;
return true;
}
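/*
 * Classify @stack: try the task stack first, then (for the current task
 * only) the per-CPU IRQ stack; anything else is STACK_TYPE_UNKNOWN.
 */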
int get_stack_info(unsigned long stack, struct task_struct *task,
struct stack_info *info)
{
task = task ? : current;
if (!stack || stack & (SZREG - 1))
goto unknown;
if (in_task_stack(stack, task, info))
return 0;
if (task != current)
goto unknown;
if (in_irq_stack(stack, info))
return 0;
unknown:
info->type = STACK_TYPE_UNKNOWN;
return -EINVAL;
}
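/*
 * Top of the user stack: TASK_SIZE minus the space reserved for the VDSO,
 * its data pages, and (optionally) VDSO base randomization.
 */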
unsigned long stack_top(void)
{
unsigned long top = TASK_SIZE & PAGE_MASK;
/* Space for the VDSO & data page */
top -= PAGE_ALIGN(current->thread.vdso->size);
top -= VVAR_SIZE;
/* Space to randomize the VDSO base */
if (current->flags & PF_RANDOMIZE)
top -= VDSO_RANDOMIZE_SIZE;
return top;
}
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and on a 16-byte boundary for the 64-bit ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_u32_below(PAGE_SIZE);
return sp & STACK_ALIGN;
}
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;
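/*
 * IPI-based backtrace support: each targeted CPU dumps its own backtrace
 * from IRQ context and then releases its slot in backtrace_csd_busy.
 */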
static void handle_backtrace(void *info)
{
nmi_cpu_backtrace(get_irq_regs());
cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}
static void raise_backtrace(cpumask_t *mask)
{
call_single_data_t *csd;
int cpu;
for_each_cpu(cpu, mask) {
/*
* If we previously sent an IPI to the target CPU & it hasn't
* cleared its bit in the busy cpumask then it didn't handle
* our previous IPI & it's not safe for us to reuse the
* call_single_data_t.
*/
if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
cpu);
continue;
}
csd = &per_cpu(backtrace_csd, cpu);
csd->func = handle_backtrace;
smp_call_function_single_async(cpu, csd);
}
}
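/* Hook the IPI-based raise_backtrace() into the generic NMI backtrace code. */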
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
}
#ifdef CONFIG_64BIT
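/* Flatten a pt_regs into the LOONGARCH_EF_*-indexed array (ELF register set layout). */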
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
unsigned int i;
for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++) {
uregs[i] = regs->regs[i - LOONGARCH_EF_R0];
}
uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0;
uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era;
uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr;
uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd;
uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd;
uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen;
uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
}
#endif /* CONFIG_64BIT */
| linux-master | arch/loongarch/kernel/process.c |
// SPDX-License-Identifier: GPL-2.0
/*
* LoongArch specific sysrq operations.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/workqueue.h>
#include <asm/cpu-features.h>
#include <asm/tlb.h>
/*
* Dump TLB entries on all CPUs.
*/
static DEFINE_SPINLOCK(show_lock);
static void sysrq_tlbdump_single(void *dummy)
{
unsigned long flags;
spin_lock_irqsave(&show_lock, flags);
pr_info("CPU%d:\n", smp_processor_id());
dump_tlb_regs();
pr_info("\n");
dump_tlb_all();
pr_info("\n");
spin_unlock_irqrestore(&show_lock, flags);
}
#ifdef CONFIG_SMP
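/*
 * Dumping the other CPUs is deferred to a workqueue: the sysrq handler may
 * run in atomic context, where smp_call_function() must not be called.
 */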
static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
{
smp_call_function(sysrq_tlbdump_single, NULL, 0);
}
static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
#endif
static void sysrq_handle_tlbdump(u8 key)
{
sysrq_tlbdump_single(NULL);
#ifdef CONFIG_SMP
schedule_work(&sysrq_tlbdump);
#endif
}
static struct sysrq_key_op sysrq_tlbdump_op = {
.handler = sysrq_handle_tlbdump,
.help_msg = "show-tlbs(x)",
.action_msg = "Show TLB entries",
.enable_mask = SYSRQ_ENABLE_DUMP,
};
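/*
 * Bound to SysRq key 'x' below; once registered it can also be triggered
 * from userspace via "echo x > /proc/sysrq-trigger".
 */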
static int __init loongarch_sysrq_init(void)
{
return register_sysrq_key('x', &sysrq_tlbdump_op);
}
arch_initcall(loongarch_sysrq_init);
| linux-master | arch/loongarch/kernel/sysrq.c |