Dataset schema:
  python_code: string, lengths 0 to 1.8M
  repo_name: string, 7 classes
  file_path: string, lengths 5 to 99
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/char/ds1620.c: Dallas Semiconductors DS1620 * thermometer driver (as used in the Rebel.com NetWinder) */ #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/capability.h> #include <linux/init.h> #include <linux/mutex.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <linux/uaccess.h> #include <asm/therm.h> #ifdef CONFIG_PROC_FS /* define for /proc interface */ #define THERM_USE_PROC #endif /* Definitions for DS1620 chip */ #define THERM_START_CONVERT 0xee #define THERM_RESET 0xaf #define THERM_READ_CONFIG 0xac #define THERM_READ_TEMP 0xaa #define THERM_READ_TL 0xa2 #define THERM_READ_TH 0xa1 #define THERM_WRITE_CONFIG 0x0c #define THERM_WRITE_TL 0x02 #define THERM_WRITE_TH 0x01 #define CFG_CPU 2 #define CFG_1SHOT 1 static DEFINE_MUTEX(ds1620_mutex); static const char *fan_state[] = { "off", "on", "on (hardwired)" }; /* * Start of NetWinder specifics * Note! We have to hold the gpio lock with IRQs disabled over the * whole of our transaction to the Dallas chip, since there is a * chance that the WaveArtist driver could touch these bits to * enable or disable the speaker. */ extern unsigned int system_rev; static inline void netwinder_ds1620_set_clk(int clk) { nw_gpio_modify_op(GPIO_DSCLK, clk ? GPIO_DSCLK : 0); } static inline void netwinder_ds1620_set_data(int dat) { nw_gpio_modify_op(GPIO_DATA, dat ? GPIO_DATA : 0); } static inline int netwinder_ds1620_get_data(void) { return nw_gpio_read() & GPIO_DATA; } static inline void netwinder_ds1620_set_data_dir(int dir) { nw_gpio_modify_io(GPIO_DATA, dir ? GPIO_DATA : 0); } static inline void netwinder_ds1620_reset(void) { nw_cpld_modify(CPLD_DS_ENABLE, 0); nw_cpld_modify(CPLD_DS_ENABLE, CPLD_DS_ENABLE); } static inline void netwinder_lock(unsigned long *flags) { raw_spin_lock_irqsave(&nw_gpio_lock, *flags); } static inline void netwinder_unlock(unsigned long *flags) { raw_spin_unlock_irqrestore(&nw_gpio_lock, *flags); } static inline void netwinder_set_fan(int i) { unsigned long flags; raw_spin_lock_irqsave(&nw_gpio_lock, flags); nw_gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0); raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); } static inline int netwinder_get_fan(void) { if ((system_rev & 0xf000) == 0x4000) return FAN_ALWAYS_ON; return (nw_gpio_read() & GPIO_FAN) ? 
FAN_ON : FAN_OFF; } /* * End of NetWinder specifics */ static void ds1620_send_bits(int nr, int value) { int i; for (i = 0; i < nr; i++) { netwinder_ds1620_set_data(value & 1); netwinder_ds1620_set_clk(0); udelay(1); netwinder_ds1620_set_clk(1); udelay(1); value >>= 1; } } static unsigned int ds1620_recv_bits(int nr) { unsigned int value = 0, mask = 1; int i; netwinder_ds1620_set_data(0); for (i = 0; i < nr; i++) { netwinder_ds1620_set_clk(0); udelay(1); if (netwinder_ds1620_get_data()) value |= mask; mask <<= 1; netwinder_ds1620_set_clk(1); udelay(1); } return value; } static void ds1620_out(int cmd, int bits, int value) { unsigned long flags; netwinder_lock(&flags); netwinder_ds1620_set_clk(1); netwinder_ds1620_set_data_dir(0); netwinder_ds1620_reset(); udelay(1); ds1620_send_bits(8, cmd); if (bits) ds1620_send_bits(bits, value); udelay(1); netwinder_ds1620_reset(); netwinder_unlock(&flags); msleep(20); } static unsigned int ds1620_in(int cmd, int bits) { unsigned long flags; unsigned int value; netwinder_lock(&flags); netwinder_ds1620_set_clk(1); netwinder_ds1620_set_data_dir(0); netwinder_ds1620_reset(); udelay(1); ds1620_send_bits(8, cmd); netwinder_ds1620_set_data_dir(1); value = ds1620_recv_bits(bits); netwinder_ds1620_reset(); netwinder_unlock(&flags); return value; } static int cvt_9_to_int(unsigned int val) { if (val & 0x100) val |= 0xfffffe00; return val; } static void ds1620_write_state(struct therm *therm) { ds1620_out(THERM_WRITE_CONFIG, 8, CFG_CPU); ds1620_out(THERM_WRITE_TL, 9, therm->lo); ds1620_out(THERM_WRITE_TH, 9, therm->hi); ds1620_out(THERM_START_CONVERT, 0, 0); } static void ds1620_read_state(struct therm *therm) { therm->lo = cvt_9_to_int(ds1620_in(THERM_READ_TL, 9)); therm->hi = cvt_9_to_int(ds1620_in(THERM_READ_TH, 9)); } static int ds1620_open(struct inode *inode, struct file *file) { return stream_open(inode, file); } static ssize_t ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr) { signed int cur_temp; signed char cur_temp_degF; cur_temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)) >> 1; /* convert to Fahrenheit, as per wdt.c */ cur_temp_degF = (cur_temp * 9) / 5 + 32; if (copy_to_user(buf, &cur_temp_degF, 1)) return -EFAULT; return 1; } static int ds1620_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct therm therm; union { struct therm __user *therm; int __user *i; } uarg; int i; uarg.i = (int __user *)arg; switch(cmd) { case CMD_SET_THERMOSTATE: case CMD_SET_THERMOSTATE2: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (cmd == CMD_SET_THERMOSTATE) { if (get_user(therm.hi, uarg.i)) return -EFAULT; therm.lo = therm.hi - 3; } else { if (copy_from_user(&therm, uarg.therm, sizeof(therm))) return -EFAULT; } therm.lo <<= 1; therm.hi <<= 1; ds1620_write_state(&therm); break; case CMD_GET_THERMOSTATE: case CMD_GET_THERMOSTATE2: ds1620_read_state(&therm); therm.lo >>= 1; therm.hi >>= 1; if (cmd == CMD_GET_THERMOSTATE) { if (put_user(therm.hi, uarg.i)) return -EFAULT; } else { if (copy_to_user(uarg.therm, &therm, sizeof(therm))) return -EFAULT; } break; case CMD_GET_TEMPERATURE: case CMD_GET_TEMPERATURE2: i = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)); if (cmd == CMD_GET_TEMPERATURE) i >>= 1; return put_user(i, uarg.i) ? -EFAULT : 0; case CMD_GET_STATUS: i = ds1620_in(THERM_READ_CONFIG, 8) & 0xe3; return put_user(i, uarg.i) ? -EFAULT : 0; case CMD_GET_FAN: i = netwinder_get_fan(); return put_user(i, uarg.i) ? 
-EFAULT : 0; case CMD_SET_FAN: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(i, uarg.i)) return -EFAULT; netwinder_set_fan(i); break; default: return -ENOIOCTLCMD; } return 0; } static long ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&ds1620_mutex); ret = ds1620_ioctl(file, cmd, arg); mutex_unlock(&ds1620_mutex); return ret; } #ifdef THERM_USE_PROC static int ds1620_proc_therm_show(struct seq_file *m, void *v) { struct therm th; int temp; ds1620_read_state(&th); temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)); seq_printf(m, "Thermostat: HI %i.%i, LOW %i.%i; temperature: %i.%i C, fan %s\n", th.hi >> 1, th.hi & 1 ? 5 : 0, th.lo >> 1, th.lo & 1 ? 5 : 0, temp >> 1, temp & 1 ? 5 : 0, fan_state[netwinder_get_fan()]); return 0; } #endif static const struct file_operations ds1620_fops = { .owner = THIS_MODULE, .open = ds1620_open, .read = ds1620_read, .unlocked_ioctl = ds1620_unlocked_ioctl, .llseek = no_llseek, }; static struct miscdevice ds1620_miscdev = { TEMP_MINOR, "temp", &ds1620_fops }; static int __init ds1620_init(void) { int ret; struct therm th, th_start; if (!machine_is_netwinder()) return -ENODEV; ds1620_out(THERM_RESET, 0, 0); ds1620_out(THERM_WRITE_CONFIG, 8, CFG_CPU); ds1620_out(THERM_START_CONVERT, 0, 0); /* * Trigger the fan to start by setting * temperature high point low. This kicks * the fan into action. */ ds1620_read_state(&th); th_start.lo = 0; th_start.hi = 1; ds1620_write_state(&th_start); msleep(2000); ds1620_write_state(&th); ret = misc_register(&ds1620_miscdev); if (ret < 0) return ret; #ifdef THERM_USE_PROC if (!proc_create_single("therm", 0, NULL, ds1620_proc_therm_show)) printk(KERN_ERR "therm: unable to register /proc/therm\n"); #endif ds1620_read_state(&th); ret = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)); printk(KERN_INFO "Thermostat: high %i.%i, low %i.%i, " "current %i.%i C, fan %s.\n", th.hi >> 1, th.hi & 1 ? 5 : 0, th.lo >> 1, th.lo & 1 ? 5 : 0, ret >> 1, ret & 1 ? 5 : 0, fan_state[netwinder_get_fan()]); return 0; } static void __exit ds1620_exit(void) { #ifdef THERM_USE_PROC remove_proc_entry("therm", NULL); #endif misc_deregister(&ds1620_miscdev); } module_init(ds1620_init); module_exit(ds1620_exit); MODULE_LICENSE("GPL");
linux-master
drivers/char/ds1620.c
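The ds1620 driver above exposes the sensor both through ioctl() commands and through a one-byte read() that returns the temperature in degrees Fahrenheit. A minimal userspace sketch of the read() path follows; the /dev/temp node name matches the miscdevice registration in the code, but its presence and path on a given system is an assumption.

/* Hedged userspace sketch: read the NetWinder CPU temperature via the
 * ds1620 driver's read() interface. ds1620_read() copies exactly one
 * byte, the temperature converted to degrees Fahrenheit. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	signed char temp_f;	/* driver returns a signed degF value */
	int fd = open("/dev/temp", O_RDONLY);	/* assumed node name */

	if (fd < 0) {
		perror("open /dev/temp");
		return 1;
	}
	if (read(fd, &temp_f, 1) != 1) {	/* one byte per read, per ds1620_read() */
		perror("read");
		close(fd);
		return 1;
	}
	printf("CPU temperature: %d degF\n", temp_f);
	close(fd);
	return 0;
}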
// SPDX-License-Identifier: GPL-2.0 /* * linux/drivers/char/mem.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Added devfs support. * Jan-11-1998, C. Scott Ananian <[email protected]> * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <[email protected]> */ #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mman.h> #include <linux/random.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/capability.h> #include <linux/ptrace.h> #include <linux/device.h> #include <linux/highmem.h> #include <linux/backing-dev.h> #include <linux/shmem_fs.h> #include <linux/splice.h> #include <linux/pfn.h> #include <linux/export.h> #include <linux/io.h> #include <linux/uio.h> #include <linux/uaccess.h> #include <linux/security.h> #ifdef CONFIG_IA64 # include <linux/efi.h> #endif #define DEVMEM_MINOR 1 #define DEVPORT_MINOR 4 static inline unsigned long size_inside_page(unsigned long start, unsigned long size) { unsigned long sz; sz = PAGE_SIZE - (start & (PAGE_SIZE - 1)); return min(sz, size); } #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE static inline int valid_phys_addr_range(phys_addr_t addr, size_t count) { return addr + count <= __pa(high_memory); } static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return 1; } #endif #ifdef CONFIG_STRICT_DEVMEM static inline int page_is_allowed(unsigned long pfn) { return devmem_is_allowed(pfn); } static inline int range_is_allowed(unsigned long pfn, unsigned long size) { u64 from = ((u64)pfn) << PAGE_SHIFT; u64 to = from + size; u64 cursor = from; while (cursor < to) { if (!devmem_is_allowed(pfn)) return 0; cursor += PAGE_SIZE; pfn++; } return 1; } #else static inline int page_is_allowed(unsigned long pfn) { return 1; } static inline int range_is_allowed(unsigned long pfn, unsigned long size) { return 1; } #endif static inline bool should_stop_iteration(void) { if (need_resched()) cond_resched(); return signal_pending(current); } /* * This funcion reads the *physical* memory. The f_pos points directly to the * memory location. */ static ssize_t read_mem(struct file *file, char __user *buf, size_t count, loff_t *ppos) { phys_addr_t p = *ppos; ssize_t read, sz; void *ptr; char *bounce; int err; if (p != *ppos) return 0; if (!valid_phys_addr_range(p, count)) return -EFAULT; read = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { sz = size_inside_page(p, count); if (sz > 0) { if (clear_user(buf, sz)) return -EFAULT; buf += sz; p += sz; count -= sz; read += sz; } } #endif bounce = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!bounce) return -ENOMEM; while (count > 0) { unsigned long remaining; int allowed, probe; sz = size_inside_page(p, count); err = -EPERM; allowed = page_is_allowed(p >> PAGE_SHIFT); if (!allowed) goto failed; err = -EFAULT; if (allowed == 2) { /* Show zeros for restricted memory. */ remaining = clear_user(buf, sz); } else { /* * On ia64 if a page has been mapped somewhere as * uncached, then it must also be accessed uncached * by the kernel or data corruption may occur. 
*/ ptr = xlate_dev_mem_ptr(p); if (!ptr) goto failed; probe = copy_from_kernel_nofault(bounce, ptr, sz); unxlate_dev_mem_ptr(p, ptr); if (probe) goto failed; remaining = copy_to_user(buf, bounce, sz); } if (remaining) goto failed; buf += sz; p += sz; count -= sz; read += sz; if (should_stop_iteration()) break; } kfree(bounce); *ppos += read; return read; failed: kfree(bounce); return err; } static ssize_t write_mem(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { phys_addr_t p = *ppos; ssize_t written, sz; unsigned long copied; void *ptr; if (p != *ppos) return -EFBIG; if (!valid_phys_addr_range(p, count)) return -EFAULT; written = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { sz = size_inside_page(p, count); /* Hmm. Do something? */ buf += sz; p += sz; count -= sz; written += sz; } #endif while (count > 0) { int allowed; sz = size_inside_page(p, count); allowed = page_is_allowed(p >> PAGE_SHIFT); if (!allowed) return -EPERM; /* Skip actual writing when a page is marked as restricted. */ if (allowed == 1) { /* * On ia64 if a page has been mapped somewhere as * uncached, then it must also be accessed uncached * by the kernel or data corruption may occur. */ ptr = xlate_dev_mem_ptr(p); if (!ptr) { if (written) break; return -EFAULT; } copied = copy_from_user(ptr, buf, sz); unxlate_dev_mem_ptr(p, ptr); if (copied) { written += sz - copied; if (written) break; return -EFAULT; } } buf += sz; p += sz; count -= sz; written += sz; if (should_stop_iteration()) break; } *ppos += written; return written; } int __weak phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { return 1; } #ifndef __HAVE_PHYS_MEM_ACCESS_PROT /* * Architectures vary in how they handle caching for addresses * outside of main memory. * */ #ifdef pgprot_noncached static int uncached_access(struct file *file, phys_addr_t addr) { #if defined(CONFIG_IA64) /* * On ia64, we ignore O_DSYNC because we cannot tolerate memory * attribute aliases. */ return !(efi_mem_attributes(addr) & EFI_MEMORY_WB); #else /* * Accessing memory above the top the kernel knows about or through a * file pointer * that was marked O_DSYNC will be done non-cached. 
*/ if (file->f_flags & O_DSYNC) return 1; return addr >= __pa(high_memory); #endif } #endif static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { #ifdef pgprot_noncached phys_addr_t offset = pfn << PAGE_SHIFT; if (uncached_access(file, offset)) return pgprot_noncached(vma_prot); #endif return vma_prot; } #endif #ifndef CONFIG_MMU static unsigned long get_unmapped_area_mem(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { if (!valid_mmap_phys_addr_range(pgoff, len)) return (unsigned long) -EINVAL; return pgoff << PAGE_SHIFT; } /* permit direct mmap, for read, write or exec */ static unsigned memory_mmap_capabilities(struct file *file) { return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC; } static unsigned zero_mmap_capabilities(struct file *file) { return NOMMU_MAP_COPY; } /* can't do an in-place private mapping if there's no MMU */ static inline int private_mapping_ok(struct vm_area_struct *vma) { return is_nommu_shared_mapping(vma->vm_flags); } #else static inline int private_mapping_ok(struct vm_area_struct *vma) { return 1; } #endif static const struct vm_operations_struct mmap_mem_ops = { #ifdef CONFIG_HAVE_IOREMAP_PROT .access = generic_access_phys #endif }; static int mmap_mem(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; /* Does it even fit in phys_addr_t? */ if (offset >> PAGE_SHIFT != vma->vm_pgoff) return -EINVAL; /* It's illegal to wrap around the end of the physical address space. */ if (offset + (phys_addr_t)size - 1 < offset) return -EINVAL; if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) return -EINVAL; if (!private_mapping_ok(vma)) return -ENOSYS; if (!range_is_allowed(vma->vm_pgoff, size)) return -EPERM; if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size, &vma->vm_page_prot)) return -EINVAL; vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, size, vma->vm_page_prot); vma->vm_ops = &mmap_mem_ops; /* Remap-pfn-range will mark the range VM_IO */ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, vma->vm_page_prot)) { return -EAGAIN; } return 0; } static ssize_t read_port(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long i = *ppos; char __user *tmp = buf; if (!access_ok(buf, count)) return -EFAULT; while (count-- > 0 && i < 65536) { if (__put_user(inb(i), tmp) < 0) return -EFAULT; i++; tmp++; } *ppos = i; return tmp-buf; } static ssize_t write_port(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long i = *ppos; const char __user *tmp = buf; if (!access_ok(buf, count)) return -EFAULT; while (count-- > 0 && i < 65536) { char c; if (__get_user(c, tmp)) { if (tmp > buf) break; return -EFAULT; } outb(c, i); i++; tmp++; } *ppos = i; return tmp-buf; } static ssize_t read_null(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return 0; } static ssize_t write_null(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return count; } static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to) { return 0; } static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from) { size_t count = iov_iter_count(from); iov_iter_advance(from, count); return count; } static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf, struct splice_desc *sd) { return sd->len; } static ssize_t 
splice_write_null(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null); } static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags) { return 0; } static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter) { size_t written = 0; while (iov_iter_count(iter)) { size_t chunk = iov_iter_count(iter), n; if (chunk > PAGE_SIZE) chunk = PAGE_SIZE; /* Just for latency reasons */ n = iov_iter_zero(chunk, iter); if (!n && iov_iter_count(iter)) return written ? written : -EFAULT; written += n; if (signal_pending(current)) return written ? written : -ERESTARTSYS; if (!need_resched()) continue; if (iocb->ki_flags & IOCB_NOWAIT) return written ? written : -EAGAIN; cond_resched(); } return written; } static ssize_t read_zero(struct file *file, char __user *buf, size_t count, loff_t *ppos) { size_t cleared = 0; while (count) { size_t chunk = min_t(size_t, count, PAGE_SIZE); size_t left; left = clear_user(buf + cleared, chunk); if (unlikely(left)) { cleared += (chunk - left); if (!cleared) return -EFAULT; break; } cleared += chunk; count -= chunk; if (signal_pending(current)) break; cond_resched(); } return cleared; } static int mmap_zero(struct file *file, struct vm_area_struct *vma) { #ifndef CONFIG_MMU return -ENOSYS; #endif if (vma->vm_flags & VM_SHARED) return shmem_zero_setup(vma); vma_set_anonymous(vma); return 0; } static unsigned long get_unmapped_area_zero(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { #ifdef CONFIG_MMU if (flags & MAP_SHARED) { /* * mmap_zero() will call shmem_zero_setup() to create a file, * so use shmem's get_unmapped_area in case it can be huge; * and pass NULL for file as in mmap.c's get_unmapped_area(), * so as not to confuse shmem with our handle on "/dev/zero". */ return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags); } /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); #else return -ENOSYS; #endif } static ssize_t write_full(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return -ENOSPC; } /* * Special lseek() function for /dev/null and /dev/zero. Most notably, you * can fopen() both devices with "a" now. This was previously impossible. * -- SRB. */ static loff_t null_lseek(struct file *file, loff_t offset, int orig) { return file->f_pos = 0; } /* * The memory devices use the full 32/64 bits of the offset, and so we cannot * check against negative addresses: they are ok. The return value is weird, * though, in that case (0). * * also note that seeking relative to the "end of file" isn't supported: * it has no meaning, so it returns -EINVAL. 
*/ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) { loff_t ret; inode_lock(file_inode(file)); switch (orig) { case SEEK_CUR: offset += file->f_pos; fallthrough; case SEEK_SET: /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ if ((unsigned long long)offset >= -MAX_ERRNO) { ret = -EOVERFLOW; break; } file->f_pos = offset; ret = file->f_pos; force_successful_syscall_return(); break; default: ret = -EINVAL; } inode_unlock(file_inode(file)); return ret; } static int open_port(struct inode *inode, struct file *filp) { int rc; if (!capable(CAP_SYS_RAWIO)) return -EPERM; rc = security_locked_down(LOCKDOWN_DEV_MEM); if (rc) return rc; if (iminor(inode) != DEVMEM_MINOR) return 0; /* * Use a unified address space to have a single point to manage * revocations when drivers want to take over a /dev/mem mapped * range. */ filp->f_mapping = iomem_get_mapping(); return 0; } #define zero_lseek null_lseek #define full_lseek null_lseek #define write_zero write_null #define write_iter_zero write_iter_null #define open_mem open_port static const struct file_operations __maybe_unused mem_fops = { .llseek = memory_lseek, .read = read_mem, .write = write_mem, .mmap = mmap_mem, .open = open_mem, #ifndef CONFIG_MMU .get_unmapped_area = get_unmapped_area_mem, .mmap_capabilities = memory_mmap_capabilities, #endif }; static const struct file_operations null_fops = { .llseek = null_lseek, .read = read_null, .write = write_null, .read_iter = read_iter_null, .write_iter = write_iter_null, .splice_write = splice_write_null, .uring_cmd = uring_cmd_null, }; static const struct file_operations __maybe_unused port_fops = { .llseek = memory_lseek, .read = read_port, .write = write_port, .open = open_port, }; static const struct file_operations zero_fops = { .llseek = zero_lseek, .write = write_zero, .read_iter = read_iter_zero, .read = read_zero, .write_iter = write_iter_zero, .mmap = mmap_zero, .get_unmapped_area = get_unmapped_area_zero, #ifndef CONFIG_MMU .mmap_capabilities = zero_mmap_capabilities, #endif }; static const struct file_operations full_fops = { .llseek = full_lseek, .read_iter = read_iter_zero, .write = write_full, }; static const struct memdev { const char *name; const struct file_operations *fops; fmode_t fmode; umode_t mode; } devlist[] = { #ifdef CONFIG_DEVMEM [DEVMEM_MINOR] = { "mem", &mem_fops, FMODE_UNSIGNED_OFFSET, 0 }, #endif [3] = { "null", &null_fops, FMODE_NOWAIT, 0666 }, #ifdef CONFIG_DEVPORT [4] = { "port", &port_fops, 0, 0 }, #endif [5] = { "zero", &zero_fops, FMODE_NOWAIT, 0666 }, [7] = { "full", &full_fops, 0, 0666 }, [8] = { "random", &random_fops, FMODE_NOWAIT, 0666 }, [9] = { "urandom", &urandom_fops, FMODE_NOWAIT, 0666 }, #ifdef CONFIG_PRINTK [11] = { "kmsg", &kmsg_fops, 0, 0644 }, #endif }; static int memory_open(struct inode *inode, struct file *filp) { int minor; const struct memdev *dev; minor = iminor(inode); if (minor >= ARRAY_SIZE(devlist)) return -ENXIO; dev = &devlist[minor]; if (!dev->fops) return -ENXIO; filp->f_op = dev->fops; filp->f_mode |= dev->fmode; if (dev->fops->open) return dev->fops->open(inode, filp); return 0; } static const struct file_operations memory_fops = { .open = memory_open, .llseek = noop_llseek, }; static char *mem_devnode(const struct device *dev, umode_t *mode) { if (mode && devlist[MINOR(dev->devt)].mode) *mode = devlist[MINOR(dev->devt)].mode; return NULL; } static const struct class mem_class = { .name = "mem", .devnode = mem_devnode, }; static int __init chr_dev_init(void) { int retval; int minor; if 
(register_chrdev(MEM_MAJOR, "mem", &memory_fops)) printk("unable to get major %d for memory devs\n", MEM_MAJOR); retval = class_register(&mem_class); if (retval) return retval; for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { if (!devlist[minor].name) continue; /* * Create /dev/port? */ if ((minor == DEVPORT_MINOR) && !arch_has_dev_port()) continue; device_create(&mem_class, NULL, MKDEV(MEM_MAJOR, minor), NULL, devlist[minor].name); } return tty_init(); } fs_initcall(chr_dev_init);
linux-master
drivers/char/mem.c
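Of the devices implemented in mem.c, the /dev/zero mmap path (mmap_zero() above) is the easiest to exercise from userspace: a MAP_SHARED mapping is backed by shmem_zero_setup(), while MAP_PRIVATE becomes an ordinary anonymous mapping. A hedged sketch:

/* Userspace sketch: map one page of zero-filled shared memory through
 * /dev/zero, exercising the mmap_zero() path shown above. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/zero", O_RDWR);

	if (fd < 0) {
		perror("open /dev/zero");
		return 1;
	}
	/* MAP_SHARED goes through shmem_zero_setup() in the driver. */
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	close(fd);			/* the mapping survives the close */
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("first byte before write: %d\n", p[0]);	/* always 0 */
	strcpy(p, "hello");
	printf("after write: %s\n", p);
	munmap(p, pagesz);
	return 0;
}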
/* * Timer device implementation for SGI UV platform. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2009 Silicon Graphics, Inc. All rights reserved. * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/ioctl.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/mmtimer.h> #include <linux/miscdevice.h> #include <linux/posix-timers.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/math64.h> #include <asm/genapic.h> #include <asm/uv/uv_hub.h> #include <asm/uv/bios.h> #include <asm/uv/uv.h> MODULE_AUTHOR("Dimitri Sivanich <[email protected]>"); MODULE_DESCRIPTION("SGI UV Memory Mapped RTC Timer"); MODULE_LICENSE("GPL"); /* name of the device, usually in /dev */ #define UV_MMTIMER_NAME "mmtimer" #define UV_MMTIMER_DESC "SGI UV Memory Mapped RTC Timer" #define UV_MMTIMER_VERSION "1.0" static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma); /* * Period in femtoseconds (10^-15 s) */ static unsigned long uv_mmtimer_femtoperiod; static const struct file_operations uv_mmtimer_fops = { .owner = THIS_MODULE, .mmap = uv_mmtimer_mmap, .unlocked_ioctl = uv_mmtimer_ioctl, .llseek = noop_llseek, }; /** * uv_mmtimer_ioctl - ioctl interface for /dev/uv_mmtimer * @file: file structure for the device * @cmd: command to execute * @arg: optional argument to command * * Executes the command specified by @cmd. Returns 0 for success, < 0 for * failure. * * Valid commands: * * %MMTIMER_GETOFFSET - Should return the offset (relative to the start * of the page where the registers are mapped) for the counter in question. * * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15) * seconds * * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address * specified by @arg * * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter * * %MMTIMER_MMAPAVAIL - Returns 1 if registers can be mmap'd into userspace * * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it * in the address specified by @arg. */ static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = 0; switch (cmd) { case MMTIMER_GETOFFSET: /* offset of the counter */ /* * Starting with HUB rev 2.0, the UV RTC register is * replicated across all cachelines of it's own page. * This allows faster simultaneous reads from a given socket. * * The offset returned is in 64 bit units. 
*/ if (uv_get_min_hub_revision_id() == 1) ret = 0; else ret = ((uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE) / 8; break; case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */ if (copy_to_user((unsigned long __user *)arg, &uv_mmtimer_femtoperiod, sizeof(unsigned long))) ret = -EFAULT; break; case MMTIMER_GETFREQ: /* frequency in Hz */ if (copy_to_user((unsigned long __user *)arg, &sn_rtc_cycles_per_second, sizeof(unsigned long))) ret = -EFAULT; break; case MMTIMER_GETBITS: /* number of bits in the clock */ ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK); break; case MMTIMER_MMAPAVAIL: ret = 1; break; case MMTIMER_GETCOUNTER: if (copy_to_user((unsigned long __user *)arg, (unsigned long *)uv_local_mmr_address(UVH_RTC), sizeof(unsigned long))) ret = -EFAULT; break; default: ret = -ENOTTY; break; } return ret; } /** * uv_mmtimer_mmap - maps the clock's registers into userspace * @file: file structure for the device * @vma: VMA to map the registers into * * Calls remap_pfn_range() to map the clock's registers into * the calling process' address space. */ static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma) { unsigned long uv_mmtimer_addr; if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; if (vma->vm_flags & VM_WRITE) return -EPERM; if (PAGE_SIZE > (1 << 16)) return -ENOSYS; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); uv_mmtimer_addr = UV_LOCAL_MMR_BASE | UVH_RTC; uv_mmtimer_addr &= ~(PAGE_SIZE - 1); uv_mmtimer_addr &= 0xfffffffffffffffUL; if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot)) { printk(KERN_ERR "remap_pfn_range failed in uv_mmtimer_mmap\n"); return -EAGAIN; } return 0; } static struct miscdevice uv_mmtimer_miscdev = { MISC_DYNAMIC_MINOR, UV_MMTIMER_NAME, &uv_mmtimer_fops }; /** * uv_mmtimer_init - device initialization routine * * Does initial setup for the uv_mmtimer device. */ static int __init uv_mmtimer_init(void) { if (!is_uv_system()) { printk(KERN_ERR "%s: Hardware unsupported\n", UV_MMTIMER_NAME); return -1; } /* * Sanity check the cycles/sec variable */ if (sn_rtc_cycles_per_second < 100000) { printk(KERN_ERR "%s: unable to determine clock frequency\n", UV_MMTIMER_NAME); return -1; } uv_mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second / 2) / sn_rtc_cycles_per_second; if (misc_register(&uv_mmtimer_miscdev)) { printk(KERN_ERR "%s: failed to register device\n", UV_MMTIMER_NAME); return -1; } printk(KERN_INFO "%s: v%s, %ld MHz\n", UV_MMTIMER_DESC, UV_MMTIMER_VERSION, sn_rtc_cycles_per_second/(unsigned long)1E6); return 0; } module_init(uv_mmtimer_init);
linux-master
drivers/char/uv_mmtimer.c
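A sketch of how userspace might query the UV RTC through this driver, matching the cases in uv_mmtimer_ioctl() above. The MMTIMER_* numbers come from the <linux/mmtimer.h> UAPI header; the /dev/mmtimer path follows UV_MMTIMER_NAME but is an assumption for any particular system.

/* Hedged sketch: query frequency, resolution and mmap availability
 * from the uv_mmtimer misc device. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mmtimer.h>

int main(void)
{
	unsigned long freq, res;
	int fd = open("/dev/mmtimer", O_RDONLY);	/* assumed node name */

	if (fd < 0) {
		perror("open /dev/mmtimer");
		return 1;
	}
	if (ioctl(fd, MMTIMER_GETFREQ, &freq) == 0)
		printf("RTC frequency: %lu Hz\n", freq);
	if (ioctl(fd, MMTIMER_GETRES, &res) == 0)
		printf("resolution: %lu fs\n", res);	/* femtoseconds, per the driver */
	printf("mmap available: %d\n", ioctl(fd, MMTIMER_MMAPAVAIL, 0));
	close(fd);
	return 0;
}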
/* * The DSP56001 Device Driver, saviour of the Free World(tm) * * Authors: Fredrik Noring <[email protected]> * lars brinkhoff <[email protected]> * Tomas Berndtsson <[email protected]> * * First version May 1996 * * History: * 97-01-29 Tomas Berndtsson, * Integrated with Linux 2.1.21 kernel sources. * 97-02-15 Tomas Berndtsson, * Fixed for kernel 2.1.26 * * BUGS: * Hmm... there must be something here :) * * Copyright (C) 1996,1997 Fredrik Noring, lars brinkhoff & Tomas Berndtsson * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/major.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/delay.h> /* guess what */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/device.h> #include <linux/mutex.h> #include <linux/firmware.h> #include <linux/platform_device.h> #include <linux/uaccess.h> /* For put_user and get_user */ #include <asm/atarihw.h> #include <asm/traps.h> #include <asm/dsp56k.h> /* minor devices */ #define DSP56K_DEV_56001 0 /* The only device so far */ #define TIMEOUT 10 /* Host port timeout in number of tries */ #define MAXIO 2048 /* Maximum number of words before sleep */ #define DSP56K_MAX_BINARY_LENGTH (3*64*1024) #define DSP56K_TX_INT_ON dsp56k_host_interface.icr |= DSP56K_ICR_TREQ #define DSP56K_RX_INT_ON dsp56k_host_interface.icr |= DSP56K_ICR_RREQ #define DSP56K_TX_INT_OFF dsp56k_host_interface.icr &= ~DSP56K_ICR_TREQ #define DSP56K_RX_INT_OFF dsp56k_host_interface.icr &= ~DSP56K_ICR_RREQ #define DSP56K_TRANSMIT (dsp56k_host_interface.isr & DSP56K_ISR_TXDE) #define DSP56K_RECEIVE (dsp56k_host_interface.isr & DSP56K_ISR_RXDF) #define handshake(count, maxio, timeout, ENABLE, f) \ { \ long i, t, m; \ while (count > 0) { \ m = min_t(unsigned long, count, maxio); \ for (i = 0; i < m; i++) { \ for (t = 0; t < timeout && !ENABLE; t++) \ msleep(20); \ if(!ENABLE) \ return -EIO; \ f; \ } \ count -= m; \ if (m == maxio) msleep(20); \ } \ } #define tx_wait(n) \ { \ int t; \ for(t = 0; t < n && !DSP56K_TRANSMIT; t++) \ msleep(10); \ if(!DSP56K_TRANSMIT) { \ return -EIO; \ } \ } #define rx_wait(n) \ { \ int t; \ for(t = 0; t < n && !DSP56K_RECEIVE; t++) \ msleep(10); \ if(!DSP56K_RECEIVE) { \ return -EIO; \ } \ } static DEFINE_MUTEX(dsp56k_mutex); static struct dsp56k_device { unsigned long in_use; long maxio, timeout; int tx_wsize, rx_wsize; } dsp56k; static const struct class dsp56k_class = { .name = "dsp56k", }; static int dsp56k_reset(void) { u_char status; /* Power down the DSP */ sound_ym.rd_data_reg_sel = 14; status = sound_ym.rd_data_reg_sel & 0xef; sound_ym.wd_data = status; sound_ym.wd_data = status | 0x10; udelay(10); /* Power up the DSP */ sound_ym.rd_data_reg_sel = 14; sound_ym.wd_data = sound_ym.rd_data_reg_sel & 0xef; return 0; } static int dsp56k_upload(u_char __user *bin, int len) { struct platform_device *pdev; const struct firmware *fw; const char fw_name[] = "dsp56k/bootstrap.bin"; int err; int i; dsp56k_reset(); pdev = platform_device_register_simple("dsp56k", 0, NULL, 0); if (IS_ERR(pdev)) { printk(KERN_ERR "Failed to register device for \"%s\"\n", fw_name); return -EINVAL; } err = request_firmware(&fw, fw_name, &pdev->dev); platform_device_unregister(pdev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fw_name, err); return err; } if (fw->size % 3) { printk(KERN_ERR "Bogus length %d in image \"%s\"\n", fw->size, fw_name); 
release_firmware(fw); return -EINVAL; } for (i = 0; i < fw->size; i = i + 3) { /* tx_wait(10); */ dsp56k_host_interface.data.b[1] = fw->data[i]; dsp56k_host_interface.data.b[2] = fw->data[i + 1]; dsp56k_host_interface.data.b[3] = fw->data[i + 2]; } release_firmware(fw); for (; i < 512; i++) { /* tx_wait(10); */ dsp56k_host_interface.data.b[1] = 0; dsp56k_host_interface.data.b[2] = 0; dsp56k_host_interface.data.b[3] = 0; } for (i = 0; i < len; i++) { tx_wait(10); get_user(dsp56k_host_interface.data.b[1], bin++); get_user(dsp56k_host_interface.data.b[2], bin++); get_user(dsp56k_host_interface.data.b[3], bin++); } tx_wait(10); dsp56k_host_interface.data.l = 3; /* Magic execute */ return 0; } static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct inode *inode = file_inode(file); int dev = iminor(inode) & 0x0f; switch(dev) { case DSP56K_DEV_56001: { long n; /* Don't do anything if nothing is to be done */ if (!count) return 0; n = 0; switch (dsp56k.rx_wsize) { case 1: /* 8 bit */ { handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, put_user(dsp56k_host_interface.data.b[3], buf+n++)); return n; } case 2: /* 16 bit */ { short __user *data; count /= 2; data = (short __user *) buf; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, put_user(dsp56k_host_interface.data.w[1], data+n++)); return 2*n; } case 3: /* 24 bit */ { count /= 3; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, put_user(dsp56k_host_interface.data.b[1], buf+n++); put_user(dsp56k_host_interface.data.b[2], buf+n++); put_user(dsp56k_host_interface.data.b[3], buf+n++)); return 3*n; } case 4: /* 32 bit */ { long __user *data; count /= 4; data = (long __user *) buf; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, put_user(dsp56k_host_interface.data.l, data+n++)); return 4*n; } } return -EFAULT; } default: printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); return -ENXIO; } } static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct inode *inode = file_inode(file); int dev = iminor(inode) & 0x0f; switch(dev) { case DSP56K_DEV_56001: { long n; /* Don't do anything if nothing is to be done */ if (!count) return 0; n = 0; switch (dsp56k.tx_wsize) { case 1: /* 8 bit */ { handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, get_user(dsp56k_host_interface.data.b[3], buf+n++)); return n; } case 2: /* 16 bit */ { const short __user *data; count /= 2; data = (const short __user *)buf; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, get_user(dsp56k_host_interface.data.w[1], data+n++)); return 2*n; } case 3: /* 24 bit */ { count /= 3; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, get_user(dsp56k_host_interface.data.b[1], buf+n++); get_user(dsp56k_host_interface.data.b[2], buf+n++); get_user(dsp56k_host_interface.data.b[3], buf+n++)); return 3*n; } case 4: /* 32 bit */ { const long __user *data; count /= 4; data = (const long __user *)buf; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, get_user(dsp56k_host_interface.data.l, data+n++)); return 4*n; } } return -EFAULT; } default: printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); return -ENXIO; } } static long dsp56k_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int dev = iminor(file_inode(file)) & 0x0f; void __user *argp = (void __user *)arg; switch(dev) { case DSP56K_DEV_56001: switch(cmd) { case DSP56K_UPLOAD: { char __user *bin; int r, len; 
struct dsp56k_upload __user *binary = argp; if(get_user(len, &binary->len) < 0) return -EFAULT; if(get_user(bin, &binary->bin) < 0) return -EFAULT; if (len <= 0) { return -EINVAL; /* nothing to upload?!? */ } if (len > DSP56K_MAX_BINARY_LENGTH) { return -EINVAL; } mutex_lock(&dsp56k_mutex); r = dsp56k_upload(bin, len); mutex_unlock(&dsp56k_mutex); if (r < 0) { return r; } break; } case DSP56K_SET_TX_WSIZE: if (arg > 4 || arg < 1) return -EINVAL; mutex_lock(&dsp56k_mutex); dsp56k.tx_wsize = (int) arg; mutex_unlock(&dsp56k_mutex); break; case DSP56K_SET_RX_WSIZE: if (arg > 4 || arg < 1) return -EINVAL; mutex_lock(&dsp56k_mutex); dsp56k.rx_wsize = (int) arg; mutex_unlock(&dsp56k_mutex); break; case DSP56K_HOST_FLAGS: { int dir, out, status; struct dsp56k_host_flags __user *hf = argp; if(get_user(dir, &hf->dir) < 0) return -EFAULT; if(get_user(out, &hf->out) < 0) return -EFAULT; mutex_lock(&dsp56k_mutex); if ((dir & 0x1) && (out & 0x1)) dsp56k_host_interface.icr |= DSP56K_ICR_HF0; else if (dir & 0x1) dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0; if ((dir & 0x2) && (out & 0x2)) dsp56k_host_interface.icr |= DSP56K_ICR_HF1; else if (dir & 0x2) dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1; status = 0; if (dsp56k_host_interface.icr & DSP56K_ICR_HF0) status |= 0x1; if (dsp56k_host_interface.icr & DSP56K_ICR_HF1) status |= 0x2; if (dsp56k_host_interface.isr & DSP56K_ISR_HF2) status |= 0x4; if (dsp56k_host_interface.isr & DSP56K_ISR_HF3) status |= 0x8; mutex_unlock(&dsp56k_mutex); return put_user(status, &hf->status); } case DSP56K_HOST_CMD: if (arg > 31) return -EINVAL; mutex_lock(&dsp56k_mutex); dsp56k_host_interface.cvr = (u_char)((arg & DSP56K_CVR_HV_MASK) | DSP56K_CVR_HC); mutex_unlock(&dsp56k_mutex); break; default: return -EINVAL; } return 0; default: printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); return -ENXIO; } } /* As of 2.1.26 this should be dsp56k_poll, * but how do I then check device minor number? * Do I need this function at all??? 
*/ #if 0 static __poll_t dsp56k_poll(struct file *file, poll_table *wait) { int dev = iminor(file_inode(file)) & 0x0f; switch(dev) { case DSP56K_DEV_56001: /* poll_wait(file, ???, wait); */ return EPOLLIN | EPOLLRDNORM | EPOLLOUT; default: printk("DSP56k driver: Unknown minor device: %d\n", dev); return 0; } } #endif static int dsp56k_open(struct inode *inode, struct file *file) { int dev = iminor(inode) & 0x0f; int ret = 0; mutex_lock(&dsp56k_mutex); switch(dev) { case DSP56K_DEV_56001: if (test_and_set_bit(0, &dsp56k.in_use)) { ret = -EBUSY; goto out; } dsp56k.timeout = TIMEOUT; dsp56k.maxio = MAXIO; dsp56k.rx_wsize = dsp56k.tx_wsize = 4; DSP56K_TX_INT_OFF; DSP56K_RX_INT_OFF; /* Zero host flags */ dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0; dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1; break; default: ret = -ENODEV; } out: mutex_unlock(&dsp56k_mutex); return ret; } static int dsp56k_release(struct inode *inode, struct file *file) { int dev = iminor(inode) & 0x0f; switch(dev) { case DSP56K_DEV_56001: clear_bit(0, &dsp56k.in_use); break; default: printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); return -ENXIO; } return 0; } static const struct file_operations dsp56k_fops = { .owner = THIS_MODULE, .read = dsp56k_read, .write = dsp56k_write, .unlocked_ioctl = dsp56k_ioctl, .open = dsp56k_open, .release = dsp56k_release, .llseek = noop_llseek, }; /****** Init and module functions ******/ static const char banner[] __initconst = KERN_INFO "DSP56k driver installed\n"; static int __init dsp56k_init_driver(void) { int err; if(!MACH_IS_ATARI || !ATARIHW_PRESENT(DSP56K)) { printk("DSP56k driver: Hardware not present\n"); return -ENODEV; } if(register_chrdev(DSP56K_MAJOR, "dsp56k", &dsp56k_fops)) { printk("DSP56k driver: Unable to register driver\n"); return -ENODEV; } err = class_register(&dsp56k_class); if (err) goto out_chrdev; device_create(&dsp56k_class, NULL, MKDEV(DSP56K_MAJOR, 0), NULL, "dsp56k"); printk(banner); goto out; out_chrdev: unregister_chrdev(DSP56K_MAJOR, "dsp56k"); out: return err; } module_init(dsp56k_init_driver); static void __exit dsp56k_cleanup_driver(void) { device_destroy(&dsp56k_class, MKDEV(DSP56K_MAJOR, 0)); class_unregister(&dsp56k_class); unregister_chrdev(DSP56K_MAJOR, "dsp56k"); } module_exit(dsp56k_cleanup_driver); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("dsp56k/bootstrap.bin");
linux-master
drivers/char/dsp56k.c
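The dsp56k driver's main entry point is the DSP56K_UPLOAD ioctl, which bootstraps a program into the DSP and starts it; read()/write() then move data at the configured word size. A hedged userspace sketch follows (struct dsp56k_upload and the ioctl numbers are from the m68k <asm/dsp56k.h> header; the program buffer here is a placeholder, not real DSP56001 code):

/* Sketch: upload a (placeholder) program and select 24-bit transfers,
 * mirroring the DSP56K_UPLOAD and DSP56K_SET_*_WSIZE cases in
 * dsp56k_ioctl() above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/dsp56k.h>

int main(void)
{
	char program[3 * 16] = { 0 };	/* placeholder: 16 zeroed 24-bit words */
	struct dsp56k_upload up = { .len = sizeof(program), .bin = program };
	int fd = open("/dev/dsp56k", O_RDWR);

	if (fd < 0) {
		perror("open /dev/dsp56k");
		return 1;
	}
	if (ioctl(fd, DSP56K_UPLOAD, &up) < 0)	/* bootstraps and runs the code */
		perror("DSP56K_UPLOAD");
	/* Each subsequent read()/write() then moves 3 bytes per DSP word. */
	if (ioctl(fd, DSP56K_SET_TX_WSIZE, 3) < 0 ||
	    ioctl(fd, DSP56K_SET_RX_WSIZE, 3) < 0)
		perror("set word size");
	close(fd);
	return 0;
}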
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights * reserved. */ /* * SN Platform Special Memory (mspec) Support * * This driver exports the SN special memory (mspec) facility to user * processes. * There are two types of memory made available thru this driver: * uncached and cached. * * Uncached are used for memory write combining feature of the ia64 * cpu. * * Cached are used for areas of memory that are used as cached addresses * on our partition and used as uncached addresses from other partitions. * Due to a design constraint of the SN2 Shub, you can not have processors * on the same FSB perform both a cached and uncached reference to the * same cache line. These special memory cached regions prevent the * kernel from ever dropping in a TLB entry and therefore prevent the * processor from ever speculating a cache line from this page. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/miscdevice.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/numa.h> #include <linux/refcount.h> #include <asm/page.h> #include <linux/atomic.h> #include <asm/tlbflush.h> #include <asm/uncached.h> #define CACHED_ID "Cached," #define UNCACHED_ID "Uncached" #define REVISION "4.0" #define MSPEC_BASENAME "mspec" /* * Page types allocated by the device. */ enum mspec_page_type { MSPEC_CACHED = 2, MSPEC_UNCACHED }; /* * One of these structures is allocated when an mspec region is mmaped. The * structure is pointed to by the vma->vm_private_data field in the vma struct. * This structure is used to record the addresses of the mspec pages. * This structure is shared by all vma's that are split off from the * original vma when split_vma()'s are done. * * The refcnt is incremented atomically because mm->mmap_lock does not * protect in fork case where multiple tasks share the vma_data. */ struct vma_data { refcount_t refcnt; /* Number of vmas sharing the data. */ spinlock_t lock; /* Serialize access to this structure. */ int count; /* Number of pages allocated. */ enum mspec_page_type type; /* Type of pages allocated. */ unsigned long vm_start; /* Original (unsplit) base. */ unsigned long vm_end; /* Original (unsplit) end. */ unsigned long maddr[]; /* Array of MSPEC addresses. */ }; /* * mspec_open * * Called when a device mapping is created by a means other than mmap * (via fork, munmap, etc.). Increments the reference count on the * underlying mspec data so it is not freed prematurely. */ static void mspec_open(struct vm_area_struct *vma) { struct vma_data *vdata; vdata = vma->vm_private_data; refcount_inc(&vdata->refcnt); } /* * mspec_close * * Called when unmapping a device mapping. Frees all mspec pages * belonging to all the vma's sharing this vma_data structure. */ static void mspec_close(struct vm_area_struct *vma) { struct vma_data *vdata; int index, last_index; unsigned long my_page; vdata = vma->vm_private_data; if (!refcount_dec_and_test(&vdata->refcnt)) return; last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT; for (index = 0; index < last_index; index++) { if (vdata->maddr[index] == 0) continue; /* * Clear the page before sticking it back * into the pool. 
*/ my_page = vdata->maddr[index]; vdata->maddr[index] = 0; memset((char *)my_page, 0, PAGE_SIZE); uncached_free_page(my_page, 1); } kvfree(vdata); } /* * mspec_fault * * Creates a mspec page and maps it to user space. */ static vm_fault_t mspec_fault(struct vm_fault *vmf) { unsigned long paddr, maddr; unsigned long pfn; pgoff_t index = vmf->pgoff; struct vma_data *vdata = vmf->vma->vm_private_data; maddr = (volatile unsigned long) vdata->maddr[index]; if (maddr == 0) { maddr = uncached_alloc_page(numa_node_id(), 1); if (maddr == 0) return VM_FAULT_OOM; spin_lock(&vdata->lock); if (vdata->maddr[index] == 0) { vdata->count++; vdata->maddr[index] = maddr; } else { uncached_free_page(maddr, 1); maddr = vdata->maddr[index]; } spin_unlock(&vdata->lock); } paddr = maddr & ~__IA64_UNCACHED_OFFSET; pfn = paddr >> PAGE_SHIFT; return vmf_insert_pfn(vmf->vma, vmf->address, pfn); } static const struct vm_operations_struct mspec_vm_ops = { .open = mspec_open, .close = mspec_close, .fault = mspec_fault, }; /* * mspec_mmap * * Called when mmapping the device. Initializes the vma with a fault handler * and private data structure necessary to allocate, track, and free the * underlying pages. */ static int mspec_mmap(struct file *file, struct vm_area_struct *vma, enum mspec_page_type type) { struct vma_data *vdata; int pages, vdata_size; if (vma->vm_pgoff != 0) return -EINVAL; if ((vma->vm_flags & VM_SHARED) == 0) return -EINVAL; if ((vma->vm_flags & VM_WRITE) == 0) return -EPERM; pages = vma_pages(vma); vdata_size = sizeof(struct vma_data) + pages * sizeof(long); vdata = kvzalloc(vdata_size, GFP_KERNEL); if (!vdata) return -ENOMEM; vdata->vm_start = vma->vm_start; vdata->vm_end = vma->vm_end; vdata->type = type; spin_lock_init(&vdata->lock); refcount_set(&vdata->refcnt, 1); vma->vm_private_data = vdata; vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); if (vdata->type == MSPEC_UNCACHED) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &mspec_vm_ops; return 0; } static int cached_mmap(struct file *file, struct vm_area_struct *vma) { return mspec_mmap(file, vma, MSPEC_CACHED); } static int uncached_mmap(struct file *file, struct vm_area_struct *vma) { return mspec_mmap(file, vma, MSPEC_UNCACHED); } static const struct file_operations cached_fops = { .owner = THIS_MODULE, .mmap = cached_mmap, .llseek = noop_llseek, }; static struct miscdevice cached_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "mspec_cached", .fops = &cached_fops }; static const struct file_operations uncached_fops = { .owner = THIS_MODULE, .mmap = uncached_mmap, .llseek = noop_llseek, }; static struct miscdevice uncached_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "mspec_uncached", .fops = &uncached_fops }; /* * mspec_init * * Called at boot time to initialize the mspec facility. */ static int __init mspec_init(void) { int ret; ret = misc_register(&cached_miscdev); if (ret) { printk(KERN_ERR "%s: failed to register device %i\n", CACHED_ID, ret); return ret; } ret = misc_register(&uncached_miscdev); if (ret) { printk(KERN_ERR "%s: failed to register device %i\n", UNCACHED_ID, ret); misc_deregister(&cached_miscdev); return ret; } printk(KERN_INFO "%s %s initialized devices: %s %s\n", MSPEC_BASENAME, REVISION, CACHED_ID, UNCACHED_ID); return 0; } static void __exit mspec_exit(void) { misc_deregister(&uncached_miscdev); misc_deregister(&cached_miscdev); } module_init(mspec_init); module_exit(mspec_exit); MODULE_AUTHOR("Silicon Graphics, Inc. 
<[email protected]>"); MODULE_DESCRIPTION("Driver for SGI SN special memory operations"); MODULE_LICENSE("GPL");
linux-master
drivers/char/mspec.c
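mspec_mmap() above accepts only shared, writable mappings at page offset 0, and the uncached pages are faulted in lazily by mspec_fault(). A sketch of mapping one uncached page from userspace; the /dev/mspec_uncached path follows the miscdevice name in the code but is an assumption:

/* Hedged sketch: obtain one page of SN uncached (write-combining)
 * memory through the mspec_uncached device defined above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/mspec_uncached", O_RDWR);

	if (fd < 0) {
		perror("open /dev/mspec_uncached");
		return 1;
	}
	/* MAP_SHARED and PROT_WRITE are mandatory, per mspec_mmap(). */
	volatile unsigned long *p = mmap(NULL, pagesz,
					 PROT_READ | PROT_WRITE,
					 MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	p[0] = 0xdeadbeef;	/* first touch faults in an uncached page */
	printf("wrote %#lx to uncached page\n", p[0]);
	munmap((void *)p, pagesz);
	close(fd);
	return 0;
}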
// SPDX-License-Identifier: GPL-2.0-only /* * Generic parallel printer driver * * Copyright (C) 1992 by Jim Weigand and Linus Torvalds * Copyright (C) 1992,1993 by Michael K. Johnson * - Thanks much to Gunter Windau for pointing out to me where the error * checking ought to be. * Copyright (C) 1993 by Nigel Gamble (added interrupt code) * Copyright (C) 1994 by Alan Cox (Modularised it) * LPCAREFUL, LPABORT, LPGETSTATUS added by Chris Metcalf, [email protected] * Statistics and support for slow printers by Rob Janssen, [email protected] * "lp=" command line parameters added by Grant Guenther, [email protected] * lp_read (Status readback) support added by Carsten Gross, * [email protected] * Support for parport by Philip Blundell <[email protected]> * Parport sharing hacking by Andrea Arcangeli * Fixed kernel_(to/from)_user memory copy to check for errors * by Riccardo Facchetti <[email protected]> * 22-JAN-1998 Added support for devfs Richard Gooch <[email protected]> * Redesigned interrupt handling for handle printers with buggy handshake * by Andrea Arcangeli, 11 May 1998 * Full efficient handling of printer with buggy irq handshake (now I have * understood the meaning of the strange handshake). This is done sending new * characters if the interrupt is just happened, even if the printer say to * be still BUSY. This is needed at least with Epson Stylus Color. To enable * the new TRUST_IRQ mode read the `LP OPTIMIZATION' section below... * Fixed the irq on the rising edge of the strobe case. * Obsoleted the CAREFUL flag since a printer that doesn' t work with * CAREFUL will block a bit after in lp_check_status(). * Andrea Arcangeli, 15 Oct 1998 * Obsoleted and removed all the lowlevel stuff implemented in the last * month to use the IEEE1284 functions (that handle the _new_ compatibilty * mode fine). */ /* This driver should, in theory, work with any parallel port that has an * appropriate low-level driver; all I/O is done through the parport * abstraction layer. * * If this driver is built into the kernel, you can configure it using the * kernel command-line. For example: * * lp=parport1,none,parport2 (bind lp0 to parport1, disable lp1 and * bind lp2 to parport2) * * lp=auto (assign lp devices to all ports that * have printers attached, as determined * by the IEEE-1284 autoprobe) * * lp=reset (reset the printer during * initialisation) * * lp=off (disable the printer driver entirely) * * If the driver is loaded as a module, similar functionality is available * using module parameters. The equivalent of the above commands would be: * * # insmod lp.o parport=1,none,2 * * # insmod lp.o parport=auto * * # insmod lp.o reset=1 */ /* COMPATIBILITY WITH OLD KERNELS * * Under Linux 2.0 and previous versions, lp devices were bound to ports at * particular I/O addresses, as follows: * * lp0 0x3bc * lp1 0x378 * lp2 0x278 * * The new driver, by default, binds lp devices to parport devices as it * finds them. This means that if you only have one port, it will be bound * to lp0 regardless of its I/O address. If you need the old behaviour, you * can force it using the parameters described above. 
*/ /* * The new interrupt handling code take care of the buggy handshake * of some HP and Epson printer: * ___ * ACK _______________ ___________ * |__| * ____ * BUSY _________ _______ * |____________| * * I discovered this using the printer scanner that you can find at: * * ftp://e-mind.com/pub/linux/pscan/ * * 11 May 98, Andrea Arcangeli * * My printer scanner run on an Epson Stylus Color show that such printer * generates the irq on the _rising_ edge of the STROBE. Now lp handle * this case fine too. * * 15 Oct 1998, Andrea Arcangeli * * The so called `buggy' handshake is really the well documented * compatibility mode IEEE1284 handshake. They changed the well known * Centronics handshake acking in the middle of busy expecting to not * break drivers or legacy application, while they broken linux lp * until I fixed it reverse engineering the protocol by hand some * month ago... * * 14 Dec 1998, Andrea Arcangeli * * Copyright (C) 2000 by Tim Waugh (added LPSETTIMEOUT ioctl) */ #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/poll.h> #include <linux/console.h> #include <linux/device.h> #include <linux/wait.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/compat.h> #include <linux/parport.h> #undef LP_STATS #include <linux/lp.h> #include <asm/irq.h> #include <linux/uaccess.h> /* if you have more than 8 printers, remember to increase LP_NO */ #define LP_NO 8 static DEFINE_MUTEX(lp_mutex); static struct lp_struct lp_table[LP_NO]; static int port_num[LP_NO]; static unsigned int lp_count = 0; static const struct class lp_class = { .name = "printer", }; #ifdef CONFIG_LP_CONSOLE static struct parport *console_registered; #endif /* CONFIG_LP_CONSOLE */ #undef LP_DEBUG /* Bits used to manage claiming the parport device */ #define LP_PREEMPT_REQUEST 1 #define LP_PARPORT_CLAIMED 2 /* --- low-level port access ----------------------------------- */ #define r_dtr(x) (parport_read_data(lp_table[(x)].dev->port)) #define r_str(x) (parport_read_status(lp_table[(x)].dev->port)) #define w_ctr(x,y) do { parport_write_control(lp_table[(x)].dev->port, (y)); } while (0) #define w_dtr(x,y) do { parport_write_data(lp_table[(x)].dev->port, (y)); } while (0) /* Claim the parport or block trying unless we've already claimed it */ static void lp_claim_parport_or_block(struct lp_struct *this_lp) { if (!test_and_set_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) { parport_claim_or_block(this_lp->dev); } } /* Claim the parport or block trying unless we've already claimed it */ static void lp_release_parport(struct lp_struct *this_lp) { if (test_and_clear_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) { parport_release(this_lp->dev); } } static int lp_preempt(void *handle) { struct lp_struct *this_lp = (struct lp_struct *)handle; set_bit(LP_PREEMPT_REQUEST, &this_lp->bits); return 1; } /* * Try to negotiate to a new mode; if unsuccessful negotiate to * compatibility mode. Return the mode we ended up in. 
*/ static int lp_negotiate(struct parport *port, int mode) { if (parport_negotiate(port, mode) != 0) { mode = IEEE1284_MODE_COMPAT; parport_negotiate(port, mode); } return mode; } static int lp_reset(int minor) { int retval; lp_claim_parport_or_block(&lp_table[minor]); w_ctr(minor, LP_PSELECP); udelay(LP_DELAY); w_ctr(minor, LP_PSELECP | LP_PINITP); retval = r_str(minor); lp_release_parport(&lp_table[minor]); return retval; } static void lp_error(int minor) { DEFINE_WAIT(wait); int polling; if (LP_F(minor) & LP_ABORT) return; polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE; if (polling) lp_release_parport(&lp_table[minor]); prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE); schedule_timeout(LP_TIMEOUT_POLLED); finish_wait(&lp_table[minor].waitq, &wait); if (polling) lp_claim_parport_or_block(&lp_table[minor]); else parport_yield_blocking(lp_table[minor].dev); } static int lp_check_status(int minor) { int error = 0; unsigned int last = lp_table[minor].last_error; unsigned char status = r_str(minor); if ((status & LP_PERRORP) && !(LP_F(minor) & LP_CAREFUL)) /* No error. */ last = 0; else if ((status & LP_POUTPA)) { if (last != LP_POUTPA) { last = LP_POUTPA; printk(KERN_INFO "lp%d out of paper\n", minor); } error = -ENOSPC; } else if (!(status & LP_PSELECD)) { if (last != LP_PSELECD) { last = LP_PSELECD; printk(KERN_INFO "lp%d off-line\n", minor); } error = -EIO; } else if (!(status & LP_PERRORP)) { if (last != LP_PERRORP) { last = LP_PERRORP; printk(KERN_INFO "lp%d on fire\n", minor); } error = -EIO; } else { last = 0; /* Come here if LP_CAREFUL is set and no errors are reported. */ } lp_table[minor].last_error = last; if (last != 0) lp_error(minor); return error; } static int lp_wait_ready(int minor, int nonblock) { int error = 0; /* If we're not in compatibility mode, we're ready now! */ if (lp_table[minor].current_mode != IEEE1284_MODE_COMPAT) { return 0; } do { error = lp_check_status(minor); if (error && (nonblock || (LP_F(minor) & LP_ABORT))) break; if (signal_pending(current)) { error = -EINTR; break; } } while (error); return error; } static ssize_t lp_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned int minor = iminor(file_inode(file)); struct parport *port = lp_table[minor].dev->port; char *kbuf = lp_table[minor].lp_buffer; ssize_t retv = 0; ssize_t written; size_t copy_size = count; int nonblock = ((file->f_flags & O_NONBLOCK) || (LP_F(minor) & LP_ABORT)); #ifdef LP_STATS if (time_after(jiffies, lp_table[minor].lastcall + LP_TIME(minor))) lp_table[minor].runchars = 0; lp_table[minor].lastcall = jiffies; #endif /* Need to copy the data from user-space. */ if (copy_size > LP_BUFFER_SIZE) copy_size = LP_BUFFER_SIZE; if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) return -EINTR; if (copy_from_user(kbuf, buf, copy_size)) { retv = -EFAULT; goto out_unlock; } /* Claim Parport or sleep until it becomes available */ lp_claim_parport_or_block(&lp_table[minor]); /* Go to the proper mode. */ lp_table[minor].current_mode = lp_negotiate(port, lp_table[minor].best_mode); parport_set_timeout(lp_table[minor].dev, (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK : lp_table[minor].timeout)); if ((retv = lp_wait_ready(minor, nonblock)) == 0) do { /* Write the data. 
*/ written = parport_write(port, kbuf, copy_size); if (written > 0) { copy_size -= written; count -= written; buf += written; retv += written; } if (signal_pending(current)) { if (retv == 0) retv = -EINTR; break; } if (copy_size > 0) { /* incomplete write -> check error ! */ int error; parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; error = lp_wait_ready(minor, nonblock); if (error) { if (retv == 0) retv = error; break; } else if (nonblock) { if (retv == 0) retv = -EAGAIN; break; } parport_yield_blocking(lp_table[minor].dev); lp_table[minor].current_mode = lp_negotiate(port, lp_table[minor].best_mode); } else if (need_resched()) schedule(); if (count) { copy_size = count; if (copy_size > LP_BUFFER_SIZE) copy_size = LP_BUFFER_SIZE; if (copy_from_user(kbuf, buf, copy_size)) { if (retv == 0) retv = -EFAULT; break; } } } while (count > 0); if (test_and_clear_bit(LP_PREEMPT_REQUEST, &lp_table[minor].bits)) { printk(KERN_INFO "lp%d releasing parport\n", minor); parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; lp_release_parport(&lp_table[minor]); } out_unlock: mutex_unlock(&lp_table[minor].port_mutex); return retv; } #ifdef CONFIG_PARPORT_1284 /* Status readback conforming to ieee1284 */ static ssize_t lp_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { DEFINE_WAIT(wait); unsigned int minor=iminor(file_inode(file)); struct parport *port = lp_table[minor].dev->port; ssize_t retval = 0; char *kbuf = lp_table[minor].lp_buffer; int nonblock = ((file->f_flags & O_NONBLOCK) || (LP_F(minor) & LP_ABORT)); if (count > LP_BUFFER_SIZE) count = LP_BUFFER_SIZE; if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) return -EINTR; lp_claim_parport_or_block(&lp_table[minor]); parport_set_timeout(lp_table[minor].dev, (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK : lp_table[minor].timeout)); parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); if (parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_NIBBLE)) { retval = -EIO; goto out; } while (retval == 0) { retval = parport_read(port, kbuf, count); if (retval > 0) break; if (nonblock) { retval = -EAGAIN; break; } /* Wait for data. */ if (lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE) { parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); lp_error(minor); if (parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_NIBBLE)) { retval = -EIO; goto out; } } else { prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE); schedule_timeout(LP_TIMEOUT_POLLED); finish_wait(&lp_table[minor].waitq, &wait); } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } cond_resched(); } parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); out: lp_release_parport(&lp_table[minor]); if (retval > 0 && copy_to_user(buf, kbuf, retval)) retval = -EFAULT; mutex_unlock(&lp_table[minor].port_mutex); return retval; } #endif /* IEEE 1284 support */ static int lp_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); int ret = 0; mutex_lock(&lp_mutex); if (minor >= LP_NO) { ret = -ENXIO; goto out; } if ((LP_F(minor) & LP_EXIST) == 0) { ret = -ENXIO; goto out; } if (test_and_set_bit(LP_BUSY_BIT_POS, &LP_F(minor))) { ret = -EBUSY; goto out; } /* If ABORTOPEN is set and the printer is offline or out of paper, we may still want to open it to perform ioctl()s. 
Therefore we have commandeered O_NONBLOCK, even though it is being used in a non-standard manner. This is strictly a Linux hack, and should most likely only ever be used by the tunelp application. */ if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) { int status; lp_claim_parport_or_block(&lp_table[minor]); status = r_str(minor); lp_release_parport(&lp_table[minor]); if (status & LP_POUTPA) { printk(KERN_INFO "lp%d out of paper\n", minor); LP_F(minor) &= ~LP_BUSY; ret = -ENOSPC; goto out; } else if (!(status & LP_PSELECD)) { printk(KERN_INFO "lp%d off-line\n", minor); LP_F(minor) &= ~LP_BUSY; ret = -EIO; goto out; } else if (!(status & LP_PERRORP)) { printk(KERN_ERR "lp%d printer error\n", minor); LP_F(minor) &= ~LP_BUSY; ret = -EIO; goto out; } } lp_table[minor].lp_buffer = kmalloc(LP_BUFFER_SIZE, GFP_KERNEL); if (!lp_table[minor].lp_buffer) { LP_F(minor) &= ~LP_BUSY; ret = -ENOMEM; goto out; } /* Determine if the peripheral supports ECP mode */ lp_claim_parport_or_block(&lp_table[minor]); if ((lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) && !parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_ECP)) { printk(KERN_INFO "lp%d: ECP mode\n", minor); lp_table[minor].best_mode = IEEE1284_MODE_ECP; } else { lp_table[minor].best_mode = IEEE1284_MODE_COMPAT; } /* Leave peripheral in compatibility mode */ parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); lp_release_parport(&lp_table[minor]); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; out: mutex_unlock(&lp_mutex); return ret; } static int lp_release(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); lp_claim_parport_or_block(&lp_table[minor]); parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; lp_release_parport(&lp_table[minor]); kfree(lp_table[minor].lp_buffer); lp_table[minor].lp_buffer = NULL; LP_F(minor) &= ~LP_BUSY; return 0; } static int lp_do_ioctl(unsigned int minor, unsigned int cmd, unsigned long arg, void __user *argp) { int status; int retval = 0; #ifdef LP_DEBUG printk(KERN_DEBUG "lp%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg); #endif if (minor >= LP_NO) return -ENODEV; if ((LP_F(minor) & LP_EXIST) == 0) return -ENODEV; switch (cmd) { case LPTIME: if (arg > UINT_MAX / HZ) return -EINVAL; LP_TIME(minor) = arg * HZ/100; break; case LPCHAR: LP_CHAR(minor) = arg; break; case LPABORT: if (arg) LP_F(minor) |= LP_ABORT; else LP_F(minor) &= ~LP_ABORT; break; case LPABORTOPEN: if (arg) LP_F(minor) |= LP_ABORTOPEN; else LP_F(minor) &= ~LP_ABORTOPEN; break; case LPCAREFUL: if (arg) LP_F(minor) |= LP_CAREFUL; else LP_F(minor) &= ~LP_CAREFUL; break; case LPWAIT: LP_WAIT(minor) = arg; break; case LPSETIRQ: return -EINVAL; case LPGETIRQ: if (copy_to_user(argp, &LP_IRQ(minor), sizeof(int))) return -EFAULT; break; case LPGETSTATUS: if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) return -EINTR; lp_claim_parport_or_block(&lp_table[minor]); status = r_str(minor); lp_release_parport(&lp_table[minor]); mutex_unlock(&lp_table[minor].port_mutex); if (copy_to_user(argp, &status, sizeof(int))) return -EFAULT; break; case LPRESET: lp_reset(minor); break; #ifdef LP_STATS case LPGETSTATS: if (copy_to_user(argp, &LP_STAT(minor), sizeof(struct lp_stats))) return -EFAULT; if (capable(CAP_SYS_ADMIN)) memset(&LP_STAT(minor), 0, sizeof(struct lp_stats)); break; #endif case LPGETFLAGS: status = LP_F(minor); if (copy_to_user(argp, &status, sizeof(int))) return -EFAULT; break; default: retval = 
-EINVAL; } return retval; } static int lp_set_timeout(unsigned int minor, s64 tv_sec, long tv_usec) { long to_jiffies; /* Convert to jiffies, place in lp_table */ if (tv_sec < 0 || tv_usec < 0) return -EINVAL; /* * we used to not check, so let's not make this fatal, * but deal with user space passing a 32-bit tv_nsec in * a 64-bit field, capping the timeout to 1 second * worth of microseconds, and capping the total at * MAX_JIFFY_OFFSET. */ if (tv_usec > 999999) tv_usec = 999999; if (tv_sec >= MAX_SEC_IN_JIFFIES - 1) { to_jiffies = MAX_JIFFY_OFFSET; } else { to_jiffies = DIV_ROUND_UP(tv_usec, 1000000/HZ); to_jiffies += tv_sec * (long) HZ; } if (to_jiffies <= 0) { return -EINVAL; } lp_table[minor].timeout = to_jiffies; return 0; } static int lp_set_timeout32(unsigned int minor, void __user *arg) { s32 karg[2]; if (copy_from_user(karg, arg, sizeof(karg))) return -EFAULT; return lp_set_timeout(minor, karg[0], karg[1]); } static int lp_set_timeout64(unsigned int minor, void __user *arg) { s64 karg[2]; if (copy_from_user(karg, arg, sizeof(karg))) return -EFAULT; /* sparc64 suseconds_t is 32-bit only */ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall()) karg[1] >>= 32; return lp_set_timeout(minor, karg[0], karg[1]); } static long lp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor; int ret; minor = iminor(file_inode(file)); mutex_lock(&lp_mutex); switch (cmd) { case LPSETTIMEOUT_OLD: if (BITS_PER_LONG == 32) { ret = lp_set_timeout32(minor, (void __user *)arg); break; } fallthrough; /* for 64-bit */ case LPSETTIMEOUT_NEW: ret = lp_set_timeout64(minor, (void __user *)arg); break; default: ret = lp_do_ioctl(minor, cmd, arg, (void __user *)arg); break; } mutex_unlock(&lp_mutex); return ret; } #ifdef CONFIG_COMPAT static long lp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor; int ret; minor = iminor(file_inode(file)); mutex_lock(&lp_mutex); switch (cmd) { case LPSETTIMEOUT_OLD: if (!COMPAT_USE_64BIT_TIME) { ret = lp_set_timeout32(minor, (void __user *)arg); break; } fallthrough; /* for x32 mode */ case LPSETTIMEOUT_NEW: ret = lp_set_timeout64(minor, (void __user *)arg); break; #ifdef LP_STATS case LPGETSTATS: /* FIXME: add an implementation if you set LP_STATS */ ret = -EINVAL; break; #endif default: ret = lp_do_ioctl(minor, cmd, arg, compat_ptr(arg)); break; } mutex_unlock(&lp_mutex); return ret; } #endif static const struct file_operations lp_fops = { .owner = THIS_MODULE, .write = lp_write, .unlocked_ioctl = lp_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = lp_compat_ioctl, #endif .open = lp_open, .release = lp_release, #ifdef CONFIG_PARPORT_1284 .read = lp_read, #endif .llseek = noop_llseek, }; /* --- support for console on the line printer ----------------- */ #ifdef CONFIG_LP_CONSOLE #define CONSOLE_LP 0 /* If the printer is out of paper, we can either lose the messages or * stall until the printer is happy again. Define CONSOLE_LP_STRICT * non-zero to get the latter behaviour. */ #define CONSOLE_LP_STRICT 1 /* The console must be locked when we get here. */ static void lp_console_write(struct console *co, const char *s, unsigned count) { struct pardevice *dev = lp_table[CONSOLE_LP].dev; struct parport *port = dev->port; ssize_t written; if (parport_claim(dev)) /* Nothing we can do. */ return; parport_set_timeout(dev, 0); /* Go to compatibility mode. */ parport_negotiate(port, IEEE1284_MODE_COMPAT); do { /* Write the data, converting LF->CRLF as we go. 
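* * A worked example of the conversion: a buffer containing "one\ntwo" reaches the wire as "one\r\ntwo"; everything before the '\n' is written first, then the literal "\r\n" pair, then the remainder.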
*/ ssize_t canwrite = count; char *lf = memchr(s, '\n', count); if (lf) canwrite = lf - s; if (canwrite > 0) { written = parport_write(port, s, canwrite); if (written <= 0) continue; s += written; count -= written; canwrite -= written; } if (lf && canwrite <= 0) { const char *crlf = "\r\n"; int i = 2; /* Dodge the original '\n', and put '\r\n' instead. */ s++; count--; do { written = parport_write(port, crlf, i); if (written > 0) { i -= written; crlf += written; } } while (i > 0 && (CONSOLE_LP_STRICT || written > 0)); } } while (count > 0 && (CONSOLE_LP_STRICT || written > 0)); parport_release(dev); } static struct console lpcons = { .name = "lp", .write = lp_console_write, .flags = CON_PRINTBUFFER, }; #endif /* console on line printer */ /* --- initialisation code ------------------------------------- */ static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC }; static char *parport[LP_NO]; static bool reset; module_param_array(parport, charp, NULL, 0); module_param(reset, bool, 0); #ifndef MODULE static int __init lp_setup(char *str) { static int parport_ptr; int x; if (get_option(&str, &x)) { if (x == 0) { /* disable driver on "lp=" or "lp=0" */ parport_nr[0] = LP_PARPORT_OFF; } else { printk(KERN_WARNING "warning: 'lp=0x%x' is deprecated, ignored\n", x); return 0; } } else if (!strncmp(str, "parport", 7)) { int n = simple_strtoul(str+7, NULL, 10); if (parport_ptr < LP_NO) parport_nr[parport_ptr++] = n; else printk(KERN_INFO "lp: too many ports, %s ignored.\n", str); } else if (!strcmp(str, "auto")) { parport_nr[0] = LP_PARPORT_AUTO; } else if (!strcmp(str, "none")) { if (parport_ptr < LP_NO) parport_nr[parport_ptr++] = LP_PARPORT_NONE; else printk(KERN_INFO "lp: too many ports, %s ignored.\n", str); } else if (!strcmp(str, "reset")) { reset = true; } return 1; } #endif static int lp_register(int nr, struct parport *port) { struct pardev_cb ppdev_cb; memset(&ppdev_cb, 0, sizeof(ppdev_cb)); ppdev_cb.preempt = lp_preempt; ppdev_cb.private = &lp_table[nr]; lp_table[nr].dev = parport_register_dev_model(port, "lp", &ppdev_cb, nr); if (lp_table[nr].dev == NULL) return 1; lp_table[nr].flags |= LP_EXIST; if (reset) lp_reset(nr); device_create(&lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL, "lp%d", nr); printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name, (port->irq == PARPORT_IRQ_NONE)?"polling":"interrupt-driven"); #ifdef CONFIG_LP_CONSOLE if (!nr) { if (port->modes & PARPORT_MODE_SAFEININT) { register_console(&lpcons); console_registered = port; printk(KERN_INFO "lp%d: console ready\n", CONSOLE_LP); } else printk(KERN_ERR "lp%d: cannot run console on %s\n", CONSOLE_LP, port->name); } #endif port_num[nr] = port->number; return 0; } static void lp_attach(struct parport *port) { unsigned int i; switch (parport_nr[0]) { case LP_PARPORT_UNSPEC: case LP_PARPORT_AUTO: if (parport_nr[0] == LP_PARPORT_AUTO && port->probe_info[0].class != PARPORT_CLASS_PRINTER) return; if (lp_count == LP_NO) { printk(KERN_INFO "lp: ignoring parallel port (max. %d)\n",LP_NO); return; } for (i = 0; i < LP_NO; i++) if (port_num[i] == -1) break; if (!lp_register(i, port)) lp_count++; break; default: for (i = 0; i < LP_NO; i++) { if (port->number == parport_nr[i]) { if (!lp_register(i, port)) lp_count++; break; } } break; } } static void lp_detach(struct parport *port) { int n; /* Write this some day. 
*/ #ifdef CONFIG_LP_CONSOLE if (console_registered == port) { unregister_console(&lpcons); console_registered = NULL; } #endif /* CONFIG_LP_CONSOLE */ for (n = 0; n < LP_NO; n++) { if (port_num[n] == port->number) { port_num[n] = -1; lp_count--; device_destroy(&lp_class, MKDEV(LP_MAJOR, n)); parport_unregister_device(lp_table[n].dev); } } } static struct parport_driver lp_driver = { .name = "lp", .match_port = lp_attach, .detach = lp_detach, .devmodel = true, }; static int __init lp_init(void) { int i, err; if (parport_nr[0] == LP_PARPORT_OFF) return 0; for (i = 0; i < LP_NO; i++) { lp_table[i].dev = NULL; lp_table[i].flags = 0; lp_table[i].chars = LP_INIT_CHAR; lp_table[i].time = LP_INIT_TIME; lp_table[i].wait = LP_INIT_WAIT; lp_table[i].lp_buffer = NULL; #ifdef LP_STATS lp_table[i].lastcall = 0; lp_table[i].runchars = 0; memset(&lp_table[i].stats, 0, sizeof(struct lp_stats)); #endif lp_table[i].last_error = 0; init_waitqueue_head(&lp_table[i].waitq); init_waitqueue_head(&lp_table[i].dataq); mutex_init(&lp_table[i].port_mutex); lp_table[i].timeout = 10 * HZ; port_num[i] = -1; } if (register_chrdev(LP_MAJOR, "lp", &lp_fops)) { printk(KERN_ERR "lp: unable to get major %d\n", LP_MAJOR); return -EIO; } err = class_register(&lp_class); if (err) goto out_reg; if (parport_register_driver(&lp_driver)) { printk(KERN_ERR "lp: unable to register with parport\n"); err = -EIO; goto out_class; } if (!lp_count) { printk(KERN_INFO "lp: driver loaded but no devices found\n"); #ifndef CONFIG_PARPORT_1284 if (parport_nr[0] == LP_PARPORT_AUTO) printk(KERN_INFO "lp: (is IEEE 1284 support enabled?)\n"); #endif } return 0; out_class: class_unregister(&lp_class); out_reg: unregister_chrdev(LP_MAJOR, "lp"); return err; } static int __init lp_init_module(void) { if (parport[0]) { /* The user gave some parameters. Let's see what they were. */ if (!strncmp(parport[0], "auto", 4)) parport_nr[0] = LP_PARPORT_AUTO; else { int n; for (n = 0; n < LP_NO && parport[n]; n++) { if (!strncmp(parport[n], "none", 4)) parport_nr[n] = LP_PARPORT_NONE; else { char *ep; unsigned long r = simple_strtoul(parport[n], &ep, 0); if (ep != parport[n]) parport_nr[n] = r; else { printk(KERN_ERR "lp: bad port specifier `%s'\n", parport[n]); return -ENODEV; } } } } } return lp_init(); } static void lp_cleanup_module(void) { parport_unregister_driver(&lp_driver); #ifdef CONFIG_LP_CONSOLE unregister_console(&lpcons); #endif unregister_chrdev(LP_MAJOR, "lp"); class_unregister(&lp_class); } __setup("lp=", lp_setup); module_init(lp_init_module); module_exit(lp_cleanup_module); MODULE_ALIAS_CHARDEV_MAJOR(LP_MAJOR); MODULE_LICENSE("GPL");
linux-master
drivers/char/lp.c
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* * Copyright (C) 2017-2022 Jason A. Donenfeld <[email protected]>. All Rights Reserved. * Copyright Matt Mackall <[email protected]>, 2003, 2004, 2005 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved. * * This driver produces cryptographically secure pseudorandom data. It is divided * into roughly six sections, each with a section header: * * - Initialization and readiness waiting. * - Fast key erasure RNG, the "crng". * - Entropy accumulation and extraction routines. * - Entropy collection routines. * - Userspace reader/writer interfaces. * - Sysctl interface. * * The high level overview is that there is one input pool, into which * various pieces of data are hashed. Prior to initialization, some of that * data is then "credited" as having a certain number of bits of entropy. * When enough bits of entropy are available, the hash is finalized and * handed as a key to a stream cipher that expands it indefinitely for * various consumers. This key is periodically refreshed as the various * entropy collectors, described below, add data to the input pool. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/utsname.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/nodemask.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/percpu.h> #include <linux/ptrace.h> #include <linux/workqueue.h> #include <linux/irq.h> #include <linux/ratelimit.h> #include <linux/syscalls.h> #include <linux/completion.h> #include <linux/uuid.h> #include <linux/uaccess.h> #include <linux/suspend.h> #include <linux/siphash.h> #include <linux/sched/isolation.h> #include <crypto/chacha.h> #include <crypto/blake2s.h> #include <asm/archrandom.h> #include <asm/processor.h> #include <asm/irq.h> #include <asm/irq_regs.h> #include <asm/io.h> /********************************************************************* * * Initialization and readiness waiting. * * Much of the RNG infrastructure is devoted to various dependencies * being able to wait until the RNG has collected enough entropy and * is ready for safe consumption. * *********************************************************************/ /* * crng_init is protected by base_crng->lock, and only increases * its value (from empty->early->ready). */ static enum { CRNG_EMPTY = 0, /* Little to no entropy collected */ CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */ CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */ } crng_init __read_mostly = CRNG_EMPTY; static DEFINE_STATIC_KEY_FALSE(crng_is_ready); #define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY) /* Various types of waiters for crng_init->CRNG_READY transition. */ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); static struct fasync_struct *fasync; static ATOMIC_NOTIFIER_HEAD(random_ready_notifier); /* Control how we warn userspace. 
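* * With the rate limit configured below, at most 3 such warnings are printed per second. Setting the module parameter ratelimit_disable (assumed to surface as /sys/module/random/parameters/ratelimit_disable) to a nonzero value disables the suppression and prints every warning.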
*/ static struct ratelimit_state urandom_warning = RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE); static int ratelimit_disable __read_mostly = IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM); module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); /* * Returns whether or not the input pool has been seeded and thus guaranteed * to supply cryptographically secure random numbers. This applies to: the * /dev/urandom device, the get_random_bytes function, and the get_random_{u8, * u16,u32,u64,long} family of functions. * * Returns: true if the input pool has been seeded. * false if the input pool has not been seeded. */ bool rng_is_initialized(void) { return crng_ready(); } EXPORT_SYMBOL(rng_is_initialized); static void __cold crng_set_ready(struct work_struct *work) { static_branch_enable(&crng_is_ready); } /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */ static void try_to_generate_entropy(void); /* * Wait for the input pool to be seeded and thus guaranteed to supply * cryptographically secure random numbers. This applies to: the /dev/urandom * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64, * long} family of functions. Using any of these functions without first * calling this function forfeits the guarantee of security. * * Returns: 0 if the input pool has been seeded. * -ERESTARTSYS if the function was interrupted by a signal. */ int wait_for_random_bytes(void) { while (!crng_ready()) { int ret; try_to_generate_entropy(); ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ); if (ret) return ret > 0 ? 0 : ret; } return 0; } EXPORT_SYMBOL(wait_for_random_bytes); /* * Add a callback function that will be invoked when the crng is initialised, * or immediately if it already has been. Only use this if you are absolutely * sure it is required. Most users should instead be able to test * `rng_is_initialized()` on demand, or make use of `get_random_bytes_wait()`. */ int __cold execute_with_initialized_rng(struct notifier_block *nb) { unsigned long flags; int ret = 0; spin_lock_irqsave(&random_ready_notifier.lock, flags); if (crng_ready()) nb->notifier_call(nb, 0, NULL); else ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb); spin_unlock_irqrestore(&random_ready_notifier.lock, flags); return ret; } #define warn_unseeded_randomness() \ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \ __func__, (void *)_RET_IP_, crng_init) /********************************************************************* * * Fast key erasure RNG, the "crng". * * These functions expand entropy from the entropy extractor into * long streams for external consumption using the "fast key erasure" * RNG described at <https://blog.cr.yp.to/20170723-random.html>. * * There are a few exported interfaces for use by other drivers: * * void get_random_bytes(void *buf, size_t len) * u8 get_random_u8() * u16 get_random_u16() * u32 get_random_u32() * u32 get_random_u32_below(u32 ceil) * u32 get_random_u32_above(u32 floor) * u32 get_random_u32_inclusive(u32 floor, u32 ceil) * u64 get_random_u64() * unsigned long get_random_long() * * These interfaces will return the requested number of random bytes * into the given buffer or as a return value. This is equivalent to * a read from /dev/urandom.
The u8, u16, u32, u64, long family of * functions may be higher performance for one-off random integers, * because they do a bit of buffering and do not invoke reseeding * until the buffer is emptied. * *********************************************************************/ enum { CRNG_RESEED_START_INTERVAL = HZ, CRNG_RESEED_INTERVAL = 60 * HZ }; static struct { u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long)); unsigned long generation; spinlock_t lock; } base_crng = { .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock) }; struct crng { u8 key[CHACHA_KEY_SIZE]; unsigned long generation; local_lock_t lock; }; static DEFINE_PER_CPU(struct crng, crngs) = { .generation = ULONG_MAX, .lock = INIT_LOCAL_LOCK(crngs.lock), }; /* * Return the interval until the next reseeding, which is normally * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval * proportional to the uptime. */ static unsigned int crng_reseed_interval(void) { static bool early_boot = true; if (unlikely(READ_ONCE(early_boot))) { time64_t uptime = ktime_get_seconds(); if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2) WRITE_ONCE(early_boot, false); else return max_t(unsigned int, CRNG_RESEED_START_INTERVAL, (unsigned int)uptime / 2 * HZ); } return CRNG_RESEED_INTERVAL; } /* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */ static void extract_entropy(void *buf, size_t len); /* This extracts a new crng key from the input pool. */ static void crng_reseed(struct work_struct *work) { static DECLARE_DELAYED_WORK(next_reseed, crng_reseed); unsigned long flags; unsigned long next_gen; u8 key[CHACHA_KEY_SIZE]; /* Immediately schedule the next reseeding, so that it fires sooner rather than later. */ if (likely(system_unbound_wq)) queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval()); extract_entropy(key, sizeof(key)); /* * We copy the new key into the base_crng, overwriting the old one, * and update the generation counter. We avoid hitting ULONG_MAX, * because the per-cpu crngs are initialized to ULONG_MAX, so this * forces new CPUs that come online to always initialize. */ spin_lock_irqsave(&base_crng.lock, flags); memcpy(base_crng.key, key, sizeof(base_crng.key)); next_gen = base_crng.generation + 1; if (next_gen == ULONG_MAX) ++next_gen; WRITE_ONCE(base_crng.generation, next_gen); if (!static_branch_likely(&crng_is_ready)) crng_init = CRNG_READY; spin_unlock_irqrestore(&base_crng.lock, flags); memzero_explicit(key, sizeof(key)); } /* * This generates a ChaCha block using the provided key, and then * immediately overwrites that key with half the block. It returns * the resultant ChaCha state to the user, along with the second * half of the block containing 32 bytes of random data that may * be used; random_data_len may not be greater than 32. * * The returned ChaCha state contains within it a copy of the old * key value, at index 4, so the state should always be zeroed out * immediately after using in order to maintain forward secrecy. * If the state cannot be erased in a timely manner, then it is * safer to set the random_data parameter to &chacha_state[4] so * that this function overwrites it before returning. 
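* * A minimal usage sketch mirroring the callers in crng_make_state() below; out is a local stand-in and key would be base_crng.key or a per-cpu crng->key: * * u32 chacha_state[CHACHA_STATE_WORDS]; * u8 out[32]; * * crng_fast_key_erasure(key, chacha_state, out, sizeof(out)); * ... consume out, or keep generating blocks from chacha_state ... * memzero_explicit(chacha_state, sizeof(chacha_state));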
*/ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], u32 chacha_state[CHACHA_STATE_WORDS], u8 *random_data, size_t random_data_len) { u8 first_block[CHACHA_BLOCK_SIZE]; BUG_ON(random_data_len > 32); chacha_init_consts(chacha_state); memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE); memset(&chacha_state[12], 0, sizeof(u32) * 4); chacha20_block(chacha_state, first_block); memcpy(key, first_block, CHACHA_KEY_SIZE); memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); memzero_explicit(first_block, sizeof(first_block)); } /* * This function returns a ChaCha state that you may use for generating * random data. It also returns up to 32 bytes on its own of random data * that may be used; random_data_len may not be greater than 32. */ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], u8 *random_data, size_t random_data_len) { unsigned long flags; struct crng *crng; BUG_ON(random_data_len > 32); /* * For the fast path, we check whether we're ready, unlocked first, and * then re-check once locked later. In the case where we're really not * ready, we do fast key erasure with the base_crng directly, extracting * when crng_init is CRNG_EMPTY. */ if (!crng_ready()) { bool ready; spin_lock_irqsave(&base_crng.lock, flags); ready = crng_ready(); if (!ready) { if (crng_init == CRNG_EMPTY) extract_entropy(base_crng.key, sizeof(base_crng.key)); crng_fast_key_erasure(base_crng.key, chacha_state, random_data, random_data_len); } spin_unlock_irqrestore(&base_crng.lock, flags); if (!ready) return; } local_lock_irqsave(&crngs.lock, flags); crng = raw_cpu_ptr(&crngs); /* * If our per-cpu crng is older than the base_crng, then it means * somebody reseeded the base_crng. In that case, we do fast key * erasure on the base_crng, and use its output as the new key * for our per-cpu crng. This brings us up to date with base_crng. */ if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) { spin_lock(&base_crng.lock); crng_fast_key_erasure(base_crng.key, chacha_state, crng->key, sizeof(crng->key)); crng->generation = base_crng.generation; spin_unlock(&base_crng.lock); } /* * Finally, when we've made it this far, our per-cpu crng has an up * to date key, and we can do fast key erasure with it to produce * some random data and a ChaCha state for the caller. All other * branches of this function are "unlikely", so most of the time we * should wind up here immediately. */ crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len); local_unlock_irqrestore(&crngs.lock, flags); } static void _get_random_bytes(void *buf, size_t len) { u32 chacha_state[CHACHA_STATE_WORDS]; u8 tmp[CHACHA_BLOCK_SIZE]; size_t first_block_len; if (!len) return; first_block_len = min_t(size_t, 32, len); crng_make_state(chacha_state, buf, first_block_len); len -= first_block_len; buf += first_block_len; while (len) { if (len < CHACHA_BLOCK_SIZE) { chacha20_block(chacha_state, tmp); memcpy(buf, tmp, len); memzero_explicit(tmp, sizeof(tmp)); break; } chacha20_block(chacha_state, buf); if (unlikely(chacha_state[12] == 0)) ++chacha_state[13]; len -= CHACHA_BLOCK_SIZE; buf += CHACHA_BLOCK_SIZE; } memzero_explicit(chacha_state, sizeof(chacha_state)); } /* * This returns random bytes in arbitrary quantities. The quality of the * random bytes is as good as /dev/urandom. In order to ensure that the * randomness provided by this function is okay, the function * wait_for_random_bytes() should be called and return 0 at least once * at any point prior.
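* * A hedged example of a conforming caller (get_random_bytes_wait() in <linux/random.h> wraps this same pattern): * * u8 key[32]; * int ret = wait_for_random_bytes(); * * if (ret) * return ret; // interrupted before the pool was ready * get_random_bytes(key, sizeof(key));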
*/ void get_random_bytes(void *buf, size_t len) { warn_unseeded_randomness(); _get_random_bytes(buf, len); } EXPORT_SYMBOL(get_random_bytes); static ssize_t get_random_bytes_user(struct iov_iter *iter) { u32 chacha_state[CHACHA_STATE_WORDS]; u8 block[CHACHA_BLOCK_SIZE]; size_t ret = 0, copied; if (unlikely(!iov_iter_count(iter))) return 0; /* * Immediately overwrite the ChaCha key at index 4 with random * bytes, in case userspace causes copy_to_iter() below to sleep * forever, so that we still retain forward secrecy in that case. */ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); /* * However, if we're doing a read of len <= 32, we don't need to * use chacha_state after, so we can simply return those bytes to * the user directly. */ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) { ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter); goto out_zero_chacha; } for (;;) { chacha20_block(chacha_state, block); if (unlikely(chacha_state[12] == 0)) ++chacha_state[13]; copied = copy_to_iter(block, sizeof(block), iter); ret += copied; if (!iov_iter_count(iter) || copied != sizeof(block)) break; BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0); if (ret % PAGE_SIZE == 0) { if (signal_pending(current)) break; cond_resched(); } } memzero_explicit(block, sizeof(block)); out_zero_chacha: memzero_explicit(chacha_state, sizeof(chacha_state)); return ret ? ret : -EFAULT; } /* * Batched entropy returns random integers. The quality of the random * number is as good as /dev/urandom. In order to ensure that the randomness * provided by this function is okay, the function wait_for_random_bytes() * should be called and return 0 at least once at any point prior. */ #define DEFINE_BATCHED_ENTROPY(type) \ struct batch_ ##type { \ /* \ * We make this 1.5x a ChaCha block, so that we get the \ * remaining 32 bytes from fast key erasure, plus one full \ * block from the detached ChaCha state. We can increase \ * the size of this later if needed so long as we keep the \ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \ */ \ type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \ local_lock_t lock; \ unsigned long generation; \ unsigned int position; \ }; \ \ static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \ .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \ .position = UINT_MAX \ }; \ \ type get_random_ ##type(void) \ { \ type ret; \ unsigned long flags; \ struct batch_ ##type *batch; \ unsigned long next_gen; \ \ warn_unseeded_randomness(); \ \ if (!crng_ready()) { \ _get_random_bytes(&ret, sizeof(ret)); \ return ret; \ } \ \ local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \ batch = raw_cpu_ptr(&batched_entropy_##type); \ \ next_gen = READ_ONCE(base_crng.generation); \ if (batch->position >= ARRAY_SIZE(batch->entropy) || \ next_gen != batch->generation) { \ _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \ batch->position = 0; \ batch->generation = next_gen; \ } \ \ ret = batch->entropy[batch->position]; \ batch->entropy[batch->position] = 0; \ ++batch->position; \ local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \ return ret; \ } \ EXPORT_SYMBOL(get_random_ ##type); DEFINE_BATCHED_ENTROPY(u8) DEFINE_BATCHED_ENTROPY(u16) DEFINE_BATCHED_ENTROPY(u32) DEFINE_BATCHED_ENTROPY(u64) u32 __get_random_u32_below(u32 ceil) { /* * This is the slow path for variable ceil.
It is still fast, most of * the time, by doing traditional reciprocal multiplication and * opportunistically comparing the lower half to ceil itself, before * falling back to computing a larger bound, and then rejecting samples * whose lower half would indicate a range indivisible by ceil. The use * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable * in 32-bits. */ u32 rand = get_random_u32(); u64 mult; /* * This function is technically undefined for ceil == 0, and in fact * for the non-underscored constant version in the header, we build bug * on that. But for the non-constant case, it's convenient to have that * evaluate to being a straight call to get_random_u32(), so that * get_random_u32_inclusive() can work over its whole range without * undefined behavior. */ if (unlikely(!ceil)) return rand; mult = (u64)ceil * rand; if (unlikely((u32)mult < ceil)) { u32 bound = -ceil % ceil; while (unlikely((u32)mult < bound)) mult = (u64)ceil * get_random_u32(); } return mult >> 32; } EXPORT_SYMBOL(__get_random_u32_below); #ifdef CONFIG_SMP /* * This function is called when the CPU is coming up, with entry * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP. */ int __cold random_prepare_cpu(unsigned int cpu) { /* * When the cpu comes back online, immediately invalidate both * the per-cpu crng and all batches, so that we serve fresh * randomness. */ per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX; per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX; per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX; per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX; per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX; return 0; } #endif /********************************************************************** * * Entropy accumulation and extraction routines. * * Callers may add entropy via: * * static void mix_pool_bytes(const void *buf, size_t len) * * After which, if added entropy should be credited: * * static void credit_init_bits(size_t bits) * * Finally, extract entropy via: * * static void extract_entropy(void *buf, size_t len) * **********************************************************************/ enum { POOL_BITS = BLAKE2S_HASH_SIZE * 8, POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */ POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */ }; static struct { struct blake2s_state hash; spinlock_t lock; unsigned int init_bits; } input_pool = { .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE), BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4, BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 }, .hash.outlen = BLAKE2S_HASH_SIZE, .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), }; static void _mix_pool_bytes(const void *buf, size_t len) { blake2s_update(&input_pool.hash, buf, len); } /* * This function adds bytes into the input pool. It does not * update the initialization bit counter; the caller should call * credit_init_bits if this is appropriate. */ static void mix_pool_bytes(const void *buf, size_t len) { unsigned long flags; spin_lock_irqsave(&input_pool.lock, flags); _mix_pool_bytes(buf, len); spin_unlock_irqrestore(&input_pool.lock, flags); } /* * This is an HKDF-like construction for using the hashed collected entropy * as a PRF key, that's then expanded block-by-block. 
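* * Schematically, matching the step comments inside the function body: * * seed = HASHPRF(last_key, entropy_input) * next_key = HASHPRF(seed, RDSEED || 0) * output_i = HASHPRF(seed, RDSEED || i), i = 1, 2, ... * * where HASHPRF is keyed BLAKE2s and RDSEED is 32 bytes of fresh arch randomness (with a cycle-counter fallback) gathered on each call.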
*/ static void extract_entropy(void *buf, size_t len) { unsigned long flags; u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE]; struct { unsigned long rdseed[32 / sizeof(long)]; size_t counter; } block; size_t i, longs; for (i = 0; i < ARRAY_SIZE(block.rdseed);) { longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i); if (longs) { i += longs; continue; } longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i); if (longs) { i += longs; continue; } block.rdseed[i++] = random_get_entropy(); } spin_lock_irqsave(&input_pool.lock, flags); /* seed = HASHPRF(last_key, entropy_input) */ blake2s_final(&input_pool.hash, seed); /* next_key = HASHPRF(seed, RDSEED || 0) */ block.counter = 0; blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed)); blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key)); spin_unlock_irqrestore(&input_pool.lock, flags); memzero_explicit(next_key, sizeof(next_key)); while (len) { i = min_t(size_t, len, BLAKE2S_HASH_SIZE); /* output = HASHPRF(seed, RDSEED || ++counter) */ ++block.counter; blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed)); len -= i; buf += i; } memzero_explicit(seed, sizeof(seed)); memzero_explicit(&block, sizeof(block)); } #define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits) static void __cold _credit_init_bits(size_t bits) { static struct execute_work set_ready; unsigned int new, orig, add; unsigned long flags; if (!bits) return; add = min_t(size_t, bits, POOL_BITS); orig = READ_ONCE(input_pool.init_bits); do { new = min_t(unsigned int, POOL_BITS, orig + add); } while (!try_cmpxchg(&input_pool.init_bits, &orig, new)); if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */ if (static_key_initialized) execute_in_process_context(crng_set_ready, &set_ready); atomic_notifier_call_chain(&random_ready_notifier, 0, NULL); wake_up_interruptible(&crng_init_wait); kill_fasync(&fasync, SIGIO, POLL_IN); pr_notice("crng init done\n"); if (urandom_warning.missed) pr_notice("%d urandom warning(s) missed due to ratelimiting\n", urandom_warning.missed); } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) { spin_lock_irqsave(&base_crng.lock, flags); /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */ if (crng_init == CRNG_EMPTY) { extract_entropy(base_crng.key, sizeof(base_crng.key)); crng_init = CRNG_EARLY; } spin_unlock_irqrestore(&base_crng.lock, flags); } } /********************************************************************** * * Entropy collection routines. * * The following exported functions are used for pushing entropy into * the above entropy accumulation routines: * * void add_device_randomness(const void *buf, size_t len); * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after); * void add_bootloader_randomness(const void *buf, size_t len); * void add_vmfork_randomness(const void *unique_vm_id, size_t len); * void add_interrupt_randomness(int irq); * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value); * void add_disk_randomness(struct gendisk *disk); * * add_device_randomness() adds data to the input pool that * is likely to differ between two devices (or possibly even per boot). * This would be things like MAC addresses or serial numbers, or the * read-out of the RTC. 
This does *not* credit any actual entropy to * the pool, but it initializes the pool to different values for devices * that might otherwise be identical and have very little entropy * available to them (particularly common in the embedded world). * * add_hwgenerator_randomness() is for true hardware RNGs, and will credit * entropy as specified by the caller. If the entropy pool is full it will * block until more entropy is needed. * * add_bootloader_randomness() is called by bootloader drivers, such as EFI * and device tree, and credits its input depending on whether or not the * command line option 'random.trust_bootloader' is set. * * add_vmfork_randomness() adds a unique (but not necessarily secret) ID * representing the current instance of a VM to the pool, without crediting, * and then force-reseeds the crng so that it takes effect immediately. * * add_interrupt_randomness() uses the interrupt timing as random * inputs to the entropy pool. Using the cycle counters and the irq source * as inputs, it feeds the input pool roughly once a second or after 64 * interrupts, crediting 1 bit of entropy for whichever comes first. * * add_input_randomness() uses the input layer interrupt timing, as well * as the event type information from the hardware. * * add_disk_randomness() uses what amounts to the seek time of block * layer request events, on a per-disk_devt basis, as input to the * entropy pool. Note that high-speed solid state drives with very low * seek times do not make for good sources of entropy, as their seek * times are usually fairly consistent. * * The last two routines try to estimate how many bits of entropy * to credit. They do this by keeping track of the first and second * order deltas of the event timings. * **********************************************************************/ static bool trust_cpu __initdata = true; static bool trust_bootloader __initdata = true; static int __init parse_trust_cpu(char *arg) { return kstrtobool(arg, &trust_cpu); } static int __init parse_trust_bootloader(char *arg) { return kstrtobool(arg, &trust_bootloader); } early_param("random.trust_cpu", parse_trust_cpu); early_param("random.trust_bootloader", parse_trust_bootloader); static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data) { unsigned long flags, entropy = random_get_entropy(); /* * Encode a representation of how long the system has been suspended, * in a way that is distinct from prior system suspends. */ ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() }; spin_lock_irqsave(&input_pool.lock, flags); _mix_pool_bytes(&action, sizeof(action)); _mix_pool_bytes(stamps, sizeof(stamps)); _mix_pool_bytes(&entropy, sizeof(entropy)); spin_unlock_irqrestore(&input_pool.lock, flags); if (crng_ready() && (action == PM_RESTORE_PREPARE || (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) && !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) { crng_reseed(NULL); pr_notice("crng reseeded on system resumption\n"); } return 0; } static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification }; /* * This is called extremely early, before time keeping functionality is * available, but arch randomness is. Interrupts are not yet enabled.
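* * The seeding loop below tries progressively weaker sources for each word: arch_get_random_seed_longs() (e.g. RDSEED on x86), then arch_get_random_longs() (e.g. RDRAND), then a random_get_entropy() cycle-counter read, discounting arch_bits for every word that fell through to the final fallback.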
*/ void __init random_init_early(const char *command_line) { unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)]; size_t i, longs, arch_bits; #if defined(LATENT_ENTROPY_PLUGIN) static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy; _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed)); #endif for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) { longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i); if (longs) { _mix_pool_bytes(entropy, sizeof(*entropy) * longs); i += longs; continue; } longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i); if (longs) { _mix_pool_bytes(entropy, sizeof(*entropy) * longs); i += longs; continue; } arch_bits -= sizeof(*entropy) * 8; ++i; } _mix_pool_bytes(init_utsname(), sizeof(*(init_utsname()))); _mix_pool_bytes(command_line, strlen(command_line)); /* Reseed if already seeded by earlier phases. */ if (crng_ready()) crng_reseed(NULL); else if (trust_cpu) _credit_init_bits(arch_bits); } /* * This is called a little bit after the prior function, and now there is * access to timestamps counters. Interrupts are not yet enabled. */ void __init random_init(void) { unsigned long entropy = random_get_entropy(); ktime_t now = ktime_get_real(); _mix_pool_bytes(&now, sizeof(now)); _mix_pool_bytes(&entropy, sizeof(entropy)); add_latent_entropy(); /* * If we were initialized by the cpu or bootloader before jump labels * are initialized, then we should enable the static branch here, where * it's guaranteed that jump labels have been initialized. */ if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY) crng_set_ready(NULL); /* Reseed if already seeded by earlier phases. */ if (crng_ready()) crng_reseed(NULL); WARN_ON(register_pm_notifier(&pm_notifier)); WARN(!entropy, "Missing cycle counter and fallback timer; RNG " "entropy collection will consequently suffer."); } /* * Add device- or boot-specific data to the input pool to help * initialize it. * * None of this adds any entropy; it is meant to avoid the problem of * the entropy pool having similar initial state across largely * identical devices. */ void add_device_randomness(const void *buf, size_t len) { unsigned long entropy = random_get_entropy(); unsigned long flags; spin_lock_irqsave(&input_pool.lock, flags); _mix_pool_bytes(&entropy, sizeof(entropy)); _mix_pool_bytes(buf, len); spin_unlock_irqrestore(&input_pool.lock, flags); } EXPORT_SYMBOL(add_device_randomness); /* * Interface for in-kernel drivers of true hardware RNGs. Those devices * may produce endless random bits, so this function will sleep for * some amount of time after, if the sleep_after parameter is true. */ void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after) { mix_pool_bytes(buf, len); credit_init_bits(entropy); /* * Throttle writing to once every reseed interval, unless we're not yet * initialized or no entropy is credited. */ if (sleep_after && !kthread_should_stop() && (crng_ready() || !entropy)) schedule_timeout_interruptible(crng_reseed_interval()); } EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); /* * Handle random seed passed by bootloader, and credit it depending * on the command line option 'random.trust_bootloader'. 
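* * For instance (a sketch; the EFI stub is one real caller), a 32-byte seed handed over by firmware is always mixed in, and is additionally credited as 32 * 8 = 256 bits when random.trust_bootloader is left enabled: * * add_bootloader_randomness(seed, 32);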
*/ void __init add_bootloader_randomness(const void *buf, size_t len) { mix_pool_bytes(buf, len); if (trust_bootloader) credit_init_bits(len * 8); } #if IS_ENABLED(CONFIG_VMGENID) static BLOCKING_NOTIFIER_HEAD(vmfork_chain); /* * Handle a new unique VM ID, which is unique, not secret, so we * don't credit it, but we do immediately force a reseed after so * that it's used by the crng posthaste. */ void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len) { add_device_randomness(unique_vm_id, len); if (crng_ready()) { crng_reseed(NULL); pr_notice("crng reseeded due to virtual machine fork\n"); } blocking_notifier_call_chain(&vmfork_chain, 0, NULL); } #if IS_MODULE(CONFIG_VMGENID) EXPORT_SYMBOL_GPL(add_vmfork_randomness); #endif int __cold register_random_vmfork_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&vmfork_chain, nb); } EXPORT_SYMBOL_GPL(register_random_vmfork_notifier); int __cold unregister_random_vmfork_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&vmfork_chain, nb); } EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier); #endif struct fast_pool { unsigned long pool[4]; unsigned long last; unsigned int count; struct timer_list mix; }; static void mix_interrupt_randomness(struct timer_list *work); static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { #ifdef CONFIG_64BIT #define FASTMIX_PERM SIPHASH_PERMUTATION .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }, #else #define FASTMIX_PERM HSIPHASH_PERMUTATION .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }, #endif .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0) }; /* * This is [Half]SipHash-1-x, starting from an empty key. Because * the key is fixed, it assumes that its inputs are non-malicious, * and therefore this has no security on its own. s represents the * four-word SipHash state, while v represents a two-word input. */ static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2) { s[3] ^= v1; FASTMIX_PERM(s[0], s[1], s[2], s[3]); s[0] ^= v1; s[3] ^= v2; FASTMIX_PERM(s[0], s[1], s[2], s[3]); s[0] ^= v2; } #ifdef CONFIG_SMP /* * This function is called when the CPU has just come online, with * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE. */ int __cold random_online_cpu(unsigned int cpu) { /* * During CPU shutdown and before CPU onlining, add_interrupt_ * randomness() may schedule mix_interrupt_randomness(), and * set the MIX_INFLIGHT flag. However, because the worker can * be scheduled on a different CPU during this period, that * flag will never be cleared. For that reason, we zero out * the flag here, which runs just after workqueues are onlined * for the CPU again. This also has the effect of setting the * irq randomness count to zero so that new accumulated irqs * are fresh. */ per_cpu_ptr(&irq_randomness, cpu)->count = 0; return 0; } #endif static void mix_interrupt_randomness(struct timer_list *work) { struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix); /* * The size of the copied stack pool is explicitly 2 longs so that we * only ever ingest half of the siphash output each time, retaining * the other half as the next "key" that carries over. The entropy is * supposed to be sufficiently dispersed between bits so on average * we don't wind up "losing" some. */ unsigned long pool[2]; unsigned int count; /* Check to see if we're running on the wrong CPU due to hotplug. 
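* If so, just return without touching the pool: it is only ever accessed locklessly from its owning CPU, so mixing it from here would race with fast_mix(); random_online_cpu() above clears the stale MIX_INFLIGHT count once that CPU comes back.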
*/ local_irq_disable(); if (fast_pool != this_cpu_ptr(&irq_randomness)) { local_irq_enable(); return; } /* * Copy the pool to the stack so that the mixer always has a * consistent view, before we reenable irqs again. */ memcpy(pool, fast_pool->pool, sizeof(pool)); count = fast_pool->count; fast_pool->count = 0; fast_pool->last = jiffies; local_irq_enable(); mix_pool_bytes(pool, sizeof(pool)); credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8)); memzero_explicit(pool, sizeof(pool)); } void add_interrupt_randomness(int irq) { enum { MIX_INFLIGHT = 1U << 31 }; unsigned long entropy = random_get_entropy(); struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); struct pt_regs *regs = get_irq_regs(); unsigned int new_count; fast_mix(fast_pool->pool, entropy, (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq)); new_count = ++fast_pool->count; if (new_count & MIX_INFLIGHT) return; if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ)) return; fast_pool->count |= MIX_INFLIGHT; if (!timer_pending(&fast_pool->mix)) { fast_pool->mix.expires = jiffies; add_timer_on(&fast_pool->mix, raw_smp_processor_id()); } } EXPORT_SYMBOL_GPL(add_interrupt_randomness); /* There is one of these per entropy source */ struct timer_rand_state { unsigned long last_time; long last_delta, last_delta2; }; /* * This function adds entropy to the entropy "pool" by using timing * delays. It uses the timer_rand_state structure to make an estimate * of how many bits of entropy this call has added to the pool. The * value "num" is also added to the pool; it should somehow describe * the type of event that just happened. */ static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) { unsigned long entropy = random_get_entropy(), now = jiffies, flags; long delta, delta2, delta3; unsigned int bits; /* * If we're in a hard IRQ, add_interrupt_randomness() will be called * sometime after, so mix into the fast pool. */ if (in_hardirq()) { fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num); } else { spin_lock_irqsave(&input_pool.lock, flags); _mix_pool_bytes(&entropy, sizeof(entropy)); _mix_pool_bytes(&num, sizeof(num)); spin_unlock_irqrestore(&input_pool.lock, flags); } if (crng_ready()) return; /* * Calculate number of bits of randomness we probably added. * We take into account the first, second and third-order deltas * in order to make our estimate. */ delta = now - READ_ONCE(state->last_time); WRITE_ONCE(state->last_time, now); delta2 = delta - READ_ONCE(state->last_delta); WRITE_ONCE(state->last_delta, delta); delta3 = delta2 - READ_ONCE(state->last_delta2); WRITE_ONCE(state->last_delta2, delta2); if (delta < 0) delta = -delta; if (delta2 < 0) delta2 = -delta2; if (delta3 < 0) delta3 = -delta3; if (delta > delta2) delta = delta2; if (delta > delta3) delta = delta3; /* * delta is now minimum absolute delta. Round down by 1 bit * on general principles, and limit entropy estimate to 11 bits. */ bits = min(fls(delta >> 1), 11); /* * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness() * will run after this, which uses a different crediting scheme of 1 bit * per every 64 interrupts. In order to let that function do accounting * close to the one in this function, we credit a full 64/64 bit per bit, * and then subtract one to account for the extra one added. 
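* * Worked example: bits == 2 adds max(1, 2 * 64) - 1 = 127 here; the ++count performed by this same interrupt brings the total to 128, which mix_interrupt_randomness() later credits as 128 / 64 = 2 bits, as intended.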
*/ if (in_hardirq()) this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1; else _credit_init_bits(bits); } void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) { static unsigned char last_value; static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES }; /* Ignore autorepeat and the like. */ if (value == last_value) return; last_value = value; add_timer_randomness(&input_timer_state, (type << 4) ^ code ^ (code >> 4) ^ value); } EXPORT_SYMBOL_GPL(add_input_randomness); #ifdef CONFIG_BLOCK void add_disk_randomness(struct gendisk *disk) { if (!disk || !disk->random) return; /* First major is 1, so we get >= 0x200 here. */ add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); } EXPORT_SYMBOL_GPL(add_disk_randomness); void __cold rand_initialize_disk(struct gendisk *disk) { struct timer_rand_state *state; /* * If kzalloc returns null, we just won't use that entropy * source. */ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); if (state) { state->last_time = INITIAL_JIFFIES; disk->random = state; } } #endif struct entropy_timer_state { unsigned long entropy; struct timer_list timer; atomic_t samples; unsigned int samples_per_bit; }; /* * Each time the timer fires, we expect that we got an unpredictable jump in * the cycle counter. Even if the timer is running on another CPU, the timer * activity will be touching the stack of the CPU that is generating entropy. * * Note that we don't re-arm the timer in the timer itself - we are happy to be * scheduled away, since that just makes the load more complex, but we do not * want the timer to keep ticking unless the entropy loop is running. * * So the re-arming always happens in the entropy loop itself. */ static void __cold entropy_timer(struct timer_list *timer) { struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer); unsigned long entropy = random_get_entropy(); mix_pool_bytes(&entropy, sizeof(entropy)); if (atomic_inc_return(&state->samples) % state->samples_per_bit == 0) credit_init_bits(1); } /* * If we have an actual cycle counter, see if we can generate enough entropy * with timing noise. */ static void __cold try_to_generate_entropy(void) { enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 }; u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1]; struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES); unsigned int i, num_different = 0; unsigned long last = random_get_entropy(); int cpu = -1; for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) { stack->entropy = random_get_entropy(); if (stack->entropy != last) ++num_different; last = stack->entropy; } stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1); if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT) return; atomic_set(&stack->samples, 0); timer_setup_on_stack(&stack->timer, entropy_timer, 0); while (!crng_ready() && !signal_pending(current)) { /* * Check !timer_pending() and then ensure that any previous callback has finished * executing by checking try_to_del_timer_sync(), before queueing the next one. */ if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) { struct cpumask timer_cpus; unsigned int num_cpus; /* * Preemption must be disabled here, both to read the current CPU number * and to avoid scheduling a timer on a dead CPU. */ preempt_disable(); /* Only schedule callbacks on timer CPUs that are online. 
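* On nohz_full/isolated configurations, housekeeping_cpumask(HK_TYPE_TIMER) excludes CPUs that should not service timers, so it is intersected with cpu_online_mask before a target for add_timer_on() is chosen.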
*/ cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask); num_cpus = cpumask_weight(&timer_cpus); /* In very bizarre case of misconfiguration, fallback to all online. */ if (unlikely(num_cpus == 0)) { timer_cpus = *cpu_online_mask; num_cpus = cpumask_weight(&timer_cpus); } /* Basic CPU round-robin, which avoids the current CPU. */ do { cpu = cpumask_next(cpu, &timer_cpus); if (cpu >= nr_cpu_ids) cpu = cpumask_first(&timer_cpus); } while (cpu == smp_processor_id() && num_cpus > 1); /* Expiring the timer at `jiffies` means it's the next tick. */ stack->timer.expires = jiffies; add_timer_on(&stack->timer, cpu); preempt_enable(); } mix_pool_bytes(&stack->entropy, sizeof(stack->entropy)); schedule(); stack->entropy = random_get_entropy(); } mix_pool_bytes(&stack->entropy, sizeof(stack->entropy)); del_timer_sync(&stack->timer); destroy_timer_on_stack(&stack->timer); } /********************************************************************** * * Userspace reader/writer interfaces. * * getrandom(2) is the primary modern interface into the RNG and should * be used in preference to anything else. * * Reading from /dev/random has the same functionality as calling * getrandom(2) with flags=0. In earlier versions, however, it had * vastly different semantics and should therefore be avoided, to * prevent backwards compatibility issues. * * Reading from /dev/urandom has the same functionality as calling * getrandom(2) with flags=GRND_INSECURE. Because it does not block * waiting for the RNG to be ready, it should not be used. * * Writing to either /dev/random or /dev/urandom adds entropy to * the input pool but does not credit it. * * Polling on /dev/random indicates when the RNG is initialized, on * the read side, and when it wants new entropy, on the write side. * * Both /dev/random and /dev/urandom have the same set of ioctls for * adding entropy, getting the entropy count, zeroing the count, and * reseeding the crng. * **********************************************************************/ SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags) { struct iov_iter iter; struct iovec iov; int ret; if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) return -EINVAL; /* * Requesting insecure and blocking randomness at the same time makes * no sense. */ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM)) return -EINVAL; if (!crng_ready() && !(flags & GRND_INSECURE)) { if (flags & GRND_NONBLOCK) return -EAGAIN; ret = wait_for_random_bytes(); if (unlikely(ret)) return ret; } ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter); if (unlikely(ret)) return ret; return get_random_bytes_user(&iter); } static __poll_t random_poll(struct file *file, poll_table *wait) { poll_wait(file, &crng_init_wait, wait); return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM; } static ssize_t write_pool_user(struct iov_iter *iter) { u8 block[BLAKE2S_BLOCK_SIZE]; ssize_t ret = 0; size_t copied; if (unlikely(!iov_iter_count(iter))) return 0; for (;;) { copied = copy_from_iter(block, sizeof(block), iter); ret += copied; mix_pool_bytes(block, copied); if (!iov_iter_count(iter) || copied != sizeof(block)) break; BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0); if (ret % PAGE_SIZE == 0) { if (signal_pending(current)) break; cond_resched(); } } memzero_explicit(block, sizeof(block)); return ret ? 
ret : -EFAULT; } static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter) { return write_pool_user(iter); } static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter) { static int maxwarn = 10; /* * Opportunistically attempt to initialize the RNG on platforms that * have fast cycle counters, but don't (for now) require it to succeed. */ if (!crng_ready()) try_to_generate_entropy(); if (!crng_ready()) { if (!ratelimit_disable && maxwarn <= 0) ++urandom_warning.missed; else if (ratelimit_disable || __ratelimit(&urandom_warning)) { --maxwarn; pr_notice("%s: uninitialized urandom read (%zu bytes read)\n", current->comm, iov_iter_count(iter)); } } return get_random_bytes_user(iter); } static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter) { int ret; if (!crng_ready() && ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) || (kiocb->ki_filp->f_flags & O_NONBLOCK))) return -EAGAIN; ret = wait_for_random_bytes(); if (ret != 0) return ret; return get_random_bytes_user(iter); } static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { int __user *p = (int __user *)arg; int ent_count; switch (cmd) { case RNDGETENTCNT: /* Inherently racy, no point locking. */ if (put_user(input_pool.init_bits, p)) return -EFAULT; return 0; case RNDADDTOENTCNT: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(ent_count, p)) return -EFAULT; if (ent_count < 0) return -EINVAL; credit_init_bits(ent_count); return 0; case RNDADDENTROPY: { struct iov_iter iter; struct iovec iov; ssize_t ret; int len; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(ent_count, p++)) return -EFAULT; if (ent_count < 0) return -EINVAL; if (get_user(len, p++)) return -EFAULT; ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter); if (unlikely(ret)) return ret; ret = write_pool_user(&iter); if (unlikely(ret < 0)) return ret; /* Since we're crediting, enforce that it was all written into the pool. */ if (unlikely(ret != len)) return -EFAULT; credit_init_bits(ent_count); return 0; } case RNDZAPENTCNT: case RNDCLEARPOOL: /* No longer has any effect. */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; return 0; case RNDRESEEDCRNG: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!crng_ready()) return -ENODATA; crng_reseed(NULL); return 0; default: return -EINVAL; } } static int random_fasync(int fd, struct file *filp, int on) { return fasync_helper(fd, filp, on, &fasync); } const struct file_operations random_fops = { .read_iter = random_read_iter, .write_iter = random_write_iter, .poll = random_poll, .unlocked_ioctl = random_ioctl, .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, }; const struct file_operations urandom_fops = { .read_iter = urandom_read_iter, .write_iter = random_write_iter, .unlocked_ioctl = random_ioctl, .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, }; /******************************************************************** * * Sysctl interface. * * These are partly unused legacy knobs with dummy values to not break * userspace and partly still useful things. They are usually accessible * in /proc/sys/kernel/random/ and are as follows: * * - boot_id - a UUID representing the current boot. * * - uuid - a random UUID, different each time the file is read. 
* * - poolsize - the number of bits of entropy that the input pool can * hold, tied to the POOL_BITS constant. * * - entropy_avail - the number of bits of entropy currently in the * input pool. Always <= poolsize. * * - write_wakeup_threshold - the amount of entropy in the input pool * below which write polls to /dev/random will unblock, requesting * more entropy, tied to the POOL_READY_BITS constant. It is writable * to avoid breaking old userspaces, but writing to it does not * change any behavior of the RNG. * * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL. * It is writable to avoid breaking old userspaces, but writing * to it does not change any behavior of the RNG. * ********************************************************************/ #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ; static int sysctl_random_write_wakeup_bits = POOL_READY_BITS; static int sysctl_poolsize = POOL_BITS; static u8 sysctl_bootid[UUID_SIZE]; /* * This function is used to return both the bootid UUID, and random * UUID. The difference is in whether table->data is NULL; if it is, * then a new UUID is generated and returned to the user. */ static int proc_do_uuid(struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos) { u8 tmp_uuid[UUID_SIZE], *uuid; char uuid_string[UUID_STRING_LEN + 1]; struct ctl_table fake_table = { .data = uuid_string, .maxlen = UUID_STRING_LEN }; if (write) return -EPERM; uuid = table->data; if (!uuid) { uuid = tmp_uuid; generate_random_uuid(uuid); } else { static DEFINE_SPINLOCK(bootid_spinlock); spin_lock(&bootid_spinlock); if (!uuid[8]) generate_random_uuid(uuid); spin_unlock(&bootid_spinlock); } snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid); return proc_dostring(&fake_table, 0, buf, lenp, ppos); } /* The same as proc_dointvec, but writes don't change anything. */ static int proc_do_rointvec(struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos) { return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos); } static struct ctl_table random_table[] = { { .procname = "poolsize", .data = &sysctl_poolsize, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "entropy_avail", .data = &input_pool.init_bits, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "write_wakeup_threshold", .data = &sysctl_random_write_wakeup_bits, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_do_rointvec, }, { .procname = "urandom_min_reseed_secs", .data = &sysctl_random_min_urandom_seed, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_do_rointvec, }, { .procname = "boot_id", .data = &sysctl_bootid, .mode = 0444, .proc_handler = proc_do_uuid, }, { .procname = "uuid", .mode = 0444, .proc_handler = proc_do_uuid, }, { } }; /* * random_init() is called before sysctl_init(), * so we cannot call register_sysctl_init() in random_init() */ static int __init random_sysctls_init(void) { register_sysctl_init("kernel/random", random_table); return 0; } device_initcall(random_sysctls_init); #endif
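/*
 * Illustrative userspace sketch (not part of the driver): exercising the
 * getrandom(2) interface documented in the comment block above. With
 * GRND_NONBLOCK the call fails with EAGAIN instead of sleeping while the
 * crng is not yet initialized. Assumes a libc that wraps the syscall
 * (glibc >= 2.25 provides <sys/random.h>).
 */
#include <errno.h>
#include <stdio.h>
#include <sys/random.h>
#include <sys/types.h>

int example_getrandom(void)
{
	unsigned char buf[16];
	ssize_t n = getrandom(buf, sizeof(buf), GRND_NONBLOCK);

	if (n < 0 && errno == EAGAIN) {
		fprintf(stderr, "crng not yet ready, try again later\n");
		return -1;
	}
	return n == (ssize_t)sizeof(buf) ? 0 : -1;
}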
linux-master
drivers/char/random.c
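/*
 * A hedged companion sketch for the RNDADDENTROPY ioctl handled by
 * random_ioctl() in drivers/char/random.c above: a privileged daemon
 * mixes seed bytes into the input pool and credits an entropy estimate.
 * The seed length and the bit count passed in are illustrative choices,
 * not policy mandated by the driver.
 */
#include <fcntl.h>
#include <linux/random.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int example_feed_entropy(const unsigned char *seed, int len, int bits)
{
	struct {
		struct rand_pool_info info;	/* entropy_count, buf_size, buf[] */
		unsigned char data[64];		/* storage behind the flexible array */
	} req;
	int fd, ret;

	if (len > (int)sizeof(req.data))
		return -1;
	req.info.entropy_count = bits;
	req.info.buf_size = len;
	memcpy(req.data, seed, len);

	fd = open("/dev/random", O_WRONLY);	/* RNDADDENTROPY requires CAP_SYS_ADMIN */
	if (fd < 0)
		return -1;
	ret = ioctl(fd, RNDADDENTROPY, &req);
	close(fd);
	return ret;
}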
// SPDX-License-Identifier: GPL-2.0-or-later /* toshiba.c -- Linux driver for accessing the SMM on Toshiba laptops * * Copyright (c) 1996-2001 Jonathan A. Buzzard ([email protected]) * * Valuable assistance and patches from: * Tom May <[email protected]> * Rob Napier <[email protected]> * * Fn status port numbers for machine IDs courtesy of * 0xfc02: Scott Eisert <[email protected]> * 0xfc04: Steve VanDevender <[email protected]> * 0xfc08: Garth Berry <[email protected]> * 0xfc0a: Egbert Eich <[email protected]> * 0xfc10: Andrew Lofthouse <[email protected]> * 0xfc11: Spencer Olson <[email protected]> * 0xfc13: Claudius Frankewitz <[email protected]> * 0xfc15: Tom May <[email protected]> * 0xfc17: Dave Konrad <[email protected]> * 0xfc1a: George Betzos <[email protected]> * 0xfc1b: Munemasa Wada <[email protected]> * 0xfc1d: Arthur Liu <[email protected]> * 0xfc5a: Jacques L'helgoualc'h <[email protected]> * 0xfcd1: Mr. Dave Konrad <[email protected]> * * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING * * This code is covered by the GNU GPL and you are free to make any * changes you wish to it under the terms of the license. However the * code has the potential to render your computer and/or someone else's * unusable. Please proceed with care when modifying the code. * * Note: Unfortunately the laptop hardware can close the System Configuration * Interface of its own accord. It is therefore necessary for *all* * programs using this driver to be aware that *any* SCI call can fail at * *any* time. It is up to any program to be aware of this eventuality * and take appropriate steps. * * The information used to write this driver has been obtained by reverse * engineering the software supplied by Toshiba for their portable computers in * strict accordance with the European Council Directive 92/250/EEC on the legal * protection of computer programs, and its implementation into English Law by * the Copyright (Computer Programs) Regulations 1992 (S.I. 1992 No.3233).
*/ #define TOSH_VERSION "1.11 26/9/2001" #define TOSH_DEBUG 0 #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/miscdevice.h> #include <linux/ioport.h> #include <asm/io.h> #include <linux/uaccess.h> #include <linux/init.h> #include <linux/stat.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/toshiba.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jonathan Buzzard <[email protected]>"); MODULE_DESCRIPTION("Toshiba laptop SMM driver"); static DEFINE_MUTEX(tosh_mutex); static int tosh_fn; module_param_named(fn, tosh_fn, int, 0); MODULE_PARM_DESC(fn, "User specified Fn key detection port"); static int tosh_id; static int tosh_bios; static int tosh_date; static int tosh_sci; static int tosh_fan; static long tosh_ioctl(struct file *, unsigned int, unsigned long); static const struct file_operations tosh_fops = { .owner = THIS_MODULE, .unlocked_ioctl = tosh_ioctl, .llseek = noop_llseek, }; static struct miscdevice tosh_device = { TOSH_MINOR_DEV, "toshiba", &tosh_fops }; /* * Read the Fn key status */ #ifdef CONFIG_PROC_FS static int tosh_fn_status(void) { unsigned char scan; unsigned long flags; if (tosh_fn!=0) { scan = inb(tosh_fn); } else { local_irq_save(flags); outb(0x8e, 0xe4); scan = inb(0xe5); local_irq_restore(flags); } return (int) scan; } #endif /* * For the Portage 610CT and the Tecra 700CS/700CDT emulate the HCI fan function */ static int tosh_emulate_fan(SMMRegisters *regs) { unsigned long eax,ecx,flags; unsigned char al; eax = regs->eax & 0xff00; ecx = regs->ecx & 0xffff; /* Portage 610CT */ if (tosh_id==0xfccb) { if (eax==0xfe00) { /* fan status */ local_irq_save(flags); outb(0xbe, 0xe4); al = inb(0xe5); local_irq_restore(flags); regs->eax = 0x00; regs->ecx = (unsigned int) (al & 0x01); } if ((eax==0xff00) && (ecx==0x0000)) { /* fan off */ local_irq_save(flags); outb(0xbe, 0xe4); al = inb(0xe5); outb(0xbe, 0xe4); outb (al | 0x01, 0xe5); local_irq_restore(flags); regs->eax = 0x00; regs->ecx = 0x00; } if ((eax==0xff00) && (ecx==0x0001)) { /* fan on */ local_irq_save(flags); outb(0xbe, 0xe4); al = inb(0xe5); outb(0xbe, 0xe4); outb(al & 0xfe, 0xe5); local_irq_restore(flags); regs->eax = 0x00; regs->ecx = 0x01; } } /* Tecra 700CS/CDT */ if (tosh_id==0xfccc) { if (eax==0xfe00) { /* fan status */ local_irq_save(flags); outb(0xe0, 0xe4); al = inb(0xe5); local_irq_restore(flags); regs->eax = 0x00; regs->ecx = al & 0x01; } if ((eax==0xff00) && (ecx==0x0000)) { /* fan off */ local_irq_save(flags); outb(0xe0, 0xe4); al = inb(0xe5); outw(0xe0 | ((al & 0xfe) << 8), 0xe4); local_irq_restore(flags); regs->eax = 0x00; regs->ecx = 0x00; } if ((eax==0xff00) && (ecx==0x0001)) { /* fan on */ local_irq_save(flags); outb(0xe0, 0xe4); al = inb(0xe5); outw(0xe0 | ((al | 0x01) << 8), 0xe4); local_irq_restore(flags); regs->eax = 0x00; regs->ecx = 0x01; } } return 0; } /* * Put the laptop into System Management Mode */ int tosh_smm(SMMRegisters *regs) { int eax; asm ("# load the values into the registers\n\t" \ "pushl %%eax\n\t" \ "movl 0(%%eax),%%edx\n\t" \ "push %%edx\n\t" \ "movl 4(%%eax),%%ebx\n\t" \ "movl 8(%%eax),%%ecx\n\t" \ "movl 12(%%eax),%%edx\n\t" \ "movl 16(%%eax),%%esi\n\t" \ "movl 20(%%eax),%%edi\n\t" \ "popl %%eax\n\t" \ "# call the System Management mode\n\t" \ "inb $0xb2,%%al\n\t" "# fill out the memory with the values in the registers\n\t" \ "xchgl %%eax,(%%esp)\n\t" "movl %%ebx,4(%%eax)\n\t" \ "movl %%ecx,8(%%eax)\n\t" \ "movl %%edx,12(%%eax)\n\t" \ "movl %%esi,16(%%eax)\n\t" \ 
"movl %%edi,20(%%eax)\n\t" \ "popl %%edx\n\t" \ "movl %%edx,0(%%eax)\n\t" \ "# setup the return value to the carry flag\n\t" \ "lahf\n\t" \ "shrl $8,%%eax\n\t" \ "andl $1,%%eax\n" \ : "=a" (eax) : "a" (regs) : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); return eax; } EXPORT_SYMBOL(tosh_smm); static long tosh_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { SMMRegisters regs; SMMRegisters __user *argp = (SMMRegisters __user *)arg; unsigned short ax,bx; int err; if (!argp) return -EINVAL; if (copy_from_user(&regs, argp, sizeof(SMMRegisters))) return -EFAULT; switch (cmd) { case TOSH_SMM: ax = regs.eax & 0xff00; bx = regs.ebx & 0xffff; /* block HCI calls to read/write memory & PCI devices */ if (((ax==0xff00) || (ax==0xfe00)) && (bx>0x0069)) return -EINVAL; /* do we need to emulate the fan ? */ mutex_lock(&tosh_mutex); if (tosh_fan==1) { if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) { err = tosh_emulate_fan(&regs); mutex_unlock(&tosh_mutex); break; } } err = tosh_smm(&regs); mutex_unlock(&tosh_mutex); break; default: return -EINVAL; } if (copy_to_user(argp, &regs, sizeof(SMMRegisters))) return -EFAULT; return (err==0) ? 0:-EINVAL; } /* * Print the information for /proc/toshiba */ #ifdef CONFIG_PROC_FS static int proc_toshiba_show(struct seq_file *m, void *v) { int key; key = tosh_fn_status(); /* Arguments 0) Linux driver version (this will change if format changes) 1) Machine ID 2) SCI version 3) BIOS version (major, minor) 4) BIOS date (in SCI date format) 5) Fn Key status */ seq_printf(m, "1.1 0x%04x %d.%d %d.%d 0x%04x 0x%02x\n", tosh_id, (tosh_sci & 0xff00)>>8, tosh_sci & 0xff, (tosh_bios & 0xff00)>>8, tosh_bios & 0xff, tosh_date, key); return 0; } #endif /* * Determine which port to use for the Fn key status */ static void tosh_set_fn_port(void) { switch (tosh_id) { case 0xfc02: case 0xfc04: case 0xfc09: case 0xfc0a: case 0xfc10: case 0xfc11: case 0xfc13: case 0xfc15: case 0xfc1a: case 0xfc1b: case 0xfc5a: tosh_fn = 0x62; break; case 0xfc08: case 0xfc17: case 0xfc1d: case 0xfcd1: case 0xfce0: case 0xfce2: tosh_fn = 0x68; break; default: tosh_fn = 0x00; break; } return; } /* * Get the machine identification number of the current model */ static int tosh_get_machine_id(void __iomem *bios) { int id; SMMRegisters regs; unsigned short bx,cx; unsigned long address; id = (0x100*(int) readb(bios+0xfffe))+((int) readb(bios+0xfffa)); /* do we have a SCTTable machine identication number on our hands */ if (id==0xfc2f) { /* start by getting a pointer into the BIOS */ regs.eax = 0xc000; regs.ebx = 0x0000; regs.ecx = 0x0000; tosh_smm(&regs); bx = (unsigned short) (regs.ebx & 0xffff); /* At this point in the Toshiba routines under MS Windows the bx register holds 0xe6f5. However my code is producing a different value! For the time being I will just fudge the value. This has been verified on a Satellite Pro 430CDT, Tecra 750CDT, Tecra 780DVD and Satellite 310CDT. */ #if TOSH_DEBUG pr_debug("toshiba: debugging ID ebx=0x%04x\n", regs.ebx); #endif bx = 0xe6f5; /* now twiddle with our pointer a bit */ address = bx; cx = readw(bios + address); address = 9+bx+cx; cx = readw(bios + address); address = 0xa+cx; cx = readw(bios + address); /* now construct our machine identification number */ id = ((cx & 0xff)<<8)+((cx & 0xff00)>>8); } return id; } /* * Probe for the presence of a Toshiba laptop * * returns and non-zero if unable to detect the presence of a Toshiba * laptop, otherwise zero and determines the Machine ID, BIOS version and * date, and SCI version. 
*/ static int tosh_probe(void) { int i,major,minor,day,year,month,flag; unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 }; SMMRegisters regs; void __iomem *bios = ioremap(0xf0000, 0x10000); if (!bios) return -ENOMEM; /* extra sanity check for the string "TOSHIBA" in the BIOS because some machines that are not Toshiba's pass the next test */ for (i=0;i<7;i++) { if (readb(bios+0xe010+i)!=signature[i]) { pr_err("toshiba: not a supported Toshiba laptop\n"); iounmap(bios); return -ENODEV; } } /* call the Toshiba SCI support check routine */ regs.eax = 0xf0f0; regs.ebx = 0x0000; regs.ecx = 0x0000; flag = tosh_smm(&regs); /* if this is not a Toshiba laptop carry flag is set and ah=0x86 */ if ((flag==1) || ((regs.eax & 0xff00)==0x8600)) { pr_err("toshiba: not a supported Toshiba laptop\n"); iounmap(bios); return -ENODEV; } /* if we get this far then we are running on a Toshiba (probably)! */ tosh_sci = regs.edx & 0xffff; /* next get the machine ID of the current laptop */ tosh_id = tosh_get_machine_id(bios); /* get the BIOS version */ major = readb(bios+0xe009)-'0'; minor = ((readb(bios+0xe00b)-'0')*10)+(readb(bios+0xe00c)-'0'); tosh_bios = (major*0x100)+minor; /* get the BIOS date */ day = ((readb(bios+0xfff5)-'0')*10)+(readb(bios+0xfff6)-'0'); month = ((readb(bios+0xfff8)-'0')*10)+(readb(bios+0xfff9)-'0'); year = ((readb(bios+0xfffb)-'0')*10)+(readb(bios+0xfffc)-'0'); tosh_date = (((year-90) & 0x1f)<<10) | ((month & 0xf)<<6) | ((day & 0x1f)<<1); /* in theory we should check the ports we are going to use for the fn key detection (and the fan on the Portage 610/Tecra700), and then request them to stop other drivers using them. However as the keyboard driver grabs 0x60-0x6f and the pic driver grabs 0xa0-0xbf we can't. We just have to live dangerously and use the ports anyway, oh boy! */ /* do we need to emulate the fan? */ if ((tosh_id==0xfccb) || (tosh_id==0xfccc)) tosh_fan = 1; iounmap(bios); return 0; } static int __init toshiba_init(void) { int retval; /* are we running on a Toshiba laptop */ if (tosh_probe()) return -ENODEV; pr_info("Toshiba System Management Mode driver v" TOSH_VERSION "\n"); /* set the port to use for Fn status if not specified as a parameter */ if (tosh_fn==0x00) tosh_set_fn_port(); /* register the device file */ retval = misc_register(&tosh_device); if (retval < 0) return retval; #ifdef CONFIG_PROC_FS { struct proc_dir_entry *pde; pde = proc_create_single("toshiba", 0, NULL, proc_toshiba_show); if (!pde) { misc_deregister(&tosh_device); return -ENOMEM; } } #endif return 0; } static void __exit toshiba_exit(void) { remove_proc_entry("toshiba", NULL); misc_deregister(&tosh_device); } module_init(toshiba_init); module_exit(toshiba_exit);
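/*
 * Illustrative userspace sketch (not part of the driver): issuing an HCI
 * "get" call through the TOSH_SMM ioctl handled above. The 0xfe00 opcode
 * and the 0x0004 fan function code are taken from tosh_emulate_fan();
 * treat them as assumptions on other models, and remember the header's
 * warning that any SCI/HCI call can fail at any time.
 */
#include <fcntl.h>
#include <linux/toshiba.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int example_fan_status(void)
{
	SMMRegisters regs;
	int fd = open("/dev/toshiba", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&regs, 0, sizeof(regs));
	regs.eax = 0xfe00;	/* HCI get */
	regs.ebx = 0x0004;	/* fan function (assumed code) */
	if (ioctl(fd, TOSH_SMM, &regs) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return (int)(regs.ecx & 0x01);	/* 1 == fan on, per the emulation above */
}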
linux-master
drivers/char/toshiba.c
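/*
 * A hedged sketch of parsing the single line that proc_toshiba_show()
 * above emits via the "1.1 0x%04x %d.%d %d.%d 0x%04x 0x%02x" format:
 * driver version, machine ID, SCI version, BIOS version, BIOS date and
 * Fn key status. Purely illustrative userspace, not part of the driver.
 */
#include <stdio.h>

static int example_read_proc_toshiba(void)
{
	unsigned int id, date, key;
	int sci_major, sci_minor, bios_major, bios_minor;
	FILE *f = fopen("/proc/toshiba", "r");

	if (!f)
		return -1;
	if (fscanf(f, "1.1 0x%x %d.%d %d.%d 0x%x 0x%x", &id, &sci_major,
		   &sci_minor, &bios_major, &bios_minor, &date, &key) != 7) {
		fclose(f);
		return -1;
	}
	fclose(f);
	printf("machine id 0x%04x, Fn key status 0x%02x\n", id, key);
	return 0;
}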
// SPDX-License-Identifier: GPL-2.0-only /* linux/drivers/char/pc8736x_gpio.c National Semiconductor PC8736x GPIO driver. Allows a user space process to play with the GPIO pins. Copyright (c) 2005,2006 Jim Cromie <[email protected]> adapted from linux/drivers/char/scx200_gpio.c Copyright (c) 2001,2002 Christer Weinigel <[email protected]>, */ #include <linux/fs.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/cdev.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/mutex.h> #include <linux/nsc_gpio.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #define DEVNAME "pc8736x_gpio" MODULE_AUTHOR("Jim Cromie <[email protected]>"); MODULE_DESCRIPTION("NatSemi/Winbond PC-8736x GPIO Pin Driver"); MODULE_LICENSE("GPL"); static int major; /* default to dynamic major */ module_param(major, int, 0); MODULE_PARM_DESC(major, "Major device number"); static DEFINE_MUTEX(pc8736x_gpio_config_lock); static unsigned pc8736x_gpio_base; static u8 pc8736x_gpio_shadow[4]; #define SIO_BASE1 0x2E /* 1st command-reg to check */ #define SIO_BASE2 0x4E /* alt command-reg to check */ #define SIO_SID 0x20 /* SuperI/O ID Register */ #define SIO_SID_PC87365 0xe5 /* Expected value in ID Register for PC87365 */ #define SIO_SID_PC87366 0xe9 /* Expected value in ID Register for PC87366 */ #define SIO_CF1 0x21 /* chip config, bit0 is chip enable */ #define PC8736X_GPIO_RANGE 16 /* ioaddr range */ #define PC8736X_GPIO_CT 32 /* minors matching 4 8 bit ports */ #define SIO_UNIT_SEL 0x7 /* unit select reg */ #define SIO_UNIT_ACT 0x30 /* unit enable */ #define SIO_GPIO_UNIT 0x7 /* unit number of GPIO */ #define SIO_VLM_UNIT 0x0D #define SIO_TMS_UNIT 0x0E /* config-space addrs to read/write each unit's runtime addr */ #define SIO_BASE_HADDR 0x60 #define SIO_BASE_LADDR 0x61 /* GPIO config-space pin-control addresses */ #define SIO_GPIO_PIN_SELECT 0xF0 #define SIO_GPIO_PIN_CONFIG 0xF1 #define SIO_GPIO_PIN_EVENT 0xF2 static unsigned char superio_cmd = 0; static unsigned char selected_device = 0xFF; /* bogus start val */ /* GPIO port runtime access, functionality */ static int port_offset[] = { 0, 4, 8, 10 }; /* non-uniform offsets ! 
*/ /* static int event_capable[] = { 1, 1, 0, 0 }; ports 2,3 are hobbled */ #define PORT_OUT 0 #define PORT_IN 1 #define PORT_EVT_EN 2 #define PORT_EVT_STST 3 static struct platform_device *pdev; /* use in dev_*() */ static inline void superio_outb(int addr, int val) { outb_p(addr, superio_cmd); outb_p(val, superio_cmd + 1); } static inline int superio_inb(int addr) { outb_p(addr, superio_cmd); return inb_p(superio_cmd + 1); } static int pc8736x_superio_present(void) { int id; /* try the 2 possible values, read a hardware reg to verify */ superio_cmd = SIO_BASE1; id = superio_inb(SIO_SID); if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366) return superio_cmd; superio_cmd = SIO_BASE2; id = superio_inb(SIO_SID); if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366) return superio_cmd; return 0; } static void device_select(unsigned devldn) { superio_outb(SIO_UNIT_SEL, devldn); selected_device = devldn; } static void select_pin(unsigned iminor) { /* select GPIO port/pin from device minor number */ device_select(SIO_GPIO_UNIT); superio_outb(SIO_GPIO_PIN_SELECT, ((iminor << 1) & 0xF0) | (iminor & 0x7)); } static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits, u32 func_slct) { u32 config, new_config; mutex_lock(&pc8736x_gpio_config_lock); device_select(SIO_GPIO_UNIT); select_pin(index); /* read current config value */ config = superio_inb(func_slct); /* set new config */ new_config = (config & mask) | bits; superio_outb(func_slct, new_config); mutex_unlock(&pc8736x_gpio_config_lock); return config; } static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits) { return pc8736x_gpio_configure_fn(index, mask, bits, SIO_GPIO_PIN_CONFIG); } static int pc8736x_gpio_get(unsigned minor) { int port, bit, val; port = minor >> 3; bit = minor & 7; val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN); val >>= bit; val &= 1; dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n", minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit, val); return val; } static void pc8736x_gpio_set(unsigned minor, int val) { int port, bit, curval; minor &= 0x1f; port = minor >> 3; bit = minor & 7; curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT); dev_dbg(&pdev->dev, "addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n", pc8736x_gpio_base + port_offset[port] + PORT_OUT, curval, bit, (curval & ~(1 << bit)), val, (val << bit)); val = (curval & ~(1 << bit)) | (val << bit); dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)" " %2x -> %2x\n", minor, port, bit, curval, val); outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT); curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT); val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN); dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val); pc8736x_gpio_shadow[port] = val; } static int pc8736x_gpio_current(unsigned minor) { int port, bit; minor &= 0x1f; port = minor >> 3; bit = minor & 7; return ((pc8736x_gpio_shadow[port] >> bit) & 0x01); } static void pc8736x_gpio_change(unsigned index) { pc8736x_gpio_set(index, !pc8736x_gpio_current(index)); } static struct nsc_gpio_ops pc8736x_gpio_ops = { .owner = THIS_MODULE, .gpio_config = pc8736x_gpio_configure, .gpio_dump = nsc_gpio_dump, .gpio_get = pc8736x_gpio_get, .gpio_set = pc8736x_gpio_set, .gpio_change = pc8736x_gpio_change, .gpio_current = pc8736x_gpio_current }; static int pc8736x_gpio_open(struct inode *inode, struct file *file) { unsigned m = iminor(inode); file->private_data = &pc8736x_gpio_ops; dev_dbg(&pdev->dev, 
"open %d\n", m); if (m >= PC8736X_GPIO_CT) return -EINVAL; return nonseekable_open(inode, file); } static const struct file_operations pc8736x_gpio_fileops = { .owner = THIS_MODULE, .open = pc8736x_gpio_open, .write = nsc_gpio_write, .read = nsc_gpio_read, .llseek = no_llseek, }; static void __init pc8736x_init_shadow(void) { int port; /* read the current values driven on the GPIO signals */ for (port = 0; port < 4; ++port) pc8736x_gpio_shadow[port] = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT); } static struct cdev pc8736x_gpio_cdev; static int __init pc8736x_gpio_init(void) { int rc; dev_t devid; pdev = platform_device_alloc(DEVNAME, 0); if (!pdev) return -ENOMEM; rc = platform_device_add(pdev); if (rc) { rc = -ENODEV; goto undo_platform_dev_alloc; } dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n"); if (!pc8736x_superio_present()) { rc = -ENODEV; dev_err(&pdev->dev, "no device found\n"); goto undo_platform_dev_add; } pc8736x_gpio_ops.dev = &pdev->dev; /* Verify that chip and it's GPIO unit are both enabled. My BIOS does this, so I take minimum action here */ rc = superio_inb(SIO_CF1); if (!(rc & 0x01)) { rc = -ENODEV; dev_err(&pdev->dev, "device not enabled\n"); goto undo_platform_dev_add; } device_select(SIO_GPIO_UNIT); if (!superio_inb(SIO_UNIT_ACT)) { rc = -ENODEV; dev_err(&pdev->dev, "GPIO unit not enabled\n"); goto undo_platform_dev_add; } /* read the GPIO unit base addr that chip responds to */ pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8 | superio_inb(SIO_BASE_LADDR)); if (!request_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE, DEVNAME)) { rc = -ENODEV; dev_err(&pdev->dev, "GPIO ioport %x busy\n", pc8736x_gpio_base); goto undo_platform_dev_add; } dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base); if (major) { devid = MKDEV(major, 0); rc = register_chrdev_region(devid, PC8736X_GPIO_CT, DEVNAME); } else { rc = alloc_chrdev_region(&devid, 0, PC8736X_GPIO_CT, DEVNAME); major = MAJOR(devid); } if (rc < 0) { dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc); goto undo_request_region; } if (!major) { major = rc; dev_dbg(&pdev->dev, "got dynamic major %d\n", major); } pc8736x_init_shadow(); /* ignore minor errs, and succeed */ cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fileops); cdev_add(&pc8736x_gpio_cdev, devid, PC8736X_GPIO_CT); return 0; undo_request_region: release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE); undo_platform_dev_add: platform_device_del(pdev); undo_platform_dev_alloc: platform_device_put(pdev); return rc; } static void __exit pc8736x_gpio_cleanup(void) { dev_dbg(&pdev->dev, "cleanup\n"); cdev_del(&pc8736x_gpio_cdev); unregister_chrdev_region(MKDEV(major,0), PC8736X_GPIO_CT); release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE); platform_device_unregister(pdev); } module_init(pc8736x_gpio_init); module_exit(pc8736x_gpio_cleanup);
linux-master
drivers/char/pc8736x_gpio.c
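/*
 * A worked note on the addressing used by pc8736x_gpio above: the minor
 * number decodes as port = minor >> 3 and bit = minor & 7, and each
 * port's registers sit at a non-uniform offset from the GPIO base
 * (port_offset[]). This mirrors the driver's arithmetic for illustration
 * only.
 */
#include <stdio.h>

static const int example_port_offset[] = { 0, 4, 8, 10 };

static void example_decode_minor(unsigned int minor, unsigned int gpio_base)
{
	unsigned int port = (minor & 0x1f) >> 3;
	unsigned int bit = minor & 7;

	printf("minor %u -> port %u, bit %u, data-out register at 0x%x\n",
	       minor, port, bit, gpio_base + example_port_offset[port]);
}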
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation * Copyright (C) 2009, 2010, 2011 Red Hat, Inc. * Copyright (C) 2009, 2010, 2011 Amit Shah <[email protected]> */ #include <linux/cdev.h> #include <linux/debugfs.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/err.h> #include <linux/freezer.h> #include <linux/fs.h> #include <linux/splice.h> #include <linux/pagemap.h> #include <linux/idr.h> #include <linux/init.h> #include <linux/list.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/virtio.h> #include <linux/virtio_console.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include "../tty/hvc/hvc_console.h" #define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC) #define VIRTCONS_MAX_PORTS 0x8000 /* * This is a global struct for storing common data for all the devices * this driver handles. * * Mainly, it has a linked list for all the consoles in one place so * that callbacks from hvc for get_chars(), put_chars() work properly * across multiple devices and multiple ports per device. */ struct ports_driver_data { /* Used for exporting per-port information to debugfs */ struct dentry *debugfs_dir; /* List of all the devices we're handling */ struct list_head portdevs; /* All the console devices handled by this driver */ struct list_head consoles; }; static struct ports_driver_data pdrvdata; static const struct class port_class = { .name = "virtio-ports", }; static DEFINE_SPINLOCK(pdrvdata_lock); static DECLARE_COMPLETION(early_console_added); /* This struct holds information that's relevant only for console ports */ struct console { /* We'll place all consoles in a list in the pdrvdata struct */ struct list_head list; /* The hvc device associated with this console port */ struct hvc_struct *hvc; /* The size of the console */ struct winsize ws; /* * This number identifies the number that we used to register * with hvc in hvc_instantiate() and hvc_alloc(); this is the * number passed on by the hvc callbacks to us to * differentiate between the other console ports handled by * this driver */ u32 vtermno; }; static DEFINE_IDA(vtermno_ida); struct port_buffer { char *buf; /* size of the buffer in *buf above */ size_t size; /* used length of the buffer */ size_t len; /* offset in the buf from which to consume data */ size_t offset; /* DMA address of buffer */ dma_addr_t dma; /* Device we got DMA memory from */ struct device *dev; /* List of pending dma buffers to free */ struct list_head list; /* If sgpages == 0 then buf is used */ unsigned int sgpages; /* sg is used if sgpages > 0. sg must be the last field in this struct */ struct scatterlist sg[]; }; /* * This is a per-device struct that stores data common to all the * ports for that device (vdev->priv). */ struct ports_device { /* Next portdev in the list, head is in the pdrvdata struct */ struct list_head list; /* * Workqueue handlers where we process deferred work after * notification */ struct work_struct control_work; struct work_struct config_work; struct list_head ports; /* To protect the list of ports */ spinlock_t ports_lock; /* To protect the vq operations for the control channel */ spinlock_t c_ivq_lock; spinlock_t c_ovq_lock; /* max.
number of ports this device can hold */ u32 max_nr_ports; /* The virtio device we're associated with */ struct virtio_device *vdev; /* * A couple of virtqueues for the control channel: one for * guest->host transfers, one for host->guest transfers */ struct virtqueue *c_ivq, *c_ovq; /* * A control packet buffer for guest->host requests, protected * by c_ovq_lock. */ struct virtio_console_control cpkt; /* Array of per-port IO virtqueues */ struct virtqueue **in_vqs, **out_vqs; /* Major number for this device. Ports will be created as minors. */ int chr_major; }; struct port_stats { unsigned long bytes_sent, bytes_received, bytes_discarded; }; /* This struct holds the per-port data */ struct port { /* Next port in the list, head is in the ports_device */ struct list_head list; /* Pointer to the parent virtio_console device */ struct ports_device *portdev; /* The current buffer from which data has to be fed to readers */ struct port_buffer *inbuf; /* * To protect the operations on the in_vq associated with this * port. Has to be a spinlock because it can be called from * interrupt context (get_char()). */ spinlock_t inbuf_lock; /* Protect the operations on the out_vq. */ spinlock_t outvq_lock; /* The IO vqs for this port */ struct virtqueue *in_vq, *out_vq; /* File in the debugfs directory that exposes this port's information */ struct dentry *debugfs_file; /* * Keep count of the bytes sent, received and discarded for * this port for accounting and debugging purposes. These * counts are not reset across port open / close events. */ struct port_stats stats; /* * The entries in this struct will be valid if this port is * hooked up to an hvc console */ struct console cons; /* Each port associates with a separate char device */ struct cdev *cdev; struct device *dev; /* Reference-counting to handle port hot-unplugs and file operations */ struct kref kref; /* A waitqueue for poll() or blocking read operations */ wait_queue_head_t waitqueue; /* The 'name' of the port that we expose via sysfs properties */ char *name; /* We can notify apps of host connect / disconnect events via SIGIO */ struct fasync_struct *async_queue; /* The 'id' to identify the port with the Host */ u32 id; bool outvq_full; /* Is the host device open */ bool host_connected; /* We should allow only one process to open a port */ bool guest_connected; }; /* This is the very early arch-specified put chars function. 
*/ static int (*early_put_chars)(u32, const char *, int); static struct port *find_port_by_vtermno(u32 vtermno) { struct port *port; struct console *cons; unsigned long flags; spin_lock_irqsave(&pdrvdata_lock, flags); list_for_each_entry(cons, &pdrvdata.consoles, list) { if (cons->vtermno == vtermno) { port = container_of(cons, struct port, cons); goto out; } } port = NULL; out: spin_unlock_irqrestore(&pdrvdata_lock, flags); return port; } static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, dev_t dev) { struct port *port; unsigned long flags; spin_lock_irqsave(&portdev->ports_lock, flags); list_for_each_entry(port, &portdev->ports, list) { if (port->cdev->dev == dev) { kref_get(&port->kref); goto out; } } port = NULL; out: spin_unlock_irqrestore(&portdev->ports_lock, flags); return port; } static struct port *find_port_by_devt(dev_t dev) { struct ports_device *portdev; struct port *port; unsigned long flags; spin_lock_irqsave(&pdrvdata_lock, flags); list_for_each_entry(portdev, &pdrvdata.portdevs, list) { port = find_port_by_devt_in_portdev(portdev, dev); if (port) goto out; } port = NULL; out: spin_unlock_irqrestore(&pdrvdata_lock, flags); return port; } static struct port *find_port_by_id(struct ports_device *portdev, u32 id) { struct port *port; unsigned long flags; spin_lock_irqsave(&portdev->ports_lock, flags); list_for_each_entry(port, &portdev->ports, list) if (port->id == id) goto out; port = NULL; out: spin_unlock_irqrestore(&portdev->ports_lock, flags); return port; } static struct port *find_port_by_vq(struct ports_device *portdev, struct virtqueue *vq) { struct port *port; unsigned long flags; spin_lock_irqsave(&portdev->ports_lock, flags); list_for_each_entry(port, &portdev->ports, list) if (port->in_vq == vq || port->out_vq == vq) goto out; port = NULL; out: spin_unlock_irqrestore(&portdev->ports_lock, flags); return port; } static bool is_console_port(struct port *port) { if (port->cons.hvc) return true; return false; } static bool is_rproc_serial(const struct virtio_device *vdev) { return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL; } static inline bool use_multiport(struct ports_device *portdev) { /* * This condition can be true when put_chars is called from * early_init */ if (!portdev->vdev) return false; return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT); } static DEFINE_SPINLOCK(dma_bufs_lock); static LIST_HEAD(pending_free_dma_bufs); static void free_buf(struct port_buffer *buf, bool can_sleep) { unsigned int i; for (i = 0; i < buf->sgpages; i++) { struct page *page = sg_page(&buf->sg[i]); if (!page) break; put_page(page); } if (!buf->dev) { kfree(buf->buf); } else if (is_rproc_enabled) { unsigned long flags; /* dma_free_coherent requires interrupts to be enabled. 
*/ if (!can_sleep) { /* queue up dma-buffers to be freed later */ spin_lock_irqsave(&dma_bufs_lock, flags); list_add_tail(&buf->list, &pending_free_dma_bufs); spin_unlock_irqrestore(&dma_bufs_lock, flags); return; } dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma); /* Release device refcnt and allow it to be freed */ put_device(buf->dev); } kfree(buf); } static void reclaim_dma_bufs(void) { unsigned long flags; struct port_buffer *buf, *tmp; LIST_HEAD(tmp_list); if (list_empty(&pending_free_dma_bufs)) return; /* Create a copy of the pending_free_dma_bufs while holding the lock */ spin_lock_irqsave(&dma_bufs_lock, flags); list_cut_position(&tmp_list, &pending_free_dma_bufs, pending_free_dma_bufs.prev); spin_unlock_irqrestore(&dma_bufs_lock, flags); /* Release the dma buffers, without irqs enabled */ list_for_each_entry_safe(buf, tmp, &tmp_list, list) { list_del(&buf->list); free_buf(buf, true); } } static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size, int pages) { struct port_buffer *buf; reclaim_dma_bufs(); /* * Allocate buffer and the sg list. The sg list array is allocated * directly after the port_buffer struct. */ buf = kmalloc(struct_size(buf, sg, pages), GFP_KERNEL); if (!buf) goto fail; buf->sgpages = pages; if (pages > 0) { buf->dev = NULL; buf->buf = NULL; return buf; } if (is_rproc_serial(vdev)) { /* * Allocate DMA memory from ancestor. When a virtio * device is created by remoteproc, the DMA memory is * associated with the parent device: * virtioY => remoteprocX#vdevYbuffer. */ buf->dev = vdev->dev.parent; if (!buf->dev) goto free_buf; /* Increase device refcnt to avoid freeing it */ get_device(buf->dev); buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma, GFP_KERNEL); } else { buf->dev = NULL; buf->buf = kmalloc(buf_size, GFP_KERNEL); } if (!buf->buf) goto free_buf; buf->len = 0; buf->offset = 0; buf->size = buf_size; return buf; free_buf: kfree(buf); fail: return NULL; } /* Callers should take appropriate locks */ static struct port_buffer *get_inbuf(struct port *port) { struct port_buffer *buf; unsigned int len; if (port->inbuf) return port->inbuf; buf = virtqueue_get_buf(port->in_vq, &len); if (buf) { buf->len = min_t(size_t, len, buf->size); buf->offset = 0; port->stats.bytes_received += len; } return buf; } /* * Create a scatter-gather list representing our input buffer and put * it in the queue. * * Callers should take appropriate locks. */ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) { struct scatterlist sg[1]; int ret; sg_init_one(sg, buf->buf, buf->size); ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC); virtqueue_kick(vq); if (!ret) ret = vq->num_free; return ret; } /* Discard any unread data this port has. Callers must hold appropriate locks. */ static void discard_port_data(struct port *port) { struct port_buffer *buf; unsigned int err; if (!port->portdev) { /* Device has been unplugged. vqs are already gone.
*/ return; } buf = get_inbuf(port); err = 0; while (buf) { port->stats.bytes_discarded += buf->len - buf->offset; if (add_inbuf(port->in_vq, buf) < 0) { err++; free_buf(buf, false); } port->inbuf = NULL; buf = get_inbuf(port); } if (err) dev_warn(port->dev, "Errors adding %d buffers back to vq\n", err); } static bool port_has_data(struct port *port) { unsigned long flags; bool ret; ret = false; spin_lock_irqsave(&port->inbuf_lock, flags); port->inbuf = get_inbuf(port); if (port->inbuf) ret = true; spin_unlock_irqrestore(&port->inbuf_lock, flags); return ret; } static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, unsigned int event, unsigned int value) { struct scatterlist sg[1]; struct virtqueue *vq; unsigned int len; if (!use_multiport(portdev)) return 0; vq = portdev->c_ovq; spin_lock(&portdev->c_ovq_lock); portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id); portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event); portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value); sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control)); if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) { virtqueue_kick(vq); while (!virtqueue_get_buf(vq, &len) && !virtqueue_is_broken(vq)) cpu_relax(); } spin_unlock(&portdev->c_ovq_lock); return 0; } static ssize_t send_control_msg(struct port *port, unsigned int event, unsigned int value) { /* Did the port get unplugged before userspace closed it? */ if (port->portdev) return __send_control_msg(port->portdev, port->id, event, value); return 0; } /* Callers must take the port->outvq_lock */ static void reclaim_consumed_buffers(struct port *port) { struct port_buffer *buf; unsigned int len; if (!port->portdev) { /* Device has been unplugged. vqs are already gone. */ return; } while ((buf = virtqueue_get_buf(port->out_vq, &len))) { free_buf(buf, false); port->outvq_full = false; } } static ssize_t __send_to_port(struct port *port, struct scatterlist *sg, int nents, size_t in_count, void *data, bool nonblock) { struct virtqueue *out_vq; int err; unsigned long flags; unsigned int len; out_vq = port->out_vq; spin_lock_irqsave(&port->outvq_lock, flags); reclaim_consumed_buffers(port); err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC); /* Tell Host to go! */ virtqueue_kick(out_vq); if (err) { in_count = 0; goto done; } if (out_vq->num_free == 0) port->outvq_full = true; if (nonblock) goto done; /* * Wait till the host acknowledges it pushed out the data we * sent. This is done for data from the hvc_console; the tty * operations are performed with spinlocks held so we can't * sleep here. An alternative would be to copy the data to a * buffer and relax the spinning requirement. The downside is * we need to kmalloc a GFP_ATOMIC buffer each time the * console driver writes something out. */ while (!virtqueue_get_buf(out_vq, &len) && !virtqueue_is_broken(out_vq)) cpu_relax(); done: spin_unlock_irqrestore(&port->outvq_lock, flags); port->stats.bytes_sent += in_count; /* * We're expected to return the amount of data we wrote -- all * of it */ return in_count; } /* * Give out the data that's requested from the buffer that we have * queued up. 
*/ static ssize_t fill_readbuf(struct port *port, char __user *out_buf, size_t out_count, bool to_user) { struct port_buffer *buf; unsigned long flags; if (!out_count || !port_has_data(port)) return 0; buf = port->inbuf; out_count = min(out_count, buf->len - buf->offset); if (to_user) { ssize_t ret; ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); if (ret) return -EFAULT; } else { memcpy((__force char *)out_buf, buf->buf + buf->offset, out_count); } buf->offset += out_count; if (buf->offset == buf->len) { /* * We're done using all the data in this buffer. * Re-queue so that the Host can send us more data. */ spin_lock_irqsave(&port->inbuf_lock, flags); port->inbuf = NULL; if (add_inbuf(port->in_vq, buf) < 0) dev_warn(port->dev, "failed add_buf\n"); spin_unlock_irqrestore(&port->inbuf_lock, flags); } /* Return the number of bytes actually copied */ return out_count; } /* The condition that must be true for polling to end */ static bool will_read_block(struct port *port) { if (!port->guest_connected) { /* Port got hot-unplugged. Let's exit. */ return false; } return !port_has_data(port) && port->host_connected; } static bool will_write_block(struct port *port) { bool ret; if (!port->guest_connected) { /* Port got hot-unplugged. Let's exit. */ return false; } if (!port->host_connected) return true; spin_lock_irq(&port->outvq_lock); /* * Check if the Host has consumed any buffers since we last * sent data (this is only applicable for nonblocking ports). */ reclaim_consumed_buffers(port); ret = port->outvq_full; spin_unlock_irq(&port->outvq_lock); return ret; } static ssize_t port_fops_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp) { struct port *port; ssize_t ret; port = filp->private_data; /* Port is hot-unplugged. */ if (!port->guest_connected) return -ENODEV; if (!port_has_data(port)) { /* * If nothing's connected on the host just return 0 in * case of list_empty; this tells the userspace app * that there's no connection */ if (!port->host_connected) return 0; if (filp->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_freezable(port->waitqueue, !will_read_block(port)); if (ret < 0) return ret; } /* Port got hot-unplugged while we were waiting above. */ if (!port->guest_connected) return -ENODEV; /* * We could've received a disconnection message while we were * waiting for more data. * * This check is not clubbed in the if() statement above as we * might receive some data as well as the host could get * disconnected after we got woken up from our wait. So we * really want to give off whatever data we have and only then * check for host_connected. */ if (!port_has_data(port) && !port->host_connected) return 0; return fill_readbuf(port, ubuf, count, true); } static int wait_port_writable(struct port *port, bool nonblock) { int ret; if (will_write_block(port)) { if (nonblock) return -EAGAIN; ret = wait_event_freezable(port->waitqueue, !will_write_block(port)); if (ret < 0) return ret; } /* Port got hot-unplugged. 
*/ if (!port->guest_connected) return -ENODEV; return 0; } static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *offp) { struct port *port; struct port_buffer *buf; ssize_t ret; bool nonblock; struct scatterlist sg[1]; /* Userspace could be out to fool us */ if (!count) return 0; port = filp->private_data; nonblock = filp->f_flags & O_NONBLOCK; ret = wait_port_writable(port, nonblock); if (ret < 0) return ret; count = min((size_t)(32 * 1024), count); buf = alloc_buf(port->portdev->vdev, count, 0); if (!buf) return -ENOMEM; ret = copy_from_user(buf->buf, ubuf, count); if (ret) { ret = -EFAULT; goto free_buf; } /* * We now ask __send_to_port() to not spin for generic ports -- we * can re-use the same code path that non-blocking file * descriptors take for blocking file descriptors since the * wait is already done and we're certain the write will go * through to the host. */ nonblock = true; sg_init_one(sg, buf->buf, count); ret = __send_to_port(port, sg, 1, count, buf, nonblock); if (nonblock && ret > 0) goto out; free_buf: free_buf(buf, true); out: return ret; } struct sg_list { unsigned int n; unsigned int size; size_t len; struct scatterlist *sg; }; static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { struct sg_list *sgl = sd->u.data; unsigned int offset, len; if (sgl->n == sgl->size) return 0; /* Try to steal this page */ if (pipe_buf_try_steal(pipe, buf)) { /* Get reference and unlock page for moving */ get_page(buf->page); unlock_page(buf->page); len = min(buf->len, sd->len); sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset); } else { /* Fall back to copying a page */ struct page *page = alloc_page(GFP_KERNEL); char *src; if (!page) return -ENOMEM; offset = sd->pos & ~PAGE_MASK; len = sd->len; if (len + offset > PAGE_SIZE) len = PAGE_SIZE - offset; src = kmap_atomic(buf->page); memcpy(page_address(page) + offset, src + buf->offset, len); kunmap_atomic(src); sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); } sgl->n++; sgl->len += len; return len; } /* Faster zero-copy write by splicing */ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, struct file *filp, loff_t *ppos, size_t len, unsigned int flags) { struct port *port = filp->private_data; struct sg_list sgl; ssize_t ret; struct port_buffer *buf; struct splice_desc sd = { .total_len = len, .flags = flags, .pos = *ppos, .u.data = &sgl, }; unsigned int occupancy; /* * Rproc_serial does not yet support splice. To support splice * pipe_to_sg() must allocate dma-buffers and copy content from * regular pages to dma pages. And alloc_buf and free_buf must * support allocating and freeing such a list of dma-buffers.
*/ if (is_rproc_serial(port->out_vq->vdev)) return -EINVAL; pipe_lock(pipe); ret = 0; if (pipe_empty(pipe->head, pipe->tail)) goto error_out; ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); if (ret < 0) goto error_out; occupancy = pipe_occupancy(pipe->head, pipe->tail); buf = alloc_buf(port->portdev->vdev, 0, occupancy); if (!buf) { ret = -ENOMEM; goto error_out; } sgl.n = 0; sgl.len = 0; sgl.size = occupancy; sgl.sg = buf->sg; sg_init_table(sgl.sg, sgl.size); ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); pipe_unlock(pipe); if (likely(ret > 0)) ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); if (unlikely(ret <= 0)) free_buf(buf, true); return ret; error_out: pipe_unlock(pipe); return ret; } static __poll_t port_fops_poll(struct file *filp, poll_table *wait) { struct port *port; __poll_t ret; port = filp->private_data; poll_wait(filp, &port->waitqueue, wait); if (!port->guest_connected) { /* Port got unplugged */ return EPOLLHUP; } ret = 0; if (!will_read_block(port)) ret |= EPOLLIN | EPOLLRDNORM; if (!will_write_block(port)) ret |= EPOLLOUT; if (!port->host_connected) ret |= EPOLLHUP; return ret; } static void remove_port(struct kref *kref); static int port_fops_release(struct inode *inode, struct file *filp) { struct port *port; port = filp->private_data; /* Notify host of port being closed */ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); spin_lock_irq(&port->inbuf_lock); port->guest_connected = false; discard_port_data(port); spin_unlock_irq(&port->inbuf_lock); spin_lock_irq(&port->outvq_lock); reclaim_consumed_buffers(port); spin_unlock_irq(&port->outvq_lock); reclaim_dma_bufs(); /* * Locks aren't necessary here as a port can't be opened after * unplug, and if a port isn't unplugged, a kref would already * exist for the port. Plus, taking ports_lock here would * create a dependency on other locks taken by functions * inside remove_port if we're the last holder of the port, * creating many problems. */ kref_put(&port->kref, remove_port); return 0; } static int port_fops_open(struct inode *inode, struct file *filp) { struct cdev *cdev = inode->i_cdev; struct port *port; int ret; /* We get the port with a kref here */ port = find_port_by_devt(cdev->dev); if (!port) { /* Port was unplugged before we could proceed */ return -ENXIO; } filp->private_data = port; /* * Don't allow opening of console port devices -- that's done * via /dev/hvc */ if (is_console_port(port)) { ret = -ENXIO; goto out; } /* Allow only one process to open a particular port at a time */ spin_lock_irq(&port->inbuf_lock); if (port->guest_connected) { spin_unlock_irq(&port->inbuf_lock); ret = -EBUSY; goto out; } port->guest_connected = true; spin_unlock_irq(&port->inbuf_lock); spin_lock_irq(&port->outvq_lock); /* * There might be a chance that we missed reclaiming a few * buffers in the window of the port getting previously closed * and opening now. */ reclaim_consumed_buffers(port); spin_unlock_irq(&port->outvq_lock); nonseekable_open(inode, filp); /* Notify host of port being opened */ send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); return 0; out: kref_put(&port->kref, remove_port); return ret; } static int port_fops_fasync(int fd, struct file *filp, int mode) { struct port *port; port = filp->private_data; return fasync_helper(fd, filp, mode, &port->async_queue); } /* * The file operations that we support: programs in the guest can open * a console device, read from it, write to it, poll for data and * close it. 
The devices are at * /dev/vport<device number>p<port number> */ static const struct file_operations port_fops = { .owner = THIS_MODULE, .open = port_fops_open, .read = port_fops_read, .write = port_fops_write, .splice_write = port_fops_splice_write, .poll = port_fops_poll, .release = port_fops_release, .fasync = port_fops_fasync, .llseek = no_llseek, }; /* * The put_chars() callback is pretty straightforward. * * We turn the characters into a scatter-gather list, add it to the * output queue and then kick the Host. Then we sit here waiting for * it to finish: inefficient in theory, but in practice * implementations will do it immediately. */ static int put_chars(u32 vtermno, const char *buf, int count) { struct port *port; struct scatterlist sg[1]; void *data; int ret; if (unlikely(early_put_chars)) return early_put_chars(vtermno, buf, count); port = find_port_by_vtermno(vtermno); if (!port) return -EPIPE; data = kmemdup(buf, count, GFP_ATOMIC); if (!data) return -ENOMEM; sg_init_one(sg, data, count); ret = __send_to_port(port, sg, 1, count, data, false); kfree(data); return ret; } /* * get_chars() is the callback from the hvc_console infrastructure * when an interrupt is received. * * We call out to fill_readbuf that gets us the required data from the * buffers that are queued up. */ static int get_chars(u32 vtermno, char *buf, int count) { struct port *port; /* If we've not set up the port yet, we have no input to give. */ if (unlikely(early_put_chars)) return 0; port = find_port_by_vtermno(vtermno); if (!port) return -EPIPE; /* If we don't have an input queue yet, we can't get input. */ BUG_ON(!port->in_vq); return fill_readbuf(port, (__force char __user *)buf, count, false); } static void resize_console(struct port *port) { struct virtio_device *vdev; /* The port could have been hot-unplugged */ if (!port || !is_console_port(port)) return; vdev = port->portdev->vdev; /* Don't test F_SIZE at all if we're rproc: not a valid feature! */ if (!is_rproc_serial(vdev) && virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) hvc_resize(port->cons.hvc, port->cons.ws); } /* We set the configuration at this point, since we now have a tty */ static int notifier_add_vio(struct hvc_struct *hp, int data) { struct port *port; port = find_port_by_vtermno(hp->vtermno); if (!port) return -EINVAL; hp->irq_requested = 1; resize_console(port); return 0; } static void notifier_del_vio(struct hvc_struct *hp, int data) { hp->irq_requested = 0; } /* The operations for console ports. */ static const struct hv_ops hv_ops = { .get_chars = get_chars, .put_chars = put_chars, .notifier_add = notifier_add_vio, .notifier_del = notifier_del_vio, .notifier_hangup = notifier_del_vio, }; /* * Console drivers are initialized very early so boot messages can go * out, so we do things slightly differently from the generic virtio * initialization of the net and block drivers. * * At this stage, the console is output-only. It's too early to set * up a virtqueue, so we let the drivers do some boutique early-output * thing. */ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) { early_put_chars = put_chars; return hvc_instantiate(0, 0, &hv_ops); } static int init_port_console(struct port *port) { int ret; /* * The Host's telling us this port is a console port. Hook it * up with an hvc console. * * To set up and manage our virtual console, we call * hvc_alloc(). * * The first argument of hvc_alloc() is the virtual console * number. 
The second argument is the parameter for the * notification mechanism (like irq number). We currently * leave this as zero, virtqueues have implicit notifications. * * The third argument is a "struct hv_ops" containing the * put_chars() get_chars(), notifier_add() and notifier_del() * pointers. The final argument is the output buffer size: we * can do any size, so we put PAGE_SIZE here. */ ret = ida_alloc_min(&vtermno_ida, 1, GFP_KERNEL); if (ret < 0) return ret; port->cons.vtermno = ret; port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); if (IS_ERR(port->cons.hvc)) { ret = PTR_ERR(port->cons.hvc); dev_err(port->dev, "error %d allocating hvc for port\n", ret); port->cons.hvc = NULL; ida_free(&vtermno_ida, port->cons.vtermno); return ret; } spin_lock_irq(&pdrvdata_lock); list_add_tail(&port->cons.list, &pdrvdata.consoles); spin_unlock_irq(&pdrvdata_lock); port->guest_connected = true; /* * Start using the new console output if this is the first * console to come up. */ if (early_put_chars) early_put_chars = NULL; /* Notify host of port being opened */ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); return 0; } static ssize_t show_port_name(struct device *dev, struct device_attribute *attr, char *buffer) { struct port *port; port = dev_get_drvdata(dev); return sprintf(buffer, "%s\n", port->name); } static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL); static struct attribute *port_sysfs_entries[] = { &dev_attr_name.attr, NULL }; static const struct attribute_group port_attribute_group = { .name = NULL, /* put in device directory */ .attrs = port_sysfs_entries, }; static int port_debugfs_show(struct seq_file *s, void *data) { struct port *port = s->private; seq_printf(s, "name: %s\n", port->name ? port->name : ""); seq_printf(s, "guest_connected: %d\n", port->guest_connected); seq_printf(s, "host_connected: %d\n", port->host_connected); seq_printf(s, "outvq_full: %d\n", port->outvq_full); seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent); seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received); seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded); seq_printf(s, "is_console: %s\n", is_console_port(port) ? 
"yes" : "no"); seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno); return 0; } DEFINE_SHOW_ATTRIBUTE(port_debugfs); static void set_console_size(struct port *port, u16 rows, u16 cols) { if (!port || !is_console_port(port)) return; port->cons.ws.ws_row = rows; port->cons.ws.ws_col = cols; } static int fill_queue(struct virtqueue *vq, spinlock_t *lock) { struct port_buffer *buf; int nr_added_bufs; int ret; nr_added_bufs = 0; do { buf = alloc_buf(vq->vdev, PAGE_SIZE, 0); if (!buf) return -ENOMEM; spin_lock_irq(lock); ret = add_inbuf(vq, buf); if (ret < 0) { spin_unlock_irq(lock); free_buf(buf, true); return ret; } nr_added_bufs++; spin_unlock_irq(lock); } while (ret > 0); return nr_added_bufs; } static void send_sigio_to_port(struct port *port) { if (port->async_queue && port->guest_connected) kill_fasync(&port->async_queue, SIGIO, POLL_OUT); } static int add_port(struct ports_device *portdev, u32 id) { char debugfs_name[16]; struct port *port; dev_t devt; int err; port = kmalloc(sizeof(*port), GFP_KERNEL); if (!port) { err = -ENOMEM; goto fail; } kref_init(&port->kref); port->portdev = portdev; port->id = id; port->name = NULL; port->inbuf = NULL; port->cons.hvc = NULL; port->async_queue = NULL; port->cons.ws.ws_row = port->cons.ws.ws_col = 0; port->cons.vtermno = 0; port->host_connected = port->guest_connected = false; port->stats = (struct port_stats) { 0 }; port->outvq_full = false; port->in_vq = portdev->in_vqs[port->id]; port->out_vq = portdev->out_vqs[port->id]; port->cdev = cdev_alloc(); if (!port->cdev) { dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n"); err = -ENOMEM; goto free_port; } port->cdev->ops = &port_fops; devt = MKDEV(portdev->chr_major, id); err = cdev_add(port->cdev, devt, 1); if (err < 0) { dev_err(&port->portdev->vdev->dev, "Error %d adding cdev for port %u\n", err, id); goto free_cdev; } port->dev = device_create(&port_class, &port->portdev->vdev->dev, devt, port, "vport%up%u", port->portdev->vdev->index, id); if (IS_ERR(port->dev)) { err = PTR_ERR(port->dev); dev_err(&port->portdev->vdev->dev, "Error %d creating device for port %u\n", err, id); goto free_cdev; } spin_lock_init(&port->inbuf_lock); spin_lock_init(&port->outvq_lock); init_waitqueue_head(&port->waitqueue); /* We can safely ignore ENOSPC because it means * the queue already has buffers. Buffers are removed * only by virtcons_remove(), not by unplug_port() */ err = fill_queue(port->in_vq, &port->inbuf_lock); if (err < 0 && err != -ENOSPC) { dev_err(port->dev, "Error allocating inbufs\n"); goto free_device; } if (is_rproc_serial(port->portdev->vdev)) /* * For rproc_serial assume remote processor is connected. * rproc_serial does not want the console port, only * the generic port implementation. */ port->host_connected = true; else if (!use_multiport(port->portdev)) { /* * If we're not using multiport support, * this has to be a console port. */ err = init_port_console(port); if (err) goto free_inbufs; } spin_lock_irq(&portdev->ports_lock); list_add_tail(&port->list, &port->portdev->ports); spin_unlock_irq(&portdev->ports_lock); /* * Tell the Host we're set so that it can send us various * configuration parameters for this port (eg, port name, * caching, whether this is a console port, etc.) 
*/ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); /* * Finally, create the debugfs file that we can use to * inspect a port's state at any time */ snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u", port->portdev->vdev->index, id); port->debugfs_file = debugfs_create_file(debugfs_name, 0444, pdrvdata.debugfs_dir, port, &port_debugfs_fops); return 0; free_inbufs: free_device: device_destroy(&port_class, port->dev->devt); free_cdev: cdev_del(port->cdev); free_port: kfree(port); fail: /* The host might want to notify management sw about port add failure */ __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); return err; } /* No users remain, remove all port-specific data. */ static void remove_port(struct kref *kref) { struct port *port; port = container_of(kref, struct port, kref); kfree(port); } static void remove_port_data(struct port *port) { spin_lock_irq(&port->inbuf_lock); /* Remove unused data this port might have received. */ discard_port_data(port); spin_unlock_irq(&port->inbuf_lock); spin_lock_irq(&port->outvq_lock); reclaim_consumed_buffers(port); spin_unlock_irq(&port->outvq_lock); } /* * Port got unplugged. Remove port from portdev's list and drop the * kref reference. If no userspace has this port opened, it will * result in immediate removal of the port. */ static void unplug_port(struct port *port) { spin_lock_irq(&port->portdev->ports_lock); list_del(&port->list); spin_unlock_irq(&port->portdev->ports_lock); spin_lock_irq(&port->inbuf_lock); if (port->guest_connected) { /* Let the app know the port is going down. */ send_sigio_to_port(port); /* Do this after sigio is actually sent */ port->guest_connected = false; port->host_connected = false; wake_up_interruptible(&port->waitqueue); } spin_unlock_irq(&port->inbuf_lock); if (is_console_port(port)) { spin_lock_irq(&pdrvdata_lock); list_del(&port->cons.list); spin_unlock_irq(&pdrvdata_lock); hvc_remove(port->cons.hvc); ida_free(&vtermno_ida, port->cons.vtermno); } remove_port_data(port); /* * We should just assume the device itself has gone off -- * else a close on an open port later will try to send out a * control message. */ port->portdev = NULL; sysfs_remove_group(&port->dev->kobj, &port_attribute_group); device_destroy(&port_class, port->dev->devt); cdev_del(port->cdev); debugfs_remove(port->debugfs_file); kfree(port->name); /* * Locks around here are not necessary - a port can't be * opened after we removed the port struct from ports_list * above. */ kref_put(&port->kref, remove_port); } /* Any private messages that the Host and Guest want to share */ static void handle_control_message(struct virtio_device *vdev, struct ports_device *portdev, struct port_buffer *buf) { struct virtio_console_control *cpkt; struct port *port; size_t name_size; int err; cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); port = find_port_by_id(portdev, virtio32_to_cpu(vdev, cpkt->id)); if (!port && cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) { /* No valid header at start of buffer. Drop it. */ dev_dbg(&portdev->vdev->dev, "Invalid index %u in control packet\n", cpkt->id); return; } switch (virtio16_to_cpu(vdev, cpkt->event)) { case VIRTIO_CONSOLE_PORT_ADD: if (port) { dev_dbg(&portdev->vdev->dev, "Port %u already added\n", port->id); send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); break; } if (virtio32_to_cpu(vdev, cpkt->id) >= portdev->max_nr_ports) { dev_warn(&portdev->vdev->dev, "Request for adding port with " "out-of-bound id %u, max.
supported id: %u\n", cpkt->id, portdev->max_nr_ports - 1); break; } add_port(portdev, virtio32_to_cpu(vdev, cpkt->id)); break; case VIRTIO_CONSOLE_PORT_REMOVE: unplug_port(port); break; case VIRTIO_CONSOLE_CONSOLE_PORT: if (!cpkt->value) break; if (is_console_port(port)) break; init_port_console(port); complete(&early_console_added); /* * Could remove the port here in case init fails - but * have to notify the host first. */ break; case VIRTIO_CONSOLE_RESIZE: { struct { __u16 rows; __u16 cols; } size; if (!is_console_port(port)) break; memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), sizeof(size)); set_console_size(port, size.rows, size.cols); port->cons.hvc->irq_requested = 1; resize_console(port); break; } case VIRTIO_CONSOLE_PORT_OPEN: port->host_connected = virtio16_to_cpu(vdev, cpkt->value); wake_up_interruptible(&port->waitqueue); /* * If the host port got closed and the host had any * unconsumed buffers, we'll be able to reclaim them * now. */ spin_lock_irq(&port->outvq_lock); reclaim_consumed_buffers(port); spin_unlock_irq(&port->outvq_lock); /* * If the guest is connected, it'll be interested in * knowing the host connection state changed. */ spin_lock_irq(&port->inbuf_lock); send_sigio_to_port(port); spin_unlock_irq(&port->inbuf_lock); break; case VIRTIO_CONSOLE_PORT_NAME: /* * If we woke up after hibernation, we can get this * again. Skip it in that case. */ if (port->name) break; /* * Skip the size of the header and the cpkt to get the size * of the name that was sent */ name_size = buf->len - buf->offset - sizeof(*cpkt) + 1; port->name = kmalloc(name_size, GFP_KERNEL); if (!port->name) { dev_err(port->dev, "Not enough space to store port name\n"); break; } strscpy(port->name, buf->buf + buf->offset + sizeof(*cpkt), name_size); /* * Since we only have one sysfs attribute, 'name', * create it only if we have a name for the port. */ err = sysfs_create_group(&port->dev->kobj, &port_attribute_group); if (err) { dev_err(port->dev, "Error %d creating sysfs device attributes\n", err); } else { /* * Generate a udev event so that appropriate * symlinks can be created based on udev * rules. 
*/ kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); } break; } } static void control_work_handler(struct work_struct *work) { struct ports_device *portdev; struct virtqueue *vq; struct port_buffer *buf; unsigned int len; portdev = container_of(work, struct ports_device, control_work); vq = portdev->c_ivq; spin_lock(&portdev->c_ivq_lock); while ((buf = virtqueue_get_buf(vq, &len))) { spin_unlock(&portdev->c_ivq_lock); buf->len = min_t(size_t, len, buf->size); buf->offset = 0; handle_control_message(vq->vdev, portdev, buf); spin_lock(&portdev->c_ivq_lock); if (add_inbuf(portdev->c_ivq, buf) < 0) { dev_warn(&portdev->vdev->dev, "Error adding buffer to queue\n"); free_buf(buf, false); } } spin_unlock(&portdev->c_ivq_lock); } static void flush_bufs(struct virtqueue *vq, bool can_sleep) { struct port_buffer *buf; unsigned int len; while ((buf = virtqueue_get_buf(vq, &len))) free_buf(buf, can_sleep); } static void out_intr(struct virtqueue *vq) { struct port *port; port = find_port_by_vq(vq->vdev->priv, vq); if (!port) { flush_bufs(vq, false); return; } wake_up_interruptible(&port->waitqueue); } static void in_intr(struct virtqueue *vq) { struct port *port; unsigned long flags; port = find_port_by_vq(vq->vdev->priv, vq); if (!port) { flush_bufs(vq, false); return; } spin_lock_irqsave(&port->inbuf_lock, flags); port->inbuf = get_inbuf(port); /* * Normally the port should not accept data when the port is * closed. For generic serial ports, the host won't (shouldn't) * send data till the guest is connected. But this condition * can be reached when a console port is not yet connected (no * tty is spawned) and the other side sends out data over the * vring, or when remote devices start sending data before * the ports are opened. * * A generic serial port will discard data if not connected, * while console ports and rproc-serial ports accept data at * any time. rproc-serial is initialized with guest_connected to * false because port_fops_open expects this. Console ports are * hooked up with an HVC console and are initialized with * guest_connected to true. */ if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev)) discard_port_data(port); /* Send a SIGIO indicating new data in case the process asked for it */ send_sigio_to_port(port); spin_unlock_irqrestore(&port->inbuf_lock, flags); wake_up_interruptible(&port->waitqueue); if (is_console_port(port) && hvc_poll(port->cons.hvc)) hvc_kick(); } static void control_intr(struct virtqueue *vq) { struct ports_device *portdev; portdev = vq->vdev->priv; schedule_work(&portdev->control_work); } static void config_intr(struct virtio_device *vdev) { struct ports_device *portdev; portdev = vdev->priv; if (!use_multiport(portdev)) schedule_work(&portdev->config_work); } static void config_work_handler(struct work_struct *work) { struct ports_device *portdev; portdev = container_of(work, struct ports_device, config_work); if (!use_multiport(portdev)) { struct virtio_device *vdev; struct port *port; u16 rows, cols; vdev = portdev->vdev; virtio_cread(vdev, struct virtio_console_config, cols, &cols); virtio_cread(vdev, struct virtio_console_config, rows, &rows); port = find_port_by_id(portdev, 0); set_console_size(port, rows, cols); /* * We'll use this way of resizing only for legacy * support. For newer userspace * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages * to indicate console size changes so that it can be * done per-port.
*/ resize_console(port); } } static int init_vqs(struct ports_device *portdev) { vq_callback_t **io_callbacks; char **io_names; struct virtqueue **vqs; u32 i, j, nr_ports, nr_queues; int err; nr_ports = portdev->max_nr_ports; nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL); io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *), GFP_KERNEL); io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL); portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *), GFP_KERNEL); portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *), GFP_KERNEL); if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || !portdev->out_vqs) { err = -ENOMEM; goto free; } /* * For backward compat (newer host but older guest), the host * spawns a console port first and also inits the vqs for port * 0 before others. */ j = 0; io_callbacks[j] = in_intr; io_callbacks[j + 1] = out_intr; io_names[j] = "input"; io_names[j + 1] = "output"; j += 2; if (use_multiport(portdev)) { io_callbacks[j] = control_intr; io_callbacks[j + 1] = NULL; io_names[j] = "control-i"; io_names[j + 1] = "control-o"; for (i = 1; i < nr_ports; i++) { j += 2; io_callbacks[j] = in_intr; io_callbacks[j + 1] = out_intr; io_names[j] = "input"; io_names[j + 1] = "output"; } } /* Find the queues. */ err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, io_callbacks, (const char **)io_names, NULL); if (err) goto free; j = 0; portdev->in_vqs[0] = vqs[0]; portdev->out_vqs[0] = vqs[1]; j += 2; if (use_multiport(portdev)) { portdev->c_ivq = vqs[j]; portdev->c_ovq = vqs[j + 1]; for (i = 1; i < nr_ports; i++) { j += 2; portdev->in_vqs[i] = vqs[j]; portdev->out_vqs[i] = vqs[j + 1]; } } kfree(io_names); kfree(io_callbacks); kfree(vqs); return 0; free: kfree(portdev->out_vqs); kfree(portdev->in_vqs); kfree(io_names); kfree(io_callbacks); kfree(vqs); return err; } static const struct file_operations portdev_fops = { .owner = THIS_MODULE, }; static void remove_vqs(struct ports_device *portdev) { struct virtqueue *vq; virtio_device_for_each_vq(portdev->vdev, vq) { struct port_buffer *buf; flush_bufs(vq, true); while ((buf = virtqueue_detach_unused_buf(vq))) free_buf(buf, true); cond_resched(); } portdev->vdev->config->del_vqs(portdev->vdev); kfree(portdev->in_vqs); kfree(portdev->out_vqs); } static void virtcons_remove(struct virtio_device *vdev) { struct ports_device *portdev; struct port *port, *port2; portdev = vdev->priv; spin_lock_irq(&pdrvdata_lock); list_del(&portdev->list); spin_unlock_irq(&pdrvdata_lock); /* Device is going away, exit any polling for buffers */ virtio_break_device(vdev); if (use_multiport(portdev)) flush_work(&portdev->control_work); else flush_work(&portdev->config_work); /* Disable interrupts for vqs */ virtio_reset_device(vdev); /* Finish up work that's lined up */ if (use_multiport(portdev)) cancel_work_sync(&portdev->control_work); else cancel_work_sync(&portdev->config_work); list_for_each_entry_safe(port, port2, &portdev->ports, list) unplug_port(port); unregister_chrdev(portdev->chr_major, "virtio-portsdev"); /* * When yanking out a device, we immediately lose the * (device-side) queues. So there's no point in keeping the * guest side around till we drop our final reference. This * also means that any ports which are in an open state will * have to just stop using the port, as the vqs are going * away. 
*/ remove_vqs(portdev); kfree(portdev); } /* * Once we're further in boot, we get probed like any other virtio * device. * * If the host also supports multiple console ports, we check the * config space to see how many ports the host has spawned. We * initialize each port found. */ static int virtcons_probe(struct virtio_device *vdev) { struct ports_device *portdev; int err; bool multiport; bool early = early_put_chars != NULL; /* We only need a config space if features are offered */ if (!vdev->config->get && (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE) || virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) { dev_err(&vdev->dev, "%s failure: config access disabled\n", __func__); return -EINVAL; } /* Ensure early_put_chars is read now */ barrier(); portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); if (!portdev) { err = -ENOMEM; goto fail; } /* Attach this portdev to this virtio_device, and vice-versa. */ portdev->vdev = vdev; vdev->priv = portdev; portdev->chr_major = register_chrdev(0, "virtio-portsdev", &portdev_fops); if (portdev->chr_major < 0) { dev_err(&vdev->dev, "Error %d registering chrdev for device %u\n", portdev->chr_major, vdev->index); err = portdev->chr_major; goto free; } multiport = false; portdev->max_nr_ports = 1; /* Don't test MULTIPORT at all if we're rproc: not a valid feature! */ if (!is_rproc_serial(vdev) && virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT, struct virtio_console_config, max_nr_ports, &portdev->max_nr_ports) == 0) { if (portdev->max_nr_ports == 0 || portdev->max_nr_ports > VIRTCONS_MAX_PORTS) { dev_err(&vdev->dev, "Invalid max_nr_ports %d", portdev->max_nr_ports); err = -EINVAL; goto free_chrdev; } multiport = true; } err = init_vqs(portdev); if (err < 0) { dev_err(&vdev->dev, "Error %d initializing vqs\n", err); goto free_chrdev; } spin_lock_init(&portdev->ports_lock); INIT_LIST_HEAD(&portdev->ports); INIT_LIST_HEAD(&portdev->list); virtio_device_ready(portdev->vdev); INIT_WORK(&portdev->config_work, &config_work_handler); INIT_WORK(&portdev->control_work, &control_work_handler); if (multiport) { spin_lock_init(&portdev->c_ivq_lock); spin_lock_init(&portdev->c_ovq_lock); err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); if (err < 0) { dev_err(&vdev->dev, "Error allocating buffers for control queue\n"); /* * The host might want to notify mgmt sw about device * add failure. */ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, VIRTIO_CONSOLE_DEVICE_READY, 0); /* Device was functional: we need full cleanup. */ virtcons_remove(vdev); return err; } } else { /* * For backward compatibility: Create a console port * if we're running on an older host. */ add_port(portdev, 0); } spin_lock_irq(&pdrvdata_lock); list_add_tail(&portdev->list, &pdrvdata.portdevs); spin_unlock_irq(&pdrvdata_lock); __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, VIRTIO_CONSOLE_DEVICE_READY, 1); /* * If there was an early virtio console, assume that there are no * other consoles. We need to wait until the hvc_alloc matches the * hvc_instantiate, otherwise tty_open will complain, resulting in * a "Warning: unable to open an initial console" boot failure. * Without multiport this is done in add_port above. With multiport * this might take some host<->guest communication - thus we have to * wait.
*/ if (multiport && early) wait_for_completion(&early_console_added); return 0; free_chrdev: unregister_chrdev(portdev->chr_major, "virtio-portsdev"); free: kfree(portdev); fail: return err; } static const struct virtio_device_id id_table[] = { { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, { 0 }, }; MODULE_DEVICE_TABLE(virtio, id_table); static const unsigned int features[] = { VIRTIO_CONSOLE_F_SIZE, VIRTIO_CONSOLE_F_MULTIPORT, }; static const struct virtio_device_id rproc_serial_id_table[] = { #if IS_ENABLED(CONFIG_REMOTEPROC) { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID }, #endif { 0 }, }; MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table); static const unsigned int rproc_serial_features[] = { }; #ifdef CONFIG_PM_SLEEP static int virtcons_freeze(struct virtio_device *vdev) { struct ports_device *portdev; struct port *port; portdev = vdev->priv; virtio_reset_device(vdev); if (use_multiport(portdev)) virtqueue_disable_cb(portdev->c_ivq); cancel_work_sync(&portdev->control_work); cancel_work_sync(&portdev->config_work); /* * Once more: if control_work_handler() was running, it would * enable the cb as the last step. */ if (use_multiport(portdev)) virtqueue_disable_cb(portdev->c_ivq); list_for_each_entry(port, &portdev->ports, list) { virtqueue_disable_cb(port->in_vq); virtqueue_disable_cb(port->out_vq); /* * We'll ask the host later if the new invocation has * the port opened or closed. */ port->host_connected = false; remove_port_data(port); } remove_vqs(portdev); return 0; } static int virtcons_restore(struct virtio_device *vdev) { struct ports_device *portdev; struct port *port; int ret; portdev = vdev->priv; ret = init_vqs(portdev); if (ret) return ret; virtio_device_ready(portdev->vdev); if (use_multiport(portdev)) fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); list_for_each_entry(port, &portdev->ports, list) { port->in_vq = portdev->in_vqs[port->id]; port->out_vq = portdev->out_vqs[port->id]; fill_queue(port->in_vq, &port->inbuf_lock); /* Get port open/close status on the host */ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); /* * If a port was open at the time of suspending, we * have to let the host know that it's still open. 
*/ if (port->guest_connected) send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); } return 0; } #endif static struct virtio_driver virtio_console = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtcons_probe, .remove = virtcons_remove, .config_changed = config_intr, #ifdef CONFIG_PM_SLEEP .freeze = virtcons_freeze, .restore = virtcons_restore, #endif }; static struct virtio_driver virtio_rproc_serial = { .feature_table = rproc_serial_features, .feature_table_size = ARRAY_SIZE(rproc_serial_features), .driver.name = "virtio_rproc_serial", .driver.owner = THIS_MODULE, .id_table = rproc_serial_id_table, .probe = virtcons_probe, .remove = virtcons_remove, }; static int __init virtio_console_init(void) { int err; err = class_register(&port_class); if (err) return err; pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL); INIT_LIST_HEAD(&pdrvdata.consoles); INIT_LIST_HEAD(&pdrvdata.portdevs); err = register_virtio_driver(&virtio_console); if (err < 0) { pr_err("Error %d registering virtio driver\n", err); goto free; } err = register_virtio_driver(&virtio_rproc_serial); if (err < 0) { pr_err("Error %d registering virtio rproc serial driver\n", err); goto unregister; } return 0; unregister: unregister_virtio_driver(&virtio_console); free: debugfs_remove_recursive(pdrvdata.debugfs_dir); class_unregister(&port_class); return err; } static void __exit virtio_console_fini(void) { reclaim_dma_bufs(); unregister_virtio_driver(&virtio_console); unregister_virtio_driver(&virtio_rproc_serial); class_unregister(&port_class); debugfs_remove_recursive(pdrvdata.debugfs_dir); } module_init(virtio_console_init); module_exit(virtio_console_fini); MODULE_DESCRIPTION("Virtio console driver"); MODULE_LICENSE("GPL");
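/*
 * Illustrative user-space sketch (not part of the driver above, and an
 * assumption rather than a documented interface): how a guest program
 * might talk to one of the /dev/vport<device number>p<port number>
 * nodes served by port_fops -- open it, poll for readability, then
 * read. The concrete node name /dev/vport0p1 is hypothetical; the
 * actual numbers depend on the host configuration, and only one
 * process may hold a given port open at a time.
 */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>

int main(void)
{
	char buf[256];
	int fd = open("/dev/vport0p1", O_RDWR);	/* hypothetical node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* POLLHUP from the driver signals host disconnect or unplug. */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(fd, buf, sizeof(buf));

		if (n > 0)
			write(STDOUT_FILENO, buf, n);
	}

	close(fd);
	return 0;
}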
linux-master
drivers/char/virtio_console.c
// SPDX-License-Identifier: GPL-2.0-or-later /* IBM POWER Barrier Synchronization Register Driver * * Copyright IBM Corporation 2008 * * Author: Sonny Rao <[email protected]> */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/cdev.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/io.h> /* This driver exposes a special register which can be used for fast synchronization across a large SMP machine. The hardware is exposed as an array of bytes where each process will write to one of the bytes to indicate it has finished the current stage and this update is broadcast to all processors without having to bounce a cacheline between them. In POWER5 and POWER6 there is one of these registers per SMP, but it is presented in two forms; first, it is given as a whole and then as a number of smaller registers which alias to parts of the single whole register. This can potentially allow multiple groups of processes to each have their own private synchronization device. Note that this hardware *must* be written to using *only* single byte writes. It may be read using 1, 2, 4, or 8 byte loads which must be aligned since this region is treated as cache-inhibited. Processes should also use a full sync before and after writing to the BSR to ensure all stores and the BSR update have made it to all chips in the system */ /* This is an arbitrary number; up to Power6 it's been 17 or fewer */ #define BSR_MAX_DEVS (32) struct bsr_dev { u64 bsr_addr; /* Real address */ u64 bsr_len; /* length of mem region we can map */ unsigned bsr_bytes; /* size of the BSR reg itself */ unsigned bsr_stride; /* interval at which BSR repeats in the page */ unsigned bsr_type; /* maps to enum below */ unsigned bsr_num; /* bsr id number for its type */ int bsr_minor; struct list_head bsr_list; dev_t bsr_dev; struct cdev bsr_cdev; struct device *bsr_device; char bsr_name[32]; }; static unsigned total_bsr_devs; static LIST_HEAD(bsr_devs); static int bsr_major; enum { BSR_8 = 0, BSR_16 = 1, BSR_64 = 2, BSR_128 = 3, BSR_4096 = 4, BSR_UNKNOWN = 5, BSR_MAX = 6, }; static unsigned bsr_types[BSR_MAX]; static ssize_t bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bsr_dev *bsr_dev = dev_get_drvdata(dev); return sprintf(buf, "%u\n", bsr_dev->bsr_bytes); } static DEVICE_ATTR_RO(bsr_size); static ssize_t bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bsr_dev *bsr_dev = dev_get_drvdata(dev); return sprintf(buf, "%u\n", bsr_dev->bsr_stride); } static DEVICE_ATTR_RO(bsr_stride); static ssize_t bsr_length_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bsr_dev *bsr_dev = dev_get_drvdata(dev); return sprintf(buf, "%llu\n", bsr_dev->bsr_len); } static DEVICE_ATTR_RO(bsr_length); static struct attribute *bsr_dev_attrs[] = { &dev_attr_bsr_size.attr, &dev_attr_bsr_stride.attr, &dev_attr_bsr_length.attr, NULL, }; ATTRIBUTE_GROUPS(bsr_dev); static const struct class bsr_class = { .name = "bsr", .dev_groups = bsr_dev_groups, }; static int bsr_mmap(struct file *filp, struct vm_area_struct *vma) { unsigned long size = vma->vm_end - vma->vm_start; struct bsr_dev *dev = filp->private_data; int ret; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); /* check for the case of a small BSR device and map one 4k page for it */ if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE) ret = remap_4k_pfn(vma,
vma->vm_start, dev->bsr_addr >> 12, vma->vm_page_prot); else if (size <= dev->bsr_len) ret = io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT, size, vma->vm_page_prot); else return -EINVAL; if (ret) return -EAGAIN; return 0; } static int bsr_open(struct inode *inode, struct file *filp) { struct cdev *cdev = inode->i_cdev; struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev); filp->private_data = dev; return 0; } static const struct file_operations bsr_fops = { .owner = THIS_MODULE, .mmap = bsr_mmap, .open = bsr_open, .llseek = noop_llseek, }; static void bsr_cleanup_devs(void) { struct bsr_dev *cur, *n; list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) { if (cur->bsr_device) { cdev_del(&cur->bsr_cdev); device_del(cur->bsr_device); } list_del(&cur->bsr_list); kfree(cur); } } static int bsr_add_node(struct device_node *bn) { int bsr_stride_len, bsr_bytes_len, num_bsr_devs; const u32 *bsr_stride; const u32 *bsr_bytes; unsigned i; int ret = -ENODEV; bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len); bsr_bytes = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len); if (!bsr_stride || !bsr_bytes || (bsr_stride_len != bsr_bytes_len)) { printk(KERN_ERR "bsr of-node has missing/incorrect property\n"); return ret; } num_bsr_devs = bsr_bytes_len / sizeof(u32); for (i = 0 ; i < num_bsr_devs; i++) { struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev), GFP_KERNEL); struct resource res; int result; if (!cur) { printk(KERN_ERR "Unable to alloc bsr dev\n"); ret = -ENOMEM; goto out_err; } result = of_address_to_resource(bn, i, &res); if (result < 0) { printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n"); kfree(cur); continue; } cur->bsr_minor = i + total_bsr_devs; cur->bsr_addr = res.start; cur->bsr_len = resource_size(&res); cur->bsr_bytes = bsr_bytes[i]; cur->bsr_stride = bsr_stride[i]; cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs); /* if we have a bsr_len of > 4k and less than PAGE_SIZE (64k pages) */ /* we can only map 4k of it, so only advertise the 4k in sysfs */ if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE) cur->bsr_len = 4096; switch(cur->bsr_bytes) { case 8: cur->bsr_type = BSR_8; break; case 16: cur->bsr_type = BSR_16; break; case 64: cur->bsr_type = BSR_64; break; case 128: cur->bsr_type = BSR_128; break; case 4096: cur->bsr_type = BSR_4096; break; default: cur->bsr_type = BSR_UNKNOWN; } cur->bsr_num = bsr_types[cur->bsr_type]; snprintf(cur->bsr_name, 32, "bsr%d_%d", cur->bsr_bytes, cur->bsr_num); cdev_init(&cur->bsr_cdev, &bsr_fops); result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1); if (result) { kfree(cur); goto out_err; } cur->bsr_device = device_create(&bsr_class, NULL, cur->bsr_dev, cur, "%s", cur->bsr_name); if (IS_ERR(cur->bsr_device)) { printk(KERN_ERR "device_create failed for %s\n", cur->bsr_name); cdev_del(&cur->bsr_cdev); kfree(cur); goto out_err; } bsr_types[cur->bsr_type] = cur->bsr_num + 1; list_add_tail(&cur->bsr_list, &bsr_devs); } total_bsr_devs += num_bsr_devs; return 0; out_err: bsr_cleanup_devs(); return ret; } static int bsr_create_devs(struct device_node *bn) { int ret; while (bn) { ret = bsr_add_node(bn); if (ret) { of_node_put(bn); return ret; } bn = of_find_compatible_node(bn, NULL, "ibm,bsr"); } return 0; } static int __init bsr_init(void) { struct device_node *np; dev_t bsr_dev; int ret = -ENODEV; np = of_find_compatible_node(NULL, NULL, "ibm,bsr"); if (!np) goto out_err; ret = class_register(&bsr_class); if (ret) goto out_err_1; ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS,
"bsr"); bsr_major = MAJOR(bsr_dev); if (ret < 0) { printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n"); goto out_err_2; } ret = bsr_create_devs(np); if (ret < 0) { np = NULL; goto out_err_3; } return 0; out_err_3: unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS); out_err_2: class_unregister(&bsr_class); out_err_1: of_node_put(np); out_err: return ret; } static void __exit bsr_exit(void) { bsr_cleanup_devs(); class_unregister(&bsr_class); if (bsr_major) unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS); } module_init(bsr_init); module_exit(bsr_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sonny Rao <[email protected]>");
linux-master
drivers/char/bsr.c
// SPDX-License-Identifier: GPL-2.0-only /* * Flash memory interface rev.5 driver for the Intel * Flash chips used on the NetWinder. * * 20/08/2000 RMK use __ioremap to map flash into virtual memory * make a few more places use "volatile" * 22/05/2001 RMK - Lock read against write * - merge printk level changes (with mods) from Alan Cox. * - use *ppos as the file position, not file->f_pos. * - fix check for out of range pos and r/w size * * Please note that we are tampering with the only flash chip in the * machine, which contains the bootup code. We therefore have the * power to convert these machines into doorstops... */ #include <linux/module.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/miscdevice.h> #include <linux/spinlock.h> #include <linux/rwsem.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/jiffies.h> #include <asm/hardware/dec21285.h> #include <asm/io.h> #include <asm/mach-types.h> #include <linux/uaccess.h> /*****************************************************************************/ #include <asm/nwflash.h> #define NWFLASH_VERSION "6.4" static DEFINE_MUTEX(flash_mutex); static void kick_open(void); static int get_flash_id(void); static int erase_block(int nBlock); static int write_block(unsigned long p, const char __user *buf, int count); #define KFLASH_SIZE 1024*1024 //1 Meg #define KFLASH_SIZE4 4*1024*1024 //4 Meg #define KFLASH_ID 0x89A6 //Intel flash #define KFLASH_ID4 0xB0D4 //Intel flash 4Meg static bool flashdebug; //if set - we will display progress msgs static int gbWriteEnable; static int gbWriteBase64Enable; static volatile unsigned char *FLASH_BASE; static int gbFlashSize = KFLASH_SIZE; static DEFINE_MUTEX(nwflash_mutex); static int get_flash_id(void) { volatile unsigned int c1, c2; /* * try to get flash chip ID */ kick_open(); c2 = inb(0x80); *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x90; udelay(15); c1 = *(volatile unsigned char *) FLASH_BASE; c2 = inb(0x80); /* * on 4 Meg flash the second byte is actually at offset 2... */ if (c1 == 0xB0) c2 = *(volatile unsigned char *) (FLASH_BASE + 2); else c2 = *(volatile unsigned char *) (FLASH_BASE + 1); c2 += (c1 << 8); /* * set it back to read mode */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF; if (c2 == KFLASH_ID4) gbFlashSize = KFLASH_SIZE4; return c2; } static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { mutex_lock(&flash_mutex); switch (cmd) { case CMD_WRITE_DISABLE: gbWriteBase64Enable = 0; gbWriteEnable = 0; break; case CMD_WRITE_ENABLE: gbWriteEnable = 1; break; case CMD_WRITE_BASE64K_ENABLE: gbWriteBase64Enable = 1; break; default: gbWriteBase64Enable = 0; gbWriteEnable = 0; mutex_unlock(&flash_mutex); return -EINVAL; } mutex_unlock(&flash_mutex); return 0; } static ssize_t flash_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { ssize_t ret; if (flashdebug) printk(KERN_DEBUG "flash_read: offset=0x%llx, " "buffer=%p, count=0x%zx.\n", *ppos, buf, size); /* * We now lock against reads and writes.
--rmk */ if (mutex_lock_interruptible(&nwflash_mutex)) return -ERESTARTSYS; ret = simple_read_from_buffer(buf, size, ppos, (void *)FLASH_BASE, gbFlashSize); mutex_unlock(&nwflash_mutex); return ret; } static ssize_t flash_write(struct file *file, const char __user *buf, size_t size, loff_t * ppos) { unsigned long p = *ppos; unsigned int count = size; int written; int nBlock, temp, rc; int i, j; if (flashdebug) printk("flash_write: offset=0x%lX, buffer=0x%p, count=0x%X.\n", p, buf, count); if (!gbWriteEnable) return -EINVAL; if (p < 64 * 1024 && (!gbWriteBase64Enable)) return -EINVAL; /* * check for out of range pos or count */ if (p >= gbFlashSize) return count ? -ENXIO : 0; if (count > gbFlashSize - p) count = gbFlashSize - p; if (!access_ok(buf, count)) return -EFAULT; /* * We now lock against reads and writes. --rmk */ if (mutex_lock_interruptible(&nwflash_mutex)) return -ERESTARTSYS; written = 0; nBlock = (int) p >> 16; //block # of 64K bytes /* * # of 64K blocks to erase and write */ temp = ((int) (p + count) >> 16) - nBlock + 1; /* * write ends at exactly 64k boundary? */ if (((int) (p + count) & 0xFFFF) == 0) temp -= 1; if (flashdebug) printk(KERN_DEBUG "flash_write: writing %d block(s) " "starting at %d.\n", temp, nBlock); for (; temp; temp--, nBlock++) { if (flashdebug) printk(KERN_DEBUG "flash_write: erasing block %d.\n", nBlock); /* * first we have to erase the block(s), where we will write... */ i = 0; j = 0; RetryBlock: do { rc = erase_block(nBlock); i++; } while (rc && i < 10); if (rc) { printk(KERN_ERR "flash_write: erase error %x\n", rc); break; } if (flashdebug) printk(KERN_DEBUG "flash_write: writing offset %lX, " "from buf %p, bytes left %X.\n", p, buf, count - written); /* * write_block will limit write to space left in this block */ rc = write_block(p, buf, count - written); j++; /* * if somehow write verify failed? Can't happen?? */ if (!rc) { /* * retry up to 10 times */ if (j < 10) goto RetryBlock; else /* * else quit with error... */ rc = -1; } if (rc < 0) { printk(KERN_ERR "flash_write: write error %X\n", rc); break; } p += rc; buf += rc; written += rc; *ppos += rc; if (flashdebug) printk(KERN_DEBUG "flash_write: written 0x%X bytes OK.\n", written); } mutex_unlock(&nwflash_mutex); return written; } /* * The memory devices use the full 32/64 bits of the offset, and so we cannot * check against negative addresses: they are ok. The return value is weird, * though, in that case (0). * * also note that seeking relative to the "end of file" isn't supported: * it has no meaning, so it returns -EINVAL. */ static loff_t flash_llseek(struct file *file, loff_t offset, int orig) { loff_t ret; mutex_lock(&flash_mutex); if (flashdebug) printk(KERN_DEBUG "flash_llseek: offset=0x%X, orig=0x%X.\n", (unsigned int) offset, orig); ret = no_seek_end_llseek_size(file, offset, orig, gbFlashSize); mutex_unlock(&flash_mutex); return ret; } /* * assume that main Write routine did the parameter checking... * so just go ahead and erase, what requested! */ static int erase_block(int nBlock) { volatile unsigned int c1; volatile unsigned char *pWritePtr; unsigned long timeout; int temp, temp1; /* * reset footbridge to the correct offset 0 (...0..3) */ *CSR_ROMWRITEREG = 0; /* * dummy ROM read */ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000); kick_open(); /* * reset status if old errors */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; /* * erase a block... * aim at the middle of a current block... 
*/ pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + 0x8000 + (nBlock << 16))); /* * dummy read */ c1 = *pWritePtr; kick_open(); /* * erase */ *(volatile unsigned char *) pWritePtr = 0x20; /* * confirm */ *(volatile unsigned char *) pWritePtr = 0xD0; /* * wait 10 ms */ msleep(10); /* * wait while erasing in process (up to 10 sec) */ timeout = jiffies + 10 * HZ; c1 = 0; while (!(c1 & 0x80) && time_before(jiffies, timeout)) { msleep(10); /* * read any address */ c1 = *(volatile unsigned char *) (pWritePtr); // printk("Flash_erase: status=%X.\n",c1); } /* * set flash for normal read access */ kick_open(); // *(volatile unsigned char*)(FLASH_BASE+0x8000) = 0xFF; *(volatile unsigned char *) pWritePtr = 0xFF; //back to normal operation /* * check if erase errors were reported */ if (c1 & 0x20) { printk(KERN_ERR "flash_erase: err at %p\n", pWritePtr); /* * reset error */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; return -2; } /* * just to make sure - verify if erased OK... */ msleep(10); pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + (nBlock << 16))); for (temp = 0; temp < 16 * 1024; temp++, pWritePtr += 4) { if ((temp1 = *(volatile unsigned int *) pWritePtr) != 0xFFFFFFFF) { printk(KERN_ERR "flash_erase: verify err at %p = %X\n", pWritePtr, temp1); return -1; } } return 0; } /* * write_block will limit number of bytes written to the space in this block */ static int write_block(unsigned long p, const char __user *buf, int count) { volatile unsigned int c1; volatile unsigned int c2; unsigned char *pWritePtr; unsigned int uAddress; unsigned int offset; unsigned long timeout; unsigned long timeout1; pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p)); /* * check if write will end in this block.... */ offset = p & 0xFFFF; if (offset + count > 0x10000) count = 0x10000 - offset; /* * wait up to 30 sec for this block */ timeout = jiffies + 30 * HZ; for (offset = 0; offset < count; offset++, pWritePtr++) { uAddress = (unsigned int) pWritePtr; uAddress &= 0xFFFFFFFC; if (__get_user(c2, buf + offset)) return -EFAULT; WriteRetry: /* * dummy read */ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000); /* * kick open the write gate */ kick_open(); /* * program footbridge to the correct offset...0..3 */ *CSR_ROMWRITEREG = (unsigned int) pWritePtr & 3; /* * write cmd */ *(volatile unsigned char *) (uAddress) = 0x40; /* * data to write */ *(volatile unsigned char *) (uAddress) = c2; /* * get status */ *(volatile unsigned char *) (FLASH_BASE + 0x10000) = 0x70; c1 = 0; /* * wait up to 1 sec for this byte */ timeout1 = jiffies + 1 * HZ; /* * while not ready... */ while (!(c1 & 0x80) && time_before(jiffies, timeout1)) c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000); /* * if timeout getting status */ if (time_after_eq(jiffies, timeout1)) { kick_open(); /* * reset err */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; goto WriteRetry; } /* * switch on read access, as a default flash operation mode */ kick_open(); /* * read access */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF; /* * if hardware reports an error writing, and not timeout - * reset the chip and retry */ if (c1 & 0x10) { kick_open(); /* * reset err */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; /* * before timeout? 
*/ if (time_before(jiffies, timeout)) { if (flashdebug) printk(KERN_DEBUG "write_block: Retrying write at 0x%X\n", pWritePtr - FLASH_BASE); /* * wait a couple of ms */ msleep(10); goto WriteRetry; } else { printk(KERN_ERR "write_block: timeout at 0x%X\n", pWritePtr - FLASH_BASE); /* * return error -2 */ return -2; } } } msleep(10); pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p)); for (offset = 0; offset < count; offset++) { char c, c1; if (__get_user(c, buf)) return -EFAULT; buf++; if ((c1 = *pWritePtr++) != c) { printk(KERN_ERR "write_block: verify error at 0x%X (%02X!=%02X)\n", pWritePtr - FLASH_BASE, c1, c); return 0; } } return count; } static void kick_open(void) { unsigned long flags; /* * we want to write a bit pattern XXX1 to Xilinx to enable * the write gate, which will be open for about the next 2ms. */ raw_spin_lock_irqsave(&nw_gpio_lock, flags); nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE); raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); /* * let the ISA bus catch on... */ udelay(25); } static const struct file_operations flash_fops = { .owner = THIS_MODULE, .llseek = flash_llseek, .read = flash_read, .write = flash_write, .unlocked_ioctl = flash_ioctl, }; static struct miscdevice flash_miscdev = { NWFLASH_MINOR, "nwflash", &flash_fops }; static int __init nwflash_init(void) { int ret = -ENODEV; if (machine_is_netwinder()) { int id; FLASH_BASE = ioremap(DC21285_FLASH, KFLASH_SIZE4); if (!FLASH_BASE) goto out; id = get_flash_id(); if ((id != KFLASH_ID) && (id != KFLASH_ID4)) { ret = -ENXIO; iounmap((void *)FLASH_BASE); printk("Flash: incorrect ID 0x%04X.\n", id); goto out; } printk("Flash ROM driver v.%s, flash device ID 0x%04X, size %d Mb.\n", NWFLASH_VERSION, id, gbFlashSize / (1024 * 1024)); ret = misc_register(&flash_miscdev); if (ret < 0) { iounmap((void *)FLASH_BASE); } } out: return ret; } static void __exit nwflash_exit(void) { misc_deregister(&flash_miscdev); iounmap((void *)FLASH_BASE); } MODULE_LICENSE("GPL"); module_param(flashdebug, bool, 0644); module_init(nwflash_init); module_exit(nwflash_exit);
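/*
 * Illustrative user-space sketch (not part of the driver above): the
 * write path guarded by flash_ioctl(). Writes are rejected until
 * CMD_WRITE_ENABLE is issued (and the first 64K additionally needs
 * CMD_WRITE_BASE64K_ENABLE). The command values come from the kernel's
 * <asm/nwflash.h>; that the header and the /dev/nwflash node are
 * available to user space is an assumption here. Remember the warning
 * at the top of the file: writing this flash erases 64K blocks of the
 * boot ROM.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/nwflash.h>	/* CMD_WRITE_ENABLE et al.; assumed available */

int main(void)
{
	char patch[16] = "new boot data";
	int fd = open("/dev/nwflash", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Without this, flash_write() returns -EINVAL. */
	if (ioctl(fd, CMD_WRITE_ENABLE) < 0) {
		perror("ioctl");
		return 1;
	}

	/* Seek past the protected first 64K, then write. */
	if (lseek(fd, 0x10000, SEEK_SET) < 0 ||
	    write(fd, patch, sizeof(patch)) < 0)
		perror("write");

	ioctl(fd, CMD_WRITE_DISABLE);	/* re-protect when done */
	close(fd);
	return 0;
}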
linux-master
drivers/char/nwflash.c
// SPDX-License-Identifier: GPL-2.0 /* * Privileged ADI driver for sparc64 * * Author: Tom Hromatka <[email protected]> */ #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <asm/asi.h> #define MAX_BUF_SZ PAGE_SIZE static int adi_open(struct inode *inode, struct file *file) { file->f_mode |= FMODE_UNSIGNED_OFFSET; return 0; } static int read_mcd_tag(unsigned long addr) { long err; int ver; __asm__ __volatile__( "1: ldxa [%[addr]] %[asi], %[ver]\n" " mov 0, %[err]\n" "2:\n" " .section .fixup,#alloc,#execinstr\n" " .align 4\n" "3: sethi %%hi(2b), %%g1\n" " jmpl %%g1 + %%lo(2b), %%g0\n" " mov %[invalid], %[err]\n" " .previous\n" " .section __ex_table, \"a\"\n" " .align 4\n" " .word 1b, 3b\n" " .previous\n" : [ver] "=r" (ver), [err] "=r" (err) : [addr] "r" (addr), [invalid] "i" (EFAULT), [asi] "i" (ASI_MCD_REAL) : "memory", "g1" ); if (err) return -EFAULT; else return ver; } static ssize_t adi_read(struct file *file, char __user *buf, size_t count, loff_t *offp) { size_t ver_buf_sz, bytes_read = 0; int ver_buf_idx = 0; loff_t offset; u8 *ver_buf; ssize_t ret; ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ); ver_buf = kmalloc(ver_buf_sz, GFP_KERNEL); if (!ver_buf) return -ENOMEM; offset = (*offp) * adi_blksize(); while (bytes_read < count) { ret = read_mcd_tag(offset); if (ret < 0) goto out; ver_buf[ver_buf_idx] = (u8)ret; ver_buf_idx++; offset += adi_blksize(); if (ver_buf_idx >= ver_buf_sz) { if (copy_to_user(buf + bytes_read, ver_buf, ver_buf_sz)) { ret = -EFAULT; goto out; } bytes_read += ver_buf_sz; ver_buf_idx = 0; ver_buf_sz = min(count - bytes_read, (size_t)MAX_BUF_SZ); } } (*offp) += bytes_read; ret = bytes_read; out: kfree(ver_buf); return ret; } static int set_mcd_tag(unsigned long addr, u8 ver) { long err; __asm__ __volatile__( "1: stxa %[ver], [%[addr]] %[asi]\n" " mov 0, %[err]\n" "2:\n" " .section .fixup,#alloc,#execinstr\n" " .align 4\n" "3: sethi %%hi(2b), %%g1\n" " jmpl %%g1 + %%lo(2b), %%g0\n" " mov %[invalid], %[err]\n" " .previous\n" " .section __ex_table, \"a\"\n" " .align 4\n" " .word 1b, 3b\n" " .previous\n" : [err] "=r" (err) : [ver] "r" (ver), [addr] "r" (addr), [invalid] "i" (EFAULT), [asi] "i" (ASI_MCD_REAL) : "memory", "g1" ); if (err) return -EFAULT; else return ver; } static ssize_t adi_write(struct file *file, const char __user *buf, size_t count, loff_t *offp) { size_t ver_buf_sz, bytes_written = 0; loff_t offset; u8 *ver_buf; ssize_t ret; int i; if (count <= 0) return -EINVAL; ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ); ver_buf = kmalloc(ver_buf_sz, GFP_KERNEL); if (!ver_buf) return -ENOMEM; offset = (*offp) * adi_blksize(); do { if (copy_from_user(ver_buf, &buf[bytes_written], ver_buf_sz)) { ret = -EFAULT; goto out; } for (i = 0; i < ver_buf_sz; i++) { ret = set_mcd_tag(offset, ver_buf[i]); if (ret < 0) goto out; offset += adi_blksize(); } bytes_written += ver_buf_sz; ver_buf_sz = min(count - bytes_written, (size_t)MAX_BUF_SZ); } while (bytes_written < count); (*offp) += bytes_written; ret = bytes_written; out: __asm__ __volatile__("membar #Sync"); kfree(ver_buf); return ret; } static loff_t adi_llseek(struct file *file, loff_t offset, int whence) { loff_t ret = -EINVAL; switch (whence) { case SEEK_END: case SEEK_DATA: case SEEK_HOLE: /* unsupported */ return -EINVAL; case SEEK_CUR: if (offset == 0) return file->f_pos; offset += file->f_pos; break; case SEEK_SET: break; } if (offset != file->f_pos) { file->f_pos = offset; file->f_version = 0; 
ret = offset; } return ret; } static const struct file_operations adi_fops = { .owner = THIS_MODULE, .llseek = adi_llseek, .open = adi_open, .read = adi_read, .write = adi_write, }; static struct miscdevice adi_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = KBUILD_MODNAME, .fops = &adi_fops, }; static int __init adi_init(void) { if (!adi_capable()) return -EPERM; return misc_register(&adi_miscdev); } static void __exit adi_exit(void) { misc_deregister(&adi_miscdev); } module_init(adi_init); module_exit(adi_exit); MODULE_AUTHOR("Tom Hromatka <[email protected]>"); MODULE_DESCRIPTION("Privileged interface to ADI"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2");
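/*
 * Illustrative user-space sketch (not part of the driver above):
 * reading ADI version tags. Note from adi_read() and adi_llseek() that
 * the file offset is measured in ADI blocks, not bytes: seeking to N
 * and reading one byte returns the MCD tag of the N-th ADI block. The
 * /dev/adi path is assumed from the miscdevice name; the block index
 * is arbitrary.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char tag;
	int fd = open("/dev/adi", O_RDONLY);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Position N = block index; the driver scales by adi_blksize(). */
	if (lseek(fd, 42, SEEK_SET) < 0 || read(fd, &tag, 1) != 1) {
		perror("read");
		close(fd);
		return 1;
	}

	printf("ADI tag of block 42: 0x%02x\n", tag);
	close(fd);
	return 0;
}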
linux-master
drivers/char/adi.c
// SPDX-License-Identifier: GPL-2.0-only /* * bios-less APM driver for ARM Linux * Jamey Hicks <[email protected]> * adapted from the APM BIOS driver for Linux by Stephen Rothwell ([email protected]) * * APM 1.2 Reference: * Intel Corporation, Microsoft Corporation. Advanced Power Management * (APM) BIOS Interface Specification, Revision 1.2, February 1996. * * This document is available from Microsoft at: * http://www.microsoft.com/whdc/archive/amp_12.mspx */ #include <linux/module.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/miscdevice.h> #include <linux/apm_bios.h> #include <linux/capability.h> #include <linux/sched.h> #include <linux/suspend.h> #include <linux/apm-emulation.h> #include <linux/freezer.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/kthread.h> #include <linux/delay.h> /* * One option can be changed at boot time as follows: * apm=on/off enable/disable APM */ /* * Maximum number of events stored */ #define APM_MAX_EVENTS 16 struct apm_queue { unsigned int event_head; unsigned int event_tail; apm_event_t events[APM_MAX_EVENTS]; }; /* * thread states (for threads using a writable /dev/apm_bios fd): * * SUSPEND_NONE: nothing happening * SUSPEND_PENDING: suspend event queued for thread and pending to be read * SUSPEND_READ: suspend event read, pending acknowledgement * SUSPEND_ACKED: acknowledgement received from thread (via ioctl), * waiting for resume * SUSPEND_ACKTO: acknowledgement timeout * SUSPEND_DONE: thread had acked suspend and is now notified of * resume * * SUSPEND_WAIT: this thread invoked suspend and is waiting for resume * * A thread migrates in one of three paths: * NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE * -6-> ACKTO -7-> NONE * NONE -8-> WAIT -9-> NONE * * While in PENDING or READ, the thread is accounted for in the * suspend_acks_pending counter. * * The transitions are invoked as follows: * 1: suspend event is signalled from the core PM code * 2: the suspend event is read from the fd by the userspace thread * 3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack) * 4: core PM code signals that we have resumed * 5: APM_IOC_SUSPEND ioctl returns * * 6: the notifier invoked from the core PM code timed out waiting * for all relevant threads to enter ACKED state and puts those * that haven't into ACKTO * 7: those threads issue APM_IOC_SUSPEND ioctl too late, * get an error * * 8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend), * ioctl code invokes pm_suspend() * 9: pm_suspend() returns indicating resume */ enum apm_suspend_state { SUSPEND_NONE, SUSPEND_PENDING, SUSPEND_READ, SUSPEND_ACKED, SUSPEND_ACKTO, SUSPEND_WAIT, SUSPEND_DONE, }; /* * The per-file APM data */ struct apm_user { struct list_head list; unsigned int suser: 1; unsigned int writer: 1; unsigned int reader: 1; int suspend_result; enum apm_suspend_state suspend_state; struct apm_queue queue; }; /* * Local variables */ static atomic_t suspend_acks_pending = ATOMIC_INIT(0); static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0); static int apm_disabled; static struct task_struct *kapmd_tsk; static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue); static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); /* * This is a list of everyone who has opened /dev/apm_bios */ static DECLARE_RWSEM(user_list_lock); static LIST_HEAD(apm_user_list); /* * kapmd info.
kapmd provides us a process context to handle * "APM" events within - specifically necessary if we're going * to be suspending the system. */ static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait); static DEFINE_SPINLOCK(kapmd_queue_lock); static struct apm_queue kapmd_queue; static DEFINE_MUTEX(state_lock); static const char driver_version[] = "1.13"; /* no spaces */ /* * Compatibility cruft until the IPAQ people move over to the new * interface. */ static void __apm_get_power_status(struct apm_power_info *info) { } /* * This allows machines to provide their own "apm get power status" function. */ void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status; EXPORT_SYMBOL(apm_get_power_status); /* * APM event queue management. */ static inline int queue_empty(struct apm_queue *q) { return q->event_head == q->event_tail; } static inline apm_event_t queue_get_event(struct apm_queue *q) { q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; return q->events[q->event_tail]; } static void queue_add_event(struct apm_queue *q, apm_event_t event) { q->event_head = (q->event_head + 1) % APM_MAX_EVENTS; if (q->event_head == q->event_tail) { static int notified; if (notified++ == 0) printk(KERN_ERR "apm: an event queue overflowed\n"); q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; } q->events[q->event_head] = event; } static void queue_event(apm_event_t event) { struct apm_user *as; down_read(&user_list_lock); list_for_each_entry(as, &apm_user_list, list) { if (as->reader) queue_add_event(&as->queue, event); } up_read(&user_list_lock); wake_up_interruptible(&apm_waitqueue); } static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos) { struct apm_user *as = fp->private_data; apm_event_t event; int i = count, ret = 0; if (count < sizeof(apm_event_t)) return -EINVAL; if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK) return -EAGAIN; wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue)); while ((i >= sizeof(event)) && !queue_empty(&as->queue)) { event = queue_get_event(&as->queue); ret = -EFAULT; if (copy_to_user(buf, &event, sizeof(event))) break; mutex_lock(&state_lock); if (as->suspend_state == SUSPEND_PENDING && (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)) as->suspend_state = SUSPEND_READ; mutex_unlock(&state_lock); buf += sizeof(event); i -= sizeof(event); } if (i < count) ret = count - i; return ret; } static __poll_t apm_poll(struct file *fp, poll_table * wait) { struct apm_user *as = fp->private_data; poll_wait(fp, &apm_waitqueue, wait); return queue_empty(&as->queue) ? 0 : EPOLLIN | EPOLLRDNORM; } /* * apm_ioctl - handle APM ioctl * * APM_IOC_SUSPEND * This IOCTL is overloaded, and performs two functions. It is used to: * - initiate a suspend * - acknowledge a suspend read from /dev/apm_bios. * Only when everyone who has opened /dev/apm_bios with write permission * has acknowledged does the actual suspend happen. */ static long apm_ioctl(struct file *filp, u_int cmd, u_long arg) { struct apm_user *as = filp->private_data; int err = -EINVAL; if (!as->suser || !as->writer) return -EPERM; switch (cmd) { case APM_IOC_SUSPEND: mutex_lock(&state_lock); as->suspend_result = -EINTR; switch (as->suspend_state) { case SUSPEND_READ: /* * If we read a suspend command from /dev/apm_bios, * then the corresponding APM_IOC_SUSPEND ioctl is * interpreted as an acknowledge.
*/ as->suspend_state = SUSPEND_ACKED; atomic_dec(&suspend_acks_pending); mutex_unlock(&state_lock); /* * suspend_acks_pending changed, the notifier needs to * be woken up for this */ wake_up(&apm_suspend_waitqueue); /* * Wait for the suspend/resume to complete. If there * are pending acknowledges, we wait here for them. * wait_event_freezable() is interruptible and pending * signal can cause busy looping. We aren't doing * anything critical, chill a bit on each iteration. */ while (wait_event_freezable(apm_suspend_waitqueue, as->suspend_state != SUSPEND_ACKED)) msleep(10); break; case SUSPEND_ACKTO: as->suspend_result = -ETIMEDOUT; mutex_unlock(&state_lock); break; default: as->suspend_state = SUSPEND_WAIT; mutex_unlock(&state_lock); /* * Otherwise it is a request to suspend the system. * Just invoke pm_suspend(), we'll handle it from * there via the notifier. */ as->suspend_result = pm_suspend(PM_SUSPEND_MEM); } mutex_lock(&state_lock); err = as->suspend_result; as->suspend_state = SUSPEND_NONE; mutex_unlock(&state_lock); break; } return err; } static int apm_release(struct inode * inode, struct file * filp) { struct apm_user *as = filp->private_data; filp->private_data = NULL; down_write(&user_list_lock); list_del(&as->list); up_write(&user_list_lock); /* * We are now unhooked from the chain. As far as new * events are concerned, we no longer exist. */ mutex_lock(&state_lock); if (as->suspend_state == SUSPEND_PENDING || as->suspend_state == SUSPEND_READ) atomic_dec(&suspend_acks_pending); mutex_unlock(&state_lock); wake_up(&apm_suspend_waitqueue); kfree(as); return 0; } static int apm_open(struct inode * inode, struct file * filp) { struct apm_user *as; as = kzalloc(sizeof(*as), GFP_KERNEL); if (as) { /* * XXX - this is a tiny bit broken, when we consider BSD * process accounting. If the device is opened by root, we * instantly flag that we used superuser privs. Who knows, * we might close the device immediately without doing a * privileged operation -- cevans */ as->suser = capable(CAP_SYS_ADMIN); as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE; as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ; down_write(&user_list_lock); list_add(&as->list, &apm_user_list); up_write(&user_list_lock); filp->private_data = as; } return as ? 0 : -ENOMEM; } static const struct file_operations apm_bios_fops = { .owner = THIS_MODULE, .read = apm_read, .poll = apm_poll, .unlocked_ioctl = apm_ioctl, .open = apm_open, .release = apm_release, .llseek = noop_llseek, }; static struct miscdevice apm_device = { .minor = APM_MINOR_DEV, .name = "apm_bios", .fops = &apm_bios_fops }; #ifdef CONFIG_PROC_FS /* * Arguments, with symbols from linux/apm_bios.h. * * 0) Linux driver version (this will change if format changes) * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2. 
* 2) APM flags from APM Installation Check (0x00): * bit 0: APM_16_BIT_SUPPORT * bit 1: APM_32_BIT_SUPPORT * bit 2: APM_IDLE_SLOWS_CLOCK * bit 3: APM_BIOS_DISABLED * bit 4: APM_BIOS_DISENGAGED * 3) AC line status * 0x00: Off-line * 0x01: On-line * 0x02: On backup power (BIOS >= 1.1 only) * 0xff: Unknown * 4) Battery status * 0x00: High * 0x01: Low * 0x02: Critical * 0x03: Charging * 0x04: Selected battery not present (BIOS >= 1.2 only) * 0xff: Unknown * 5) Battery flag * bit 0: High * bit 1: Low * bit 2: Critical * bit 3: Charging * bit 7: No system battery * 0xff: Unknown * 6) Remaining battery life (percentage of charge): * 0-100: valid * -1: Unknown * 7) Remaining battery life (time units): * Number of remaining minutes or seconds * -1: Unknown * 8) min = minutes; sec = seconds */ static int proc_apm_show(struct seq_file *m, void *v) { struct apm_power_info info; char *units; info.ac_line_status = 0xff; info.battery_status = 0xff; info.battery_flag = 0xff; info.battery_life = -1; info.time = -1; info.units = -1; if (apm_get_power_status) apm_get_power_status(&info); switch (info.units) { default: units = "?"; break; case 0: units = "min"; break; case 1: units = "sec"; break; } seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", driver_version, APM_32_BIT_SUPPORT, info.ac_line_status, info.battery_status, info.battery_flag, info.battery_life, info.time, units); return 0; } #endif static int kapmd(void *arg) { do { apm_event_t event; wait_event_interruptible(kapmd_wait, !queue_empty(&kapmd_queue) || kthread_should_stop()); if (kthread_should_stop()) break; spin_lock_irq(&kapmd_queue_lock); event = 0; if (!queue_empty(&kapmd_queue)) event = queue_get_event(&kapmd_queue); spin_unlock_irq(&kapmd_queue_lock); switch (event) { case 0: break; case APM_LOW_BATTERY: case APM_POWER_STATUS_CHANGE: queue_event(event); break; case APM_USER_SUSPEND: case APM_SYS_SUSPEND: pm_suspend(PM_SUSPEND_MEM); break; case APM_CRITICAL_SUSPEND: atomic_inc(&userspace_notification_inhibit); pm_suspend(PM_SUSPEND_MEM); atomic_dec(&userspace_notification_inhibit); break; } } while (1); return 0; } static int apm_suspend_notifier(struct notifier_block *nb, unsigned long event, void *dummy) { struct apm_user *as; int err; unsigned long apm_event; /* short-cut emergency suspends */ if (atomic_read(&userspace_notification_inhibit)) return NOTIFY_DONE; switch (event) { case PM_SUSPEND_PREPARE: case PM_HIBERNATION_PREPARE: apm_event = (event == PM_SUSPEND_PREPARE) ? APM_USER_SUSPEND : APM_USER_HIBERNATION; /* * Queue an event to all "writer" users that we want * to suspend and need their ack. */ mutex_lock(&state_lock); down_read(&user_list_lock); list_for_each_entry(as, &apm_user_list, list) { if (as->suspend_state != SUSPEND_WAIT && as->reader && as->writer && as->suser) { as->suspend_state = SUSPEND_PENDING; atomic_inc(&suspend_acks_pending); queue_add_event(&as->queue, apm_event); } } up_read(&user_list_lock); mutex_unlock(&state_lock); wake_up_interruptible(&apm_waitqueue); /* * Wait for the suspend_acks_pending variable to drop to * zero, meaning everybody acked the suspend event (or the * process was killed.) * * If the app won't answer within a short while we assume it * locked up and ignore it. */ err = wait_event_interruptible_timeout( apm_suspend_waitqueue, atomic_read(&suspend_acks_pending) == 0, 5*HZ); /* timed out */ if (err == 0) { /* * Move anybody who timed out to "ack timeout" state. 
* * We could time out and the userspace does the ACK * right after we time out but before we enter the * locked section here, but that's fine. */ mutex_lock(&state_lock); down_read(&user_list_lock); list_for_each_entry(as, &apm_user_list, list) { if (as->suspend_state == SUSPEND_PENDING || as->suspend_state == SUSPEND_READ) { as->suspend_state = SUSPEND_ACKTO; atomic_dec(&suspend_acks_pending); } } up_read(&user_list_lock); mutex_unlock(&state_lock); } /* let suspend proceed */ if (err >= 0) return NOTIFY_OK; /* interrupted by signal */ return notifier_from_errno(err); case PM_POST_SUSPEND: case PM_POST_HIBERNATION: apm_event = (event == PM_POST_SUSPEND) ? APM_NORMAL_RESUME : APM_HIBERNATION_RESUME; /* * Anyone on the APM queues will think we're still suspended. * Send a message so everyone knows we're now awake again. */ queue_event(apm_event); /* * Finally, wake up anyone who is sleeping on the suspend. */ mutex_lock(&state_lock); down_read(&user_list_lock); list_for_each_entry(as, &apm_user_list, list) { if (as->suspend_state == SUSPEND_ACKED) { /* * TODO: maybe grab error code, needs core * changes to push the error to the notifier * chain (could use the second parameter if * implemented) */ as->suspend_result = 0; as->suspend_state = SUSPEND_DONE; } } up_read(&user_list_lock); mutex_unlock(&state_lock); wake_up(&apm_suspend_waitqueue); return NOTIFY_OK; default: return NOTIFY_DONE; } } static struct notifier_block apm_notif_block = { .notifier_call = apm_suspend_notifier, }; static int __init apm_init(void) { int ret; if (apm_disabled) { printk(KERN_NOTICE "apm: disabled on user request.\n"); return -ENODEV; } kapmd_tsk = kthread_create(kapmd, NULL, "kapmd"); if (IS_ERR(kapmd_tsk)) { ret = PTR_ERR(kapmd_tsk); kapmd_tsk = NULL; goto out; } wake_up_process(kapmd_tsk); #ifdef CONFIG_PROC_FS proc_create_single("apm", 0, NULL, proc_apm_show); #endif ret = misc_register(&apm_device); if (ret) goto out_stop; ret = register_pm_notifier(&apm_notif_block); if (ret) goto out_unregister; return 0; out_unregister: misc_deregister(&apm_device); out_stop: remove_proc_entry("apm", NULL); kthread_stop(kapmd_tsk); out: return ret; } static void __exit apm_exit(void) { unregister_pm_notifier(&apm_notif_block); misc_deregister(&apm_device); remove_proc_entry("apm", NULL); kthread_stop(kapmd_tsk); } module_init(apm_init); module_exit(apm_exit); MODULE_AUTHOR("Stephen Rothwell"); MODULE_DESCRIPTION("Advanced Power Management"); MODULE_LICENSE("GPL"); #ifndef MODULE static int __init apm_setup(char *str) { while ((str != NULL) && (*str != '\0')) { if (strncmp(str, "off", 3) == 0) apm_disabled = 1; if (strncmp(str, "on", 2) == 0) apm_disabled = 0; str = strchr(str, ','); if (str != NULL) str += strspn(str, ", \t"); } return 1; } __setup("apm=", apm_setup); #endif /** * apm_queue_event - queue an APM event for kapmd * @event: APM event * * Queue an APM event for kapmd to process and ultimately take the * appropriate action. Only a subset of events are handled: * %APM_LOW_BATTERY * %APM_POWER_STATUS_CHANGE * %APM_USER_SUSPEND * %APM_SYS_SUSPEND * %APM_CRITICAL_SUSPEND */ void apm_queue_event(apm_event_t event) { unsigned long flags; spin_lock_irqsave(&kapmd_queue_lock, flags); queue_add_event(&kapmd_queue, event); spin_unlock_irqrestore(&kapmd_queue_lock, flags); wake_up_interruptible(&kapmd_wait); } EXPORT_SYMBOL(apm_queue_event);
linux-master
drivers/char/apm-emulation.c
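The suspend handshake implemented above is easiest to see from the user side. Below is a hypothetical user-space client, not part of the driver: it opens /dev/apm_bios read-write (so the driver counts it as both a reader and a writer; acking also requires CAP_SYS_ADMIN at open time), waits for suspend events, and acknowledges them with APM_IOC_SUSPEND. The driver only lets the suspend proceed once every writer has acked or the 5*HZ ack timeout fires. apm_event_t and the APM_* macros come from the uapi header <linux/apm_bios.h>.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/apm_bios.h>

int main(void)
{
	apm_event_t ev;
	int fd = open("/dev/apm_bios", O_RDWR);	/* reader + writer (+ suser if root) */

	if (fd < 0) {
		perror("open /dev/apm_bios");
		return 1;
	}
	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
		switch (ev) {
		case APM_USER_SUSPEND:
		case APM_SYS_SUSPEND:
			/* flush state here, then ack so the suspend proceeds */
			if (ioctl(fd, APM_IOC_SUSPEND) < 0)
				perror("APM_IOC_SUSPEND");
			break;
		case APM_NORMAL_RESUME:
			printf("resumed\n");
			break;
		}
	}
	close(fd);
	return 0;
}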
// SPDX-License-Identifier: GPL-2.0-only /* * OPAL Operator Panel Display Driver * * Copyright 2016, Suraj Jitindar Singh, IBM Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/miscdevice.h> #include <asm/opal.h> /* * This driver creates a character device (/dev/op_panel) which exposes the * operator panel (character LCD display) on IBM Power Systems machines * with FSPs. * A character buffer written to the device will be displayed on the * operator panel. */ static DEFINE_MUTEX(oppanel_mutex); static u32 num_lines, oppanel_size; static oppanel_line_t *oppanel_lines; static char *oppanel_data; static loff_t oppanel_llseek(struct file *filp, loff_t offset, int whence) { return fixed_size_llseek(filp, offset, whence, oppanel_size); } static ssize_t oppanel_read(struct file *filp, char __user *userbuf, size_t len, loff_t *f_pos) { return simple_read_from_buffer(userbuf, len, f_pos, oppanel_data, oppanel_size); } static int __op_panel_update_display(void) { struct opal_msg msg; int rc, token; token = opal_async_get_token_interruptible(); if (token < 0) { if (token != -ERESTARTSYS) pr_debug("Couldn't get OPAL async token [token=%d]\n", token); return token; } rc = opal_write_oppanel_async(token, oppanel_lines, num_lines); switch (rc) { case OPAL_ASYNC_COMPLETION: rc = opal_async_wait_response(token, &msg); if (rc) { pr_debug("Failed to wait for async response [rc=%d]\n", rc); break; } rc = opal_get_async_rc(msg); if (rc != OPAL_SUCCESS) { pr_debug("OPAL async call returned failed [rc=%d]\n", rc); break; } break; case OPAL_SUCCESS: break; default: pr_debug("OPAL write op-panel call failed [rc=%d]\n", rc); } opal_async_release_token(token); return rc; } static ssize_t oppanel_write(struct file *filp, const char __user *userbuf, size_t len, loff_t *f_pos) { loff_t f_pos_prev = *f_pos; ssize_t ret; int rc; if (!*f_pos) memset(oppanel_data, ' ', oppanel_size); else if (*f_pos >= oppanel_size) return -EFBIG; ret = simple_write_to_buffer(oppanel_data, oppanel_size, f_pos, userbuf, len); if (ret > 0) { rc = __op_panel_update_display(); if (rc != OPAL_SUCCESS) { pr_err_ratelimited("OPAL call failed to write to op panel display [rc=%d]\n", rc); *f_pos = f_pos_prev; return -EIO; } } return ret; } static int oppanel_open(struct inode *inode, struct file *filp) { if (!mutex_trylock(&oppanel_mutex)) { pr_debug("Device Busy\n"); return -EBUSY; } return 0; } static int oppanel_release(struct inode *inode, struct file *filp) { mutex_unlock(&oppanel_mutex); return 0; } static const struct file_operations oppanel_fops = { .owner = THIS_MODULE, .llseek = oppanel_llseek, .read = oppanel_read, .write = oppanel_write, .open = oppanel_open, .release = oppanel_release }; static struct miscdevice oppanel_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "op_panel", .fops = &oppanel_fops }; static int oppanel_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; u32 line_len; int rc, i; rc = of_property_read_u32(np, "#length", &line_len); if (rc) { pr_err_ratelimited("Operator panel length property not found\n"); return rc; } rc = of_property_read_u32(np, "#lines", &num_lines); if (rc) { pr_err_ratelimited("Operator panel lines property not found\n"); return rc; } oppanel_size = line_len * num_lines; pr_devel("Operator 
panel of size %u found with %u lines of length %u\n", oppanel_size, num_lines, line_len); oppanel_data = kcalloc(oppanel_size, sizeof(*oppanel_data), GFP_KERNEL); if (!oppanel_data) return -ENOMEM; oppanel_lines = kcalloc(num_lines, sizeof(oppanel_line_t), GFP_KERNEL); if (!oppanel_lines) { rc = -ENOMEM; goto free_oppanel_data; } memset(oppanel_data, ' ', oppanel_size); for (i = 0; i < num_lines; i++) { oppanel_lines[i].line_len = cpu_to_be64(line_len); oppanel_lines[i].line = cpu_to_be64(__pa(&oppanel_data[i * line_len])); } rc = misc_register(&oppanel_dev); if (rc) { pr_err_ratelimited("Failed to register as misc device\n"); goto free_oppanel; } return 0; free_oppanel: kfree(oppanel_lines); free_oppanel_data: kfree(oppanel_data); return rc; } static int oppanel_remove(struct platform_device *pdev) { misc_deregister(&oppanel_dev); kfree(oppanel_lines); kfree(oppanel_data); return 0; } static const struct of_device_id oppanel_match[] = { { .compatible = "ibm,opal-oppanel" }, { }, }; static struct platform_driver oppanel_driver = { .driver = { .name = "powernv-op-panel", .of_match_table = oppanel_match, }, .probe = oppanel_probe, .remove = oppanel_remove, }; module_platform_driver(oppanel_driver); MODULE_DEVICE_TABLE(of, oppanel_match); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PowerNV Operator Panel LCD Display Driver"); MODULE_AUTHOR("Suraj Jitindar Singh <[email protected]>");
linux-master
drivers/char/powernv-op-panel.c
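A hypothetical user-space sketch of the intended usage, not shipped with the driver: open /dev/op_panel (the driver's open uses mutex_trylock, so a concurrent opener gets -EBUSY) and write at offset zero, which first clears the whole buffer to spaces and then pushes the text to the panel through the async OPAL call.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *msg = argc > 1 ? argv[1] : "Hello, OPAL";
	int fd = open("/dev/op_panel", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/op_panel");	/* EBUSY if someone else holds it */
		return 1;
	}
	/* a write starting at *f_pos == 0 blanks the display first */
	if (pwrite(fd, msg, strlen(msg), 0) < 0)
		perror("pwrite");
	close(fd);
	return 0;
}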
// SPDX-License-Identifier: GPL-2.0-only /* * CMOS/NV-RAM driver for Linux * * Copyright (C) 1997 Roman Hodek <[email protected]> * idea by and with help from Richard Jelinek <[email protected]> * Portions copyright (c) 2001,2002 Sun Microsystems ([email protected]) * * This driver allows you to access the contents of the non-volatile memory in * the mc146818rtc.h real-time clock. This chip is built into all PCs and into * many Atari machines. In the former it's called "CMOS-RAM", in the latter * "NVRAM" (NV stands for non-volatile). * * The data are supplied as a (seekable) character device, /dev/nvram. The * size of this file is dependent on the controller. The usual size is 114, * the number of freely available bytes in the memory (i.e., not used by the * RTC itself). * * Checksums over the NVRAM contents are managed by this driver. In case of a * bad checksum, reads and writes return -EIO. The checksum can be initialized * to a sane state either by ioctl(NVRAM_INIT) (clear whole NVRAM) or * ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid * again; use with care!) * * 1.1 Cesar Barros: SMP locking fixes * added changelog * 1.2 Erik Gilling: Cobalt Networks support * Tim Hockin: general cleanup, Cobalt support * 1.3 Wim Van Sebroeck: convert PRINT_PROC to seq_file */ #define NVRAM_VERSION "1.3" #include <linux/module.h> #include <linux/nvram.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/miscdevice.h> #include <linux/ioport.h> #include <linux/fcntl.h> #include <linux/mc146818rtc.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/mutex.h> #include <linux/pagemap.h> #ifdef CONFIG_PPC #include <asm/nvram.h> #endif static DEFINE_MUTEX(nvram_mutex); static DEFINE_SPINLOCK(nvram_state_lock); static int nvram_open_cnt; /* #times opened */ static int nvram_open_mode; /* special open modes */ static ssize_t nvram_size; #define NVRAM_WRITE 1 /* opened for writing (exclusive) */ #define NVRAM_EXCL 2 /* opened with O_EXCL */ #ifdef CONFIG_X86 /* * These functions are provided to be called internally or by other parts of * the kernel. It's up to the caller to ensure correct checksum before reading * or after writing (needs to be done only once). * * It is worth noting that these functions all access bytes of general * purpose memory in the NVRAM - that is to say, they all add the * NVRAM_FIRST_BYTE offset. Pass them offsets into NVRAM as if you did not * know about the RTC cruft. */ #define NVRAM_BYTES (128 - NVRAM_FIRST_BYTE) /* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with * rtc_lock held. Due to the index-port/data-port design of the RTC, we * don't want two different things trying to get to it at once. (e.g. the * periodic 11 min sync from kernel/time/ntp.c vs. this driver.) 
*/ static unsigned char __nvram_read_byte(int i) { return CMOS_READ(NVRAM_FIRST_BYTE + i); } static unsigned char pc_nvram_read_byte(int i) { unsigned long flags; unsigned char c; spin_lock_irqsave(&rtc_lock, flags); c = __nvram_read_byte(i); spin_unlock_irqrestore(&rtc_lock, flags); return c; } /* This races nicely with trying to read with checksum checking (nvram_read) */ static void __nvram_write_byte(unsigned char c, int i) { CMOS_WRITE(c, NVRAM_FIRST_BYTE + i); } static void pc_nvram_write_byte(unsigned char c, int i) { unsigned long flags; spin_lock_irqsave(&rtc_lock, flags); __nvram_write_byte(c, i); spin_unlock_irqrestore(&rtc_lock, flags); } /* On PCs, the checksum is built only over bytes 2..31 */ #define PC_CKS_RANGE_START 2 #define PC_CKS_RANGE_END 31 #define PC_CKS_LOC 32 static int __nvram_check_checksum(void) { int i; unsigned short sum = 0; unsigned short expect; for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i) sum += __nvram_read_byte(i); expect = __nvram_read_byte(PC_CKS_LOC)<<8 | __nvram_read_byte(PC_CKS_LOC+1); return (sum & 0xffff) == expect; } static void __nvram_set_checksum(void) { int i; unsigned short sum = 0; for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i) sum += __nvram_read_byte(i); __nvram_write_byte(sum >> 8, PC_CKS_LOC); __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1); } static long pc_nvram_set_checksum(void) { spin_lock_irq(&rtc_lock); __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); return 0; } static long pc_nvram_initialize(void) { ssize_t i; spin_lock_irq(&rtc_lock); for (i = 0; i < NVRAM_BYTES; ++i) __nvram_write_byte(0, i); __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); return 0; } static ssize_t pc_nvram_get_size(void) { return NVRAM_BYTES; } static ssize_t pc_nvram_read(char *buf, size_t count, loff_t *ppos) { char *p = buf; loff_t i; spin_lock_irq(&rtc_lock); if (!__nvram_check_checksum()) { spin_unlock_irq(&rtc_lock); return -EIO; } for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) *p = __nvram_read_byte(i); spin_unlock_irq(&rtc_lock); *ppos = i; return p - buf; } static ssize_t pc_nvram_write(char *buf, size_t count, loff_t *ppos) { char *p = buf; loff_t i; spin_lock_irq(&rtc_lock); if (!__nvram_check_checksum()) { spin_unlock_irq(&rtc_lock); return -EIO; } for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) __nvram_write_byte(*p, i); __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); *ppos = i; return p - buf; } const struct nvram_ops arch_nvram_ops = { .read = pc_nvram_read, .write = pc_nvram_write, .read_byte = pc_nvram_read_byte, .write_byte = pc_nvram_write_byte, .get_size = pc_nvram_get_size, .set_checksum = pc_nvram_set_checksum, .initialize = pc_nvram_initialize, }; EXPORT_SYMBOL(arch_nvram_ops); #endif /* CONFIG_X86 */ /* * The are the file operation function for user access to /dev/nvram */ static loff_t nvram_misc_llseek(struct file *file, loff_t offset, int origin) { return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE, nvram_size); } static ssize_t nvram_misc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char *tmp; ssize_t ret; if (*ppos >= nvram_size) return 0; count = min_t(size_t, count, nvram_size - *ppos); count = min_t(size_t, count, PAGE_SIZE); tmp = kmalloc(count, GFP_KERNEL); if (!tmp) return -ENOMEM; ret = nvram_read(tmp, count, ppos); if (ret <= 0) goto out; if (copy_to_user(buf, tmp, ret)) { *ppos -= ret; ret = -EFAULT; } out: kfree(tmp); return ret; } static ssize_t nvram_misc_write(struct file *file, const char __user *buf, 
size_t count, loff_t *ppos) { char *tmp; ssize_t ret; if (*ppos >= nvram_size) return 0; count = min_t(size_t, count, nvram_size - *ppos); count = min_t(size_t, count, PAGE_SIZE); tmp = memdup_user(buf, count); if (IS_ERR(tmp)) return PTR_ERR(tmp); ret = nvram_write(tmp, count, ppos); kfree(tmp); return ret; } static long nvram_misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret = -ENOTTY; switch (cmd) { #ifdef CONFIG_PPC case OBSOLETE_PMAC_NVRAM_GET_OFFSET: pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n"); fallthrough; case IOC_NVRAM_GET_OFFSET: ret = -EINVAL; #ifdef CONFIG_PPC_PMAC if (machine_is(powermac)) { int part, offset; if (copy_from_user(&part, (void __user *)arg, sizeof(part)) != 0) return -EFAULT; if (part < pmac_nvram_OF || part > pmac_nvram_NR) return -EINVAL; offset = pmac_get_partition(part); if (offset < 0) return -EINVAL; if (copy_to_user((void __user *)arg, &offset, sizeof(offset)) != 0) return -EFAULT; ret = 0; } #endif break; #ifdef CONFIG_PPC32 case IOC_NVRAM_SYNC: if (ppc_md.nvram_sync != NULL) { mutex_lock(&nvram_mutex); ppc_md.nvram_sync(); mutex_unlock(&nvram_mutex); } ret = 0; break; #endif #elif defined(CONFIG_X86) || defined(CONFIG_M68K) case NVRAM_INIT: /* initialize NVRAM contents and checksum */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (arch_nvram_ops.initialize != NULL) { mutex_lock(&nvram_mutex); ret = arch_nvram_ops.initialize(); mutex_unlock(&nvram_mutex); } break; case NVRAM_SETCKS: /* just set checksum, contents unchanged (maybe useful after * checksum garbaged somehow...) */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (arch_nvram_ops.set_checksum != NULL) { mutex_lock(&nvram_mutex); ret = arch_nvram_ops.set_checksum(); mutex_unlock(&nvram_mutex); } break; #endif /* CONFIG_X86 || CONFIG_M68K */ } return ret; } static int nvram_misc_open(struct inode *inode, struct file *file) { spin_lock(&nvram_state_lock); /* Prevent multiple readers/writers if desired. */ if ((nvram_open_cnt && (file->f_flags & O_EXCL)) || (nvram_open_mode & NVRAM_EXCL)) { spin_unlock(&nvram_state_lock); return -EBUSY; } #if defined(CONFIG_X86) || defined(CONFIG_M68K) /* Prevent multiple writers if the set_checksum ioctl is implemented. */ if ((arch_nvram_ops.set_checksum != NULL) && (file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE)) { spin_unlock(&nvram_state_lock); return -EBUSY; } #endif if (file->f_flags & O_EXCL) nvram_open_mode |= NVRAM_EXCL; if (file->f_mode & FMODE_WRITE) nvram_open_mode |= NVRAM_WRITE; nvram_open_cnt++; spin_unlock(&nvram_state_lock); return 0; } static int nvram_misc_release(struct inode *inode, struct file *file) { spin_lock(&nvram_state_lock); nvram_open_cnt--; /* if only one instance is open, clear the EXCL bit */ if (nvram_open_mode & NVRAM_EXCL) nvram_open_mode &= ~NVRAM_EXCL; if (file->f_mode & FMODE_WRITE) nvram_open_mode &= ~NVRAM_WRITE; spin_unlock(&nvram_state_lock); return 0; } #if defined(CONFIG_X86) && defined(CONFIG_PROC_FS) static const char * const floppy_types[] = { "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M", "3.5'' 2.88M", "3.5'' 2.88M" }; static const char * const gfx_types[] = { "EGA, VGA, ... (with BIOS)", "CGA (40 cols)", "CGA (80 cols)", "monochrome", }; static void pc_nvram_proc_read(unsigned char *nvram, struct seq_file *seq, void *offset) { int checksum; int type; spin_lock_irq(&rtc_lock); checksum = __nvram_check_checksum(); spin_unlock_irq(&rtc_lock); seq_printf(seq, "Checksum status: %svalid\n", checksum ? 
"" : "not "); seq_printf(seq, "# floppies : %d\n", (nvram[6] & 1) ? (nvram[6] >> 6) + 1 : 0); seq_printf(seq, "Floppy 0 type : "); type = nvram[2] >> 4; if (type < ARRAY_SIZE(floppy_types)) seq_printf(seq, "%s\n", floppy_types[type]); else seq_printf(seq, "%d (unknown)\n", type); seq_printf(seq, "Floppy 1 type : "); type = nvram[2] & 0x0f; if (type < ARRAY_SIZE(floppy_types)) seq_printf(seq, "%s\n", floppy_types[type]); else seq_printf(seq, "%d (unknown)\n", type); seq_printf(seq, "HD 0 type : "); type = nvram[4] >> 4; if (type) seq_printf(seq, "%02x\n", type == 0x0f ? nvram[11] : type); else seq_printf(seq, "none\n"); seq_printf(seq, "HD 1 type : "); type = nvram[4] & 0x0f; if (type) seq_printf(seq, "%02x\n", type == 0x0f ? nvram[12] : type); else seq_printf(seq, "none\n"); seq_printf(seq, "HD type 48 data: %d/%d/%d C/H/S, precomp %d, lz %d\n", nvram[18] | (nvram[19] << 8), nvram[20], nvram[25], nvram[21] | (nvram[22] << 8), nvram[23] | (nvram[24] << 8)); seq_printf(seq, "HD type 49 data: %d/%d/%d C/H/S, precomp %d, lz %d\n", nvram[39] | (nvram[40] << 8), nvram[41], nvram[46], nvram[42] | (nvram[43] << 8), nvram[44] | (nvram[45] << 8)); seq_printf(seq, "DOS base memory: %d kB\n", nvram[7] | (nvram[8] << 8)); seq_printf(seq, "Extended memory: %d kB (configured), %d kB (tested)\n", nvram[9] | (nvram[10] << 8), nvram[34] | (nvram[35] << 8)); seq_printf(seq, "Gfx adapter : %s\n", gfx_types[(nvram[6] >> 4) & 3]); seq_printf(seq, "FPU : %sinstalled\n", (nvram[6] & 2) ? "" : "not "); return; } static int nvram_proc_read(struct seq_file *seq, void *offset) { unsigned char contents[NVRAM_BYTES]; int i = 0; spin_lock_irq(&rtc_lock); for (i = 0; i < NVRAM_BYTES; ++i) contents[i] = __nvram_read_byte(i); spin_unlock_irq(&rtc_lock); pc_nvram_proc_read(contents, seq, offset); return 0; } #endif /* CONFIG_X86 && CONFIG_PROC_FS */ static const struct file_operations nvram_misc_fops = { .owner = THIS_MODULE, .llseek = nvram_misc_llseek, .read = nvram_misc_read, .write = nvram_misc_write, .unlocked_ioctl = nvram_misc_ioctl, .open = nvram_misc_open, .release = nvram_misc_release, }; static struct miscdevice nvram_misc = { NVRAM_MINOR, "nvram", &nvram_misc_fops, }; static int __init nvram_module_init(void) { int ret; nvram_size = nvram_get_size(); if (nvram_size < 0) return nvram_size; ret = misc_register(&nvram_misc); if (ret) { pr_err("nvram: can't misc_register on minor=%d\n", NVRAM_MINOR); return ret; } #if defined(CONFIG_X86) && defined(CONFIG_PROC_FS) if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) { pr_err("nvram: can't create /proc/driver/nvram\n"); misc_deregister(&nvram_misc); return -ENOMEM; } #endif pr_info("Non-volatile memory driver v" NVRAM_VERSION "\n"); return 0; } static void __exit nvram_module_exit(void) { #if defined(CONFIG_X86) && defined(CONFIG_PROC_FS) remove_proc_entry("driver/nvram", NULL); #endif misc_deregister(&nvram_misc); } module_init(nvram_module_init); module_exit(nvram_module_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(NVRAM_MINOR); MODULE_ALIAS("devname:nvram");
linux-master
drivers/char/nvram.c
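Since the driver rejects reads and writes with -EIO when the checksum is stale, a user-space checker can recompute it the same way the driver does. The bytes handed out by /dev/nvram already have NVRAM_FIRST_BYTE folded in, so the PC checksum covers offsets 2..31 and the stored value sits big-endian at offsets 32..33, exactly as in __nvram_check_checksum() above. A hypothetical sketch:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[114];
	unsigned short sum = 0, expect;
	ssize_t n;
	int i, fd = open("/dev/nvram", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/nvram");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));	/* itself fails with EIO on a bad checksum */
	close(fd);
	if (n < 34) {
		fprintf(stderr, "short read: %zd\n", n);
		return 1;
	}
	for (i = 2; i <= 31; i++)	/* PC_CKS_RANGE_START..PC_CKS_RANGE_END */
		sum += buf[i];
	expect = (buf[32] << 8) | buf[33];	/* PC_CKS_LOC, big-endian */
	printf("checksum %s (0x%04x vs 0x%04x)\n",
	       (sum & 0xffff) == expect ? "ok" : "BAD", sum & 0xffff, expect);
	return 0;
}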
// SPDX-License-Identifier: GPL-2.0-only /* * NetWinder Button Driver- * Copyright (C) Alex Holden <[email protected]> 1998, 1999. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/timer.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/uaccess.h> #include <asm/irq.h> #include <asm/mach-types.h> #define __NWBUTTON_C /* Tell the header file who we are */ #include "nwbutton.h" static void button_sequence_finished(struct timer_list *unused); static int button_press_count; /* The count of button presses */ /* Times for the end of a sequence */ static DEFINE_TIMER(button_timer, button_sequence_finished); static DECLARE_WAIT_QUEUE_HEAD(button_wait_queue); /* Used for blocking read */ static char button_output_buffer[32]; /* Stores data to write out of device */ static int bcount; /* The number of bytes in the buffer */ static int bdelay = BUTTON_DELAY; /* The delay, in jiffies */ static struct button_callback button_callback_list[32]; /* The callback list */ static int callback_count; /* The number of callbacks registered */ static int reboot_count = NUM_PRESSES_REBOOT; /* Number of presses to reboot */ /* * This function is called by other drivers to register a callback function * to be called when a particular number of button presses occurs. * The callback list is a static array of 32 entries (I somehow doubt many * people are ever going to want to register more than 32 different actions * to be performed by the kernel on different numbers of button presses ;). * However, if an attempt to register a 33rd entry (perhaps a stuck loop * somewhere registering the same entry over and over?) it will fail to * do so and return -ENOMEM. If an attempt is made to register a null pointer, * it will fail to do so and return -EINVAL. * Because callbacks can be unregistered at random the list can become * fragmented, so we need to search through the list until we find the first * free entry. * * FIXME: Has anyone spotted any locking functions int his code recently ?? */ int button_add_callback (void (*callback) (void), int count) { int lp = 0; if (callback_count == 32) { return -ENOMEM; } if (!callback) { return -EINVAL; } callback_count++; for (; (button_callback_list [lp].callback); lp++); button_callback_list [lp].callback = callback; button_callback_list [lp].count = count; return 0; } /* * This function is called by other drivers to deregister a callback function. * If you attempt to unregister a callback which does not exist, it will fail * with -EINVAL. If there is more than one entry with the same address, * because it searches the list from end to beginning, it will unregister the * last one to be registered first (FILO- First In Last Out). * Note that this is not necessarily true if the entries are not submitted * at the same time, because another driver could have unregistered a callback * between the submissions creating a gap earlier in the list, which would * be filled first at submission time. 
*/ int button_del_callback (void (*callback) (void)) { int lp = 31; if (!callback) { return -EINVAL; } while (lp >= 0) { if ((button_callback_list [lp].callback) == callback) { button_callback_list [lp].callback = NULL; button_callback_list [lp].count = 0; callback_count--; return 0; } lp--; } return -EINVAL; } /* * This function is called by button_sequence_finished to search through the * list of callback functions, and call any of them whose count argument * matches the current count of button presses. It starts at the beginning * of the list and works up to the end. It will refuse to follow a null * pointer (which should never happen anyway). */ static void button_consume_callbacks (int bpcount) { int lp = 0; for (; lp <= 31; lp++) { if ((button_callback_list [lp].count) == bpcount) { if (button_callback_list [lp].callback) { button_callback_list[lp].callback(); } } } } /* * This function is called when the button_timer times out. * ie. When you don't press the button for bdelay jiffies, this is taken to * mean you have ended the sequence of key presses, and this function is * called to wind things up (write the press_count out to /dev/button, call * any matching registered function callbacks, initiate reboot, etc.). */ static void button_sequence_finished(struct timer_list *unused) { if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) && button_press_count == reboot_count) kill_cad_pid(SIGINT, 1); /* Ask init to reboot us */ button_consume_callbacks (button_press_count); bcount = sprintf (button_output_buffer, "%d\n", button_press_count); button_press_count = 0; /* Reset the button press counter */ wake_up_interruptible (&button_wait_queue); } /* * This handler is called when the orange button is pressed (GPIO 10 of the * SuperIO chip, which maps to logical IRQ 26). If the press_count is 0, * this is the first press, so it starts a timer and increments the counter. * If it is higher than 0, it deletes the old timer, starts a new one, and * increments the counter. */ static irqreturn_t button_handler (int irq, void *dev_id) { button_press_count++; mod_timer(&button_timer, jiffies + bdelay); return IRQ_HANDLED; } /* * This function is called when a user space program attempts to read * /dev/nwbutton. It puts the device to sleep on the wait queue until * button_sequence_finished writes some data to the buffer and flushes * the queue, at which point it writes the data out to the device and * returns the number of characters it has written. This function is * reentrant, so that many processes can be attempting to read from the * device at any one time. */ static int button_read (struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { DEFINE_WAIT(wait); prepare_to_wait(&button_wait_queue, &wait, TASK_INTERRUPTIBLE); schedule(); finish_wait(&button_wait_queue, &wait); return (copy_to_user (buffer, &button_output_buffer, bcount)) ? -EFAULT : bcount; } /* * This structure is the file operations structure, which specifies what * callbacks functions the kernel should call when a user mode process * attempts to perform these operations on the device. */ static const struct file_operations button_fops = { .owner = THIS_MODULE, .read = button_read, .llseek = noop_llseek, }; /* * This structure is the misc device structure, which specifies the minor * device number (158 in this case), the name of the device (for /proc/misc), * and the address of the above file operations structure. 
*/ static struct miscdevice button_misc_device = { BUTTON_MINOR, "nwbutton", &button_fops, }; /* * This function is called to initialise the driver, either from misc.c at * bootup if the driver is compiled into the kernel, or from init_module * below at module insert time. It attempts to register the device node * and the IRQ and fails with a warning message if either fails, though * neither ever should because the device number and IRQ are unique to * this driver. */ static int __init nwbutton_init(void) { if (!machine_is_netwinder()) return -ENODEV; printk (KERN_INFO "NetWinder Button Driver Version %s (C) Alex Holden " "<[email protected]> 1998.\n", VERSION); if (misc_register (&button_misc_device)) { printk (KERN_WARNING "nwbutton: Couldn't register device 10, " "%d.\n", BUTTON_MINOR); return -EBUSY; } if (request_irq (IRQ_NETWINDER_BUTTON, button_handler, 0, "nwbutton", NULL)) { printk (KERN_WARNING "nwbutton: IRQ %d is not free.\n", IRQ_NETWINDER_BUTTON); misc_deregister (&button_misc_device); return -EIO; } return 0; } static void __exit nwbutton_exit (void) { free_irq (IRQ_NETWINDER_BUTTON, NULL); misc_deregister (&button_misc_device); } MODULE_AUTHOR("Alex Holden"); MODULE_LICENSE("GPL"); module_init(nwbutton_init); module_exit(nwbutton_exit);
linux-master
drivers/char/nwbutton.c
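On the read side the driver sleeps the caller on button_wait_queue and, once a press sequence times out, hands back the count as ASCII decimal text (with a trailing newline from the sprintf above). A hypothetical consumer, assuming the standard /dev/nwbutton node at minor BUTTON_MINOR:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open("/dev/nwbutton", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/nwbutton");
		return 1;
	}
	for (;;) {
		/* blocks until button_sequence_finished() wakes the queue */
		n = read(fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			break;
		buf[n] = '\0';
		printf("presses: %s", buf);	/* e.g. "presses: 3" */
	}
	close(fd);
	return 0;
}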
// SPDX-License-Identifier: GPL-2.0-only /* -*- linux-c -*- * dtlk.c - DoubleTalk PC driver for Linux * * Original author: Chris Pallotta <[email protected]> * Current maintainer: Jim Van Zandt <[email protected]> * * 2000-03-18 Jim Van Zandt: Fix polling. * Eliminate dtlk_timer_active flag and separate dtlk_stop_timer * function. Don't restart timer in dtlk_timer_tick. Restart timer * in dtlk_poll after every poll. dtlk_poll returns mask (duh). * Eliminate unused function dtlk_write_byte. Misc. code cleanups. */ /* This driver is for the DoubleTalk PC, a speech synthesizer manufactured by RC Systems (http://www.rcsys.com/). It was written based on documentation in their User's Manual file and Developer's Tools disk. The DoubleTalk PC contains four voice synthesizers: text-to-speech (TTS), linear predictive coding (LPC), PCM/ADPCM, and CVSD. It also has a tone generator. Output data for LPC are written to the LPC port, and output data for the other modes are written to the TTS port. Two kinds of data can be read from the DoubleTalk: status information (in response to the "\001?" interrogation command) is read from the TTS port, and index markers (which mark the progress of the speech) are read from the LPC port. Not all models of the DoubleTalk PC implement index markers. Both the TTS and LPC ports can also display status flags. The DoubleTalk PC generates no interrupts. These characteristics are mapped into the Unix stream I/O model as follows: "write" sends bytes to the TTS port. It is the responsibility of the user program to switch modes among TTS, PCM/ADPCM, and CVSD. This driver was written for use with the text-to-speech synthesizer. If LPC output is needed some day, other minor device numbers can be used to select among output modes. "read" gets index markers from the LPC port. If the device does not implement index markers, the read will fail with error EINVAL. Status information is available using the DTLK_INTERROGATE ioctl. */ #include <linux/module.h> #define KERNEL #include <linux/types.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/errno.h> /* for -EBUSY */ #include <linux/ioport.h> /* for request_region */ #include <linux/delay.h> /* for loops_per_jiffy */ #include <linux/sched.h> #include <linux/mutex.h> #include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */ #include <linux/uaccess.h> /* for get_user, etc. */ #include <linux/wait.h> /* for wait_queue */ #include <linux/init.h> /* for __init, module_{init,exit} */ #include <linux/poll.h> /* for EPOLLIN, etc. 
*/ #include <linux/dtlk.h> /* local header file for DoubleTalk values */ #ifdef TRACING #define TRACE_TEXT(str) printk(str); #define TRACE_RET printk(")") #else /* !TRACING */ #define TRACE_TEXT(str) ((void) 0) #define TRACE_RET ((void) 0) #endif /* TRACING */ static DEFINE_MUTEX(dtlk_mutex); static void dtlk_timer_tick(struct timer_list *unused); static int dtlk_major; static int dtlk_port_lpc; static int dtlk_port_tts; static int dtlk_busy; static int dtlk_has_indexing; static unsigned int dtlk_portlist[] = {0x25e, 0x29e, 0x2de, 0x31e, 0x35e, 0x39e, 0}; static wait_queue_head_t dtlk_process_list; static DEFINE_TIMER(dtlk_timer, dtlk_timer_tick); /* prototypes for file_operations struct */ static ssize_t dtlk_read(struct file *, char __user *, size_t nbytes, loff_t * ppos); static ssize_t dtlk_write(struct file *, const char __user *, size_t nbytes, loff_t * ppos); static __poll_t dtlk_poll(struct file *, poll_table *); static int dtlk_open(struct inode *, struct file *); static int dtlk_release(struct inode *, struct file *); static long dtlk_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static const struct file_operations dtlk_fops = { .owner = THIS_MODULE, .read = dtlk_read, .write = dtlk_write, .poll = dtlk_poll, .unlocked_ioctl = dtlk_ioctl, .open = dtlk_open, .release = dtlk_release, .llseek = no_llseek, }; /* local prototypes */ static int dtlk_dev_probe(void); static struct dtlk_settings *dtlk_interrogate(void); static int dtlk_readable(void); static char dtlk_read_lpc(void); static char dtlk_read_tts(void); static int dtlk_writeable(void); static char dtlk_write_bytes(const char *buf, int n); static char dtlk_write_tts(char); /* static void dtlk_handle_error(char, char, unsigned int); */ static ssize_t dtlk_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file_inode(file)); char ch; int i = 0, retries; TRACE_TEXT("(dtlk_read"); /* printk("DoubleTalk PC - dtlk_read()\n"); */ if (minor != DTLK_MINOR || !dtlk_has_indexing) return -EINVAL; for (retries = 0; retries < loops_per_jiffy; retries++) { while (i < count && dtlk_readable()) { ch = dtlk_read_lpc(); /* printk("dtlk_read() reads 0x%02x\n", ch); */ if (put_user(ch, buf++)) return -EFAULT; i++; } if (i) return i; if (file->f_flags & O_NONBLOCK) break; msleep_interruptible(100); } if (retries == loops_per_jiffy) printk(KERN_ERR "dtlk_read times out\n"); TRACE_RET; return -EAGAIN; } static ssize_t dtlk_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos) { int i = 0, retries = 0, ch; TRACE_TEXT("(dtlk_write"); #ifdef TRACING printk(" \""); { int i, ch; for (i = 0; i < count; i++) { if (get_user(ch, buf + i)) return -EFAULT; if (' ' <= ch && ch <= '~') printk("%c", ch); else printk("\\%03o", ch); } printk("\""); } #endif if (iminor(file_inode(file)) != DTLK_MINOR) return -EINVAL; while (1) { while (i < count && !get_user(ch, buf) && (ch == DTLK_CLEAR || dtlk_writeable())) { dtlk_write_tts(ch); buf++; i++; if (i % 5 == 0) /* We yield our time until scheduled again. This reduces the transfer rate to 500 bytes/sec, but that's still enough to keep up with the speech synthesizer. */ msleep_interruptible(1); else { /* the RDY bit goes zero 2-3 usec after writing, and goes 1 again 180-190 usec later. Here, we wait up to 250 usec for the RDY bit to go nonzero. 
*/ for (retries = 0; retries < loops_per_jiffy / (4000/HZ); retries++) if (inb_p(dtlk_port_tts) & TTS_WRITABLE) break; } retries = 0; } if (i == count) return i; if (file->f_flags & O_NONBLOCK) break; msleep_interruptible(1); if (++retries > 10 * HZ) { /* wait no more than 10 sec from last write */ printk("dtlk: write timeout. " "inb_p(dtlk_port_tts) = 0x%02x\n", inb_p(dtlk_port_tts)); TRACE_RET; return -EBUSY; } } TRACE_RET; return -EAGAIN; } static __poll_t dtlk_poll(struct file *file, poll_table * wait) { __poll_t mask = 0; unsigned long expires; TRACE_TEXT(" dtlk_poll"); /* static long int j; printk("."); printk("<%ld>", jiffies-j); j=jiffies; */ poll_wait(file, &dtlk_process_list, wait); if (dtlk_has_indexing && dtlk_readable()) { del_timer(&dtlk_timer); mask = EPOLLIN | EPOLLRDNORM; } if (dtlk_writeable()) { del_timer(&dtlk_timer); mask |= EPOLLOUT | EPOLLWRNORM; } /* there are no exception conditions */ /* There won't be any interrupts, so we set a timer instead. */ expires = jiffies + 3*HZ / 100; mod_timer(&dtlk_timer, expires); return mask; } static void dtlk_timer_tick(struct timer_list *unused) { TRACE_TEXT(" dtlk_timer_tick"); wake_up_interruptible(&dtlk_process_list); } static long dtlk_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { char __user *argp = (char __user *)arg; struct dtlk_settings *sp; char portval; TRACE_TEXT(" dtlk_ioctl"); switch (cmd) { case DTLK_INTERROGATE: mutex_lock(&dtlk_mutex); sp = dtlk_interrogate(); mutex_unlock(&dtlk_mutex); if (copy_to_user(argp, sp, sizeof(struct dtlk_settings))) return -EINVAL; return 0; case DTLK_STATUS: portval = inb_p(dtlk_port_tts); return put_user(portval, argp); default: return -EINVAL; } } /* Note that nobody ever sets dtlk_busy... */ static int dtlk_open(struct inode *inode, struct file *file) { TRACE_TEXT("(dtlk_open"); switch (iminor(inode)) { case DTLK_MINOR: if (dtlk_busy) return -EBUSY; return stream_open(inode, file); default: return -ENXIO; } } static int dtlk_release(struct inode *inode, struct file *file) { TRACE_TEXT("(dtlk_release"); switch (iminor(inode)) { case DTLK_MINOR: break; default: break; } TRACE_RET; del_timer_sync(&dtlk_timer); return 0; } static int __init dtlk_init(void) { int err; dtlk_port_lpc = 0; dtlk_port_tts = 0; dtlk_busy = 0; dtlk_major = register_chrdev(0, "dtlk", &dtlk_fops); if (dtlk_major < 0) { printk(KERN_ERR "DoubleTalk PC - cannot register device\n"); return dtlk_major; } err = dtlk_dev_probe(); if (err) { unregister_chrdev(dtlk_major, "dtlk"); return err; } printk(", MAJOR %d\n", dtlk_major); init_waitqueue_head(&dtlk_process_list); return 0; } static void __exit dtlk_cleanup (void) { dtlk_write_bytes("goodbye", 8); msleep_interruptible(500); /* nap 0.50 sec but could be awakened earlier by signals... 
*/ dtlk_write_tts(DTLK_CLEAR); unregister_chrdev(dtlk_major, "dtlk"); release_region(dtlk_port_lpc, DTLK_IO_EXTENT); } module_init(dtlk_init); module_exit(dtlk_cleanup); /* ------------------------------------------------------------------------ */ static int dtlk_readable(void) { #ifdef TRACING printk(" dtlk_readable=%u@%u", inb_p(dtlk_port_lpc) != 0x7f, jiffies); #endif return inb_p(dtlk_port_lpc) != 0x7f; } static int dtlk_writeable(void) { /* TRACE_TEXT(" dtlk_writeable"); */ #ifdef TRACINGMORE printk(" dtlk_writeable=%u", (inb_p(dtlk_port_tts) & TTS_WRITABLE)!=0); #endif return inb_p(dtlk_port_tts) & TTS_WRITABLE; } static int __init dtlk_dev_probe(void) { unsigned int testval = 0; int i = 0; struct dtlk_settings *sp; if (dtlk_port_lpc | dtlk_port_tts) return -EBUSY; for (i = 0; dtlk_portlist[i]; i++) { #if 0 printk("DoubleTalk PC - Port %03x = %04x\n", dtlk_portlist[i], (testval = inw_p(dtlk_portlist[i]))); #endif if (!request_region(dtlk_portlist[i], DTLK_IO_EXTENT, "dtlk")) continue; testval = inw_p(dtlk_portlist[i]); if ((testval &= 0xfbff) == 0x107f) { dtlk_port_lpc = dtlk_portlist[i]; dtlk_port_tts = dtlk_port_lpc + 1; sp = dtlk_interrogate(); printk("DoubleTalk PC at %03x-%03x, " "ROM version %s, serial number %u", dtlk_portlist[i], dtlk_portlist[i] + DTLK_IO_EXTENT - 1, sp->rom_version, sp->serial_number); /* put LPC port into known state, so dtlk_readable() gives valid result */ outb_p(0xff, dtlk_port_lpc); /* INIT string and index marker */ dtlk_write_bytes("\036\1@\0\0012I\r", 8); /* posting an index takes 18 msec. Here, we wait up to 100 msec to see whether it appears. */ msleep_interruptible(100); dtlk_has_indexing = dtlk_readable(); #ifdef TRACING printk(", indexing %d\n", dtlk_has_indexing); #endif #ifdef INSCOPE { /* This macro records ten samples read from the LPC port, for later display */ #define LOOK \ for (i = 0; i < 10; i++) \ { \ buffer[b++] = inb_p(dtlk_port_lpc); \ __delay(loops_per_jiffy/(1000000/HZ)); \ } char buffer[1000]; int b = 0, i, j; LOOK outb_p(0xff, dtlk_port_lpc); buffer[b++] = 0; LOOK dtlk_write_bytes("\0012I\r", 4); buffer[b++] = 0; __delay(50 * loops_per_jiffy / (1000/HZ)); outb_p(0xff, dtlk_port_lpc); buffer[b++] = 0; LOOK printk("\n"); for (j = 0; j < b; j++) printk(" %02x", buffer[j]); printk("\n"); } #endif /* INSCOPE */ #ifdef OUTSCOPE { /* This macro records ten samples read from the TTS port, for later display */ #define LOOK \ for (i = 0; i < 10; i++) \ { \ buffer[b++] = inb_p(dtlk_port_tts); \ __delay(loops_per_jiffy/(1000000/HZ)); /* 1 us */ \ } char buffer[1000]; int b = 0, i, j; mdelay(10); /* 10 ms */ LOOK outb_p(0x03, dtlk_port_tts); buffer[b++] = 0; LOOK LOOK printk("\n"); for (j = 0; j < b; j++) printk(" %02x", buffer[j]); printk("\n"); } #endif /* OUTSCOPE */ dtlk_write_bytes("Double Talk found", 18); return 0; } release_region(dtlk_portlist[i], DTLK_IO_EXTENT); } printk(KERN_INFO "DoubleTalk PC - not found\n"); return -ENODEV; } /* static void dtlk_handle_error(char op, char rc, unsigned int minor) { printk(KERN_INFO"\nDoubleTalk PC - MINOR: %d, OPCODE: %d, ERROR: %d\n", minor, op, rc); return; } */ /* interrogate the DoubleTalk PC and return its settings */ static struct dtlk_settings *dtlk_interrogate(void) { unsigned char *t; static char buf[sizeof(struct dtlk_settings) + 1]; int total, i; static struct dtlk_settings status; TRACE_TEXT("(dtlk_interrogate"); dtlk_write_bytes("\030\001?", 3); for (total = 0, i = 0; i < 50; i++) { buf[total] = dtlk_read_tts(); if (total > 2 && buf[total] == 0x7f) break; if (total < 
sizeof(struct dtlk_settings)) total++; } /* if (i==50) printk("interrogate() read overrun\n"); for (i=0; i<sizeof(buf); i++) printk(" %02x", buf[i]); printk("\n"); */ t = buf; status.serial_number = t[0] + t[1] * 256; /* serial number is little endian */ t += 2; i = 0; while (*t != '\r') { status.rom_version[i] = *t; if (i < sizeof(status.rom_version) - 1) i++; t++; } status.rom_version[i] = 0; t++; status.mode = *t++; status.punc_level = *t++; status.formant_freq = *t++; status.pitch = *t++; status.speed = *t++; status.volume = *t++; status.tone = *t++; status.expression = *t++; status.ext_dict_loaded = *t++; status.ext_dict_status = *t++; status.free_ram = *t++; status.articulation = *t++; status.reverb = *t++; status.eob = *t++; status.has_indexing = dtlk_has_indexing; TRACE_RET; return &status; } static char dtlk_read_tts(void) { int portval, retries = 0; char ch; TRACE_TEXT("(dtlk_read_tts"); /* verify DT is ready, read char, wait for ACK */ do { portval = inb_p(dtlk_port_tts); } while ((portval & TTS_READABLE) == 0 && retries++ < DTLK_MAX_RETRIES); if (retries > DTLK_MAX_RETRIES) printk(KERN_ERR "dtlk_read_tts() timeout\n"); ch = inb_p(dtlk_port_tts); /* input from TTS port */ ch &= 0x7f; outb_p(ch, dtlk_port_tts); retries = 0; do { portval = inb_p(dtlk_port_tts); } while ((portval & TTS_READABLE) != 0 && retries++ < DTLK_MAX_RETRIES); if (retries > DTLK_MAX_RETRIES) printk(KERN_ERR "dtlk_read_tts() timeout\n"); TRACE_RET; return ch; } static char dtlk_read_lpc(void) { int retries = 0; char ch; TRACE_TEXT("(dtlk_read_lpc"); /* no need to test -- this is only called when the port is readable */ ch = inb_p(dtlk_port_lpc); /* input from LPC port */ outb_p(0xff, dtlk_port_lpc); /* acknowledging a read takes 3-4 usec. Here, we wait up to 20 usec for the acknowledgement */ retries = (loops_per_jiffy * 20) / (1000000/HZ); while (inb_p(dtlk_port_lpc) != 0x7f && --retries > 0); if (retries == 0) printk(KERN_ERR "dtlk_read_lpc() timeout\n"); TRACE_RET; return ch; } /* write n bytes to tts port */ static char dtlk_write_bytes(const char *buf, int n) { char val = 0; /* printk("dtlk_write_bytes(\"%-*s\", %d)\n", n, buf, n); */ TRACE_TEXT("(dtlk_write_bytes"); while (n-- > 0) val = dtlk_write_tts(*buf++); TRACE_RET; return val; } static char dtlk_write_tts(char ch) { int retries = 0; #ifdef TRACINGMORE printk(" dtlk_write_tts("); if (' ' <= ch && ch <= '~') printk("'%c'", ch); else printk("0x%02x", ch); #endif if (ch != DTLK_CLEAR) /* no flow control for CLEAR command */ while ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0 && retries++ < DTLK_MAX_RETRIES) /* DT ready? */ ; if (retries > DTLK_MAX_RETRIES) printk(KERN_ERR "dtlk_write_tts() timeout\n"); outb_p(ch, dtlk_port_tts); /* output to TTS port */ /* the RDY bit goes zero 2-3 usec after writing, and goes 1 again 180-190 usec later. Here, we wait up to 10 usec for the RDY bit to go zero. */ for (retries = 0; retries < loops_per_jiffy / (100000/HZ); retries++) if ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0) break; #ifdef TRACINGMORE printk(")\n"); #endif return 0; } MODULE_LICENSE("GPL");
linux-master
drivers/char/dtlk.c
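From user space the synthesizer is driven with plain write() for TTS text plus the DTLK_INTERROGATE ioctl for the settings block filled in by dtlk_interrogate(). A hypothetical sketch, assuming a conventional /dev/dtlk node (minor DTLK_MINOR) has been created for the dynamically assigned major; the ioctl macros and struct dtlk_settings come from <linux/dtlk.h>:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dtlk.h>

int main(void)
{
	struct dtlk_settings st;
	const char *text = "Testing, one two three.\r";
	int fd = open("/dev/dtlk", O_RDWR);

	if (fd < 0) {
		perror("open /dev/dtlk");
		return 1;
	}
	if (ioctl(fd, DTLK_INTERROGATE, &st) == 0)
		printf("ROM %s, serial %u\n",
		       (const char *)st.rom_version, st.serial_number);
	if (write(fd, text, strlen(text)) < 0)	/* '\r' ends a TTS utterance */
		perror("write");
	close(fd);
	return 0;
}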
// SPDX-License-Identifier: GPL-2.0 /* * linux/drivers/char/misc.c * * Generic misc open routine by Johan Myreen * * Based on code from Linus * * Teemu Rantanen's Microsoft Busmouse support and Derrick Cole's * changes incorporated into 0.97pl4 * by Peter Cervasio (pete%[email protected]) (08SEP92) * See busmouse.c for particulars. * * Made things a lot mode modular - easy to compile in just one or two * of the misc drivers, as they are now completely independent. Linus. * * Support for loadable modules. 8-Sep-95 Philip Blundell <[email protected]> * * Fixed a failing symbol register to free the device registration * Alan Cox <[email protected]> 21-Jan-96 * * Dynamic minors and /proc/mice by Alessandro Rubini. 26-Mar-96 * * Renamed to misc and miscdevice to be more accurate. Alan Cox 26-Mar-96 * * Handling of mouse minor numbers for kerneld: * Idea by Jacques Gelinas <[email protected]>, * adapted by Bjorn Ekwall <[email protected]> * corrected by Alan Cox <[email protected]> * * Changes for kmod (from kerneld): * Cyrus Durgin <[email protected]> * * Added devfs support. Richard Gooch <[email protected]> 10-Jan-1998 */ #include <linux/module.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/miscdevice.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/device.h> #include <linux/tty.h> #include <linux/kmod.h> #include <linux/gfp.h> /* * Head entry for the doubly linked miscdevice list */ static LIST_HEAD(misc_list); static DEFINE_MUTEX(misc_mtx); /* * Assigned numbers, used for dynamic minors */ #define DYNAMIC_MINORS 128 /* like dynamic majors */ static DEFINE_IDA(misc_minors_ida); static int misc_minor_alloc(void) { int ret; ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL); if (ret >= 0) { ret = DYNAMIC_MINORS - ret - 1; } else { ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1, MINORMASK, GFP_KERNEL); } return ret; } static void misc_minor_free(int minor) { if (minor < DYNAMIC_MINORS) ida_free(&misc_minors_ida, DYNAMIC_MINORS - minor - 1); else if (minor > MISC_DYNAMIC_MINOR) ida_free(&misc_minors_ida, minor); } #ifdef CONFIG_PROC_FS static void *misc_seq_start(struct seq_file *seq, loff_t *pos) { mutex_lock(&misc_mtx); return seq_list_start(&misc_list, *pos); } static void *misc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &misc_list, pos); } static void misc_seq_stop(struct seq_file *seq, void *v) { mutex_unlock(&misc_mtx); } static int misc_seq_show(struct seq_file *seq, void *v) { const struct miscdevice *p = list_entry(v, struct miscdevice, list); seq_printf(seq, "%3i %s\n", p->minor, p->name ? 
p->name : ""); return 0; } static const struct seq_operations misc_seq_ops = { .start = misc_seq_start, .next = misc_seq_next, .stop = misc_seq_stop, .show = misc_seq_show, }; #endif static int misc_open(struct inode *inode, struct file *file) { int minor = iminor(inode); struct miscdevice *c = NULL, *iter; int err = -ENODEV; const struct file_operations *new_fops = NULL; mutex_lock(&misc_mtx); list_for_each_entry(iter, &misc_list, list) { if (iter->minor != minor) continue; c = iter; new_fops = fops_get(iter->fops); break; } if (!new_fops) { mutex_unlock(&misc_mtx); request_module("char-major-%d-%d", MISC_MAJOR, minor); mutex_lock(&misc_mtx); list_for_each_entry(iter, &misc_list, list) { if (iter->minor != minor) continue; c = iter; new_fops = fops_get(iter->fops); break; } if (!new_fops) goto fail; } /* * Place the miscdevice in the file's * private_data so it can be used by the * file operations, including f_op->open below */ file->private_data = c; err = 0; replace_fops(file, new_fops); if (file->f_op->open) err = file->f_op->open(inode, file); fail: mutex_unlock(&misc_mtx); return err; } static char *misc_devnode(const struct device *dev, umode_t *mode) { const struct miscdevice *c = dev_get_drvdata(dev); if (mode && c->mode) *mode = c->mode; if (c->nodename) return kstrdup(c->nodename, GFP_KERNEL); return NULL; } static const struct class misc_class = { .name = "misc", .devnode = misc_devnode, }; static const struct file_operations misc_fops = { .owner = THIS_MODULE, .open = misc_open, .llseek = noop_llseek, }; /** * misc_register - register a miscellaneous device * @misc: device structure * * Register a miscellaneous device with the kernel. If the minor * number is set to %MISC_DYNAMIC_MINOR a minor number is assigned * and placed in the minor field of the structure. For other cases * the minor number requested is used. * * The structure passed is linked into the kernel and may not be * destroyed until it has been unregistered. By default, an open() * syscall to the device sets file->private_data to point to the * structure. Drivers don't need open in fops for this. * * A zero is returned on success and a negative errno code for * failure. */ int misc_register(struct miscdevice *misc) { dev_t dev; int err = 0; bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR); INIT_LIST_HEAD(&misc->list); mutex_lock(&misc_mtx); if (is_dynamic) { int i = misc_minor_alloc(); if (i < 0) { err = -EBUSY; goto out; } misc->minor = i; } else { struct miscdevice *c; list_for_each_entry(c, &misc_list, list) { if (c->minor == misc->minor) { err = -EBUSY; goto out; } } } dev = MKDEV(MISC_MAJOR, misc->minor); misc->this_device = device_create_with_groups(&misc_class, misc->parent, dev, misc, misc->groups, "%s", misc->name); if (IS_ERR(misc->this_device)) { if (is_dynamic) { misc_minor_free(misc->minor); misc->minor = MISC_DYNAMIC_MINOR; } err = PTR_ERR(misc->this_device); goto out; } /* * Add it to the front, so that later devices can "override" * earlier defaults */ list_add(&misc->list, &misc_list); out: mutex_unlock(&misc_mtx); return err; } EXPORT_SYMBOL(misc_register); /** * misc_deregister - unregister a miscellaneous device * @misc: device to unregister * * Unregister a miscellaneous device that was previously * successfully registered with misc_register(). 
*/ void misc_deregister(struct miscdevice *misc) { if (WARN_ON(list_empty(&misc->list))) return; mutex_lock(&misc_mtx); list_del(&misc->list); device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor)); misc_minor_free(misc->minor); mutex_unlock(&misc_mtx); } EXPORT_SYMBOL(misc_deregister); static int __init misc_init(void) { int err; struct proc_dir_entry *ret; ret = proc_create_seq("misc", 0, NULL, &misc_seq_ops); err = class_register(&misc_class); if (err) goto fail_remove; err = -EIO; if (register_chrdev(MISC_MAJOR, "misc", &misc_fops)) goto fail_printk; return 0; fail_printk: pr_err("unable to get major %d for misc devices\n", MISC_MAJOR); class_unregister(&misc_class); fail_remove: if (ret) remove_proc_entry("misc", NULL); return err; } subsys_initcall(misc_init);
linux-master
drivers/char/misc.c
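misc_register() is the whole client-facing API here. A minimal hypothetical out-of-tree module, not part of this file, registers a dynamic-minor device and lets the misc core create the device node for it:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>

static ssize_t hello_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	static const char msg[] = "hello\n";

	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations hello_fops = {
	.owner	= THIS_MODULE,
	.read	= hello_read,
};

static struct miscdevice hello_misc = {
	.minor	= MISC_DYNAMIC_MINOR,	/* let misc_minor_alloc() pick one */
	.name	= "hello_misc",
	.fops	= &hello_fops,
};

module_misc_device(hello_misc);

MODULE_LICENSE("GPL");

module_misc_device() is a helper in <linux/miscdevice.h> that generates the module init/exit pair around misc_register()/misc_deregister(), which is why no explicit init function is needed.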
// SPDX-License-Identifier: GPL-2.0-only /* linux/drivers/char/scx200_gpio.c National Semiconductor SCx200 GPIO driver. Allows a user space process to play with the GPIO pins. Copyright (c) 2001,2002 Christer Weinigel <[email protected]> */ #include <linux/device.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #include <asm/io.h> #include <linux/types.h> #include <linux/cdev.h> #include <linux/scx200_gpio.h> #include <linux/nsc_gpio.h> #define DRVNAME "scx200_gpio" static struct platform_device *pdev; MODULE_AUTHOR("Christer Weinigel <[email protected]>"); MODULE_DESCRIPTION("NatSemi/AMD SCx200 GPIO Pin Driver"); MODULE_LICENSE("GPL"); static int major = 0; /* default to dynamic major */ module_param(major, int, 0); MODULE_PARM_DESC(major, "Major device number"); #define MAX_PINS 32 /* 64 later, when known ok */ struct nsc_gpio_ops scx200_gpio_ops = { .owner = THIS_MODULE, .gpio_config = scx200_gpio_configure, .gpio_dump = nsc_gpio_dump, .gpio_get = scx200_gpio_get, .gpio_set = scx200_gpio_set, .gpio_change = scx200_gpio_change, .gpio_current = scx200_gpio_current }; EXPORT_SYMBOL_GPL(scx200_gpio_ops); static int scx200_gpio_open(struct inode *inode, struct file *file) { unsigned m = iminor(inode); file->private_data = &scx200_gpio_ops; if (m >= MAX_PINS) return -EINVAL; return nonseekable_open(inode, file); } static int scx200_gpio_release(struct inode *inode, struct file *file) { return 0; } static const struct file_operations scx200_gpio_fileops = { .owner = THIS_MODULE, .write = nsc_gpio_write, .read = nsc_gpio_read, .open = scx200_gpio_open, .release = scx200_gpio_release, .llseek = no_llseek, }; static struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */ static int __init scx200_gpio_init(void) { int rc; dev_t devid; if (!scx200_gpio_present()) { printk(KERN_ERR DRVNAME ": no SCx200 gpio present\n"); return -ENODEV; } /* support dev_dbg() with pdev->dev */ pdev = platform_device_alloc(DRVNAME, 0); if (!pdev) return -ENOMEM; rc = platform_device_add(pdev); if (rc) goto undo_malloc; /* nsc_gpio uses dev_dbg(), so needs this */ scx200_gpio_ops.dev = &pdev->dev; if (major) { devid = MKDEV(major, 0); rc = register_chrdev_region(devid, MAX_PINS, "scx200_gpio"); } else { rc = alloc_chrdev_region(&devid, 0, MAX_PINS, "scx200_gpio"); major = MAJOR(devid); } if (rc < 0) { dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc); goto undo_platform_device_add; } cdev_init(&scx200_gpio_cdev, &scx200_gpio_fileops); cdev_add(&scx200_gpio_cdev, devid, MAX_PINS); return 0; /* succeed */ undo_platform_device_add: platform_device_del(pdev); undo_malloc: platform_device_put(pdev); return rc; } static void __exit scx200_gpio_cleanup(void) { cdev_del(&scx200_gpio_cdev); /* cdev_put(&scx200_gpio_cdev); */ unregister_chrdev_region(MKDEV(major, 0), MAX_PINS); platform_device_unregister(pdev); } module_init(scx200_gpio_init); module_exit(scx200_gpio_cleanup);
linux-master
drivers/char/scx200_gpio.c
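/*
 * Hedged sketch of the chardev-region pattern used by scx200_gpio_init()
 * above: honour a user-supplied static major, otherwise fall back to a
 * dynamic one, then hang a single cdev over all minors. All names are
 * hypothetical; unlike the driver above, this sketch also checks the
 * cdev_add() return value.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>

#define EX_MINORS 32

static int major;               /* 0 => dynamic major */
module_param(major, int, 0);

static struct cdev ex_cdev;
static const struct file_operations ex_fops = { .owner = THIS_MODULE };

static int __init ex_init(void)
{
        dev_t devid;
        int rc;

        if (major) {
                devid = MKDEV(major, 0);
                rc = register_chrdev_region(devid, EX_MINORS, "ex_gpio");
        } else {
                rc = alloc_chrdev_region(&devid, 0, EX_MINORS, "ex_gpio");
                major = MAJOR(devid);
        }
        if (rc < 0)
                return rc;

        cdev_init(&ex_cdev, &ex_fops);
        rc = cdev_add(&ex_cdev, devid, EX_MINORS);
        if (rc) {
                unregister_chrdev_region(devid, EX_MINORS);
                return rc;
        }
        return 0;
}

static void __exit ex_exit(void)
{
        cdev_del(&ex_cdev);
        unregister_chrdev_region(MKDEV(major, 0), EX_MINORS);
}

module_init(ex_init);
module_exit(ex_exit);
MODULE_LICENSE("GPL");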
// SPDX-License-Identifier: GPL-2.0-or-later /* * linux/drivers/char/ppdev.c * * This is the code behind /dev/parport* -- it allows a user-space * application to use the parport subsystem. * * Copyright (C) 1998-2000, 2002 Tim Waugh <[email protected]> * * A /dev/parportx device node represents an arbitrary device * on port 'x'. The following operations are possible: * * open do nothing, set up default IEEE 1284 protocol to be COMPAT * close release port and unregister device (if necessary) * ioctl * EXCL register device exclusively (may fail) * CLAIM (register device first time) parport_claim_or_block * RELEASE parport_release * SETMODE set the IEEE 1284 protocol to use for read/write * SETPHASE set the IEEE 1284 phase of a particular mode. Not to be * confused with ioctl(fd, SETPHASER, &stun). ;-) * DATADIR data_forward / data_reverse * WDATA write_data * RDATA read_data * WCONTROL write_control * RCONTROL read_control * FCONTROL frob_control * RSTATUS read_status * NEGOT parport_negotiate * YIELD parport_yield_blocking * WCTLONIRQ on interrupt, set control lines * CLRIRQ clear (and return) interrupt count * SETTIME sets device timeout (struct timeval) * GETTIME gets device timeout (struct timeval) * GETMODES gets hardware supported modes (unsigned int) * GETMODE gets the current IEEE1284 mode * GETPHASE gets the current IEEE1284 phase * GETFLAGS gets current (user-visible) flags * SETFLAGS sets current (user-visible) flags * read/write read or write in current IEEE 1284 protocol * select wait for interrupt (in readfds) * * Changes: * Added SETTIME/GETTIME ioctl, Fred Barnes, 1999. * * Arnaldo Carvalho de Melo <[email protected]> 2000/08/25 * - On error, copy_from_user and copy_to_user do not return -EFAULT, * They return the positive number of bytes *not* copied due to address * space errors. * * Added GETMODES/GETMODE/GETPHASE ioctls, Fred Barnes <[email protected]>, 03/01/2001. * Added GETFLAGS/SETFLAGS ioctls, Fred Barnes, 04/2001 */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched/signal.h> #include <linux/device.h> #include <linux/ioctl.h> #include <linux/parport.h> #include <linux/ctype.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/major.h> #include <linux/ppdev.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/compat.h> #define PP_VERSION "ppdev: user-space parallel port driver" #define CHRDEV "ppdev" struct pp_struct { struct pardevice *pdev; wait_queue_head_t irq_wait; atomic_t irqc; unsigned int flags; int irqresponse; unsigned char irqctl; struct ieee1284_info state; struct ieee1284_info saved_state; long default_inactivity; int index; }; /* should we use PARDEVICE_MAX here? 
*/ static struct device *devices[PARPORT_MAX]; static DEFINE_IDA(ida_index); /* pp_struct.flags bitfields */ #define PP_CLAIMED (1<<0) #define PP_EXCL (1<<1) /* Other constants */ #define PP_INTERRUPT_TIMEOUT (10 * HZ) /* 10s */ #define PP_BUFFER_SIZE 1024 #define PARDEVICE_MAX 8 static DEFINE_MUTEX(pp_do_mutex); /* define fixed sized ioctl cmd for y2038 migration */ #define PPGETTIME32 _IOR(PP_IOCTL, 0x95, s32[2]) #define PPSETTIME32 _IOW(PP_IOCTL, 0x96, s32[2]) #define PPGETTIME64 _IOR(PP_IOCTL, 0x95, s64[2]) #define PPSETTIME64 _IOW(PP_IOCTL, 0x96, s64[2]) static inline void pp_enable_irq(struct pp_struct *pp) { struct parport *port = pp->pdev->port; port->ops->enable_irq(port); } static ssize_t pp_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned int minor = iminor(file_inode(file)); struct pp_struct *pp = file->private_data; char *kbuffer; ssize_t bytes_read = 0; struct parport *pport; int mode; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } /* Trivial case. */ if (count == 0) return 0; kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) return -ENOMEM; pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout(pp->pdev, (file->f_flags & O_NONBLOCK) ? PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_read == 0) { ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); if (mode == IEEE1284_MODE_EPP) { /* various specials for EPP mode */ int flags = 0; size_t (*fn)(struct parport *, void *, size_t, int); if (pp->flags & PP_W91284PIC) flags |= PARPORT_W91284PIC; if (pp->flags & PP_FASTREAD) flags |= PARPORT_EPP_FAST; if (pport->ieee1284.mode & IEEE1284_ADDR) fn = pport->ops->epp_read_addr; else fn = pport->ops->epp_read_data; bytes_read = (*fn)(pport, kbuffer, need, flags); } else { bytes_read = parport_read(pport, kbuffer, need); } if (bytes_read != 0) break; if (file->f_flags & O_NONBLOCK) { bytes_read = -EAGAIN; break; } if (signal_pending(current)) { bytes_read = -ERESTARTSYS; break; } cond_resched(); } parport_set_timeout(pp->pdev, pp->default_inactivity); if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read)) bytes_read = -EFAULT; kfree(kbuffer); pp_enable_irq(pp); return bytes_read; } static ssize_t pp_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned int minor = iminor(file_inode(file)); struct pp_struct *pp = file->private_data; char *kbuffer; ssize_t bytes_written = 0; ssize_t wrote; int mode; struct parport *pport; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) return -ENOMEM; pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout(pp->pdev, (file->f_flags & O_NONBLOCK) ? 
PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_written < count) { ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); if (copy_from_user(kbuffer, buf + bytes_written, n)) { bytes_written = -EFAULT; break; } if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { /* do a fast EPP write */ if (pport->ieee1284.mode & IEEE1284_ADDR) { wrote = pport->ops->epp_write_addr(pport, kbuffer, n, PARPORT_EPP_FAST); } else { wrote = pport->ops->epp_write_data(pport, kbuffer, n, PARPORT_EPP_FAST); } } else { wrote = parport_write(pp->pdev->port, kbuffer, n); } if (wrote <= 0) { if (!bytes_written) bytes_written = wrote; break; } bytes_written += wrote; if (file->f_flags & O_NONBLOCK) { if (!bytes_written) bytes_written = -EAGAIN; break; } if (signal_pending(current)) break; cond_resched(); } parport_set_timeout(pp->pdev, pp->default_inactivity); kfree(kbuffer); pp_enable_irq(pp); return bytes_written; } static void pp_irq(void *private) { struct pp_struct *pp = private; if (pp->irqresponse) { parport_write_control(pp->pdev->port, pp->irqctl); pp->irqresponse = 0; } atomic_inc(&pp->irqc); wake_up_interruptible(&pp->irq_wait); } static int register_device(int minor, struct pp_struct *pp) { struct parport *port; struct pardevice *pdev = NULL; char *name; struct pardev_cb ppdev_cb; int rc = 0, index; name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) return -ENOMEM; port = parport_find_number(minor); if (!port) { pr_warn("%s: no associated port!\n", name); rc = -ENXIO; goto err; } index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); memset(&ppdev_cb, 0, sizeof(ppdev_cb)); ppdev_cb.irq_func = pp_irq; ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; ppdev_cb.private = pp; pdev = parport_register_dev_model(port, name, &ppdev_cb, index); parport_put_port(port); if (!pdev) { pr_warn("%s: failed to register device!\n", name); rc = -ENXIO; ida_simple_remove(&ida_index, index); goto err; } pp->pdev = pdev; pp->index = index; dev_dbg(&pdev->dev, "registered pardevice\n"); err: kfree(name); return rc; } static enum ieee1284_phase init_phase(int mode) { switch (mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR)) { case IEEE1284_MODE_NIBBLE: case IEEE1284_MODE_BYTE: return IEEE1284_PH_REV_IDLE; } return IEEE1284_PH_FWD_IDLE; } static int pp_set_timeout(struct pardevice *pdev, long tv_sec, int tv_usec) { long to_jiffies; if ((tv_sec < 0) || (tv_usec < 0)) return -EINVAL; to_jiffies = usecs_to_jiffies(tv_usec); to_jiffies += tv_sec * HZ; if (to_jiffies <= 0) return -EINVAL; pdev->timeout = to_jiffies; return 0; } static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor = iminor(file_inode(file)); struct pp_struct *pp = file->private_data; struct parport *port; void __user *argp = (void __user *)arg; struct ieee1284_info *info; unsigned char reg; unsigned char mask; int mode; s32 time32[2]; s64 time64[2]; struct timespec64 ts; int ret; /* First handle the cases that don't take arguments. */ switch (cmd) { case PPCLAIM: { if (pp->flags & PP_CLAIMED) { dev_dbg(&pp->pdev->dev, "you've already got it!\n"); return -EINVAL; } /* Deferred device registration. */ if (!pp->pdev) { int err = register_device(minor, pp); if (err) return err; } ret = parport_claim_or_block(pp->pdev); if (ret < 0) return ret; pp->flags |= PP_CLAIMED; /* For interrupt-reporting to work, we need to be * informed of each interrupt. */ pp_enable_irq(pp); /* We may need to fix up the state machine. 
*/ info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; pp->default_inactivity = parport_set_timeout(pp->pdev, 0); parport_set_timeout(pp->pdev, pp->default_inactivity); return 0; } case PPEXCL: if (pp->pdev) { dev_dbg(&pp->pdev->dev, "too late for PPEXCL; already registered\n"); if (pp->flags & PP_EXCL) /* But it's not really an error. */ return 0; /* There's no chance of making the driver happy. */ return -EINVAL; } /* Just remember to register the device exclusively * when we finally do the registration. */ pp->flags |= PP_EXCL; return 0; case PPSETMODE: { int mode; if (copy_from_user(&mode, argp, sizeof(mode))) return -EFAULT; /* FIXME: validate mode */ pp->state.mode = mode; pp->state.phase = init_phase(mode); if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.mode = mode; pp->pdev->port->ieee1284.phase = pp->state.phase; } return 0; } case PPGETMODE: { int mode; if (pp->flags & PP_CLAIMED) mode = pp->pdev->port->ieee1284.mode; else mode = pp->state.mode; if (copy_to_user(argp, &mode, sizeof(mode))) return -EFAULT; return 0; } case PPSETPHASE: { int phase; if (copy_from_user(&phase, argp, sizeof(phase))) return -EFAULT; /* FIXME: validate phase */ pp->state.phase = phase; if (pp->flags & PP_CLAIMED) pp->pdev->port->ieee1284.phase = phase; return 0; } case PPGETPHASE: { int phase; if (pp->flags & PP_CLAIMED) phase = pp->pdev->port->ieee1284.phase; else phase = pp->state.phase; if (copy_to_user(argp, &phase, sizeof(phase))) return -EFAULT; return 0; } case PPGETMODES: { unsigned int modes; port = parport_find_number(minor); if (!port) return -ENODEV; modes = port->modes; parport_put_port(port); if (copy_to_user(argp, &modes, sizeof(modes))) return -EFAULT; return 0; } case PPSETFLAGS: { int uflags; if (copy_from_user(&uflags, argp, sizeof(uflags))) return -EFAULT; pp->flags &= ~PP_FLAGMASK; pp->flags |= (uflags & PP_FLAGMASK); return 0; } case PPGETFLAGS: { int uflags; uflags = pp->flags & PP_FLAGMASK; if (copy_to_user(argp, &uflags, sizeof(uflags))) return -EFAULT; return 0; } } /* end switch() */ /* Everything else requires the port to be claimed, so check * that now. */ if ((pp->flags & PP_CLAIMED) == 0) { pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } port = pp->pdev->port; switch (cmd) { case PPRSTATUS: reg = parport_read_status(port); if (copy_to_user(argp, &reg, sizeof(reg))) return -EFAULT; return 0; case PPRDATA: reg = parport_read_data(port); if (copy_to_user(argp, &reg, sizeof(reg))) return -EFAULT; return 0; case PPRCONTROL: reg = parport_read_control(port); if (copy_to_user(argp, &reg, sizeof(reg))) return -EFAULT; return 0; case PPYIELD: parport_yield_blocking(pp->pdev); return 0; case PPRELEASE: /* Save the state machine's state. 
*/ info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release(pp->pdev); pp->flags &= ~PP_CLAIMED; return 0; case PPWCONTROL: if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; parport_write_control(port, reg); return 0; case PPWDATA: if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; parport_write_data(port, reg); return 0; case PPFCONTROL: if (copy_from_user(&mask, argp, sizeof(mask))) return -EFAULT; if (copy_from_user(&reg, 1 + (unsigned char __user *) arg, sizeof(reg))) return -EFAULT; parport_frob_control(port, mask, reg); return 0; case PPDATADIR: if (copy_from_user(&mode, argp, sizeof(mode))) return -EFAULT; if (mode) port->ops->data_reverse(port); else port->ops->data_forward(port); return 0; case PPNEGOT: if (copy_from_user(&mode, argp, sizeof(mode))) return -EFAULT; switch ((ret = parport_negotiate(port, mode))) { case 0: break; case -1: /* handshake failed, peripheral not IEEE 1284 */ ret = -EIO; break; case 1: /* handshake succeeded, peripheral rejected mode */ ret = -ENXIO; break; } pp_enable_irq(pp); return ret; case PPWCTLONIRQ: if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; /* Remember what to set the control lines to, for next * time we get an interrupt. */ pp->irqctl = reg; pp->irqresponse = 1; return 0; case PPCLRIRQ: ret = atomic_read(&pp->irqc); if (copy_to_user(argp, &ret, sizeof(ret))) return -EFAULT; atomic_sub(ret, &pp->irqc); return 0; case PPSETTIME32: if (copy_from_user(time32, argp, sizeof(time32))) return -EFAULT; if ((time32[0] < 0) || (time32[1] < 0)) return -EINVAL; return pp_set_timeout(pp->pdev, time32[0], time32[1]); case PPSETTIME64: if (copy_from_user(time64, argp, sizeof(time64))) return -EFAULT; if ((time64[0] < 0) || (time64[1] < 0)) return -EINVAL; if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall()) time64[1] >>= 32; return pp_set_timeout(pp->pdev, time64[0], time64[1]); case PPGETTIME32: jiffies_to_timespec64(pp->pdev->timeout, &ts); time32[0] = ts.tv_sec; time32[1] = ts.tv_nsec / NSEC_PER_USEC; if (copy_to_user(argp, time32, sizeof(time32))) return -EFAULT; return 0; case PPGETTIME64: jiffies_to_timespec64(pp->pdev->timeout, &ts); time64[0] = ts.tv_sec; time64[1] = ts.tv_nsec / NSEC_PER_USEC; if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall()) time64[1] <<= 32; if (copy_to_user(argp, time64, sizeof(time64))) return -EFAULT; return 0; default: dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd); return -EINVAL; } /* Keep the compiler happy */ return 0; } static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; mutex_lock(&pp_do_mutex); ret = pp_do_ioctl(file, cmd, arg); mutex_unlock(&pp_do_mutex); return ret; } static int pp_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct pp_struct *pp; if (minor >= PARPORT_MAX) return -ENXIO; pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL); if (!pp) return -ENOMEM; pp->state.mode = IEEE1284_MODE_COMPAT; pp->state.phase = init_phase(pp->state.mode); pp->flags = 0; pp->irqresponse = 0; atomic_set(&pp->irqc, 0); init_waitqueue_head(&pp->irq_wait); /* Defer the actual device registration until the first claim. * That way, we know whether or not the driver wants to have * exclusive access to the port (PPEXCL). 
*/ pp->pdev = NULL; file->private_data = pp; return 0; } static int pp_release(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct pp_struct *pp = file->private_data; int compat_negot; compat_negot = 0; if (!(pp->flags & PP_CLAIMED) && pp->pdev && (pp->state.mode != IEEE1284_MODE_COMPAT)) { struct ieee1284_info *info; /* parport released, but not in compatibility mode */ parport_claim_or_block(pp->pdev); pp->flags |= PP_CLAIMED; info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; compat_negot = 1; } else if ((pp->flags & PP_CLAIMED) && pp->pdev && (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) { compat_negot = 2; } if (compat_negot) { parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT); dev_dbg(&pp->pdev->dev, "negotiated back to compatibility mode because user-space forgot\n"); } if ((pp->flags & PP_CLAIMED) && pp->pdev) { struct ieee1284_info *info; info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release(pp->pdev); if (compat_negot != 1) { pr_debug(CHRDEV "%x: released pardevice " "because user-space forgot\n", minor); } } if (pp->pdev) { parport_unregister_device(pp->pdev); ida_simple_remove(&ida_index, pp->index); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } kfree(pp); return 0; } /* No kernel lock held - fine */ static __poll_t pp_poll(struct file *file, poll_table *wait) { struct pp_struct *pp = file->private_data; __poll_t mask = 0; poll_wait(file, &pp->irq_wait, wait); if (atomic_read(&pp->irqc)) mask |= EPOLLIN | EPOLLRDNORM; return mask; } static const struct class ppdev_class = { .name = CHRDEV, }; static const struct file_operations pp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = pp_read, .write = pp_write, .poll = pp_poll, .unlocked_ioctl = pp_ioctl, .compat_ioctl = compat_ptr_ioctl, .open = pp_open, .release = pp_release, }; static void pp_attach(struct parport *port) { struct device *ret; if (devices[port->number]) return; ret = device_create(&ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number), NULL, "parport%d", port->number); if (IS_ERR(ret)) { pr_err("Failed to create device parport%d\n", port->number); return; } devices[port->number] = ret; } static void pp_detach(struct parport *port) { if (!devices[port->number]) return; device_destroy(&ppdev_class, MKDEV(PP_MAJOR, port->number)); devices[port->number] = NULL; } static int pp_probe(struct pardevice *par_dev) { struct device_driver *drv = par_dev->dev.driver; int len = strlen(drv->name); if (strncmp(par_dev->name, drv->name, len)) return -ENODEV; return 0; } static struct parport_driver pp_driver = { .name = CHRDEV, .probe = pp_probe, .match_port = pp_attach, .detach = pp_detach, .devmodel = true, }; static int __init ppdev_init(void) { int err = 0; if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) { pr_warn(CHRDEV ": unable to get major %d\n", PP_MAJOR); return -EIO; } err = class_register(&ppdev_class); if (err) goto out_chrdev; err = parport_register_driver(&pp_driver); if (err < 0) { pr_warn(CHRDEV ": unable to register with parport\n"); goto out_class; } pr_info(PP_VERSION "\n"); goto out; out_class: class_unregister(&ppdev_class); out_chrdev: unregister_chrdev(PP_MAJOR, CHRDEV); out: return err; } static void __exit ppdev_cleanup(void) { /* Clean up all parport stuff */ 
parport_unregister_driver(&pp_driver); class_unregister(&ppdev_class); unregister_chrdev(PP_MAJOR, CHRDEV); } module_init(ppdev_init); module_exit(ppdev_cleanup); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
linux-master
drivers/char/ppdev.c
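/*
 * Hedged user-space sketch of the ioctl interface documented in the
 * ppdev.c header comment above: open a port node, claim it (which also
 * triggers the deferred device registration in pp_do_ioctl()), write a
 * data byte, read the status lines back, then release. The PPCLAIM,
 * PPWDATA, PPRSTATUS and PPRELEASE ioctls come from <linux/ppdev.h>;
 * the "/dev/parport0" path is an assumption about the local setup.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ppdev.h>

int main(void)
{
        unsigned char data = 0x55, status;
        int fd = open("/dev/parport0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, PPCLAIM) < 0) {
                perror("PPCLAIM");
                close(fd);
                return 1;
        }
        if (ioctl(fd, PPWDATA, &data) < 0)
                perror("PPWDATA");
        if (ioctl(fd, PPRSTATUS, &status) == 0)
                printf("status lines: 0x%02x\n", status);

        ioctl(fd, PPRELEASE);
        close(fd);
        return 0;
}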
// SPDX-License-Identifier: GPL-2.0-only /* * PS3 FLASH ROM Storage Driver * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. */ #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/module.h> #include <asm/lv1call.h> #include <asm/ps3stor.h> #define DEVICE_NAME "ps3flash" #define FLASH_BLOCK_SIZE (256*1024) struct ps3flash_private { struct mutex mutex; /* Bounce buffer mutex */ u64 chunk_sectors; int tag; /* Start sector of buffer, -1 if invalid */ bool dirty; }; static struct ps3_storage_device *ps3flash_dev; static int ps3flash_read_write_sectors(struct ps3_storage_device *dev, u64 start_sector, int write) { struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar, start_sector, priv->chunk_sectors, write); if (res) { dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__, __LINE__, write ? "write" : "read", res); return -EIO; } return 0; } static int ps3flash_writeback(struct ps3_storage_device *dev) { struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); int res; if (!priv->dirty || priv->tag < 0) return 0; res = ps3flash_read_write_sectors(dev, priv->tag, 1); if (res) return res; priv->dirty = false; return 0; } static int ps3flash_fetch(struct ps3_storage_device *dev, u64 start_sector) { struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); int res; if (start_sector == priv->tag) return 0; res = ps3flash_writeback(dev); if (res) return res; priv->tag = -1; res = ps3flash_read_write_sectors(dev, start_sector, 0); if (res) return res; priv->tag = start_sector; return 0; } static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin) { struct ps3_storage_device *dev = ps3flash_dev; return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE, dev->regions[dev->region_idx].size*dev->blk_size); } static ssize_t ps3flash_read(char __user *userbuf, void *kernelbuf, size_t count, loff_t *pos) { struct ps3_storage_device *dev = ps3flash_dev; struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); u64 size, sector, offset; int res; size_t remaining, n; const void *src; dev_dbg(&dev->sbd.core, "%s:%u: Reading %zu bytes at position %lld to U0x%p/K0x%p\n", __func__, __LINE__, count, *pos, userbuf, kernelbuf); size = dev->regions[dev->region_idx].size*dev->blk_size; if (*pos >= size || !count) return 0; if (*pos + count > size) { dev_dbg(&dev->sbd.core, "%s:%u Truncating count from %zu to %llu\n", __func__, __LINE__, count, size - *pos); count = size - *pos; } sector = *pos / dev->bounce_size * priv->chunk_sectors; offset = *pos % dev->bounce_size; remaining = count; do { n = min_t(u64, remaining, dev->bounce_size - offset); src = dev->bounce_buf + offset; mutex_lock(&priv->mutex); res = ps3flash_fetch(dev, sector); if (res) goto fail; dev_dbg(&dev->sbd.core, "%s:%u: copy %lu bytes from 0x%p to U0x%p/K0x%p\n", __func__, __LINE__, n, src, userbuf, kernelbuf); if (userbuf) { if (copy_to_user(userbuf, src, n)) { res = -EFAULT; goto fail; } userbuf += n; } if (kernelbuf) { memcpy(kernelbuf, src, n); kernelbuf += n; } mutex_unlock(&priv->mutex); *pos += n; remaining -= n; sector += priv->chunk_sectors; offset = 0; } while (remaining > 0); return count; fail: mutex_unlock(&priv->mutex); return res; } static ssize_t ps3flash_write(const char __user *userbuf, const void *kernelbuf, size_t count, loff_t *pos) { struct ps3_storage_device *dev = ps3flash_dev; 
struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); u64 size, sector, offset; int res = 0; size_t remaining, n; void *dst; dev_dbg(&dev->sbd.core, "%s:%u: Writing %zu bytes at position %lld from U0x%p/K0x%p\n", __func__, __LINE__, count, *pos, userbuf, kernelbuf); size = dev->regions[dev->region_idx].size*dev->blk_size; if (*pos >= size || !count) return 0; if (*pos + count > size) { dev_dbg(&dev->sbd.core, "%s:%u Truncating count from %zu to %llu\n", __func__, __LINE__, count, size - *pos); count = size - *pos; } sector = *pos / dev->bounce_size * priv->chunk_sectors; offset = *pos % dev->bounce_size; remaining = count; do { n = min_t(u64, remaining, dev->bounce_size - offset); dst = dev->bounce_buf + offset; mutex_lock(&priv->mutex); if (n != dev->bounce_size) res = ps3flash_fetch(dev, sector); else if (sector != priv->tag) res = ps3flash_writeback(dev); if (res) goto fail; dev_dbg(&dev->sbd.core, "%s:%u: copy %lu bytes from U0x%p/K0x%p to 0x%p\n", __func__, __LINE__, n, userbuf, kernelbuf, dst); if (userbuf) { if (copy_from_user(dst, userbuf, n)) { res = -EFAULT; goto fail; } userbuf += n; } if (kernelbuf) { memcpy(dst, kernelbuf, n); kernelbuf += n; } priv->tag = sector; priv->dirty = true; mutex_unlock(&priv->mutex); *pos += n; remaining -= n; sector += priv->chunk_sectors; offset = 0; } while (remaining > 0); return count; fail: mutex_unlock(&priv->mutex); return res; } static ssize_t ps3flash_user_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { return ps3flash_read(buf, NULL, count, pos); } static ssize_t ps3flash_user_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { return ps3flash_write(buf, NULL, count, pos); } static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos) { return ps3flash_read(NULL, buf, count, &pos); } static ssize_t ps3flash_kernel_write(const void *buf, size_t count, loff_t pos) { ssize_t res; int wb; res = ps3flash_write(NULL, buf, count, &pos); if (res < 0) return res; /* Make kernel writes synchronous */ wb = ps3flash_writeback(ps3flash_dev); if (wb) return wb; return res; } static int ps3flash_flush(struct file *file, fl_owner_t id) { return ps3flash_writeback(ps3flash_dev); } static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file_inode(file); int err; inode_lock(inode); err = ps3flash_writeback(ps3flash_dev); inode_unlock(inode); return err; } static irqreturn_t ps3flash_interrupt(int irq, void *data) { struct ps3_storage_device *dev = data; int res; u64 tag, status; res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); if (tag != dev->tag) dev_err(&dev->sbd.core, "%s:%u: tag mismatch, got %llx, expected %llx\n", __func__, __LINE__, tag, dev->tag); if (res) { dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n", __func__, __LINE__, res, status); } else { dev->lv1_status = status; complete(&dev->done); } return IRQ_HANDLED; } static const struct file_operations ps3flash_fops = { .owner = THIS_MODULE, .llseek = ps3flash_llseek, .read = ps3flash_user_read, .write = ps3flash_user_write, .flush = ps3flash_flush, .fsync = ps3flash_fsync, }; static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = { .read = ps3flash_kernel_read, .write = ps3flash_kernel_write, }; static struct miscdevice ps3flash_misc = { .minor = MISC_DYNAMIC_MINOR, .name = DEVICE_NAME, .fops = &ps3flash_fops, }; static int ps3flash_probe(struct ps3_system_bus_device *_dev) { struct ps3_storage_device *dev = 
to_ps3_storage_device(&_dev->core); struct ps3flash_private *priv; int error; unsigned long tmp; tmp = dev->regions[dev->region_idx].start*dev->blk_size; if (tmp % FLASH_BLOCK_SIZE) { dev_err(&dev->sbd.core, "%s:%u region start %lu is not aligned\n", __func__, __LINE__, tmp); return -EINVAL; } tmp = dev->regions[dev->region_idx].size*dev->blk_size; if (tmp % FLASH_BLOCK_SIZE) { dev_err(&dev->sbd.core, "%s:%u region size %lu is not aligned\n", __func__, __LINE__, tmp); return -EINVAL; } /* use static buffer, kmalloc cannot allocate 256 KiB */ if (!ps3flash_bounce_buffer.address) return -ENODEV; if (ps3flash_dev) { dev_err(&dev->sbd.core, "Only one FLASH device is supported\n"); return -EBUSY; } ps3flash_dev = dev; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { error = -ENOMEM; goto fail; } ps3_system_bus_set_drvdata(&dev->sbd, priv); mutex_init(&priv->mutex); priv->tag = -1; dev->bounce_size = ps3flash_bounce_buffer.size; dev->bounce_buf = ps3flash_bounce_buffer.address; priv->chunk_sectors = dev->bounce_size / dev->blk_size; error = ps3stor_setup(dev, ps3flash_interrupt); if (error) goto fail_free_priv; ps3flash_misc.parent = &dev->sbd.core; error = misc_register(&ps3flash_misc); if (error) { dev_err(&dev->sbd.core, "%s:%u: misc_register failed %d\n", __func__, __LINE__, error); goto fail_teardown; } dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n", __func__, __LINE__, ps3flash_misc.minor); ps3_os_area_flash_register(&ps3flash_kernel_ops); return 0; fail_teardown: ps3stor_teardown(dev); fail_free_priv: kfree(priv); ps3_system_bus_set_drvdata(&dev->sbd, NULL); fail: ps3flash_dev = NULL; return error; } static void ps3flash_remove(struct ps3_system_bus_device *_dev) { struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); ps3_os_area_flash_register(NULL); misc_deregister(&ps3flash_misc); ps3stor_teardown(dev); kfree(ps3_system_bus_get_drvdata(&dev->sbd)); ps3_system_bus_set_drvdata(&dev->sbd, NULL); ps3flash_dev = NULL; } static struct ps3_system_bus_driver ps3flash = { .match_id = PS3_MATCH_ID_STOR_FLASH, .core.name = DEVICE_NAME, .core.owner = THIS_MODULE, .probe = ps3flash_probe, .remove = ps3flash_remove, .shutdown = ps3flash_remove, }; static int __init ps3flash_init(void) { return ps3_system_bus_driver_register(&ps3flash); } static void __exit ps3flash_exit(void) { ps3_system_bus_driver_unregister(&ps3flash); } module_init(ps3flash_init); module_exit(ps3flash_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PS3 FLASH ROM Storage Driver"); MODULE_AUTHOR("Sony Corporation"); MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_FLASH);
linux-master
drivers/char/ps3flash.c
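/*
 * Hedged arithmetic sketch of the bounce-buffer addressing used by
 * ps3flash_read()/ps3flash_write() above: a byte position is split into
 * the starting tag sector of a bounce-buffer-sized chunk plus an offset
 * inside that chunk. The block and buffer sizes below are illustrative,
 * not the real PS3 geometry.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t blk_size = 512;
        const uint64_t bounce_size = 256 * 1024;        /* FLASH_BLOCK_SIZE */
        const uint64_t chunk_sectors = bounce_size / blk_size;
        uint64_t pos = 300 * 1024;                      /* arbitrary file offset */

        uint64_t sector = pos / bounce_size * chunk_sectors;
        uint64_t offset = pos % bounce_size;

        printf("pos %llu -> tag sector %llu, offset %llu\n",
               (unsigned long long)pos,
               (unsigned long long)sector,
               (unsigned long long)offset);
        return 0;
}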
// SPDX-License-Identifier: GPL-2.0 /* Marvell CN10K RVU Hardware Random Number Generator. * * Copyright (C) 2021 Marvell. * */ #include <linux/hw_random.h> #include <linux/io.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/delay.h> #include <linux/arm-smccc.h> /* CSRs */ #define RNM_CTL_STATUS 0x000 #define RNM_ENTROPY_STATUS 0x008 #define RNM_CONST 0x030 #define RNM_EBG_ENT 0x048 #define RNM_PF_EBG_HEALTH 0x050 #define RNM_PF_RANDOM 0x400 #define RNM_TRNG_RESULT 0x408 /* Extended TRNG Read and Status Registers */ #define RNM_PF_TRNG_DAT 0x1000 #define RNM_PF_TRNG_RES 0x1008 struct cn10k_rng { void __iomem *reg_base; struct hwrng ops; struct pci_dev *pdev; /* Octeon CN10K-A A0/A1, CNF10K-A A0/A1 and CNF10K-B A0/B0 * does not support extended TRNG registers */ bool extended_trng_regs; }; #define PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE 0xc2000b0f #define PCI_SUBSYS_DEVID_CN10K_A_RNG 0xB900 #define PCI_SUBSYS_DEVID_CNF10K_A_RNG 0xBA00 #define PCI_SUBSYS_DEVID_CNF10K_B_RNG 0xBC00 static bool cn10k_is_extended_trng_regs_supported(struct pci_dev *pdev) { /* CN10K-A A0/A1 */ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RNG) && (!pdev->revision || (pdev->revision & 0xff) == 0x50 || (pdev->revision & 0xff) == 0x51)) return false; /* CNF10K-A A0 */ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_RNG) && (!pdev->revision || (pdev->revision & 0xff) == 0x60 || (pdev->revision & 0xff) == 0x61)) return false; /* CNF10K-B A0/B0 */ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_RNG) && (!pdev->revision || (pdev->revision & 0xff) == 0x70 || (pdev->revision & 0xff) == 0x74)) return false; return true; } static unsigned long reset_rng_health_state(struct cn10k_rng *rng) { struct arm_smccc_res res; /* Send SMC service call to reset EBG health state */ arm_smccc_smc(PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE, 0, 0, 0, 0, 0, 0, 0, &res); return res.a0; } static int check_rng_health(struct cn10k_rng *rng) { u64 status; unsigned long err; /* Skip checking health */ if (!rng->reg_base) return -ENODEV; status = readq(rng->reg_base + RNM_PF_EBG_HEALTH); if (status & BIT_ULL(20)) { err = reset_rng_health_state(rng); if (err) { dev_err(&rng->pdev->dev, "HWRNG: Health test failed (status=%llx)\n", status); dev_err(&rng->pdev->dev, "HWRNG: error during reset (error=%lx)\n", err); return -EIO; } } return 0; } /* Returns true when valid data available otherwise return false */ static bool cn10k_read_trng(struct cn10k_rng *rng, u64 *value) { u16 retry_count = 0; u64 upper, lower; u64 status; if (rng->extended_trng_regs) { do { *value = readq(rng->reg_base + RNM_PF_TRNG_DAT); if (*value) return true; status = readq(rng->reg_base + RNM_PF_TRNG_RES); if (!status && (retry_count++ > 0x1000)) return false; } while (!status); } *value = readq(rng->reg_base + RNM_PF_RANDOM); /* HW can run out of entropy if large amount random data is read in * quick succession. Zeros may not be real random data from HW. 
*/ if (!*value) { upper = readq(rng->reg_base + RNM_PF_RANDOM); lower = readq(rng->reg_base + RNM_PF_RANDOM); while (!(upper & 0x00000000FFFFFFFFULL)) upper = readq(rng->reg_base + RNM_PF_RANDOM); while (!(lower & 0xFFFFFFFF00000000ULL)) lower = readq(rng->reg_base + RNM_PF_RANDOM); *value = (upper & 0xFFFFFFFF00000000) | (lower & 0xFFFFFFFF); } return true; } static int cn10k_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait) { struct cn10k_rng *rng = (struct cn10k_rng *)hwrng->priv; unsigned int size; u8 *pos = data; int err = 0; u64 value; err = check_rng_health(rng); if (err) return err; size = max; while (size >= 8) { if (!cn10k_read_trng(rng, &value)) goto out; *((u64 *)pos) = value; size -= 8; pos += 8; } if (size > 0) { if (!cn10k_read_trng(rng, &value)) goto out; while (size > 0) { *pos = (u8)value; value >>= 8; size--; pos++; } } out: return max - size; } static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct cn10k_rng *rng; int err; rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); if (!rng) return -ENOMEM; rng->pdev = pdev; pci_set_drvdata(pdev, rng); rng->reg_base = pcim_iomap(pdev, 0, 0); if (!rng->reg_base) return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n"); rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "cn10k-rng-%s", dev_name(&pdev->dev)); if (!rng->ops.name) return -ENOMEM; rng->ops.read = cn10k_rng_read; rng->ops.priv = (unsigned long)rng; rng->extended_trng_regs = cn10k_is_extended_trng_regs_supported(pdev); reset_rng_health_state(rng); err = devm_hwrng_register(&pdev->dev, &rng->ops); if (err) return dev_err_probe(&pdev->dev, err, "Could not register hwrng device.\n"); return 0; } static const struct pci_device_id cn10k_rng_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA098) }, /* RNG PF */ {0,}, }; MODULE_DEVICE_TABLE(pci, cn10k_rng_id_table); static struct pci_driver cn10k_rng_driver = { .name = "cn10k_rng", .id_table = cn10k_rng_id_table, .probe = cn10k_rng_probe, }; module_pci_driver(cn10k_rng_driver); MODULE_AUTHOR("Sunil Goutham <[email protected]>"); MODULE_DESCRIPTION("Marvell CN10K HW RNG Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/char/hw_random/cn10k-rng.c
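/*
 * Hedged user-space sketch: once a driver like the one above is the
 * current hardware RNG, its output is consumed through the rng-core
 * character device /dev/hwrng with a plain read(2); no ioctls are
 * involved. The 16-byte request size is arbitrary.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        unsigned char buf[16];
        ssize_t n, i;
        int fd = open("/dev/hwrng", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/hwrng");
                return 1;
        }
        n = read(fd, buf, sizeof(buf));
        if (n > 0) {
                for (i = 0; i < n; i++)
                        printf("%02x", buf[i]);
                putchar('\n');
        }
        close(fd);
        return n > 0 ? 0 : 1;
}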
// SPDX-License-Identifier: GPL-2.0 /* * Hardware Random Number Generator support. * Cavium Thunder, Marvell OcteonTx/Tx2 processor families. * * Copyright (C) 2016 Cavium, Inc. */ #include <linux/hw_random.h> #include <linux/io.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <asm/arch_timer.h> /* PCI device IDs */ #define PCI_DEVID_CAVIUM_RNG_PF 0xA018 #define PCI_DEVID_CAVIUM_RNG_VF 0xA033 #define HEALTH_STATUS_REG 0x38 /* RST device info */ #define PCI_DEVICE_ID_RST_OTX2 0xA085 #define RST_BOOT_REG 0x1600ULL #define CLOCK_BASE_RATE 50000000ULL #define MSEC_TO_NSEC(x) (x * 1000000) struct cavium_rng { struct hwrng ops; void __iomem *result; void __iomem *pf_regbase; struct pci_dev *pdev; u64 clock_rate; u64 prev_error; u64 prev_time; }; static inline bool is_octeontx(struct pci_dev *pdev) { if (midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_83XX, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(3, 0)) || midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_81XX, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(3, 0)) || midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(3, 0))) return true; return false; } static u64 rng_get_coprocessor_clkrate(void) { u64 ret = CLOCK_BASE_RATE * 16; /* Assume 800Mhz as default */ struct pci_dev *pdev; void __iomem *base; pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_RST_OTX2, NULL); if (!pdev) goto error; base = pci_ioremap_bar(pdev, 0); if (!base) goto error_put_pdev; /* RST: PNR_MUL * 50Mhz gives clockrate */ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT_REG) >> 33) & 0x3F); iounmap(base); error_put_pdev: pci_dev_put(pdev); error: return ret; } static int check_rng_health(struct cavium_rng *rng) { u64 cur_err, cur_time; u64 status, cycles; u64 time_elapsed; /* Skip checking health for OcteonTx */ if (!rng->pf_regbase) return 0; status = readq(rng->pf_regbase + HEALTH_STATUS_REG); if (status & BIT_ULL(0)) { dev_err(&rng->pdev->dev, "HWRNG: Startup health test failed\n"); return -EIO; } cycles = status >> 1; if (!cycles) return 0; cur_time = arch_timer_read_counter(); /* RNM_HEALTH_STATUS[CYCLES_SINCE_HEALTH_FAILURE] * Number of coprocessor cycles times 2 since the last failure. * This field doesn't get cleared/updated until another failure. */ cycles = cycles / 2; cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */ /* Ignore errors that happenned a long time ago, these * are most likely false positive errors. */ if (cur_err > MSEC_TO_NSEC(10)) { rng->prev_error = 0; rng->prev_time = 0; return 0; } if (rng->prev_error) { /* Calculate time elapsed since last error * '1' tick of CNTVCT is 10ns, since it runs at 100Mhz. */ time_elapsed = (cur_time - rng->prev_time) * 10; time_elapsed += rng->prev_error; /* Check if current error is a new one or the old one itself. * If error is a new one then consider there is a persistent * issue with entropy, declare hardware failure. 
*/ if (cur_err < time_elapsed) { dev_err(&rng->pdev->dev, "HWRNG failure detected\n"); rng->prev_error = cur_err; rng->prev_time = cur_time; return -EIO; } } rng->prev_error = cur_err; rng->prev_time = cur_time; return 0; } /* Read data from the RNG unit */ static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait) { struct cavium_rng *p = container_of(rng, struct cavium_rng, ops); unsigned int size = max; int err = 0; err = check_rng_health(p); if (err) return err; while (size >= 8) { *((u64 *)dat) = readq(p->result); size -= 8; dat += 8; } while (size > 0) { *((u8 *)dat) = readb(p->result); size--; dat++; } return max; } static int cavium_map_pf_regs(struct cavium_rng *rng) { struct pci_dev *pdev; /* Health status is not supported on 83xx, skip mapping PF CSRs */ if (is_octeontx(rng->pdev)) { rng->pf_regbase = NULL; return 0; } pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_PF, NULL); if (!pdev) { pr_err("Cannot find RNG PF device\n"); return -EIO; } rng->pf_regbase = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!rng->pf_regbase) { dev_err(&pdev->dev, "Failed to map PF CSR region\n"); pci_dev_put(pdev); return -ENOMEM; } pci_dev_put(pdev); /* Get co-processor clock rate */ rng->clock_rate = rng_get_coprocessor_clkrate(); return 0; } /* Map Cavium RNG to an HWRNG object */ static int cavium_rng_probe_vf(struct pci_dev *pdev, const struct pci_device_id *id) { struct cavium_rng *rng; int ret; rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); if (!rng) return -ENOMEM; rng->pdev = pdev; /* Map the RNG result */ rng->result = pcim_iomap(pdev, 0, 0); if (!rng->result) { dev_err(&pdev->dev, "Error iomap failed retrieving result.\n"); return -ENOMEM; } rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "cavium-rng-%s", dev_name(&pdev->dev)); if (!rng->ops.name) return -ENOMEM; rng->ops.read = cavium_rng_read; pci_set_drvdata(pdev, rng); /* Health status is available only at PF, hence map PF registers. */ ret = cavium_map_pf_regs(rng); if (ret) return ret; ret = devm_hwrng_register(&pdev->dev, &rng->ops); if (ret) { dev_err(&pdev->dev, "Error registering device as HWRNG.\n"); return ret; } return 0; } /* Remove the VF */ static void cavium_rng_remove_vf(struct pci_dev *pdev) { struct cavium_rng *rng; rng = pci_get_drvdata(pdev); iounmap(rng->pf_regbase); } static const struct pci_device_id cavium_rng_vf_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_VF) }, { 0, } }; MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table); static struct pci_driver cavium_rng_vf_driver = { .name = "cavium_rng_vf", .id_table = cavium_rng_vf_id_table, .probe = cavium_rng_probe_vf, .remove = cavium_rng_remove_vf, }; module_pci_driver(cavium_rng_vf_driver); MODULE_AUTHOR("Omer Khaliq <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/char/hw_random/cavium-rng-vf.c
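/*
 * Hedged sketch of the health-status arithmetic in check_rng_health()
 * above: RNM_HEALTH_STATUS reports twice the number of co-processor
 * cycles since the last failure, which the driver halves and converts
 * to nanoseconds against the clock rate derived from RST_BOOT. The
 * numbers below are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t clock_rate = 50000000ULL * 16; /* 800 MHz default */
        uint64_t status_cycles = 1600000;       /* field value = cycles * 2 */
        uint64_t cycles = status_cycles / 2;
        uint64_t err_ns = cycles * 1000000000ULL / clock_rate;

        printf("last failure was ~%llu ns ago\n", (unsigned long long)err_ns);
        return 0;
}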
// SPDX-License-Identifier: GPL-2.0-only /* * ST Random Number Generator Driver ST's Platforms * * Author: Pankaj Dev: <[email protected]> * Lee Jones <[email protected]> * * Copyright (C) 2015 STMicroelectronics (R&D) Limited */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> /* Registers */ #define ST_RNG_STATUS_REG 0x20 #define ST_RNG_DATA_REG 0x24 /* Registers fields */ #define ST_RNG_STATUS_BAD_SEQUENCE BIT(0) #define ST_RNG_STATUS_BAD_ALTERNANCE BIT(1) #define ST_RNG_STATUS_FIFO_FULL BIT(5) #define ST_RNG_SAMPLE_SIZE 2 /* 2 Byte (16bit) samples */ #define ST_RNG_FIFO_DEPTH 4 #define ST_RNG_FIFO_SIZE (ST_RNG_FIFO_DEPTH * ST_RNG_SAMPLE_SIZE) /* * Samples are documented to be available every 0.667us, so in theory * the 4 sample deep FIFO should take 2.668us to fill. However, during * thorough testing, it became apparent that filling the FIFO actually * takes closer to 12us. We then multiply by 2 in order to account for * the lack of udelay()'s reliability, suggested by Russell King. */ #define ST_RNG_FILL_FIFO_TIMEOUT (12 * 2) struct st_rng_data { void __iomem *base; struct hwrng ops; }; static int st_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct st_rng_data *ddata = (struct st_rng_data *)rng->priv; u32 status; int i; /* Wait until FIFO is full - max 4uS*/ for (i = 0; i < ST_RNG_FILL_FIFO_TIMEOUT; i++) { status = readl_relaxed(ddata->base + ST_RNG_STATUS_REG); if (status & ST_RNG_STATUS_FIFO_FULL) break; udelay(1); } if (i == ST_RNG_FILL_FIFO_TIMEOUT) return 0; for (i = 0; i < ST_RNG_FIFO_SIZE && i < max; i += 2) *(u16 *)(data + i) = readl_relaxed(ddata->base + ST_RNG_DATA_REG); return i; /* No of bytes read */ } static int st_rng_probe(struct platform_device *pdev) { struct st_rng_data *ddata; struct clk *clk; void __iomem *base; int ret; ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); ddata->ops.priv = (unsigned long)ddata; ddata->ops.read = st_rng_read; ddata->ops.name = pdev->name; ddata->base = base; ret = devm_hwrng_register(&pdev->dev, &ddata->ops); if (ret) { dev_err(&pdev->dev, "Failed to register HW RNG\n"); return ret; } dev_info(&pdev->dev, "Successfully registered HW RNG\n"); return 0; } static const struct of_device_id st_rng_match[] __maybe_unused = { { .compatible = "st,rng" }, {}, }; MODULE_DEVICE_TABLE(of, st_rng_match); static struct platform_driver st_rng_driver = { .driver = { .name = "st-hwrandom", .of_match_table = of_match_ptr(st_rng_match), }, .probe = st_rng_probe, }; module_platform_driver(st_rng_driver); MODULE_AUTHOR("Pankaj Dev <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/char/hw_random/st-rng.c
/* * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family * * Copyright (C) 2009 Nokia Corporation * Author: Juha Yrjola <[email protected]> * * Copyright (C) 2013 Pali Rohár <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/module.h> #include <linux/init.h> #include <linux/random.h> #include <linux/hw_random.h> #include <linux/workqueue.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #define RNG_RESET 0x01 #define RNG_GEN_PRNG_HW_INIT 0x02 #define RNG_GEN_HW 0x08 struct omap_rom_rng { struct clk *clk; struct device *dev; struct hwrng ops; u32 (*rom_rng_call)(u32 ptr, u32 count, u32 flag); }; static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w) { struct omap_rom_rng *ddata; u32 ptr; int r; ddata = (struct omap_rom_rng *)rng->priv; r = pm_runtime_get_sync(ddata->dev); if (r < 0) { pm_runtime_put_noidle(ddata->dev); return r; } ptr = virt_to_phys(data); r = ddata->rom_rng_call(ptr, 4, RNG_GEN_HW); if (r != 0) r = -EINVAL; else r = 4; pm_runtime_mark_last_busy(ddata->dev); pm_runtime_put_autosuspend(ddata->dev); return r; } static int __maybe_unused omap_rom_rng_runtime_suspend(struct device *dev) { struct omap_rom_rng *ddata; int r; ddata = dev_get_drvdata(dev); r = ddata->rom_rng_call(0, 0, RNG_RESET); if (r != 0) dev_err(dev, "reset failed: %d\n", r); clk_disable_unprepare(ddata->clk); return 0; } static int __maybe_unused omap_rom_rng_runtime_resume(struct device *dev) { struct omap_rom_rng *ddata; int r; ddata = dev_get_drvdata(dev); r = clk_prepare_enable(ddata->clk); if (r < 0) return r; r = ddata->rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); if (r != 0) { clk_disable_unprepare(ddata->clk); dev_err(dev, "HW init failed: %d\n", r); return -EIO; } return 0; } static void omap_rom_rng_finish(void *data) { struct omap_rom_rng *ddata = data; pm_runtime_dont_use_autosuspend(ddata->dev); pm_runtime_disable(ddata->dev); } static int omap3_rom_rng_probe(struct platform_device *pdev) { struct omap_rom_rng *ddata; int ret = 0; ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; ddata->dev = &pdev->dev; ddata->ops.priv = (unsigned long)ddata; ddata->ops.name = "omap3-rom"; ddata->ops.read = of_device_get_match_data(&pdev->dev); ddata->ops.quality = 900; if (!ddata->ops.read) { dev_err(&pdev->dev, "missing rom code handler\n"); return -ENODEV; } dev_set_drvdata(ddata->dev, ddata); ddata->rom_rng_call = pdev->dev.platform_data; if (!ddata->rom_rng_call) { dev_err(ddata->dev, "rom_rng_call is NULL\n"); return -EINVAL; } ddata->clk = devm_clk_get(ddata->dev, "ick"); if (IS_ERR(ddata->clk)) { dev_err(ddata->dev, "unable to get RNG clock\n"); return PTR_ERR(ddata->clk); } pm_runtime_enable(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, 500); pm_runtime_use_autosuspend(&pdev->dev); ret = devm_add_action_or_reset(ddata->dev, omap_rom_rng_finish, ddata); if (ret) return ret; return devm_hwrng_register(ddata->dev, &ddata->ops); } static const struct of_device_id omap_rom_rng_match[] = { { .compatible = "nokia,n900-rom-rng", .data = omap3_rom_rng_read, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, omap_rom_rng_match); static const struct dev_pm_ops omap_rom_rng_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(omap_rom_rng_runtime_suspend, 
omap_rom_rng_runtime_resume) }; static struct platform_driver omap3_rom_rng_driver = { .driver = { .name = "omap3-rom-rng", .of_match_table = omap_rom_rng_match, .pm = &omap_rom_rng_pm_ops, }, .probe = omap3_rom_rng_probe, }; module_platform_driver(omap3_rom_rng_driver); MODULE_ALIAS("platform:omap3-rom-rng"); MODULE_AUTHOR("Juha Yrjola"); MODULE_AUTHOR("Pali Rohár <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/omap3-rom-rng.c
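/*
 * Hedged sketch of the runtime-PM autosuspend pattern used by
 * omap3_rom_rng_read() above: take a usage reference before touching
 * the hardware, balance the count with put_noidle() on failure, and on
 * success mark activity before dropping the reference so the device
 * suspends only after the autosuspend delay. The hardware operation in
 * the middle is a placeholder.
 */
#include <linux/pm_runtime.h>
#include <linux/device.h>

static int example_hw_access(struct device *dev)
{
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0) {
                /* balance the usage count even on failure */
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... touch the hardware: it is guaranteed powered here ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}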
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Nuvoton Technology corporation. #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/init.h> #include <linux/random.h> #include <linux/err.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/hw_random.h> #include <linux/delay.h> #include <linux/pm_runtime.h> #define NPCM_RNGCS_REG 0x00 /* Control and status register */ #define NPCM_RNGD_REG 0x04 /* Data register */ #define NPCM_RNGMODE_REG 0x08 /* Mode register */ #define NPCM_RNG_CLK_SET_62_5MHZ BIT(2) /* 60-80 MHz */ #define NPCM_RNG_CLK_SET_25MHZ GENMASK(4, 3) /* 20-25 MHz */ #define NPCM_RNG_DATA_VALID BIT(1) #define NPCM_RNG_ENABLE BIT(0) #define NPCM_RNG_M1ROSEL BIT(1) #define NPCM_RNG_TIMEOUT_USEC 20000 #define NPCM_RNG_POLL_USEC 1000 #define to_npcm_rng(p) container_of(p, struct npcm_rng, rng) struct npcm_rng { void __iomem *base; struct hwrng rng; u32 clkp; }; static int npcm_rng_init(struct hwrng *rng) { struct npcm_rng *priv = to_npcm_rng(rng); writel(priv->clkp | NPCM_RNG_ENABLE, priv->base + NPCM_RNGCS_REG); return 0; } static void npcm_rng_cleanup(struct hwrng *rng) { struct npcm_rng *priv = to_npcm_rng(rng); writel(priv->clkp, priv->base + NPCM_RNGCS_REG); } static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct npcm_rng *priv = to_npcm_rng(rng); int retval = 0; int ready; pm_runtime_get_sync((struct device *)priv->rng.priv); while (max) { if (wait) { if (readb_poll_timeout(priv->base + NPCM_RNGCS_REG, ready, ready & NPCM_RNG_DATA_VALID, NPCM_RNG_POLL_USEC, NPCM_RNG_TIMEOUT_USEC)) break; } else { if ((readb(priv->base + NPCM_RNGCS_REG) & NPCM_RNG_DATA_VALID) == 0) break; } *(u8 *)buf = readb(priv->base + NPCM_RNGD_REG); retval++; buf++; max--; } pm_runtime_mark_last_busy((struct device *)priv->rng.priv); pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv); return retval || !wait ? 
retval : -EIO; } static int npcm_rng_probe(struct platform_device *pdev) { struct npcm_rng *priv; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); dev_set_drvdata(&pdev->dev, priv); pm_runtime_set_autosuspend_delay(&pdev->dev, 100); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_enable(&pdev->dev); #ifndef CONFIG_PM priv->rng.init = npcm_rng_init; priv->rng.cleanup = npcm_rng_cleanup; #endif priv->rng.name = pdev->name; priv->rng.read = npcm_rng_read; priv->rng.priv = (unsigned long)&pdev->dev; priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev); writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG); ret = devm_hwrng_register(&pdev->dev, &priv->rng); if (ret) { dev_err(&pdev->dev, "Failed to register rng device: %d\n", ret); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); return ret; } return 0; } static int npcm_rng_remove(struct platform_device *pdev) { struct npcm_rng *priv = platform_get_drvdata(pdev); devm_hwrng_unregister(&pdev->dev, &priv->rng); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); return 0; } #ifdef CONFIG_PM static int npcm_rng_runtime_suspend(struct device *dev) { struct npcm_rng *priv = dev_get_drvdata(dev); npcm_rng_cleanup(&priv->rng); return 0; } static int npcm_rng_runtime_resume(struct device *dev) { struct npcm_rng *priv = dev_get_drvdata(dev); return npcm_rng_init(&priv->rng); } #endif static const struct dev_pm_ops npcm_rng_pm_ops = { SET_RUNTIME_PM_OPS(npcm_rng_runtime_suspend, npcm_rng_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static const struct of_device_id rng_dt_id[] __maybe_unused = { { .compatible = "nuvoton,npcm750-rng", .data = (void *)NPCM_RNG_CLK_SET_25MHZ }, { .compatible = "nuvoton,npcm845-rng", .data = (void *)NPCM_RNG_CLK_SET_62_5MHZ }, {}, }; MODULE_DEVICE_TABLE(of, rng_dt_id); static struct platform_driver npcm_rng_driver = { .driver = { .name = "npcm-rng", .pm = &npcm_rng_pm_ops, .of_match_table = of_match_ptr(rng_dt_id), }, .probe = npcm_rng_probe, .remove = npcm_rng_remove, }; module_platform_driver(npcm_rng_driver); MODULE_DESCRIPTION("Nuvoton NPCM Random Number Generator Driver"); MODULE_AUTHOR("Tomer Maimon <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/char/hw_random/npcm-rng.c
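/*
 * Hedged sketch of the poll-with-timeout idiom in npcm_rng_read()
 * above: readb_poll_timeout() re-samples a register until a condition
 * holds or the timeout expires, returning 0 or -ETIMEDOUT. The register
 * offset and bit below are hypothetical; the 1000us/20000us intervals
 * mirror the driver's constants.
 */
#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_STATUS       0x00
#define EX_DATA_VALID   BIT(1)

static int ex_wait_for_data(void __iomem *base)
{
        u8 st;

        /* sample every 1000us, give up after 20000us */
        return readb_poll_timeout(base + EX_STATUS, st,
                                  st & EX_DATA_VALID, 1000, 20000);
}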
// SPDX-License-Identifier: GPL-2.0-only /* * drivers/char/hw_random/timeriomem-rng.c * * Copyright (C) 2009 Alexander Clouter <[email protected]> * * Derived from drivers/char/hw_random/omap-rng.c * Copyright 2005 (c) MontaVista Software, Inc. * Author: Deepak Saxena <[email protected]> * * Overview: * This driver is useful for platforms that have an IO range that provides * periodic random data from a single IO memory address. All the platform * has to do is provide the address and 'wait time' that new data becomes * available. * * TODO: add support for reading sizes other than 32bits and masking */ #include <linux/completion.h> #include <linux/delay.h> #include <linux/hrtimer.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/ktime.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/timeriomem-rng.h> struct timeriomem_rng_private { void __iomem *io_base; ktime_t period; unsigned int present:1; struct hrtimer timer; struct completion completion; struct hwrng rng_ops; }; static int timeriomem_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait) { struct timeriomem_rng_private *priv = container_of(hwrng, struct timeriomem_rng_private, rng_ops); int retval = 0; int period_us = ktime_to_us(priv->period); /* * There may not have been enough time for new data to be generated * since the last request. If the caller doesn't want to wait, let them * bail out. Otherwise, wait for the completion. If the new data has * already been generated, the completion should already be available. */ if (!wait && !priv->present) return 0; wait_for_completion(&priv->completion); do { /* * After the first read, all additional reads will need to wait * for the RNG to generate new data. Since the period can have * a wide range of values (1us to 1s have been observed), allow * for 1% tolerance in the sleep time rather than a fixed value. */ if (retval > 0) usleep_range(period_us, period_us + max(1, period_us / 100)); *(u32 *)data = readl(priv->io_base); retval += sizeof(u32); data += sizeof(u32); max -= sizeof(u32); } while (wait && max > sizeof(u32)); /* * Block any new callers until the RNG has had time to generate new * data. 
*/ priv->present = 0; reinit_completion(&priv->completion); hrtimer_forward_now(&priv->timer, priv->period); hrtimer_restart(&priv->timer); return retval; } static enum hrtimer_restart timeriomem_rng_trigger(struct hrtimer *timer) { struct timeriomem_rng_private *priv = container_of(timer, struct timeriomem_rng_private, timer); priv->present = 1; complete(&priv->completion); return HRTIMER_NORESTART; } static int timeriomem_rng_probe(struct platform_device *pdev) { struct timeriomem_rng_data *pdata = pdev->dev.platform_data; struct timeriomem_rng_private *priv; struct resource *res; int err = 0; int period; if (!pdev->dev.of_node && !pdata) { dev_err(&pdev->dev, "timeriomem_rng_data is missing\n"); return -EINVAL; } /* Allocate memory for the device structure (and zero it) */ priv = devm_kzalloc(&pdev->dev, sizeof(struct timeriomem_rng_private), GFP_KERNEL); if (!priv) return -ENOMEM; platform_set_drvdata(pdev, priv); priv->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(priv->io_base)) return PTR_ERR(priv->io_base); if (res->start % 4 != 0 || resource_size(res) < 4) { dev_err(&pdev->dev, "address must be at least four bytes wide and 32-bit aligned\n"); return -EINVAL; } if (pdev->dev.of_node) { int i; if (!of_property_read_u32(pdev->dev.of_node, "period", &i)) period = i; else { dev_err(&pdev->dev, "missing period\n"); return -EINVAL; } if (!of_property_read_u32(pdev->dev.of_node, "quality", &i)) priv->rng_ops.quality = i; } else { period = pdata->period; priv->rng_ops.quality = pdata->quality; } priv->period = ns_to_ktime(period * NSEC_PER_USEC); init_completion(&priv->completion); hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); priv->timer.function = timeriomem_rng_trigger; priv->rng_ops.name = dev_name(&pdev->dev); priv->rng_ops.read = timeriomem_rng_read; /* Assume random data is already available. */ priv->present = 1; complete(&priv->completion); err = devm_hwrng_register(&pdev->dev, &priv->rng_ops); if (err) { dev_err(&pdev->dev, "problem registering\n"); return err; } dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n", priv->io_base, period); return 0; } static int timeriomem_rng_remove(struct platform_device *pdev) { struct timeriomem_rng_private *priv = platform_get_drvdata(pdev); hrtimer_cancel(&priv->timer); return 0; } static const struct of_device_id timeriomem_rng_match[] = { { .compatible = "timeriomem_rng" }, {}, }; MODULE_DEVICE_TABLE(of, timeriomem_rng_match); static struct platform_driver timeriomem_rng_driver = { .driver = { .name = "timeriomem_rng", .of_match_table = timeriomem_rng_match, }, .probe = timeriomem_rng_probe, .remove = timeriomem_rng_remove, }; module_platform_driver(timeriomem_rng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexander Clouter <[email protected]>"); MODULE_DESCRIPTION("Timer IOMEM H/W RNG driver");
linux-master
drivers/char/hw_random/timeriomem-rng.c
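/*
 * Hedged sketch of the pacing pattern in timeriomem-rng above: an
 * hrtimer completes a completion when the source is expected to have
 * fresh data, and the consumer re-arms the timer after each read so
 * later callers block for one period. Names and the 100us period are
 * illustrative.
 */
#include <linux/hrtimer.h>
#include <linux/completion.h>
#include <linux/time.h>

static struct hrtimer ex_timer;
static struct completion ex_ready;
static ktime_t ex_period;

static enum hrtimer_restart ex_fire(struct hrtimer *t)
{
        complete(&ex_ready);            /* data should be fresh now */
        return HRTIMER_NORESTART;
}

static void ex_consume_one(void)
{
        wait_for_completion(&ex_ready);
        /* ... read the paced data source here ... */
        reinit_completion(&ex_ready);
        hrtimer_forward_now(&ex_timer, ex_period);
        hrtimer_restart(&ex_timer);
}

static void ex_setup(void)
{
        ex_period = ns_to_ktime(100 * NSEC_PER_USEC);
        init_completion(&ex_ready);
        hrtimer_init(&ex_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        ex_timer.function = ex_fire;
        complete(&ex_ready);            /* first read may proceed at once */
}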
/* * omap-rng.c - RNG driver for TI OMAP CPU family * * Author: Deepak Saxena <[email protected]> * * Copyright 2005 (c) MontaVista Software, Inc. * * Mostly based on original driver: * * Copyright (C) 2005 Nokia Corporation * Author: Juha Yrjölä <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/module.h> #include <linux/init.h> #include <linux/random.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/hw_random.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/io.h> #define RNG_REG_STATUS_RDY (1 << 0) #define RNG_REG_INTACK_RDY_MASK (1 << 0) #define RNG_REG_INTACK_SHUTDOWN_OFLO_MASK (1 << 1) #define RNG_SHUTDOWN_OFLO_MASK (1 << 1) #define RNG_CONTROL_STARTUP_CYCLES_SHIFT 16 #define RNG_CONTROL_STARTUP_CYCLES_MASK (0xffff << 16) #define RNG_CONTROL_ENABLE_TRNG_SHIFT 10 #define RNG_CONTROL_ENABLE_TRNG_MASK (1 << 10) #define RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT 16 #define RNG_CONFIG_MAX_REFIL_CYCLES_MASK (0xffff << 16) #define RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT 0 #define RNG_CONFIG_MIN_REFIL_CYCLES_MASK (0xff << 0) #define RNG_CONTROL_STARTUP_CYCLES 0xff #define RNG_CONFIG_MIN_REFIL_CYCLES 0x21 #define RNG_CONFIG_MAX_REFIL_CYCLES 0x22 #define RNG_ALARMCNT_ALARM_TH_SHIFT 0x0 #define RNG_ALARMCNT_ALARM_TH_MASK (0xff << 0) #define RNG_ALARMCNT_SHUTDOWN_TH_SHIFT 16 #define RNG_ALARMCNT_SHUTDOWN_TH_MASK (0x1f << 16) #define RNG_ALARM_THRESHOLD 0xff #define RNG_SHUTDOWN_THRESHOLD 0x4 #define RNG_REG_FROENABLE_MASK 0xffffff #define RNG_REG_FRODETUNE_MASK 0xffffff #define OMAP2_RNG_OUTPUT_SIZE 0x4 #define OMAP4_RNG_OUTPUT_SIZE 0x8 #define EIP76_RNG_OUTPUT_SIZE 0x10 /* * EIP76 RNG takes approx. 700us to produce 16 bytes of output data * as per testing results. And to account for the lack of udelay()'s * reliability, we keep the timeout as 1000us. */ #define RNG_DATA_FILL_TIMEOUT 100 enum { RNG_OUTPUT_0_REG = 0, RNG_OUTPUT_1_REG, RNG_OUTPUT_2_REG, RNG_OUTPUT_3_REG, RNG_STATUS_REG, RNG_INTMASK_REG, RNG_INTACK_REG, RNG_CONTROL_REG, RNG_CONFIG_REG, RNG_ALARMCNT_REG, RNG_FROENABLE_REG, RNG_FRODETUNE_REG, RNG_ALARMMASK_REG, RNG_ALARMSTOP_REG, RNG_REV_REG, RNG_SYSCONFIG_REG, }; static const u16 reg_map_omap2[] = { [RNG_OUTPUT_0_REG] = 0x0, [RNG_STATUS_REG] = 0x4, [RNG_CONFIG_REG] = 0x28, [RNG_REV_REG] = 0x3c, [RNG_SYSCONFIG_REG] = 0x40, }; static const u16 reg_map_omap4[] = { [RNG_OUTPUT_0_REG] = 0x0, [RNG_OUTPUT_1_REG] = 0x4, [RNG_STATUS_REG] = 0x8, [RNG_INTMASK_REG] = 0xc, [RNG_INTACK_REG] = 0x10, [RNG_CONTROL_REG] = 0x14, [RNG_CONFIG_REG] = 0x18, [RNG_ALARMCNT_REG] = 0x1c, [RNG_FROENABLE_REG] = 0x20, [RNG_FRODETUNE_REG] = 0x24, [RNG_ALARMMASK_REG] = 0x28, [RNG_ALARMSTOP_REG] = 0x2c, [RNG_REV_REG] = 0x1FE0, [RNG_SYSCONFIG_REG] = 0x1FE4, }; static const u16 reg_map_eip76[] = { [RNG_OUTPUT_0_REG] = 0x0, [RNG_OUTPUT_1_REG] = 0x4, [RNG_OUTPUT_2_REG] = 0x8, [RNG_OUTPUT_3_REG] = 0xc, [RNG_STATUS_REG] = 0x10, [RNG_INTACK_REG] = 0x10, [RNG_CONTROL_REG] = 0x14, [RNG_CONFIG_REG] = 0x18, [RNG_ALARMCNT_REG] = 0x1c, [RNG_FROENABLE_REG] = 0x20, [RNG_FRODETUNE_REG] = 0x24, [RNG_ALARMMASK_REG] = 0x28, [RNG_ALARMSTOP_REG] = 0x2c, [RNG_REV_REG] = 0x7c, }; struct omap_rng_dev; /** * struct omap_rng_pdata - RNG IP block-specific data * @regs: Pointer to the register offsets structure. 
* @data_size: No. of bytes in RNG output. * @data_present: Callback to determine if data is available. * @init: Callback for IP specific initialization sequence. * @cleanup: Callback for IP specific cleanup sequence. */ struct omap_rng_pdata { u16 *regs; u32 data_size; u32 (*data_present)(struct omap_rng_dev *priv); int (*init)(struct omap_rng_dev *priv); void (*cleanup)(struct omap_rng_dev *priv); }; struct omap_rng_dev { void __iomem *base; struct device *dev; const struct omap_rng_pdata *pdata; struct hwrng rng; struct clk *clk; struct clk *clk_reg; }; static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg) { return __raw_readl(priv->base + priv->pdata->regs[reg]); } static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg, u32 val) { __raw_writel(val, priv->base + priv->pdata->regs[reg]); } static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct omap_rng_dev *priv; int i, present; priv = (struct omap_rng_dev *)rng->priv; if (max < priv->pdata->data_size) return 0; for (i = 0; i < RNG_DATA_FILL_TIMEOUT; i++) { present = priv->pdata->data_present(priv); if (present || !wait) break; udelay(10); } if (!present) return 0; memcpy_fromio(data, priv->base + priv->pdata->regs[RNG_OUTPUT_0_REG], priv->pdata->data_size); if (priv->pdata->regs[RNG_INTACK_REG]) omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK); return priv->pdata->data_size; } static int omap_rng_init(struct hwrng *rng) { struct omap_rng_dev *priv; priv = (struct omap_rng_dev *)rng->priv; return priv->pdata->init(priv); } static void omap_rng_cleanup(struct hwrng *rng) { struct omap_rng_dev *priv; priv = (struct omap_rng_dev *)rng->priv; priv->pdata->cleanup(priv); } static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv) { return omap_rng_read(priv, RNG_STATUS_REG) ? 0 : 1; } static int omap2_rng_init(struct omap_rng_dev *priv) { omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x1); return 0; } static void omap2_rng_cleanup(struct omap_rng_dev *priv) { omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x0); } static struct omap_rng_pdata omap2_rng_pdata = { .regs = (u16 *)reg_map_omap2, .data_size = OMAP2_RNG_OUTPUT_SIZE, .data_present = omap2_rng_data_present, .init = omap2_rng_init, .cleanup = omap2_rng_cleanup, }; static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv) { return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY; } static int eip76_rng_init(struct omap_rng_dev *priv) { u32 val; /* Return if RNG is already running. */ if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK) return 0; /* Number of 512 bit blocks of raw Noise Source output data that must * be processed by either the Conditioning Function or the * SP 800-90 DRBG ‘BC_DF’ functionality to yield a ‘full entropy’ * output value. */ val = 0x5 << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT; /* Number of FRO samples that are XOR-ed together into one bit to be * shifted into the main shift register */ val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT; omap_rng_write(priv, RNG_CONFIG_REG, val); /* Enable all available FROs */ omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0); omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK); /* Enable TRNG */ val = RNG_CONTROL_ENABLE_TRNG_MASK; omap_rng_write(priv, RNG_CONTROL_REG, val); return 0; } static int omap4_rng_init(struct omap_rng_dev *priv) { u32 val; /* Return if RNG is already running. 
*/ if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK) return 0; val = RNG_CONFIG_MIN_REFIL_CYCLES << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT; val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT; omap_rng_write(priv, RNG_CONFIG_REG, val); omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0); omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK); val = RNG_ALARM_THRESHOLD << RNG_ALARMCNT_ALARM_TH_SHIFT; val |= RNG_SHUTDOWN_THRESHOLD << RNG_ALARMCNT_SHUTDOWN_TH_SHIFT; omap_rng_write(priv, RNG_ALARMCNT_REG, val); val = RNG_CONTROL_STARTUP_CYCLES << RNG_CONTROL_STARTUP_CYCLES_SHIFT; val |= RNG_CONTROL_ENABLE_TRNG_MASK; omap_rng_write(priv, RNG_CONTROL_REG, val); return 0; } static void omap4_rng_cleanup(struct omap_rng_dev *priv) { int val; val = omap_rng_read(priv, RNG_CONTROL_REG); val &= ~RNG_CONTROL_ENABLE_TRNG_MASK; omap_rng_write(priv, RNG_CONTROL_REG, val); } static irqreturn_t omap4_rng_irq(int irq, void *dev_id) { struct omap_rng_dev *priv = dev_id; u32 fro_detune, fro_enable; /* * Interrupt raised by an FRO shutdown threshold; do the following: * 1. Clear the alarm events. * 2. Detune the FROs which are shut down. * 3. Re-enable the shut-down FROs. */ omap_rng_write(priv, RNG_ALARMMASK_REG, 0x0); omap_rng_write(priv, RNG_ALARMSTOP_REG, 0x0); fro_enable = omap_rng_read(priv, RNG_FROENABLE_REG); fro_detune = ~fro_enable & RNG_REG_FRODETUNE_MASK; fro_detune = fro_detune | omap_rng_read(priv, RNG_FRODETUNE_REG); fro_enable = RNG_REG_FROENABLE_MASK; omap_rng_write(priv, RNG_FRODETUNE_REG, fro_detune); omap_rng_write(priv, RNG_FROENABLE_REG, fro_enable); omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_SHUTDOWN_OFLO_MASK); return IRQ_HANDLED; } static struct omap_rng_pdata omap4_rng_pdata = { .regs = (u16 *)reg_map_omap4, .data_size = OMAP4_RNG_OUTPUT_SIZE, .data_present = omap4_rng_data_present, .init = omap4_rng_init, .cleanup = omap4_rng_cleanup, }; static struct omap_rng_pdata eip76_rng_pdata = { .regs = (u16 *)reg_map_eip76, .data_size = EIP76_RNG_OUTPUT_SIZE, .data_present = omap4_rng_data_present, .init = eip76_rng_init, .cleanup = omap4_rng_cleanup, }; static const struct of_device_id omap_rng_of_match[] __maybe_unused = { { .compatible = "ti,omap2-rng", .data = &omap2_rng_pdata, }, { .compatible = "ti,omap4-rng", .data = &omap4_rng_pdata, }, { .compatible = "inside-secure,safexcel-eip76", .data = &eip76_rng_pdata, }, {}, }; MODULE_DEVICE_TABLE(of, omap_rng_of_match); static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, struct platform_device *pdev) { struct device *dev = &pdev->dev; int irq, err; priv->pdata = of_device_get_match_data(dev); if (!priv->pdata) return -ENODEV; if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") || of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) { irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; err = devm_request_irq(dev, irq, omap4_rng_irq, IRQF_TRIGGER_NONE, dev_name(dev), priv); if (err) { dev_err(dev, "unable to request irq %d, err = %d\n", irq, err); return err; } /* * On OMAP4, enabling the shutdown_oflo interrupt is * done in the interrupt mask register. 
There is no * such register on EIP76, and it's enabled by the * same bit in the control register */ if (priv->pdata->regs[RNG_INTMASK_REG]) omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK); else omap_rng_write(priv, RNG_CONTROL_REG, RNG_SHUTDOWN_OFLO_MASK); } return 0; } static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng) { /* Only OMAP2/3 can be non-DT */ omap_rng->pdata = &omap2_rng_pdata; return 0; } static int omap_rng_probe(struct platform_device *pdev) { struct omap_rng_dev *priv; struct device *dev = &pdev->dev; int ret; priv = devm_kzalloc(dev, sizeof(struct omap_rng_dev), GFP_KERNEL); if (!priv) return -ENOMEM; priv->rng.read = omap_rng_do_read; priv->rng.init = omap_rng_init; priv->rng.cleanup = omap_rng_cleanup; priv->rng.quality = 900; priv->rng.priv = (unsigned long)priv; platform_set_drvdata(pdev, priv); priv->dev = dev; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto err_ioremap; } priv->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); if (!priv->rng.name) { ret = -ENOMEM; goto err_ioremap; } pm_runtime_enable(&pdev->dev); ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret); goto err_ioremap; } priv->clk = devm_clk_get(&pdev->dev, NULL); if (PTR_ERR(priv->clk) == -EPROBE_DEFER) return -EPROBE_DEFER; if (!IS_ERR(priv->clk)) { ret = clk_prepare_enable(priv->clk); if (ret) { dev_err(&pdev->dev, "Unable to enable the clk: %d\n", ret); goto err_register; } } priv->clk_reg = devm_clk_get(&pdev->dev, "reg"); if (PTR_ERR(priv->clk_reg) == -EPROBE_DEFER) return -EPROBE_DEFER; if (!IS_ERR(priv->clk_reg)) { ret = clk_prepare_enable(priv->clk_reg); if (ret) { dev_err(&pdev->dev, "Unable to enable the register clk: %d\n", ret); goto err_register; } } ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) : get_omap_rng_device_details(priv); if (ret) goto err_register; ret = devm_hwrng_register(&pdev->dev, &priv->rng); if (ret) goto err_register; dev_info(&pdev->dev, "Random Number Generator ver. 
%02x\n", omap_rng_read(priv, RNG_REV_REG)); return 0; err_register: priv->base = NULL; pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); clk_disable_unprepare(priv->clk_reg); clk_disable_unprepare(priv->clk); err_ioremap: dev_err(dev, "initialization failed.\n"); return ret; } static int omap_rng_remove(struct platform_device *pdev) { struct omap_rng_dev *priv = platform_get_drvdata(pdev); priv->pdata->cleanup(priv); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); clk_disable_unprepare(priv->clk); clk_disable_unprepare(priv->clk_reg); return 0; } static int __maybe_unused omap_rng_suspend(struct device *dev) { struct omap_rng_dev *priv = dev_get_drvdata(dev); priv->pdata->cleanup(priv); pm_runtime_put_sync(dev); return 0; } static int __maybe_unused omap_rng_resume(struct device *dev) { struct omap_rng_dev *priv = dev_get_drvdata(dev); int ret; ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(dev, "Failed to runtime_get device: %d\n", ret); return ret; } priv->pdata->init(priv); return 0; } static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume); static struct platform_driver omap_rng_driver = { .driver = { .name = "omap_rng", .pm = &omap_rng_pm, .of_match_table = of_match_ptr(omap_rng_of_match), }, .probe = omap_rng_probe, .remove = omap_rng_remove, }; module_platform_driver(omap_rng_driver); MODULE_ALIAS("platform:omap_rng"); MODULE_AUTHOR("Deepak Saxena (and others)"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/omap-rng.c
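The read path above illustrates the hwrng `wait` contract: omap_rng_do_read() polls data_present() at most RNG_DATA_FILL_TIMEOUT times with a udelay(10) between polls, and gives up immediately when the caller passes wait == false. A minimal user-space rendering of that bounded-poll idiom; fake_data_present() and the constants are stand-ins, not driver code:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define POLL_ATTEMPTS 100   /* mirrors RNG_DATA_FILL_TIMEOUT */
#define POLL_DELAY_NS 10000 /* mirrors udelay(10) */

static int calls;
static bool fake_data_present(void)
{
	return ++calls > 30; /* pretend data turns up on the 31st poll */
}

/* Returns true if data became available within the poll budget. */
static bool wait_for_data(bool wait)
{
	struct timespec d = { .tv_sec = 0, .tv_nsec = POLL_DELAY_NS };
	bool present = false;
	int i;

	for (i = 0; i < POLL_ATTEMPTS; i++) {
		present = fake_data_present();
		if (present || !wait)
			break;
		nanosleep(&d, NULL);
	}
	return present;
}

int main(void)
{
	printf("non-blocking: %d\n", wait_for_data(false)); /* 0: bails on first miss */
	calls = 0;
	printf("blocking: %d\n", wait_for_data(true));      /* 1: polls until ready */
	return 0;
}

The same shape appears in several of the drivers that follow; only the register read and the delay constants change.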
/* * Copyright (c) 2011 Peter Korsgaard <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/hw_random.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #define TRNG_CR 0x00 #define TRNG_MR 0x04 #define TRNG_ISR 0x1c #define TRNG_ISR_DATRDY BIT(0) #define TRNG_ODATA 0x50 #define TRNG_KEY 0x524e4700 /* RNG */ #define TRNG_HALFR BIT(0) /* generate RN every 168 cycles */ struct atmel_trng_data { bool has_half_rate; }; struct atmel_trng { struct clk *clk; void __iomem *base; struct hwrng rng; bool has_half_rate; }; static bool atmel_trng_wait_ready(struct atmel_trng *trng, bool wait) { int ready; ready = readl(trng->base + TRNG_ISR) & TRNG_ISR_DATRDY; if (!ready && wait) readl_poll_timeout(trng->base + TRNG_ISR, ready, ready & TRNG_ISR_DATRDY, 1000, 20000); return !!ready; } static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng); u32 *data = buf; int ret; ret = pm_runtime_get_sync((struct device *)trng->rng.priv); if (ret < 0) { pm_runtime_put_sync((struct device *)trng->rng.priv); return ret; } ret = atmel_trng_wait_ready(trng, wait); if (!ret) goto out; *data = readl(trng->base + TRNG_ODATA); /* * ensure data ready is only set again AFTER the next data word is ready * in case it got set between checking ISR and reading ODATA, so we * don't risk re-reading the same word */ readl(trng->base + TRNG_ISR); ret = 4; out: pm_runtime_mark_last_busy((struct device *)trng->rng.priv); pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv); return ret; } static int atmel_trng_init(struct atmel_trng *trng) { unsigned long rate; int ret; ret = clk_prepare_enable(trng->clk); if (ret) return ret; if (trng->has_half_rate) { rate = clk_get_rate(trng->clk); /* if peripheral clk is above 100MHz, set HALFR */ if (rate > 100000000) writel(TRNG_HALFR, trng->base + TRNG_MR); } writel(TRNG_KEY | 1, trng->base + TRNG_CR); return 0; } static void atmel_trng_cleanup(struct atmel_trng *trng) { writel(TRNG_KEY, trng->base + TRNG_CR); clk_disable_unprepare(trng->clk); } static int atmel_trng_probe(struct platform_device *pdev) { struct atmel_trng *trng; const struct atmel_trng_data *data; int ret; trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); if (!trng) return -ENOMEM; trng->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(trng->base)) return PTR_ERR(trng->base); trng->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(trng->clk)) return PTR_ERR(trng->clk); data = of_device_get_match_data(&pdev->dev); if (!data) return -ENODEV; trng->has_half_rate = data->has_half_rate; trng->rng.name = pdev->name; trng->rng.read = atmel_trng_read; trng->rng.priv = (unsigned long)&pdev->dev; platform_set_drvdata(pdev, trng); #ifndef CONFIG_PM ret = atmel_trng_init(trng); if (ret) return ret; #endif pm_runtime_set_autosuspend_delay(&pdev->dev, 100); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_enable(&pdev->dev); ret = devm_hwrng_register(&pdev->dev, &trng->rng); if (ret) { pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); #ifndef CONFIG_PM atmel_trng_cleanup(trng); #endif } 
return ret; } static int atmel_trng_remove(struct platform_device *pdev) { struct atmel_trng *trng = platform_get_drvdata(pdev); atmel_trng_cleanup(trng); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); return 0; } static int __maybe_unused atmel_trng_runtime_suspend(struct device *dev) { struct atmel_trng *trng = dev_get_drvdata(dev); atmel_trng_cleanup(trng); return 0; } static int __maybe_unused atmel_trng_runtime_resume(struct device *dev) { struct atmel_trng *trng = dev_get_drvdata(dev); return atmel_trng_init(trng); } static const struct dev_pm_ops __maybe_unused atmel_trng_pm_ops = { SET_RUNTIME_PM_OPS(atmel_trng_runtime_suspend, atmel_trng_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static const struct atmel_trng_data at91sam9g45_config = { .has_half_rate = false, }; static const struct atmel_trng_data sam9x60_config = { .has_half_rate = true, }; static const struct of_device_id atmel_trng_dt_ids[] = { { .compatible = "atmel,at91sam9g45-trng", .data = &at91sam9g45_config, }, { .compatible = "microchip,sam9x60-trng", .data = &sam9x60_config, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids); static struct platform_driver atmel_trng_driver = { .probe = atmel_trng_probe, .remove = atmel_trng_remove, .driver = { .name = "atmel-trng", .pm = pm_ptr(&atmel_trng_pm_ops), .of_match_table = atmel_trng_dt_ids, }, }; module_platform_driver(atmel_trng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Peter Korsgaard <[email protected]>"); MODULE_DESCRIPTION("Atmel true random number generator driver");
linux-master
drivers/char/hw_random/atmel-rng.c
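atmel_trng_wait_ready() above leans on readl_poll_timeout(): re-read a status register at a fixed interval until a bit is set or a deadline passes, with one final check after the deadline. A portable sketch of that helper, with clock_gettime() standing in for kernel time and the 1000/20000 values mirroring the driver's sleep_us/timeout_us arguments:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000u + ts.tv_nsec / 1000u;
}

/* Poll cond() every sleep_us until it holds or timeout_us elapses.
 * Returns 0 on success, -1 on timeout (mirroring -ETIMEDOUT). */
static int poll_timeout(bool (*cond)(void), uint64_t sleep_us,
			uint64_t timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;
	struct timespec d = { 0, (long)sleep_us * 1000 };

	for (;;) {
		if (cond())
			return 0;
		if (now_us() >= deadline)
			return cond() ? 0 : -1; /* one last read, as readl_poll_timeout does */
		nanosleep(&d, NULL);
	}
}

static int ticks;
static bool ready_after_five(void) { return ++ticks >= 5; }

int main(void)
{
	printf("%d\n", poll_timeout(ready_after_five, 1000, 20000)); /* 0 */
	return 0;
}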
// SPDX-License-Identifier: GPL-2.0-or-later /* * RNG driver for Freescale RNGC * * Copyright (C) 2008-2012 Freescale Semiconductor, Inc. * Copyright (C) 2017 Martin Kaiser <[email protected]> */ #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/hw_random.h> #include <linux/completion.h> #include <linux/io.h> #include <linux/bitfield.h> #define RNGC_VER_ID 0x0000 #define RNGC_COMMAND 0x0004 #define RNGC_CONTROL 0x0008 #define RNGC_STATUS 0x000C #define RNGC_ERROR 0x0010 #define RNGC_FIFO 0x0014 /* the fields in the ver id register */ #define RNG_TYPE GENMASK(31, 28) #define RNGC_VER_MAJ_SHIFT 8 /* the rng_type field */ #define RNGC_TYPE_RNGB 0x1 #define RNGC_TYPE_RNGC 0x2 #define RNGC_CMD_CLR_ERR BIT(5) #define RNGC_CMD_CLR_INT BIT(4) #define RNGC_CMD_SEED BIT(1) #define RNGC_CMD_SELF_TEST BIT(0) #define RNGC_CTRL_MASK_ERROR BIT(6) #define RNGC_CTRL_MASK_DONE BIT(5) #define RNGC_CTRL_AUTO_SEED BIT(4) #define RNGC_STATUS_ERROR BIT(16) #define RNGC_STATUS_FIFO_LEVEL_MASK GENMASK(11, 8) #define RNGC_STATUS_SEED_DONE BIT(5) #define RNGC_STATUS_ST_DONE BIT(4) #define RNGC_ERROR_STATUS_STAT_ERR 0x00000008 #define RNGC_TIMEOUT 3000 /* 3 sec */ static bool self_test = true; module_param(self_test, bool, 0); struct imx_rngc { struct device *dev; struct clk *clk; void __iomem *base; struct hwrng rng; struct completion rng_op_done; /* * err_reg is written only by the irq handler and read only * when interrupts are masked, so we need no spinlock */ u32 err_reg; }; static inline void imx_rngc_irq_mask_clear(struct imx_rngc *rngc) { u32 ctrl, cmd; /* mask interrupts */ ctrl = readl(rngc->base + RNGC_CONTROL); ctrl |= RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR; writel(ctrl, rngc->base + RNGC_CONTROL); /* * CLR_INT clears the interrupt only if there's no error * CLR_ERR clears the interrupt and the error register if there * is an error */ cmd = readl(rngc->base + RNGC_COMMAND); cmd |= RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR; writel(cmd, rngc->base + RNGC_COMMAND); } static inline void imx_rngc_irq_unmask(struct imx_rngc *rngc) { u32 ctrl; ctrl = readl(rngc->base + RNGC_CONTROL); ctrl &= ~(RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR); writel(ctrl, rngc->base + RNGC_CONTROL); } static int imx_rngc_self_test(struct imx_rngc *rngc) { u32 cmd; int ret; imx_rngc_irq_unmask(rngc); /* run self test */ cmd = readl(rngc->base + RNGC_COMMAND); writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND); ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT)); imx_rngc_irq_mask_clear(rngc); if (!ret) return -ETIMEDOUT; return rngc->err_reg ? -EIO : 0; } static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); unsigned int status; int retval = 0; while (max >= sizeof(u32)) { status = readl(rngc->base + RNGC_STATUS); /* is there some error while reading this random number? */ if (status & RNGC_STATUS_ERROR) break; if (status & RNGC_STATUS_FIFO_LEVEL_MASK) { /* retrieve a random number from FIFO */ *(u32 *)data = readl(rngc->base + RNGC_FIFO); retval += sizeof(u32); data += sizeof(u32); max -= sizeof(u32); } } return retval ? 
retval : -EIO; } static irqreturn_t imx_rngc_irq(int irq, void *priv) { struct imx_rngc *rngc = (struct imx_rngc *)priv; u32 status; /* * clearing the interrupt will also clear the error register * read error and status before clearing */ status = readl(rngc->base + RNGC_STATUS); rngc->err_reg = readl(rngc->base + RNGC_ERROR); imx_rngc_irq_mask_clear(rngc); if (status & (RNGC_STATUS_SEED_DONE | RNGC_STATUS_ST_DONE)) complete(&rngc->rng_op_done); return IRQ_HANDLED; } static int imx_rngc_init(struct hwrng *rng) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); u32 cmd, ctrl; int ret; /* clear error */ cmd = readl(rngc->base + RNGC_COMMAND); writel(cmd | RNGC_CMD_CLR_ERR, rngc->base + RNGC_COMMAND); imx_rngc_irq_unmask(rngc); /* create seed, repeat while there is some statistical error */ do { /* seed creation */ cmd = readl(rngc->base + RNGC_COMMAND); writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND); ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT)); if (!ret) { ret = -ETIMEDOUT; goto err; } } while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR); if (rngc->err_reg) { ret = -EIO; goto err; } /* * enable automatic seeding, the rngc creates a new seed automatically * after serving 2^20 random 160-bit words */ ctrl = readl(rngc->base + RNGC_CONTROL); ctrl |= RNGC_CTRL_AUTO_SEED; writel(ctrl, rngc->base + RNGC_CONTROL); /* * if initialisation was successful, we keep the interrupt * unmasked until imx_rngc_cleanup is called * we mask the interrupt ourselves if we return an error */ return 0; err: imx_rngc_irq_mask_clear(rngc); return ret; } static void imx_rngc_cleanup(struct hwrng *rng) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); imx_rngc_irq_mask_clear(rngc); } static int __init imx_rngc_probe(struct platform_device *pdev) { struct imx_rngc *rngc; int ret; int irq; u32 ver_id; u8 rng_type; rngc = devm_kzalloc(&pdev->dev, sizeof(*rngc), GFP_KERNEL); if (!rngc) return -ENOMEM; rngc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rngc->base)) return PTR_ERR(rngc->base); rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(rngc->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n"); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ver_id = readl(rngc->base + RNGC_VER_ID); rng_type = FIELD_GET(RNG_TYPE, ver_id); /* * This driver supports only RNGC and RNGB. (There's a different * driver for RNGA.) */ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) return -ENODEV; init_completion(&rngc->rng_op_done); rngc->rng.name = pdev->name; rngc->rng.init = imx_rngc_init; rngc->rng.read = imx_rngc_read; rngc->rng.cleanup = imx_rngc_cleanup; rngc->rng.quality = 19; rngc->dev = &pdev->dev; platform_set_drvdata(pdev, rngc); imx_rngc_irq_mask_clear(rngc); ret = devm_request_irq(&pdev->dev, irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); if (ret) return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n"); if (self_test) { ret = imx_rngc_self_test(rngc); if (ret) return dev_err_probe(&pdev->dev, ret, "self test failed\n"); } ret = devm_hwrng_register(&pdev->dev, &rngc->rng); if (ret) return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n"); dev_info(&pdev->dev, "Freescale RNG%c registered (HW revision %d.%02d)\n", rng_type == RNGC_TYPE_RNGB ? 
'B' : 'C', (ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff); return 0; } static int imx_rngc_suspend(struct device *dev) { struct imx_rngc *rngc = dev_get_drvdata(dev); clk_disable_unprepare(rngc->clk); return 0; } static int imx_rngc_resume(struct device *dev) { struct imx_rngc *rngc = dev_get_drvdata(dev); clk_prepare_enable(rngc->clk); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume); static const struct of_device_id imx_rngc_dt_ids[] = { { .compatible = "fsl,imx25-rngb" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids); static struct platform_driver imx_rngc_driver = { .driver = { .name = KBUILD_MODNAME, .pm = pm_sleep_ptr(&imx_rngc_pm_ops), .of_match_table = imx_rngc_dt_ids, }, }; module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("H/W RNGC driver for i.MX"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/imx-rngc.c
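The RNGC driver above synchronizes commands with its IRQ handler through a completion: the initiator writes a command, blocks in wait_for_completion_timeout(), and the handler calls complete() once SEED_DONE or ST_DONE fires. A sketch of the same handshake using POSIX threads; compile with -lpthread, and note the 3-second timeout mirrors RNGC_TIMEOUT while fake_irq() is a stand-in for the hardware interrupt:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Returns true if completed, false on timeout (the kernel returns jiffies left). */
static bool wait_for_completion_timeout(struct completion *c, int secs)
{
	struct timespec dl;
	bool ok;

	clock_gettime(CLOCK_REALTIME, &dl);
	dl.tv_sec += secs;
	pthread_mutex_lock(&c->lock);
	while (!c->done) {
		if (pthread_cond_timedwait(&c->cond, &c->lock, &dl))
			break; /* timed out */
	}
	ok = c->done;
	c->done = false; /* reinit_completion() equivalent */
	pthread_mutex_unlock(&c->lock);
	return ok;
}

static struct completion op_done = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
};

static void *fake_irq(void *arg)
{
	usleep(100000); /* the hardware "seeds" for 100 ms */
	complete(&op_done);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, fake_irq, NULL);
	printf("seed %s\n",
	       wait_for_completion_timeout(&op_done, 3) ? "done" : "timed out");
	pthread_join(&t, NULL);
	return 0;
}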
/* * RNG driver for AMD RNGs * * Copyright 2005 (c) MontaVista Software, Inc. * * with the majority of the code coming from: * * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) * (c) Copyright 2003 Red Hat Inc <[email protected]> * * derived from * * Hardware driver for the AMD 768 Random Number Generator (RNG) * (c) Copyright 2001 Red Hat Inc * * derived from * * Hardware driver for Intel i810 Random Number Generator (RNG) * Copyright 2000,2001 Jeff Garzik <[email protected]> * Copyright 2000,2001 Philipp Rumpf <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #define DRV_NAME "AMD768-HWRNG" #define RNGDATA 0x00 #define RNGDONE 0x04 #define PMBASE_OFFSET 0xF0 #define PMBASE_SIZE 8 /* * Data for PCI driver interface * * This data only exists for exporting the supported * PCI ids via MODULE_DEVICE_TABLE. We do not actually * register a pci_driver, because someone else might one day * want to register another driver on the same PCI id. */ static const struct pci_device_id pci_tbl[] = { { PCI_VDEVICE(AMD, 0x7443), 0, }, { PCI_VDEVICE(AMD, 0x746b), 0, }, { 0, }, /* terminate list */ }; MODULE_DEVICE_TABLE(pci, pci_tbl); struct amd768_priv { void __iomem *iobase; struct pci_dev *pcidev; u32 pmbase; }; static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { u32 *data = buf; struct amd768_priv *priv = (struct amd768_priv *)rng->priv; size_t read = 0; /* We will wait at maximum one time per read */ int timeout = max / 4 + 1; /* * RNG data is available when RNGDONE is set to 1 * New random numbers are generated approximately 128 microseconds * after RNGDATA is read */ while (read < max) { if (ioread32(priv->iobase + RNGDONE) == 0) { if (wait) { /* Delay given by datasheet */ usleep_range(128, 196); if (timeout-- == 0) return read; } else { return 0; } } else { *data = ioread32(priv->iobase + RNGDATA); data++; read += 4; } } return read; } static int amd_rng_init(struct hwrng *rng) { struct amd768_priv *priv = (struct amd768_priv *)rng->priv; u8 rnen; pci_read_config_byte(priv->pcidev, 0x40, &rnen); rnen |= BIT(7); /* RNG on */ pci_write_config_byte(priv->pcidev, 0x40, rnen); pci_read_config_byte(priv->pcidev, 0x41, &rnen); rnen |= BIT(7); /* PMIO enable */ pci_write_config_byte(priv->pcidev, 0x41, rnen); return 0; } static void amd_rng_cleanup(struct hwrng *rng) { struct amd768_priv *priv = (struct amd768_priv *)rng->priv; u8 rnen; pci_read_config_byte(priv->pcidev, 0x40, &rnen); rnen &= ~BIT(7); /* RNG off */ pci_write_config_byte(priv->pcidev, 0x40, rnen); } static struct hwrng amd_rng = { .name = "amd", .init = amd_rng_init, .cleanup = amd_rng_cleanup, .read = amd_rng_read, }; static int __init amd_rng_mod_init(void) { int err; struct pci_dev *pdev = NULL; const struct pci_device_id *ent; u32 pmbase; struct amd768_priv *priv; for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); if (ent) goto found; } /* Device not found. 
*/ return -ENODEV; found: err = pci_read_config_dword(pdev, 0x58, &pmbase); if (err) goto put_dev; pmbase &= 0x0000FF00; if (pmbase == 0) { err = -EIO; goto put_dev; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { err = -ENOMEM; goto put_dev; } if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", pmbase + PMBASE_OFFSET); err = -EBUSY; goto out; } priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE); if (!priv->iobase) { pr_err(DRV_NAME " Cannot map ioport\n"); err = -EINVAL; goto err_iomap; } amd_rng.priv = (unsigned long)priv; priv->pmbase = pmbase; priv->pcidev = pdev; pr_info(DRV_NAME " detected\n"); err = hwrng_register(&amd_rng); if (err) { pr_err(DRV_NAME " registering failed (%d)\n", err); goto err_hwrng; } return 0; err_hwrng: ioport_unmap(priv->iobase); err_iomap: release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); out: kfree(priv); put_dev: pci_dev_put(pdev); return err; } static void __exit amd_rng_mod_exit(void) { struct amd768_priv *priv; priv = (struct amd768_priv *)amd_rng.priv; hwrng_unregister(&amd_rng); ioport_unmap(priv->iobase); release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); pci_dev_put(priv->pcidev); kfree(priv); } module_init(amd_rng_mod_init); module_exit(amd_rng_mod_exit); MODULE_AUTHOR("The Linux Kernel team"); MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/amd-rng.c
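amd_rng_read() above budgets its sleeps across the whole request: timeout = max / 4 + 1, i.e. roughly one usleep_range(128, 196) allowance per 32-bit word plus one spare. A quick standalone check of the worst-case blocking time that implies for a 32-byte request:

#include <stdio.h>

int main(void)
{
	size_t max = 32;                  /* bytes requested by the hwrng core */
	int budget = max / 4 + 1;         /* sleeps allowed: one per word + 1 */
	unsigned worst_us = budget * 196; /* upper bound of usleep_range() */

	printf("budget=%d sleeps, worst case ~%u us\n", budget, worst_us);
	/* prints: budget=9 sleeps, worst case ~1764 us */
	return 0;
}

So even a fully stalled device bounds a blocking 32-byte read to under 2 ms before the driver returns a short count.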
// SPDX-License-Identifier: GPL-2.0 /* * Microchip PolarFire SoC (MPFS) hardware random driver * * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved. * * Author: Conor Dooley <[email protected]> */ #include <linux/module.h> #include <linux/hw_random.h> #include <linux/platform_device.h> #include <soc/microchip/mpfs.h> #define CMD_OPCODE 0x21 #define CMD_DATA_SIZE 0U #define CMD_DATA NULL #define MBOX_OFFSET 0U #define RESP_OFFSET 0U #define RNG_RESP_BYTES 32U struct mpfs_rng { struct mpfs_sys_controller *sys_controller; struct hwrng rng; }; static int mpfs_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct mpfs_rng *rng_priv = container_of(rng, struct mpfs_rng, rng); u32 response_msg[RNG_RESP_BYTES / sizeof(u32)]; unsigned int count = 0, copy_size_bytes; int ret; struct mpfs_mss_response response = { .resp_status = 0U, .resp_msg = (u32 *)response_msg, .resp_size = RNG_RESP_BYTES }; struct mpfs_mss_msg msg = { .cmd_opcode = CMD_OPCODE, .cmd_data_size = CMD_DATA_SIZE, .response = &response, .cmd_data = CMD_DATA, .mbox_offset = MBOX_OFFSET, .resp_offset = RESP_OFFSET }; while (count < max) { ret = mpfs_blocking_transaction(rng_priv->sys_controller, &msg); if (ret) return ret; copy_size_bytes = max - count > RNG_RESP_BYTES ? RNG_RESP_BYTES : max - count; memcpy(buf + count, response_msg, copy_size_bytes); count += copy_size_bytes; if (!wait) break; } return count; } static int mpfs_rng_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mpfs_rng *rng_priv; int ret; rng_priv = devm_kzalloc(dev, sizeof(*rng_priv), GFP_KERNEL); if (!rng_priv) return -ENOMEM; rng_priv->sys_controller = mpfs_sys_controller_get(&pdev->dev); if (IS_ERR(rng_priv->sys_controller)) return dev_err_probe(dev, PTR_ERR(rng_priv->sys_controller), "Failed to register system controller hwrng sub device\n"); rng_priv->rng.read = mpfs_rng_read; rng_priv->rng.name = pdev->name; platform_set_drvdata(pdev, rng_priv); ret = devm_hwrng_register(&pdev->dev, &rng_priv->rng); if (ret) return dev_err_probe(&pdev->dev, ret, "Failed to register MPFS hwrng\n"); dev_info(&pdev->dev, "Registered MPFS hwrng\n"); return 0; } static struct platform_driver mpfs_rng_driver = { .driver = { .name = "mpfs-rng", }, .probe = mpfs_rng_probe, }; module_platform_driver(mpfs_rng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Conor Dooley <[email protected]>"); MODULE_DESCRIPTION("PolarFire SoC (MPFS) hardware random driver");
linux-master
drivers/char/hw_random/mpfs-rng.c
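mpfs_rng_read() above fills the caller's buffer one 32-byte mailbox transaction at a time, clamping the last copy to the remaining space. The same chunking loop in self-contained form, with the transaction stubbed out (fake_transaction() is illustrative only):

#include <stdio.h>
#include <string.h>

#define RESP_BYTES 32u /* mirrors RNG_RESP_BYTES */

static int fake_transaction(unsigned char resp[RESP_BYTES])
{
	memset(resp, 0xab, RESP_BYTES); /* stand-in for hardware randomness */
	return 0;
}

static int chunked_read(void *buf, size_t max)
{
	unsigned char resp[RESP_BYTES];
	size_t count = 0, copy;
	int ret;

	while (count < max) {
		ret = fake_transaction(resp);
		if (ret)
			return ret;
		/* clamp the final chunk to what the caller still needs */
		copy = max - count > RESP_BYTES ? RESP_BYTES : max - count;
		memcpy((unsigned char *)buf + count, resp, copy);
		count += copy;
	}
	return (int)count;
}

int main(void)
{
	unsigned char out[70];

	/* 70 bytes => 32 + 32 + 6, i.e. three transactions */
	printf("copied %d bytes\n", chunked_read(out, sizeof(out)));
	return 0;
}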
// SPDX-License-Identifier: GPL-2.0 /* * Hardware Random Number Generator support. * Cavium Thunder, Marvell OcteonTx/Tx2 processor families. * * Copyright (C) 2016 Cavium, Inc. */ #include <linux/hw_random.h> #include <linux/io.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pci_ids.h> #define THUNDERX_RNM_ENT_EN 0x1 #define THUNDERX_RNM_RNG_EN 0x2 struct cavium_rng_pf { void __iomem *control_status; }; /* Enable the RNG hardware and activate the VF */ static int cavium_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct cavium_rng_pf *rng; int iov_err; rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); if (!rng) return -ENOMEM; /* Map the RNG control/status register */ rng->control_status = pcim_iomap(pdev, 0, 0); if (!rng->control_status) { dev_err(&pdev->dev, "Error: iomap of control_status register failed.\n"); return -ENOMEM; } /* Enable the RNG hardware and entropy source */ writeq(THUNDERX_RNM_RNG_EN | THUNDERX_RNM_ENT_EN, rng->control_status); pci_set_drvdata(pdev, rng); /* Enable the Cavium RNG as a VF */ iov_err = pci_enable_sriov(pdev, 1); if (iov_err != 0) { /* Disable the RNG hardware and entropy source */ writeq(0, rng->control_status); dev_err(&pdev->dev, "Error initializing RNG virtual function (%i).\n", iov_err); return iov_err; } return 0; } /* Disable VF and RNG Hardware */ static void cavium_rng_remove(struct pci_dev *pdev) { struct cavium_rng_pf *rng; rng = pci_get_drvdata(pdev); /* Remove the VF */ pci_disable_sriov(pdev); /* Disable the RNG hardware and entropy source */ writeq(0, rng->control_status); } static const struct pci_device_id cavium_rng_pf_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa018), 0, 0, 0}, /* Thunder RNM */ {0,}, }; MODULE_DEVICE_TABLE(pci, cavium_rng_pf_id_table); static struct pci_driver cavium_rng_pf_driver = { .name = "cavium_rng_pf", .id_table = cavium_rng_pf_id_table, .probe = cavium_rng_probe, .remove = cavium_rng_remove, }; module_pci_driver(cavium_rng_pf_driver); MODULE_AUTHOR("Omer Khaliq <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/char/hw_random/cavium-rng.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * APM X-Gene SoC RNG Driver * * Copyright (c) 2014, Applied Micro Circuits Corporation * Author: Rameshwar Prasad Sahu <[email protected]> * Shamal Winchurkar <[email protected]> * Feng Kan <[email protected]> */ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/timer.h> #define RNG_MAX_DATUM 4 #define MAX_TRY 100 #define XGENE_RNG_RETRY_COUNT 20 #define XGENE_RNG_RETRY_INTERVAL 10 /* RNG Registers */ #define RNG_INOUT_0 0x00 #define RNG_INTR_STS_ACK 0x10 #define RNG_CONTROL 0x14 #define RNG_CONFIG 0x18 #define RNG_ALARMCNT 0x1c #define RNG_FROENABLE 0x20 #define RNG_FRODETUNE 0x24 #define RNG_ALARMMASK 0x28 #define RNG_ALARMSTOP 0x2c #define RNG_OPTIONS 0x78 #define RNG_EIP_REV 0x7c #define MONOBIT_FAIL_MASK BIT(7) #define POKER_FAIL_MASK BIT(6) #define LONG_RUN_FAIL_MASK BIT(5) #define RUN_FAIL_MASK BIT(4) #define NOISE_FAIL_MASK BIT(3) #define STUCK_OUT_MASK BIT(2) #define SHUTDOWN_OFLO_MASK BIT(1) #define READY_MASK BIT(0) #define MAJOR_HW_REV_RD(src) (((src) & 0x0f000000) >> 24) #define MINOR_HW_REV_RD(src) (((src) & 0x00f00000) >> 20) #define HW_PATCH_LEVEL_RD(src) (((src) & 0x000f0000) >> 16) #define MAX_REFILL_CYCLES_SET(dst, src) \ ((dst & ~0xffff0000) | (((u32)src << 16) & 0xffff0000)) #define MIN_REFILL_CYCLES_SET(dst, src) \ ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff)) #define ALARM_THRESHOLD_SET(dst, src) \ ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff)) #define ENABLE_RNG_SET(dst, src) \ ((dst & ~BIT(10)) | (((u32)src << 10) & BIT(10))) #define REGSPEC_TEST_MODE_SET(dst, src) \ ((dst & ~BIT(8)) | (((u32)src << 8) & BIT(8))) #define MONOBIT_FAIL_MASK_SET(dst, src) \ ((dst & ~BIT(7)) | (((u32)src << 7) & BIT(7))) #define POKER_FAIL_MASK_SET(dst, src) \ ((dst & ~BIT(6)) | (((u32)src << 6) & BIT(6))) #define LONG_RUN_FAIL_MASK_SET(dst, src) \ ((dst & ~BIT(5)) | (((u32)src << 5) & BIT(5))) #define RUN_FAIL_MASK_SET(dst, src) \ ((dst & ~BIT(4)) | (((u32)src << 4) & BIT(4))) #define NOISE_FAIL_MASK_SET(dst, src) \ ((dst & ~BIT(3)) | (((u32)src << 3) & BIT(3))) #define STUCK_OUT_MASK_SET(dst, src) \ ((dst & ~BIT(2)) | (((u32)src << 2) & BIT(2))) #define SHUTDOWN_OFLO_MASK_SET(dst, src) \ ((dst & ~BIT(1)) | (((u32)src << 1) & BIT(1))) struct xgene_rng_dev { u32 irq; void __iomem *csr_base; u32 revision; u32 datum_size; u32 failure_cnt; /* Failure count last minute */ unsigned long failure_ts;/* First failure timestamp */ struct timer_list failure_timer; struct device *dev; }; static void xgene_rng_expired_timer(struct timer_list *t) { struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer); /* Clear failure counter as timer expired */ disable_irq(ctx->irq); ctx->failure_cnt = 0; del_timer(&ctx->failure_timer); enable_irq(ctx->irq); } static void xgene_rng_start_timer(struct xgene_rng_dev *ctx) { ctx->failure_timer.expires = jiffies + 120 * HZ; add_timer(&ctx->failure_timer); } /* * Initialize or reinit free running oscillators (FROs) */ static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val) { writel(fro_val, ctx->csr_base + RNG_FRODETUNE); writel(0x00000000, ctx->csr_base + RNG_ALARMMASK); writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP); writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE); } static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx) { u32 val; val = 
readl(ctx->csr_base + RNG_INTR_STS_ACK); if (val & MONOBIT_FAIL_MASK) /* * LFSR detected an out-of-bounds number of 1s after * checking 20,000 bits (test T1 as specified in the * AIS-31 standard) */ dev_err(ctx->dev, "test monobit failure error 0x%08X\n", val); if (val & POKER_FAIL_MASK) /* * LFSR detected an out-of-bounds value in at least one * of the 16 poker_count_X counters or an out-of-bounds sum * of squares value after checking 20,000 bits (test T2 as * specified in the AIS-31 standard) */ dev_err(ctx->dev, "test poker failure error 0x%08X\n", val); if (val & LONG_RUN_FAIL_MASK) /* * LFSR detected a sequence of 34 identical bits * (test T4 as specified in the AIS-31 standard) */ dev_err(ctx->dev, "test long run failure error 0x%08X\n", val); if (val & RUN_FAIL_MASK) /* * LFSR detected an out-of-bounds value for at least one * of the running counters after checking 20,000 bits * (test T3 as specified in the AIS-31 standard) */ dev_err(ctx->dev, "test run failure error 0x%08X\n", val); if (val & NOISE_FAIL_MASK) /* LFSR detected a sequence of 48 identical bits */ dev_err(ctx->dev, "noise failure error 0x%08X\n", val); if (val & STUCK_OUT_MASK) /* * Detected that the output data register generated the * same value twice in a row */ dev_err(ctx->dev, "stuck out failure error 0x%08X\n", val); if (val & SHUTDOWN_OFLO_MASK) { u32 frostopped; /* FROs shut down after a second error event. Try to recover. */ if (++ctx->failure_cnt == 1) { /* 1st time, just recover */ ctx->failure_ts = jiffies; frostopped = readl(ctx->csr_base + RNG_ALARMSTOP); xgene_rng_init_fro(ctx, frostopped); /* * We must start a timer to clear out this error * in case the system timer wraps around */ xgene_rng_start_timer(ctx); } else { /* 2nd time failure in less than 1 minute? */ if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) { dev_err(ctx->dev, "FRO shutdown failure error 0x%08X\n", val); } else { /* 2nd time failure after 1 minute, recover */ ctx->failure_ts = jiffies; ctx->failure_cnt = 1; /* * We must start a timer to clear out this * error in case the system timer wraps * around */ xgene_rng_start_timer(ctx); } frostopped = readl(ctx->csr_base + RNG_ALARMSTOP); xgene_rng_init_fro(ctx, frostopped); } } /* Clear them all */ writel(val, ctx->csr_base + RNG_INTR_STS_ACK); } static irqreturn_t xgene_rng_irq_handler(int irq, void *id) { struct xgene_rng_dev *ctx = id; /* RNG Alarm Counter overflow */ xgene_rng_chk_overflow(ctx); return IRQ_HANDLED; } static int xgene_rng_data_present(struct hwrng *rng, int wait) { struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; u32 i, val = 0; for (i = 0; i < XGENE_RNG_RETRY_COUNT; i++) { val = readl(ctx->csr_base + RNG_INTR_STS_ACK); if ((val & READY_MASK) || !wait) break; udelay(XGENE_RNG_RETRY_INTERVAL); } return (val & READY_MASK); } static int xgene_rng_data_read(struct hwrng *rng, u32 *data) { struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; int i; for (i = 0; i < ctx->datum_size; i++) data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4); /* Clear ready bit to start next transaction */ writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK); return ctx->datum_size << 2; } static void xgene_rng_init_internal(struct xgene_rng_dev *ctx) { u32 val; writel(0x00000000, ctx->csr_base + RNG_CONTROL); val = MAX_REFILL_CYCLES_SET(0, 10); val = MIN_REFILL_CYCLES_SET(val, 10); writel(val, ctx->csr_base + RNG_CONFIG); val = ALARM_THRESHOLD_SET(0, 0xFF); writel(val, ctx->csr_base + RNG_ALARMCNT); xgene_rng_init_fro(ctx, 0); writel(MONOBIT_FAIL_MASK | POKER_FAIL_MASK | 
LONG_RUN_FAIL_MASK | RUN_FAIL_MASK | NOISE_FAIL_MASK | STUCK_OUT_MASK | SHUTDOWN_OFLO_MASK | READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK); val = ENABLE_RNG_SET(0, 1); val = MONOBIT_FAIL_MASK_SET(val, 1); val = POKER_FAIL_MASK_SET(val, 1); val = LONG_RUN_FAIL_MASK_SET(val, 1); val = RUN_FAIL_MASK_SET(val, 1); val = NOISE_FAIL_MASK_SET(val, 1); val = STUCK_OUT_MASK_SET(val, 1); val = SHUTDOWN_OFLO_MASK_SET(val, 1); writel(val, ctx->csr_base + RNG_CONTROL); } static int xgene_rng_init(struct hwrng *rng) { struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; ctx->failure_cnt = 0; timer_setup(&ctx->failure_timer, xgene_rng_expired_timer, 0); ctx->revision = readl(ctx->csr_base + RNG_EIP_REV); dev_dbg(ctx->dev, "Rev %d.%d.%d\n", MAJOR_HW_REV_RD(ctx->revision), MINOR_HW_REV_RD(ctx->revision), HW_PATCH_LEVEL_RD(ctx->revision)); dev_dbg(ctx->dev, "Options 0x%08X", readl(ctx->csr_base + RNG_OPTIONS)); xgene_rng_init_internal(ctx); ctx->datum_size = RNG_MAX_DATUM; return 0; } #ifdef CONFIG_ACPI static const struct acpi_device_id xgene_rng_acpi_match[] = { { "APMC0D18", }, { } }; MODULE_DEVICE_TABLE(acpi, xgene_rng_acpi_match); #endif static struct hwrng xgene_rng_func = { .name = "xgene-rng", .init = xgene_rng_init, .data_present = xgene_rng_data_present, .data_read = xgene_rng_data_read, }; static int xgene_rng_probe(struct platform_device *pdev) { struct xgene_rng_dev *ctx; struct clk *clk; int rc = 0; ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev = &pdev->dev; platform_set_drvdata(pdev, ctx); ctx->csr_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->csr_base)) return PTR_ERR(ctx->csr_base); rc = platform_get_irq(pdev, 0); if (rc < 0) return rc; ctx->irq = rc; dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d", ctx->csr_base, ctx->irq); rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0, dev_name(&pdev->dev), ctx); if (rc) return dev_err_probe(&pdev->dev, rc, "Could not request RNG alarm IRQ\n"); /* Enable IP clock */ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) return dev_err_probe(&pdev->dev, PTR_ERR(clk), "Couldn't get the clock for RNG\n"); xgene_rng_func.priv = (unsigned long) ctx; rc = devm_hwrng_register(&pdev->dev, &xgene_rng_func); if (rc) return dev_err_probe(&pdev->dev, rc, "RNG registering failed\n"); rc = device_init_wakeup(&pdev->dev, 1); if (rc) return dev_err_probe(&pdev->dev, rc, "RNG device_init_wakeup failed\n"); return 0; } static int xgene_rng_remove(struct platform_device *pdev) { int rc; rc = device_init_wakeup(&pdev->dev, 0); if (rc) dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc); return 0; } static const struct of_device_id xgene_rng_of_match[] = { { .compatible = "apm,xgene-rng" }, { } }; MODULE_DEVICE_TABLE(of, xgene_rng_of_match); static struct platform_driver xgene_rng_driver = { .probe = xgene_rng_probe, .remove = xgene_rng_remove, .driver = { .name = "xgene-rng", .of_match_table = xgene_rng_of_match, .acpi_match_table = ACPI_PTR(xgene_rng_acpi_match), }, }; module_platform_driver(xgene_rng_driver); MODULE_DESCRIPTION("APM X-Gene RNG driver"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/xgene-rng.c
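xgene_rng_chk_overflow() above applies a two-strike policy to FRO shutdown alarms: the first event is silently recovered and timestamped, a second within 60 seconds is reported as a hard failure, and a later one simply restarts the window. A condensed sketch of that escalation logic, with monotonic seconds standing in for jiffies and recover() stubbed:

#include <stdio.h>
#include <time.h>

static time_t failure_ts;
static int failure_cnt;

static void recover(void) { /* re-init the FROs in the real driver */ }

static void on_shutdown_alarm(time_t now)
{
	if (++failure_cnt == 1) {
		failure_ts = now;       /* 1st event: just recover */
	} else if (now < failure_ts + 60) {
		printf("FRO shutdown failure (2nd event within 60s)\n");
	} else {
		failure_ts = now;       /* stale window: restart it */
		failure_cnt = 1;
	}
	recover();                      /* FROs are re-enabled in every branch */
}

int main(void)
{
	time_t t0 = time(NULL);

	on_shutdown_alarm(t0);          /* silent recovery */
	on_shutdown_alarm(t0 + 30);     /* escalates: inside the 60s window */
	failure_cnt = 0;
	on_shutdown_alarm(t0);          /* silent again */
	on_shutdown_alarm(t0 + 90);     /* outside the window: silent */
	return 0;
}

The driver additionally arms a 120-second timer to zero the counter, so a lone alarm can never poison a much later one.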
// SPDX-License-Identifier: GPL-2.0 /* * TRNG driver for the StarFive JH7110 SoC * * Copyright (C) 2022 StarFive Technology Co. */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/hw_random.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/random.h> #include <linux/reset.h> /* trng register offset */ #define STARFIVE_CTRL 0x00 #define STARFIVE_STAT 0x04 #define STARFIVE_MODE 0x08 #define STARFIVE_SMODE 0x0C #define STARFIVE_IE 0x10 #define STARFIVE_ISTAT 0x14 #define STARFIVE_RAND0 0x20 #define STARFIVE_RAND1 0x24 #define STARFIVE_RAND2 0x28 #define STARFIVE_RAND3 0x2C #define STARFIVE_RAND4 0x30 #define STARFIVE_RAND5 0x34 #define STARFIVE_RAND6 0x38 #define STARFIVE_RAND7 0x3C #define STARFIVE_AUTO_RQSTS 0x60 #define STARFIVE_AUTO_AGE 0x64 /* CTRL CMD */ #define STARFIVE_CTRL_EXEC_NOP 0x0 #define STARFIVE_CTRL_GENE_RANDNUM 0x1 #define STARFIVE_CTRL_EXEC_RANDRESEED 0x2 /* STAT */ #define STARFIVE_STAT_NONCE_MODE BIT(2) #define STARFIVE_STAT_R256 BIT(3) #define STARFIVE_STAT_MISSION_MODE BIT(8) #define STARFIVE_STAT_SEEDED BIT(9) #define STARFIVE_STAT_LAST_RESEED(x) ((x) << 16) #define STARFIVE_STAT_SRVC_RQST BIT(27) #define STARFIVE_STAT_RAND_GENERATING BIT(30) #define STARFIVE_STAT_RAND_SEEDING BIT(31) /* MODE */ #define STARFIVE_MODE_R256 BIT(3) /* SMODE */ #define STARFIVE_SMODE_NONCE_MODE BIT(2) #define STARFIVE_SMODE_MISSION_MODE BIT(8) #define STARFIVE_SMODE_MAX_REJECTS(x) ((x) << 16) /* IE */ #define STARFIVE_IE_RAND_RDY_EN BIT(0) #define STARFIVE_IE_SEED_DONE_EN BIT(1) #define STARFIVE_IE_LFSR_LOCKUP_EN BIT(4) #define STARFIVE_IE_GLBL_EN BIT(31) #define STARFIVE_IE_ALL (STARFIVE_IE_GLBL_EN | \ STARFIVE_IE_RAND_RDY_EN | \ STARFIVE_IE_SEED_DONE_EN | \ STARFIVE_IE_LFSR_LOCKUP_EN) /* ISTAT */ #define STARFIVE_ISTAT_RAND_RDY BIT(0) #define STARFIVE_ISTAT_SEED_DONE BIT(1) #define STARFIVE_ISTAT_LFSR_LOCKUP BIT(4) #define STARFIVE_RAND_LEN sizeof(u32) #define to_trng(p) container_of(p, struct starfive_trng, rng) enum reseed { RANDOM_RESEED, NONCE_RESEED, }; enum mode { PRNG_128BIT, PRNG_256BIT, }; struct starfive_trng { struct device *dev; void __iomem *base; struct clk *hclk; struct clk *ahb; struct reset_control *rst; struct hwrng rng; struct completion random_done; struct completion reseed_done; u32 mode; u32 mission; u32 reseed; /* protects against concurrent write to ctrl register */ spinlock_t write_lock; }; static u16 autoreq; module_param(autoreq, ushort, 0); MODULE_PARM_DESC(autoreq, "Auto-reseeding after random number requests by host reaches specified counter:\n" " 0 - disable counter\n" " other - reload value for internal counter"); static u16 autoage; module_param(autoage, ushort, 0); MODULE_PARM_DESC(autoage, "Auto-reseeding after specified timer countdowns to 0:\n" " 0 - disable timer\n" " other - reload value for internal timer"); static inline int starfive_trng_wait_idle(struct starfive_trng *trng) { u32 stat; return readl_relaxed_poll_timeout(trng->base + STARFIVE_STAT, stat, !(stat & (STARFIVE_STAT_RAND_GENERATING | STARFIVE_STAT_RAND_SEEDING)), 10, 100000); } static inline void starfive_trng_irq_mask_clear(struct starfive_trng *trng) { /* clear register: ISTAT */ u32 data = readl(trng->base + STARFIVE_ISTAT); writel(data, trng->base + STARFIVE_ISTAT); } static int starfive_trng_cmd(struct starfive_trng *trng, u32 cmd, 
bool wait) { int wait_time = 1000; /* allow up to 40 us for wait == 0 */ if (!wait) wait_time = 40; switch (cmd) { case STARFIVE_CTRL_GENE_RANDNUM: reinit_completion(&trng->random_done); spin_lock_irq(&trng->write_lock); writel(cmd, trng->base + STARFIVE_CTRL); spin_unlock_irq(&trng->write_lock); if (!wait_for_completion_timeout(&trng->random_done, usecs_to_jiffies(wait_time))) return -ETIMEDOUT; break; case STARFIVE_CTRL_EXEC_RANDRESEED: reinit_completion(&trng->reseed_done); spin_lock_irq(&trng->write_lock); writel(cmd, trng->base + STARFIVE_CTRL); spin_unlock_irq(&trng->write_lock); if (!wait_for_completion_timeout(&trng->reseed_done, usecs_to_jiffies(wait_time))) return -ETIMEDOUT; break; default: return -EINVAL; } return 0; } static int starfive_trng_init(struct hwrng *rng) { struct starfive_trng *trng = to_trng(rng); u32 mode, intr = 0; /* setup Auto Request/Age register */ writel(autoage, trng->base + STARFIVE_AUTO_AGE); writel(autoreq, trng->base + STARFIVE_AUTO_RQSTS); /* clear register: ISTAT */ starfive_trng_irq_mask_clear(trng); intr |= STARFIVE_IE_ALL; writel(intr, trng->base + STARFIVE_IE); mode = readl(trng->base + STARFIVE_MODE); switch (trng->mode) { case PRNG_128BIT: mode &= ~STARFIVE_MODE_R256; break; case PRNG_256BIT: mode |= STARFIVE_MODE_R256; break; default: mode |= STARFIVE_MODE_R256; break; } writel(mode, trng->base + STARFIVE_MODE); return starfive_trng_cmd(trng, STARFIVE_CTRL_EXEC_RANDRESEED, 1); } static irqreturn_t starfive_trng_irq(int irq, void *priv) { u32 status; struct starfive_trng *trng = (struct starfive_trng *)priv; status = readl(trng->base + STARFIVE_ISTAT); if (status & STARFIVE_ISTAT_RAND_RDY) { writel(STARFIVE_ISTAT_RAND_RDY, trng->base + STARFIVE_ISTAT); complete(&trng->random_done); } if (status & STARFIVE_ISTAT_SEED_DONE) { writel(STARFIVE_ISTAT_SEED_DONE, trng->base + STARFIVE_ISTAT); complete(&trng->reseed_done); } if (status & STARFIVE_ISTAT_LFSR_LOCKUP) { writel(STARFIVE_ISTAT_LFSR_LOCKUP, trng->base + STARFIVE_ISTAT); /* SEU occurred, reseeding required */ spin_lock(&trng->write_lock); writel(STARFIVE_CTRL_EXEC_RANDRESEED, trng->base + STARFIVE_CTRL); spin_unlock(&trng->write_lock); } return IRQ_HANDLED; } static void starfive_trng_cleanup(struct hwrng *rng) { struct starfive_trng *trng = to_trng(rng); writel(0, trng->base + STARFIVE_CTRL); reset_control_assert(trng->rst); clk_disable_unprepare(trng->hclk); clk_disable_unprepare(trng->ahb); } static int starfive_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct starfive_trng *trng = to_trng(rng); int ret; pm_runtime_get_sync(trng->dev); if (trng->mode == PRNG_256BIT) max = min_t(size_t, max, (STARFIVE_RAND_LEN * 8)); else max = min_t(size_t, max, (STARFIVE_RAND_LEN * 4)); if (wait) { ret = starfive_trng_wait_idle(trng); if (ret) { ret = -ETIMEDOUT; goto out; } } ret = starfive_trng_cmd(trng, STARFIVE_CTRL_GENE_RANDNUM, wait); if (ret) goto out; memcpy_fromio(buf, trng->base + STARFIVE_RAND0, max); ret = max; out: /* balance the runtime PM reference on every return path */ pm_runtime_put_sync_autosuspend(trng->dev); return ret; } static int starfive_trng_probe(struct platform_device *pdev) { int ret; int irq; struct starfive_trng *trng; trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); if (!trng) return -ENOMEM; platform_set_drvdata(pdev, trng); trng->dev = &pdev->dev; trng->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(trng->base)) return dev_err_probe(&pdev->dev, PTR_ERR(trng->base), "Error remapping memory for platform device.\n"); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; init_completion(&trng->random_done); 
init_completion(&trng->reseed_done); spin_lock_init(&trng->write_lock); ret = devm_request_irq(&pdev->dev, irq, starfive_trng_irq, 0, pdev->name, (void *)trng); if (ret) return dev_err_probe(&pdev->dev, ret, "Failed to register interrupt handler\n"); trng->hclk = devm_clk_get(&pdev->dev, "hclk"); if (IS_ERR(trng->hclk)) return dev_err_probe(&pdev->dev, PTR_ERR(trng->hclk), "Error getting hardware reference clock\n"); trng->ahb = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(trng->ahb)) return dev_err_probe(&pdev->dev, PTR_ERR(trng->ahb), "Error getting ahb reference clock\n"); trng->rst = devm_reset_control_get_shared(&pdev->dev, NULL); if (IS_ERR(trng->rst)) return dev_err_probe(&pdev->dev, PTR_ERR(trng->rst), "Error getting hardware reset line\n"); clk_prepare_enable(trng->hclk); clk_prepare_enable(trng->ahb); reset_control_deassert(trng->rst); trng->rng.name = dev_driver_string(&pdev->dev); trng->rng.init = starfive_trng_init; trng->rng.cleanup = starfive_trng_cleanup; trng->rng.read = starfive_trng_read; trng->mode = PRNG_256BIT; trng->mission = 1; trng->reseed = RANDOM_RESEED; pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, 100); pm_runtime_enable(&pdev->dev); ret = devm_hwrng_register(&pdev->dev, &trng->rng); if (ret) { pm_runtime_disable(&pdev->dev); reset_control_assert(trng->rst); clk_disable_unprepare(trng->ahb); clk_disable_unprepare(trng->hclk); return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n"); } return 0; } static int __maybe_unused starfive_trng_suspend(struct device *dev) { struct starfive_trng *trng = dev_get_drvdata(dev); clk_disable_unprepare(trng->hclk); clk_disable_unprepare(trng->ahb); return 0; } static int __maybe_unused starfive_trng_resume(struct device *dev) { struct starfive_trng *trng = dev_get_drvdata(dev); clk_prepare_enable(trng->hclk); clk_prepare_enable(trng->ahb); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(starfive_trng_pm_ops, starfive_trng_suspend, starfive_trng_resume); static const struct of_device_id trng_dt_ids[] __maybe_unused = { { .compatible = "starfive,jh7110-trng" }, { } }; MODULE_DEVICE_TABLE(of, trng_dt_ids); static struct platform_driver starfive_trng_driver = { .probe = starfive_trng_probe, .driver = { .name = "jh7110-trng", .pm = &starfive_trng_pm_ops, .of_match_table = of_match_ptr(trng_dt_ids), }, }; module_platform_driver(starfive_trng_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("StarFive True Random Number Generator");
linux-master
drivers/char/hw_random/jh7110-trng.c
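starfive_trng_read() above clamps every request to what a single generation produces: eight 32-bit RAND registers in 256-bit mode, four in 128-bit mode. The clamp in isolation, as a standalone sketch:

#include <stdio.h>

#define RAND_LEN 4u /* sizeof(u32), mirrors STARFIVE_RAND_LEN */

enum mode { PRNG_128BIT, PRNG_256BIT };

static size_t clamp_request(enum mode m, size_t max)
{
	size_t limit = (m == PRNG_256BIT) ? RAND_LEN * 8 : RAND_LEN * 4;

	return max < limit ? max : limit;
}

int main(void)
{
	printf("%zu\n", clamp_request(PRNG_256BIT, 100)); /* 32 */
	printf("%zu\n", clamp_request(PRNG_128BIT, 100)); /* 16 */
	printf("%zu\n", clamp_request(PRNG_256BIT, 8));   /* 8 */
	return 0;
}

Returning a short count is fine here: the hwrng core keeps calling ->read() until its buffer is full.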
// SPDX-License-Identifier: GPL-2.0-only /* * Random Number Generator driver for the Keystone SOC * * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com * * Authors: Sandeep Nair * Vitaly Andrianov */ #include <linux/hw_random.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/pm_runtime.h> #include <linux/err.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/delay.h> #include <linux/timekeeping.h> #define SA_CMD_STATUS_OFS 0x8 /* TRNG enable control in SA System module*/ #define SA_CMD_STATUS_REG_TRNG_ENABLE BIT(3) /* TRNG start control in TRNG module */ #define TRNG_CNTL_REG_TRNG_ENABLE BIT(10) /* Data ready indicator in STATUS register */ #define TRNG_STATUS_REG_READY BIT(0) /* Data ready clear control in INTACK register */ #define TRNG_INTACK_REG_READY BIT(0) /* * Number of samples taken to gather entropy during startup. * If value is 0, the number of samples is 2^24 else * equals value times 2^8. */ #define TRNG_DEF_STARTUP_CYCLES 0 #define TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT 16 /* * Minimum number of samples taken to regenerate entropy * If value is 0, the number of samples is 2^24 else * equals value times 2^6. */ #define TRNG_DEF_MIN_REFILL_CYCLES 1 #define TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT 0 /* * Maximum number of samples taken to regenerate entropy * If value is 0, the number of samples is 2^24 else * equals value times 2^8. */ #define TRNG_DEF_MAX_REFILL_CYCLES 0 #define TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT 16 /* Number of CLK input cycles between samples */ #define TRNG_DEF_CLK_DIV_CYCLES 0 #define TRNG_CFG_REG_SAMPLE_DIV_SHIFT 8 /* Maximum retries to get rng data */ #define SA_MAX_RNG_DATA_RETRIES 5 /* Delay between retries (in usecs) */ #define SA_RNG_DATA_RETRY_DELAY 5 struct trng_regs { u32 output_l; u32 output_h; u32 status; u32 intmask; u32 intack; u32 control; u32 config; }; struct ks_sa_rng { struct device *dev; struct hwrng rng; struct clk *clk; struct regmap *regmap_cfg; struct trng_regs __iomem *reg_rng; u64 ready_ts; unsigned int refill_delay_ns; }; static unsigned int cycles_to_ns(unsigned long clk_rate, unsigned int cycles) { return DIV_ROUND_UP_ULL((TRNG_DEF_CLK_DIV_CYCLES + 1) * 1000000000ull * cycles, clk_rate); } static unsigned int startup_delay_ns(unsigned long clk_rate) { if (!TRNG_DEF_STARTUP_CYCLES) return cycles_to_ns(clk_rate, BIT(24)); return cycles_to_ns(clk_rate, 256 * TRNG_DEF_STARTUP_CYCLES); } static unsigned int refill_delay_ns(unsigned long clk_rate) { if (!TRNG_DEF_MAX_REFILL_CYCLES) return cycles_to_ns(clk_rate, BIT(24)); return cycles_to_ns(clk_rate, 256 * TRNG_DEF_MAX_REFILL_CYCLES); } static int ks_sa_rng_init(struct hwrng *rng) { u32 value; struct device *dev = (struct device *)rng->priv; struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev); unsigned long clk_rate = clk_get_rate(ks_sa_rng->clk); /* Enable RNG module */ regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS, SA_CMD_STATUS_REG_TRNG_ENABLE, SA_CMD_STATUS_REG_TRNG_ENABLE); /* Configure RNG module */ writel(0, &ks_sa_rng->reg_rng->control); value = TRNG_DEF_STARTUP_CYCLES << TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT; writel(value, &ks_sa_rng->reg_rng->control); value = (TRNG_DEF_MIN_REFILL_CYCLES << TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT) | (TRNG_DEF_MAX_REFILL_CYCLES << TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT) | (TRNG_DEF_CLK_DIV_CYCLES << TRNG_CFG_REG_SAMPLE_DIV_SHIFT); writel(value, 
&ks_sa_rng->reg_rng->config); /* Disable all interrupts from TRNG */ writel(0, &ks_sa_rng->reg_rng->intmask); /* Enable RNG */ value = readl(&ks_sa_rng->reg_rng->control); value |= TRNG_CNTL_REG_TRNG_ENABLE; writel(value, &ks_sa_rng->reg_rng->control); ks_sa_rng->refill_delay_ns = refill_delay_ns(clk_rate); ks_sa_rng->ready_ts = ktime_get_ns() + startup_delay_ns(clk_rate); return 0; } static void ks_sa_rng_cleanup(struct hwrng *rng) { struct device *dev = (struct device *)rng->priv; struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev); /* Disable RNG */ writel(0, &ks_sa_rng->reg_rng->control); regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS, SA_CMD_STATUS_REG_TRNG_ENABLE, 0); } static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data) { struct device *dev = (struct device *)rng->priv; struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev); /* Read random data */ data[0] = readl(&ks_sa_rng->reg_rng->output_l); data[1] = readl(&ks_sa_rng->reg_rng->output_h); writel(TRNG_INTACK_REG_READY, &ks_sa_rng->reg_rng->intack); ks_sa_rng->ready_ts = ktime_get_ns() + ks_sa_rng->refill_delay_ns; return sizeof(u32) * 2; } static int ks_sa_rng_data_present(struct hwrng *rng, int wait) { struct device *dev = (struct device *)rng->priv; struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev); u64 now = ktime_get_ns(); u32 ready; int j; if (wait && now < ks_sa_rng->ready_ts) { /* Max delay expected here is 81920000 ns */ unsigned long min_delay = DIV_ROUND_UP((u32)(ks_sa_rng->ready_ts - now), 1000); usleep_range(min_delay, min_delay + SA_RNG_DATA_RETRY_DELAY); } for (j = 0; j < SA_MAX_RNG_DATA_RETRIES; j++) { ready = readl(&ks_sa_rng->reg_rng->status); ready &= TRNG_STATUS_REG_READY; if (ready || !wait) break; udelay(SA_RNG_DATA_RETRY_DELAY); } return ready; } static int ks_sa_rng_probe(struct platform_device *pdev) { struct ks_sa_rng *ks_sa_rng; struct device *dev = &pdev->dev; int ret; ks_sa_rng = devm_kzalloc(dev, sizeof(*ks_sa_rng), GFP_KERNEL); if (!ks_sa_rng) return -ENOMEM; ks_sa_rng->dev = dev; ks_sa_rng->rng = (struct hwrng) { .name = "ks_sa_hwrng", .init = ks_sa_rng_init, .data_read = ks_sa_rng_data_read, .data_present = ks_sa_rng_data_present, .cleanup = ks_sa_rng_cleanup, }; ks_sa_rng->rng.priv = (unsigned long)dev; ks_sa_rng->reg_rng = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ks_sa_rng->reg_rng)) return PTR_ERR(ks_sa_rng->reg_rng); ks_sa_rng->regmap_cfg = syscon_regmap_lookup_by_phandle(dev->of_node, "ti,syscon-sa-cfg"); if (IS_ERR(ks_sa_rng->regmap_cfg)) { dev_err(dev, "syscon_node_to_regmap failed\n"); return -EINVAL; } pm_runtime_enable(dev); ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(dev, "Failed to enable SA power-domain\n"); pm_runtime_disable(dev); return ret; } platform_set_drvdata(pdev, ks_sa_rng); return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng); } static int ks_sa_rng_remove(struct platform_device *pdev) { pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } static const struct of_device_id ks_sa_rng_dt_match[] = { { .compatible = "ti,keystone-rng", }, { }, }; MODULE_DEVICE_TABLE(of, ks_sa_rng_dt_match); static struct platform_driver ks_sa_rng_driver = { .driver = { .name = "ks-sa-rng", .of_match_table = ks_sa_rng_dt_match, }, .probe = ks_sa_rng_probe, .remove = ks_sa_rng_remove, }; module_platform_driver(ks_sa_rng_driver); MODULE_DESCRIPTION("Keystone NETCP SA H/W Random Number Generator driver"); MODULE_AUTHOR("Vitaly Andrianov <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/ks-sa-rng.c
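The delay constants in ks-sa-rng above follow directly from the cycles_to_ns() arithmetic. Below is a standalone userspace sketch (not driver code) that reproduces the numbers; the 204800000 Hz clock rate is an assumption, chosen because it yields exactly the "Max delay expected here is 81920000 ns" figure quoted in ks_sa_rng_data_present(), whereas the driver reads the real rate with clk_get_rate().

#include <stdio.h>
#include <stdint.h>

static uint64_t cycles_to_ns(uint64_t clk_rate, uint64_t cycles)
{
	/* mirrors DIV_ROUND_UP_ULL((TRNG_DEF_CLK_DIV_CYCLES + 1) * 1e9 * cycles, clk_rate),
	 * with TRNG_DEF_CLK_DIV_CYCLES == 0 */
	uint64_t num = 1000000000ULL * cycles;
	return (num + clk_rate - 1) / clk_rate;
}

int main(void)
{
	uint64_t clk_rate = 204800000ULL;	/* ASSUMED SA clock rate, Hz */

	/* STARTUP_CYCLES == 0 and MAX_REFILL_CYCLES == 0 both mean 2^24 samples */
	printf("startup delay: %llu ns\n",
	       (unsigned long long)cycles_to_ns(clk_rate, 1ULL << 24));
	printf("refill delay:  %llu ns\n",
	       (unsigned long long)cycles_to_ns(clk_rate, 1ULL << 24));
	return 0;	/* both print 81920000 ns at this clock rate */
}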
/* * Hardware Random Number Generator support for Cavium Networks * Octeon processor family. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2009 Cavium Networks */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/device.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/gfp.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-rnm-defs.h> struct octeon_rng { struct hwrng ops; void __iomem *control_status; void __iomem *result; }; static int octeon_rng_init(struct hwrng *rng) { union cvmx_rnm_ctl_status ctl; struct octeon_rng *p = container_of(rng, struct octeon_rng, ops); ctl.u64 = 0; ctl.s.ent_en = 1; /* Enable the entropy source. */ ctl.s.rng_en = 1; /* Enable the RNG hardware. */ cvmx_write_csr((__force u64)p->control_status, ctl.u64); return 0; } static void octeon_rng_cleanup(struct hwrng *rng) { union cvmx_rnm_ctl_status ctl; struct octeon_rng *p = container_of(rng, struct octeon_rng, ops); ctl.u64 = 0; /* Disable everything. */ cvmx_write_csr((__force u64)p->control_status, ctl.u64); } static int octeon_rng_data_read(struct hwrng *rng, u32 *data) { struct octeon_rng *p = container_of(rng, struct octeon_rng, ops); *data = cvmx_read64_uint32((__force u64)p->result); return sizeof(u32); } static int octeon_rng_probe(struct platform_device *pdev) { struct resource *res_ports; struct resource *res_result; struct octeon_rng *rng; int ret; struct hwrng ops = { .name = "octeon", .init = octeon_rng_init, .cleanup = octeon_rng_cleanup, .data_read = octeon_rng_data_read }; rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); if (!rng) return -ENOMEM; res_ports = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res_ports) return -ENOENT; res_result = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res_result) return -ENOENT; rng->control_status = devm_ioremap(&pdev->dev, res_ports->start, sizeof(u64)); if (!rng->control_status) return -ENOENT; rng->result = devm_ioremap(&pdev->dev, res_result->start, sizeof(u64)); if (!rng->result) return -ENOENT; rng->ops = ops; platform_set_drvdata(pdev, &rng->ops); ret = devm_hwrng_register(&pdev->dev, &rng->ops); if (ret) return -ENOENT; dev_info(&pdev->dev, "Octeon Random Number Generator\n"); return 0; } static struct platform_driver octeon_rng_driver = { .driver = { .name = "octeon_rng", }, .probe = octeon_rng_probe, }; module_platform_driver(octeon_rng_driver); MODULE_AUTHOR("David Daney"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/octeon-rng.c
/* * RNG driver for VIA RNGs * * Copyright 2005 (c) MontaVista Software, Inc. * * with the majority of the code coming from: * * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) * (c) Copyright 2003 Red Hat Inc <[email protected]> * * derived from * * Hardware driver for the AMD 768 Random Number Generator (RNG) * (c) Copyright 2001 Red Hat Inc * * derived from * * Hardware driver for Intel i810 Random Number Generator (RNG) * Copyright 2000,2001 Jeff Garzik <[email protected]> * Copyright 2000,2001 Philipp Rumpf <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <crypto/padlock.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/hw_random.h> #include <linux/delay.h> #include <asm/cpu_device_id.h> #include <asm/io.h> #include <asm/msr.h> #include <asm/cpufeature.h> #include <asm/fpu/api.h> enum { VIA_STRFILT_CNT_SHIFT = 16, VIA_STRFILT_FAIL = (1 << 15), VIA_STRFILT_ENABLE = (1 << 14), VIA_RAWBITS_ENABLE = (1 << 13), VIA_RNG_ENABLE = (1 << 6), VIA_NOISESRC1 = (1 << 8), VIA_NOISESRC2 = (1 << 9), VIA_XSTORE_CNT_MASK = 0x0F, VIA_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */ VIA_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */ VIA_RNG_CHUNK_4_MASK = 0xFFFFFFFF, VIA_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */ VIA_RNG_CHUNK_2_MASK = 0xFFFF, VIA_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */ VIA_RNG_CHUNK_1_MASK = 0xFF, }; /* * Investigate using the 'rep' prefix to obtain 32 bits of random data * in one insn. The upside is potentially better performance. The * downside is that the instruction becomes no longer atomic. Due to * this, just like familiar issues with /dev/random itself, the worst * case of a 'rep xstore' could potentially pause a cpu for an * unreasonably long time. In practice, this condition would likely * only occur when the hardware is failing. (or so we hope :)) * * Another possible performance boost may come from simply buffering * until we have 4 bytes, thus returning a u32 at a time, * instead of the current u8-at-a-time. * * Padlock instructions can generate a spurious DNA fault, but the * kernel doesn't use CR0.TS, so this doesn't matter. */ static inline u32 xstore(u32 *addr, u32 edx_in) { u32 eax_out; asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */" : "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr)); return eax_out; } static int via_rng_data_present(struct hwrng *rng, int wait) { char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ ((aligned(STACK_ALIGN))); u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); u32 bytes_out; int i; /* We choose the recommended 1-byte-per-instruction RNG rate, * for greater randomness at the expense of speed. Larger * values 2, 4, or 8 bytes-per-instruction yield greater * speed at lesser randomness. * * If you change this to another VIA_CHUNK_n, you must also * change the ->n_bytes values in rng_vendor_ops[] tables. * VIA_CHUNK_8 requires further code changes. * * A copy of MSR_VIA_RNG is placed in eax_out when xstore * completes. */ for (i = 0; i < 20; i++) { *via_rng_datum = 0; /* paranoia, not really necessary */ bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1); bytes_out &= VIA_XSTORE_CNT_MASK; if (bytes_out || !wait) break; udelay(10); } rng->priv = *via_rng_datum; return bytes_out ? 
1 : 0; } static int via_rng_data_read(struct hwrng *rng, u32 *data) { u32 via_rng_datum = (u32)rng->priv; *data = via_rng_datum; return 1; } static int via_rng_init(struct hwrng *rng) { struct cpuinfo_x86 *c = &cpu_data(0); u32 lo, hi, old_lo; /* VIA Nano CPUs don't have the MSR_VIA_RNG anymore. The RNG * is always enabled if CPUID rng_en is set. There is no * RNG configuration like it used to be the case in this * register */ if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)){ if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { pr_err(PFX "can't enable hardware RNG " "if XSTORE is not enabled\n"); return -ENODEV; } return 0; } /* Control the RNG via MSR. Tread lightly and pay very close * attention to values written, as the reserved fields * are documented to be "undefined and unpredictable"; but it * does not say to write them as zero, so I make a guess that * we restore the values we find in the register. */ rdmsr(MSR_VIA_RNG, lo, hi); old_lo = lo; lo &= ~(0x7f << VIA_STRFILT_CNT_SHIFT); lo &= ~VIA_XSTORE_CNT_MASK; lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE); lo |= VIA_RNG_ENABLE; lo |= VIA_NOISESRC1; /* Enable secondary noise source on CPUs where it is present. */ /* Nehemiah stepping 8 and higher */ if ((c->x86_model == 9) && (c->x86_stepping > 7)) lo |= VIA_NOISESRC2; /* Esther */ if (c->x86_model >= 10) lo |= VIA_NOISESRC2; if (lo != old_lo) wrmsr(MSR_VIA_RNG, lo, hi); /* perhaps-unnecessary sanity check; remove after testing if unneeded */ rdmsr(MSR_VIA_RNG, lo, hi); if ((lo & VIA_RNG_ENABLE) == 0) { pr_err(PFX "cannot enable VIA C3 RNG, aborting\n"); return -ENODEV; } return 0; } static struct hwrng via_rng = { .name = "via", .init = via_rng_init, .data_present = via_rng_data_present, .data_read = via_rng_data_read, }; static int __init via_rng_mod_init(void) { int err; if (!boot_cpu_has(X86_FEATURE_XSTORE)) return -ENODEV; pr_info("VIA RNG detected\n"); err = hwrng_register(&via_rng); if (err) { pr_err(PFX "RNG registering failed (%d)\n", err); goto out; } out: return err; } module_init(via_rng_mod_init); static void __exit via_rng_mod_exit(void) { hwrng_unregister(&via_rng); } module_exit(via_rng_mod_exit); static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = { X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id); MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/via-rng.c
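The long comment in via_rng_data_present() floats the idea of buffering until four bytes are available, so the core would receive a u32 per call instead of the current u8-at-a-time. A minimal userspace sketch of that packing only, with get_one_byte() standing in for the VIA_RNG_CHUNK_1 xstore path; the LCG below is a deterministic demo source, not a real RNG:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the 1-byte-per-xstore path (hypothetical helper). */
static uint8_t get_one_byte(void)
{
	static uint32_t lcg = 0x12345678;	/* deterministic demo source */
	lcg = lcg * 1664525u + 1013904223u;
	return (uint8_t)(lcg >> 24);
}

/* Accumulate four 1-byte reads into one little-endian u32. */
static uint32_t gather_u32(void)
{
	uint32_t word = 0;

	for (int i = 0; i < 4; i++)
		word |= (uint32_t)get_one_byte() << (8 * i);
	return word;
}

int main(void)
{
	printf("0x%08x\n", gather_u32());
	return 0;
}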
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2006-2007 PA Semi, Inc * * Maintained by: Olof Johansson <[email protected]> * * Driver for the PWRficient onchip rng */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/hw_random.h> #include <linux/delay.h> #include <linux/io.h> #define SDCRNG_CTL_REG 0x00 #define SDCRNG_CTL_FVLD_M 0x0000f000 #define SDCRNG_CTL_FVLD_S 12 #define SDCRNG_CTL_KSZ 0x00000800 #define SDCRNG_CTL_RSRC_CRG 0x00000010 #define SDCRNG_CTL_RSRC_RRG 0x00000000 #define SDCRNG_CTL_CE 0x00000004 #define SDCRNG_CTL_RE 0x00000002 #define SDCRNG_CTL_DR 0x00000001 #define SDCRNG_CTL_SELECT_RRG_RNG (SDCRNG_CTL_RE | SDCRNG_CTL_RSRC_RRG) #define SDCRNG_CTL_SELECT_CRG_RNG (SDCRNG_CTL_CE | SDCRNG_CTL_RSRC_CRG) #define SDCRNG_VAL_REG 0x20 #define MODULE_NAME "pasemi_rng" static int pasemi_rng_data_present(struct hwrng *rng, int wait) { void __iomem *rng_regs = (void __iomem *)rng->priv; int data, i; for (i = 0; i < 20; i++) { data = (in_le32(rng_regs + SDCRNG_CTL_REG) & SDCRNG_CTL_FVLD_M) ? 1 : 0; if (data || !wait) break; udelay(10); } return data; } static int pasemi_rng_data_read(struct hwrng *rng, u32 *data) { void __iomem *rng_regs = (void __iomem *)rng->priv; *data = in_le32(rng_regs + SDCRNG_VAL_REG); return 4; } static int pasemi_rng_init(struct hwrng *rng) { void __iomem *rng_regs = (void __iomem *)rng->priv; u32 ctl; ctl = SDCRNG_CTL_DR | SDCRNG_CTL_SELECT_RRG_RNG | SDCRNG_CTL_KSZ; out_le32(rng_regs + SDCRNG_CTL_REG, ctl); out_le32(rng_regs + SDCRNG_CTL_REG, ctl & ~SDCRNG_CTL_DR); return 0; } static void pasemi_rng_cleanup(struct hwrng *rng) { void __iomem *rng_regs = (void __iomem *)rng->priv; u32 ctl; ctl = SDCRNG_CTL_RE | SDCRNG_CTL_CE; out_le32(rng_regs + SDCRNG_CTL_REG, in_le32(rng_regs + SDCRNG_CTL_REG) & ~ctl); } static struct hwrng pasemi_rng = { .name = MODULE_NAME, .init = pasemi_rng_init, .cleanup = pasemi_rng_cleanup, .data_present = pasemi_rng_data_present, .data_read = pasemi_rng_data_read, }; static int rng_probe(struct platform_device *pdev) { void __iomem *rng_regs; rng_regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rng_regs)) return PTR_ERR(rng_regs); pasemi_rng.priv = (unsigned long)rng_regs; pr_info("Registering PA Semi RNG\n"); return devm_hwrng_register(&pdev->dev, &pasemi_rng); } static const struct of_device_id rng_match[] = { { .compatible = "1682m-rng", }, { .compatible = "pasemi,pwrficient-rng", }, { }, }; MODULE_DEVICE_TABLE(of, rng_match); static struct platform_driver rng_driver = { .driver = { .name = "pasemi-rng", .of_match_table = rng_match, }, .probe = rng_probe, }; module_platform_driver(rng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Egor Martovetsky <[email protected]>"); MODULE_DESCRIPTION("H/W RNG driver for PA Semi processor");
linux-master
drivers/char/hw_random/pasemi-rng.c
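pasemi_rng_data_present() reports availability from the FVLD field of the control register (mask 0x0000f000, shift 12): any nonzero count of valid FIFO words means a read may proceed. A quick userspace check of the field extraction, with an invented register snapshot:

#include <stdio.h>
#include <stdint.h>

#define SDCRNG_CTL_FVLD_M 0x0000f000u
#define SDCRNG_CTL_FVLD_S 12

int main(void)
{
	uint32_t ctl = 0x00003812;	/* invented register snapshot */
	uint32_t fvld = (ctl & SDCRNG_CTL_FVLD_M) >> SDCRNG_CTL_FVLD_S;

	printf("FVLD = %u -> data %s\n", fvld, fvld ? "present" : "absent");
	return 0;	/* prints: FVLD = 3 -> data present */
}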
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018-2019 Linaro Ltd. */ #include <linux/delay.h> #include <linux/of.h> #include <linux/hw_random.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/tee_drv.h> #include <linux/uuid.h> #define DRIVER_NAME "optee-rng" #define TEE_ERROR_HEALTH_TEST_FAIL 0x00000001 /* * TA_CMD_GET_ENTROPY - Get Entropy from RNG * * param[0] (inout memref) - Entropy buffer memory reference * param[1] unused * param[2] unused * param[3] unused * * Result: * TEE_SUCCESS - Invoke command success * TEE_ERROR_BAD_PARAMETERS - Incorrect input param * TEE_ERROR_NOT_SUPPORTED - Requested entropy size greater than size of pool * TEE_ERROR_HEALTH_TEST_FAIL - Continuous health testing failed */ #define TA_CMD_GET_ENTROPY 0x0 /* * TA_CMD_GET_RNG_INFO - Get RNG information * * param[0] (out value) - value.a: RNG data-rate in bytes per second * value.b: Quality/Entropy per 1024 bit of data * param[1] unused * param[2] unused * param[3] unused * * Result: * TEE_SUCCESS - Invoke command success * TEE_ERROR_BAD_PARAMETERS - Incorrect input param */ #define TA_CMD_GET_RNG_INFO 0x1 #define MAX_ENTROPY_REQ_SZ (4 * 1024) /** * struct optee_rng_private - OP-TEE Random Number Generator private data * @dev: OP-TEE based RNG device. * @ctx: OP-TEE context handler. * @session_id: RNG TA session identifier. * @data_rate: RNG data rate. * @entropy_shm_pool: Memory pool shared with RNG device. * @optee_rng: OP-TEE RNG driver structure. */ struct optee_rng_private { struct device *dev; struct tee_context *ctx; u32 session_id; u32 data_rate; struct tee_shm *entropy_shm_pool; struct hwrng optee_rng; }; #define to_optee_rng_private(r) \ container_of(r, struct optee_rng_private, optee_rng) static size_t get_optee_rng_data(struct optee_rng_private *pvt_data, void *buf, size_t req_size) { int ret = 0; u8 *rng_data = NULL; size_t rng_size = 0; struct tee_ioctl_invoke_arg inv_arg; struct tee_param param[4]; memset(&inv_arg, 0, sizeof(inv_arg)); memset(&param, 0, sizeof(param)); /* Invoke TA_CMD_GET_ENTROPY function of Trusted App */ inv_arg.func = TA_CMD_GET_ENTROPY; inv_arg.session = pvt_data->session_id; inv_arg.num_params = 4; /* Fill invoke cmd params */ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT; param[0].u.memref.shm = pvt_data->entropy_shm_pool; param[0].u.memref.size = req_size; param[0].u.memref.shm_offs = 0; ret = tee_client_invoke_func(pvt_data->ctx, &inv_arg, param); if ((ret < 0) || (inv_arg.ret != 0)) { dev_err(pvt_data->dev, "TA_CMD_GET_ENTROPY invoke err: %x\n", inv_arg.ret); return 0; } rng_data = tee_shm_get_va(pvt_data->entropy_shm_pool, 0); if (IS_ERR(rng_data)) { dev_err(pvt_data->dev, "tee_shm_get_va failed\n"); return 0; } rng_size = param[0].u.memref.size; memcpy(buf, rng_data, rng_size); return rng_size; } static int optee_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct optee_rng_private *pvt_data = to_optee_rng_private(rng); size_t read = 0, rng_size; int timeout = 1; u8 *data = buf; if (max > MAX_ENTROPY_REQ_SZ) max = MAX_ENTROPY_REQ_SZ; while (read < max) { rng_size = get_optee_rng_data(pvt_data, data, (max - read)); data += rng_size; read += rng_size; if (wait && pvt_data->data_rate) { if ((timeout-- == 0) || (read == max)) return read; msleep((1000 * (max - read)) / pvt_data->data_rate); } else { return read; } } return read; } static int optee_rng_init(struct hwrng *rng) { struct optee_rng_private *pvt_data = to_optee_rng_private(rng); struct tee_shm *entropy_shm_pool = NULL; 
entropy_shm_pool = tee_shm_alloc_kernel_buf(pvt_data->ctx, MAX_ENTROPY_REQ_SZ); if (IS_ERR(entropy_shm_pool)) { dev_err(pvt_data->dev, "tee_shm_alloc_kernel_buf failed\n"); return PTR_ERR(entropy_shm_pool); } pvt_data->entropy_shm_pool = entropy_shm_pool; return 0; }
static void optee_rng_cleanup(struct hwrng *rng) { struct optee_rng_private *pvt_data = to_optee_rng_private(rng); tee_shm_free(pvt_data->entropy_shm_pool); }
static struct optee_rng_private pvt_data = { .optee_rng = { .name = DRIVER_NAME, .init = optee_rng_init, .cleanup = optee_rng_cleanup, .read = optee_rng_read, } };
static int get_optee_rng_info(struct device *dev) { int ret = 0; struct tee_ioctl_invoke_arg inv_arg; struct tee_param param[4]; memset(&inv_arg, 0, sizeof(inv_arg)); memset(&param, 0, sizeof(param)); /* Invoke TA_CMD_GET_RNG_INFO function of Trusted App */ inv_arg.func = TA_CMD_GET_RNG_INFO; inv_arg.session = pvt_data.session_id; inv_arg.num_params = 4; /* Fill invoke cmd params */ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT; ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param); if ((ret < 0) || (inv_arg.ret != 0)) { dev_err(dev, "TA_CMD_GET_RNG_INFO invoke err: %x\n", inv_arg.ret); return -EINVAL; } pvt_data.data_rate = param[0].u.value.a; pvt_data.optee_rng.quality = param[0].u.value.b; return 0; }
static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data) { if (ver->impl_id == TEE_IMPL_ID_OPTEE) return 1; else return 0; }
static int optee_rng_probe(struct device *dev) { struct tee_client_device *rng_device = to_tee_client_device(dev); int ret = 0, err = -ENODEV; struct tee_ioctl_open_session_arg sess_arg; memset(&sess_arg, 0, sizeof(sess_arg)); /* Open context with TEE driver */ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL); if (IS_ERR(pvt_data.ctx)) return -ENODEV; /* Open session with hwrng Trusted App */ export_uuid(sess_arg.uuid, &rng_device->id.uuid); sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC; sess_arg.num_params = 0; ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL); if ((ret < 0) || (sess_arg.ret != 0)) { dev_err(dev, "tee_client_open_session failed, err: %x\n", sess_arg.ret); err = -EINVAL; goto out_ctx; } pvt_data.session_id = sess_arg.session; err = get_optee_rng_info(dev); if (err) goto out_sess; err = devm_hwrng_register(dev, &pvt_data.optee_rng); if (err) { dev_err(dev, "hwrng registration failed (%d)\n", err); goto out_sess; } pvt_data.dev = dev; return 0; out_sess: tee_client_close_session(pvt_data.ctx, pvt_data.session_id); out_ctx: tee_client_close_context(pvt_data.ctx); return err; }
static int optee_rng_remove(struct device *dev) { tee_client_close_session(pvt_data.ctx, pvt_data.session_id); tee_client_close_context(pvt_data.ctx); return 0; }
static const struct tee_client_device_id optee_rng_id_table[] = { {UUID_INIT(0xab7a617c, 0xb8e7, 0x4d8f, 0x83, 0x01, 0xd0, 0x9b, 0x61, 0x03, 0x6b, 0x64)}, {} };
MODULE_DEVICE_TABLE(tee, optee_rng_id_table);
static struct tee_client_driver optee_rng_driver = { .id_table = optee_rng_id_table, .driver = { .name = DRIVER_NAME, .bus = &tee_bus_type, .probe = optee_rng_probe, .remove = optee_rng_remove, }, };
static int __init optee_rng_mod_init(void) { return driver_register(&optee_rng_driver.driver); }
static void __exit optee_rng_mod_exit(void) { driver_unregister(&optee_rng_driver.driver); }
module_init(optee_rng_mod_init); module_exit(optee_rng_mod_exit);
MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Sumit Garg <[email protected]>"); MODULE_DESCRIPTION("OP-TEE based random number generator driver");
linux-master
drivers/char/hw_random/optee-rng.c
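optee_rng_read() paces blocking reads with msleep((1000 * (max - read)) / data_rate), where data_rate is the bytes-per-second figure reported by TA_CMD_GET_RNG_INFO. A worked example of the arithmetic, with an assumed rate of 8192 B/s:

#include <stdio.h>

int main(void)
{
	unsigned int data_rate = 8192;	/* assumed TA-reported rate, bytes/s */
	unsigned int max = 4096, read = 0;
	unsigned int sleep_ms = (1000 * (max - read)) / data_rate;

	printf("sleep %u ms for %u outstanding bytes\n", sleep_ms, max - read);
	return 0;	/* prints: sleep 500 ms for 4096 outstanding bytes */
}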
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2019-2020 ARM Limited or its affiliates. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/interrupt.h> #include <linux/irqreturn.h> #include <linux/workqueue.h> #include <linux/circ_buf.h> #include <linux/completion.h> #include <linux/of.h> #include <linux/bitfield.h> #include <linux/fips.h> #include "cctrng.h" #define CC_REG_LOW(name) (name ## _BIT_SHIFT) #define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1) #define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name)) #define CC_REG_FLD_GET(reg_name, fld_name, reg_val) \ (FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val)) #define CC_HW_RESET_LOOP_COUNT 10 #define CC_TRNG_SUSPEND_TIMEOUT 3000 /* data circular buffer in words must be: * - of a power-of-2 size (limitation of circ_buf.h macros) * - at least 6, the size generated in the EHR according to HW implementation */ #define CCTRNG_DATA_BUF_WORDS 32 /* The timeout for the TRNG operation should be calculated with the formula: * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE * while: * - SAMPLE_CNT is input value from the characterisation process * - all the rest are constants */ #define EHR_NUM 1 #define VN_COEFF 4 #define EHR_LENGTH CC_TRNG_EHR_IN_BITS #define SCALE_VALUE 2 #define CCTRNG_TIMEOUT(smpl_cnt) \ (EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE) struct cctrng_drvdata { struct platform_device *pdev; void __iomem *cc_base; struct clk *clk; struct hwrng rng; u32 active_rosc; /* Sampling interval for each ring oscillator: * count of ring oscillator cycles between consecutive bits sampling. * Value of 0 indicates non-valid rosc */ u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS]; u32 data_buf[CCTRNG_DATA_BUF_WORDS]; struct circ_buf circ; struct work_struct compwork; struct work_struct startwork; /* pending_hw - 1 when HW is pending, 0 when it is idle */ atomic_t pending_hw; /* protects against multiple concurrent consumers of data_buf */ spinlock_t read_lock; }; /* functions for write/read CC registers */ static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val) { iowrite32(val, (drvdata->cc_base + reg)); } static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg) { return ioread32(drvdata->cc_base + reg); } static int cc_trng_pm_get(struct device *dev) { int rc = 0; rc = pm_runtime_get_sync(dev); /* pm_runtime_get_sync() can return 1 as a valid return code */ return (rc == 1 ? 0 : rc); } static void cc_trng_pm_put_suspend(struct device *dev) { int rc = 0; pm_runtime_mark_last_busy(dev); rc = pm_runtime_put_autosuspend(dev); if (rc) dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc); } static int cc_trng_pm_init(struct cctrng_drvdata *drvdata) { struct device *dev = &(drvdata->pdev->dev); /* must be before the enabling to avoid redundant suspending */ pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); /* set us as active - note we won't do PM ops until cc_trng_pm_go()! 
*/ return pm_runtime_set_active(dev); } static void cc_trng_pm_go(struct cctrng_drvdata *drvdata) { struct device *dev = &(drvdata->pdev->dev); /* enable the PM module*/ pm_runtime_enable(dev); } static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata) { struct device *dev = &(drvdata->pdev->dev); pm_runtime_disable(dev); } static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata) { struct device *dev = &(drvdata->pdev->dev); struct device_node *np = drvdata->pdev->dev.of_node; int rc; int i; /* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */ int ret = -EINVAL; rc = of_property_read_u32_array(np, "arm,rosc-ratio", drvdata->smpl_ratio, CC_TRNG_NUM_OF_ROSCS); if (rc) { /* arm,rosc-ratio was not found in device tree */ return rc; } /* verify that at least one rosc has (sampling ratio > 0) */ for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) { dev_dbg(dev, "rosc %d sampling ratio %u", i, drvdata->smpl_ratio[i]); if (drvdata->smpl_ratio[i] > 0) ret = 0; } return ret; } static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata) { struct device *dev = &(drvdata->pdev->dev); dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc); drvdata->active_rosc += 1; while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) { if (drvdata->smpl_ratio[drvdata->active_rosc] > 0) return 0; drvdata->active_rosc += 1; } return -EINVAL; } static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata) { u32 max_cycles; /* Set watchdog threshold to maximal allowed time (in CPU cycles) */ max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]); cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles); /* enable the RND source */ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1); /* unmask RNG interrupts */ cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK); } /* increase circular data buffer index (head/tail) */ static inline void circ_idx_inc(int *idx, int bytes) { *idx += (bytes + 3) >> 2; *idx &= (CCTRNG_DATA_BUF_WORDS - 1); } static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata) { return CIRC_SPACE(drvdata->circ.head, drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); } static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { /* current implementation ignores "wait" */ struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv; struct device *dev = &(drvdata->pdev->dev); u32 *buf = (u32 *)drvdata->circ.buf; size_t copied = 0; size_t cnt_w; size_t size; size_t left; if (!spin_trylock(&drvdata->read_lock)) { /* concurrent consumers from data_buf cannot be served */ dev_dbg_ratelimited(dev, "unable to hold lock\n"); return 0; } /* copy till end of data buffer (without wrap back) */ cnt_w = CIRC_CNT_TO_END(drvdata->circ.head, drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); size = min((cnt_w<<2), max); memcpy(data, &(buf[drvdata->circ.tail]), size); copied = size; circ_idx_inc(&drvdata->circ.tail, size); /* copy rest of data in data buffer */ left = max - copied; if (left > 0) { cnt_w = CIRC_CNT(drvdata->circ.head, drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); size = min((cnt_w<<2), left); memcpy(data, &(buf[drvdata->circ.tail]), size); copied += size; circ_idx_inc(&drvdata->circ.tail, size); } spin_unlock(&drvdata->read_lock); if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) { if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) { /* re-check space in buffer to avoid potential race */ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) { /* increment device's usage counter */ int 
rc = cc_trng_pm_get(dev); if (rc) { dev_err(dev, "cc_trng_pm_get returned %x\n", rc); return rc; } /* schedule execution of deferred work handler * for filling of data buffer */ schedule_work(&drvdata->startwork); } else { atomic_set(&drvdata->pending_hw, 0); } } } return copied; }
static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata) { u32 tmp_smpl_cnt = 0; struct device *dev = &(drvdata->pdev->dev); dev_dbg(dev, "cctrng hw trigger.\n"); /* enable the HW RND clock */ cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1); /* do software reset */ cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1); /* in order to verify that the reset has completed, * the sample count needs to be verified */ do { /* enable the HW RND clock */ cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1); /* set sampling ratio (rng_clocks) between consecutive bits */ cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET, drvdata->smpl_ratio[drvdata->active_rosc]); /* read the sampling ratio */ tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET); } while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]); /* disable the RND source for setting new parameters in HW */ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0); cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF); cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc); /* Debug Control register: set to 0 - no bypasses */ cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0); cc_trng_enable_rnd_source(drvdata); }
static void cc_trng_compwork_handler(struct work_struct *w) { u32 isr = 0; u32 ehr_valid = 0; struct cctrng_drvdata *drvdata = container_of(w, struct cctrng_drvdata, compwork); struct device *dev = &(drvdata->pdev->dev); int i; /* stop DMA and the RNG source */ cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0); cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0); /* read RNG_ISR and check for errors */ isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET); ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr); dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid); if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) { fips_fail_notify(); /* FIPS error is fatal */ panic("Got HW CRNGT error while fips is enabled!\n"); } /* Clear all pending RNG interrupts */ cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr); if (!ehr_valid) { /* in case of AUTOCORR/TIMEOUT error, try the next ROSC */ if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) || CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) { dev_dbg(dev, "cctrng autocorr/timeout error.\n"); goto next_rosc; } /* in case of VN error, ignore it */ } /* read EHR data from registers */ for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) { /* calc word ptr in data_buf */ u32 *buf = (u32 *)drvdata->circ.buf; buf[drvdata->circ.head] = cc_ioread(drvdata, CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32))); /* EHR_DATA registers are cleared on read. In case 0 value was * returned, restart the entropy collection. */ if (buf[drvdata->circ.head] == 0) { dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n", drvdata->active_rosc); goto next_rosc; } circ_idx_inc(&drvdata->circ.head, 1<<2); } atomic_set(&drvdata->pending_hw, 0); /* continue to fill data buffer if needed */ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) { if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) { /* Re-enable rnd source */ cc_trng_enable_rnd_source(drvdata); return; } } cc_trng_pm_put_suspend(dev); dev_dbg(dev, "compwork handler done\n"); return; next_rosc: if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) && (cc_trng_change_rosc(drvdata) == 0)) { /* trigger trng hw with next rosc */ cc_trng_hw_trigger(drvdata); } else { atomic_set(&drvdata->pending_hw, 0); cc_trng_pm_put_suspend(dev); } }
static irqreturn_t cc_isr(int irq, void *dev_id) { struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id; struct device *dev = &(drvdata->pdev->dev); u32 irr; /* if driver suspended return, probably shared interrupt */ if (pm_runtime_suspended(dev)) return IRQ_NONE; /* read the interrupt status */ irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET); dev_dbg(dev, "Got IRR=0x%08X\n", irr); if (irr == 0) /* Probably shared interrupt line */ return IRQ_NONE; /* clear interrupt - must be before processing events */ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr); /* RNG interrupt - most probable */ if (irr & CC_HOST_RNG_IRQ_MASK) { /* Mask RNG interrupts - will be unmasked in deferred work */ cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF); /* We clear the RNG interrupt here, * to keep it from firing as soon as we unmask RNG interrupts. */ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, CC_HOST_RNG_IRQ_MASK); irr &= ~CC_HOST_RNG_IRQ_MASK; /* schedule execution of deferred work handler */ schedule_work(&drvdata->compwork); } if (irr) { dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n", irr); /* Just warning */ } return IRQ_HANDLED; }
static void cc_trng_startwork_handler(struct work_struct *w) { struct cctrng_drvdata *drvdata = container_of(w, struct cctrng_drvdata, startwork); drvdata->active_rosc = 0; cc_trng_hw_trigger(drvdata); }
static int cctrng_probe(struct platform_device *pdev) { struct cctrng_drvdata *drvdata; struct device *dev = &pdev->dev; int rc = 0; u32 val; int irq; /* Compile time assertion checks */ BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6); BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0); drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); if (!drvdata->rng.name) return -ENOMEM; drvdata->rng.read = cctrng_read; drvdata->rng.priv = (unsigned long)drvdata; drvdata->rng.quality = CC_TRNG_QUALITY; platform_set_drvdata(pdev, drvdata); drvdata->pdev = pdev; drvdata->circ.buf = (char *)drvdata->data_buf; drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(drvdata->cc_base)) return dev_err_probe(dev, PTR_ERR(drvdata->cc_base), "Failed to ioremap registers"); /* Then IRQ */ irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; /* parse sampling rate from device tree */ rc = cc_trng_parse_sampling_ratio(drvdata); if (rc) return dev_err_probe(dev, rc, "Failed to get legal sampling ratio for rosc\n"); drvdata->clk = devm_clk_get_optional_enabled(dev, NULL); if (IS_ERR(drvdata->clk)) return dev_err_probe(dev, PTR_ERR(drvdata->clk), "Failed to get or enable the clock\n"); INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler); INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler); spin_lock_init(&drvdata->read_lock); /* register the driver isr function */ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata); if (rc) return dev_err_probe(dev, rc, "Could not register to interrupt %d\n", irq); dev_dbg(dev, "Registered to IRQ: %d\n", irq); /* Clear all pending interrupts */ val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET); dev_dbg(dev, "IRR=0x%08X\n", val); cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val); /* unmask HOST RNG interrupt */ cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET, cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) & ~CC_HOST_RNG_IRQ_MASK); /* init PM */ rc = cc_trng_pm_init(drvdata); if (rc) return dev_err_probe(dev, rc, "cc_trng_pm_init failed\n"); /* increment device's usage counter */ rc = cc_trng_pm_get(dev); if (rc) return dev_err_probe(dev, rc, "cc_trng_pm_get returned %x\n", rc); /* set pending_hw to ensure that HW won't be triggered from read */ atomic_set(&drvdata->pending_hw, 1); /* registration of the hwrng device */ rc = devm_hwrng_register(dev, &drvdata->rng); if (rc) { dev_err(dev, "Could not register hwrng device.\n"); goto post_pm_err; } /* trigger HW to start generating data */ drvdata->active_rosc = 0; cc_trng_hw_trigger(drvdata); /* All set, we can allow auto-suspend */ cc_trng_pm_go(drvdata); dev_info(dev, "ARM cctrng device initialized\n"); return 0; post_pm_err: cc_trng_pm_fini(drvdata); return rc; }
static int cctrng_remove(struct platform_device *pdev) { struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; dev_dbg(dev, "Releasing cctrng resources...\n"); cc_trng_pm_fini(drvdata); dev_info(dev, "ARM cctrng device terminated\n"); return 0; }
static int __maybe_unused cctrng_suspend(struct device *dev) { struct cctrng_drvdata *drvdata = dev_get_drvdata(dev); dev_dbg(dev, "set HOST_POWER_DOWN_EN\n"); cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET, POWER_DOWN_ENABLE); clk_disable_unprepare(drvdata->clk); return 0; }
static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata) { unsigned int val; unsigned int i; for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) { /* in cc7x3 NVM_IS_IDLE indicates that CC reset is * completed and device is fully functional */ val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET); if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) { /* hw indicates reset completed */ return true; } /* allow scheduling another process on the processor */ schedule(); } /* reset not completed */ return false; }
static int __maybe_unused cctrng_resume(struct device *dev) { struct cctrng_drvdata *drvdata = dev_get_drvdata(dev); int rc; dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n"); /* Enable the device source clk */ rc = clk_prepare_enable(drvdata->clk); if (rc) { dev_err(dev, "failed getting clock back on. We're toast.\n"); return rc; } /* wait for Cryptocell reset completion */ if (!cctrng_wait_for_reset_completion(drvdata)) { dev_err(dev, "Cryptocell reset not completed"); return -EBUSY; } /* unmask HOST RNG interrupt */ cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET, cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) & ~CC_HOST_RNG_IRQ_MASK); cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET, POWER_DOWN_DISABLE); return 0; }
static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);
static const struct of_device_id arm_cctrng_dt_match[] = { { .compatible = "arm,cryptocell-713-trng", }, { .compatible = "arm,cryptocell-703-trng", }, {}, };
MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);
static struct platform_driver cctrng_driver = { .driver = { .name = "cctrng", .of_match_table = arm_cctrng_dt_match, .pm = &cctrng_pm, }, .probe = cctrng_probe, .remove = cctrng_remove, };
module_platform_driver(cctrng_driver);
/* Module description */ MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver"); MODULE_AUTHOR("ARM"); MODULE_LICENSE("GPL v2");
linux-master
drivers/char/hw_random/cctrng.c
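A worked instance of the CCTRNG_TIMEOUT() formula documented in the driver above. EHR_IN_BITS = 192 is inferred from the note that the EHR generates 6 words (6 x 32 bits); the authoritative constant CC_TRNG_EHR_IN_BITS lives in cctrng.h, and the sample count of 1000 is an arbitrary example of an arm,rosc-ratio value:

#include <stdio.h>

#define EHR_NUM		1
#define VN_COEFF	4
#define EHR_IN_BITS	192	/* assumed CC_TRNG_EHR_IN_BITS (6 x 32-bit words) */
#define SCALE_VALUE	2

int main(void)
{
	unsigned int smpl_cnt = 1000;	/* example rosc sampling ratio */
	unsigned long timeout = (unsigned long)EHR_NUM * VN_COEFF *
				EHR_IN_BITS * smpl_cnt * SCALE_VALUE;

	/* watchdog threshold written to CC_RNG_WATCHDOG_VAL, in cycles */
	printf("watchdog timeout: %lu cycles\n", timeout);	/* 1536000 */
	return 0;
}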
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 Michael Neuling IBM Corporation * * Driver for the pseries hardware RNG for POWER7+ and above */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/hw_random.h> #include <asm/vio.h> static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { u64 buffer[PLPAR_HCALL_BUFSIZE]; int rc; rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer); if (rc != H_SUCCESS) { pr_err_ratelimited("H_RANDOM call failed %d\n", rc); return -EIO; } memcpy(data, buffer, 8); /* The hypervisor interface returns 64 bits */ return 8; } /* * pseries_rng_get_desired_dma - Return desired DMA allocate for CMO operations * * This is a required function for a driver to operate in a CMO environment * but this device does not make use of DMA allocations, return 0. * * Return value: * Number of bytes of IO data the driver will need to perform well -> 0 */ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev) { return 0; }; static struct hwrng pseries_rng = { .name = KBUILD_MODNAME, .read = pseries_rng_read, }; static int pseries_rng_probe(struct vio_dev *dev, const struct vio_device_id *id) { return hwrng_register(&pseries_rng); } static void pseries_rng_remove(struct vio_dev *dev) { hwrng_unregister(&pseries_rng); } static const struct vio_device_id pseries_rng_driver_ids[] = { { "ibm,random-v1", "ibm,random"}, { "", "" } }; MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids); static struct vio_driver pseries_rng_driver = { .name = KBUILD_MODNAME, .probe = pseries_rng_probe, .remove = pseries_rng_remove, .get_desired_dma = pseries_rng_get_desired_dma, .id_table = pseries_rng_driver_ids }; static int __init rng_init(void) { pr_info("Registering IBM pSeries RNG driver\n"); return vio_register_driver(&pseries_rng_driver); } module_init(rng_init); static void __exit rng_exit(void) { vio_unregister_driver(&pseries_rng_driver); } module_exit(rng_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michael Neuling <[email protected]>"); MODULE_DESCRIPTION("H/W RNG driver for IBM pSeries processors");
linux-master
drivers/char/hw_random/pseries-rng.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2015, Daniel Thompson */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/slab.h> #define RNG_CR 0x00 #define RNG_CR_RNGEN BIT(2) #define RNG_CR_CED BIT(5) #define RNG_SR 0x04 #define RNG_SR_SEIS BIT(6) #define RNG_SR_CEIS BIT(5) #define RNG_SR_DRDY BIT(0) #define RNG_DR 0x08 struct stm32_rng_private { struct hwrng rng; void __iomem *base; struct clk *clk; struct reset_control *rst; bool ced; }; static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng); u32 sr; int retval = 0; pm_runtime_get_sync((struct device *) priv->rng.priv); while (max >= sizeof(u32)) { sr = readl_relaxed(priv->base + RNG_SR); /* Manage timeout which is based on timer and take */ /* care of initial delay time when enabling rng */ if (!sr && wait) { int err; err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, sr, sr, 10, 50000); if (err) dev_err((struct device *)priv->rng.priv, "%s: timeout %x!\n", __func__, sr); } /* If error detected or data not ready... */ if (sr != RNG_SR_DRDY) { if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS), "bad RNG status - %x\n", sr)) writel_relaxed(0, priv->base + RNG_SR); break; } *(u32 *)data = readl_relaxed(priv->base + RNG_DR); retval += sizeof(u32); data += sizeof(u32); max -= sizeof(u32); } pm_runtime_mark_last_busy((struct device *) priv->rng.priv); pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv); return retval || !wait ? 
retval : -EIO; } static int stm32_rng_init(struct hwrng *rng) { struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng); int err; err = clk_prepare_enable(priv->clk); if (err) return err; if (priv->ced) writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR); else writel_relaxed(RNG_CR_RNGEN | RNG_CR_CED, priv->base + RNG_CR); /* clear error indicators */ writel_relaxed(0, priv->base + RNG_SR); return 0; } static void stm32_rng_cleanup(struct hwrng *rng) { struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng); writel_relaxed(0, priv->base + RNG_CR); clk_disable_unprepare(priv->clk); } static int stm32_rng_probe(struct platform_device *ofdev) { struct device *dev = &ofdev->dev; struct device_node *np = ofdev->dev.of_node; struct stm32_rng_private *priv; struct resource res; int err; priv = devm_kzalloc(dev, sizeof(struct stm32_rng_private), GFP_KERNEL); if (!priv) return -ENOMEM; err = of_address_to_resource(np, 0, &res); if (err) return err; priv->base = devm_ioremap_resource(dev, &res); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); priv->clk = devm_clk_get(&ofdev->dev, NULL); if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); priv->rst = devm_reset_control_get(&ofdev->dev, NULL); if (!IS_ERR(priv->rst)) { reset_control_assert(priv->rst); udelay(2); reset_control_deassert(priv->rst); } priv->ced = of_property_read_bool(np, "clock-error-detect"); dev_set_drvdata(dev, priv); priv->rng.name = dev_driver_string(dev); #ifndef CONFIG_PM priv->rng.init = stm32_rng_init; priv->rng.cleanup = stm32_rng_cleanup; #endif priv->rng.read = stm32_rng_read; priv->rng.priv = (unsigned long) dev; priv->rng.quality = 900; pm_runtime_set_autosuspend_delay(dev, 100); pm_runtime_use_autosuspend(dev); pm_runtime_enable(dev); return devm_hwrng_register(dev, &priv->rng); } static int stm32_rng_remove(struct platform_device *ofdev) { pm_runtime_disable(&ofdev->dev); return 0; } #ifdef CONFIG_PM static int stm32_rng_runtime_suspend(struct device *dev) { struct stm32_rng_private *priv = dev_get_drvdata(dev); stm32_rng_cleanup(&priv->rng); return 0; } static int stm32_rng_runtime_resume(struct device *dev) { struct stm32_rng_private *priv = dev_get_drvdata(dev); return stm32_rng_init(&priv->rng); } #endif static const struct dev_pm_ops stm32_rng_pm_ops = { SET_RUNTIME_PM_OPS(stm32_rng_runtime_suspend, stm32_rng_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static const struct of_device_id stm32_rng_match[] = { { .compatible = "st,stm32-rng", }, {}, }; MODULE_DEVICE_TABLE(of, stm32_rng_match); static struct platform_driver stm32_rng_driver = { .driver = { .name = "stm32-rng", .pm = &stm32_rng_pm_ops, .of_match_table = stm32_rng_match, }, .probe = stm32_rng_probe, .remove = stm32_rng_remove, }; module_platform_driver(stm32_rng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Daniel Thompson <[email protected]>"); MODULE_DESCRIPTION("STMicroelectronics STM32 RNG device driver");
linux-master
drivers/char/hw_random/stm32-rng.c
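stm32_rng_read() consumes a word only when RNG_SR reads back as exactly RNG_SR_DRDY; a set SEIS or CEIS bit takes the WARN_ONCE path, clears the status register, and aborts the read. A userspace sketch of that status decoding, with made-up sample values:

#include <stdio.h>
#include <stdint.h>

#define RNG_SR_DRDY (1u << 0)
#define RNG_SR_CEIS (1u << 5)
#define RNG_SR_SEIS (1u << 6)

static const char *classify(uint32_t sr)
{
	if (sr & (RNG_SR_SEIS | RNG_SR_CEIS))
		return "error: clear RNG_SR and stop the read loop";
	if (sr == RNG_SR_DRDY)
		return "ok: read a word from RNG_DR";
	return "no data: stop (or poll first when waiting)";
}

int main(void)
{
	uint32_t samples[] = { 0x00, RNG_SR_DRDY, RNG_SR_SEIS | RNG_SR_DRDY };

	for (unsigned int i = 0; i < 3; i++)
		printf("SR=0x%02x -> %s\n", samples[i], classify(samples[i]));
	return 0;
}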
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT /* * Copyright (c) 2023 David Yang */ #include <linux/err.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #define RNG_CTRL 0x0 #define RNG_SOURCE GENMASK(1, 0) #define DROP_ENABLE BIT(5) #define POST_PROCESS_ENABLE BIT(7) #define POST_PROCESS_DEPTH GENMASK(15, 8) #define RNG_NUMBER 0x4 #define RNG_STAT 0x8 #define DATA_COUNT GENMASK(2, 0) /* max 4 */ struct histb_rng_priv { struct hwrng rng; void __iomem *base; }; /* * Observed: * depth = 1 -> ~1ms * depth = 255 -> ~16ms */ static int histb_rng_wait(void __iomem *base) { u32 val; return readl_relaxed_poll_timeout(base + RNG_STAT, val, val & DATA_COUNT, 1000, 30 * 1000); } static void histb_rng_init(void __iomem *base, unsigned int depth) { u32 val; val = readl_relaxed(base + RNG_CTRL); val &= ~RNG_SOURCE; val |= 2; val &= ~POST_PROCESS_DEPTH; val |= min(depth, 0xffu) << 8; val |= POST_PROCESS_ENABLE; val |= DROP_ENABLE; writel_relaxed(val, base + RNG_CTRL); } static int histb_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct histb_rng_priv *priv = container_of(rng, typeof(*priv), rng); void __iomem *base = priv->base; for (int i = 0; i < max; i += sizeof(u32)) { if (!(readl_relaxed(base + RNG_STAT) & DATA_COUNT)) { if (!wait) return i; if (histb_rng_wait(base)) { pr_err("failed to generate random number, generated %d\n", i); return i ? i : -ETIMEDOUT; } } *(u32 *) (data + i) = readl_relaxed(base + RNG_NUMBER); } return max; } static unsigned int histb_rng_get_depth(void __iomem *base) { return (readl_relaxed(base + RNG_CTRL) & POST_PROCESS_DEPTH) >> 8; } static ssize_t depth_show(struct device *dev, struct device_attribute *attr, char *buf) { struct histb_rng_priv *priv = dev_get_drvdata(dev); void __iomem *base = priv->base; return sprintf(buf, "%d\n", histb_rng_get_depth(base)); } static ssize_t depth_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct histb_rng_priv *priv = dev_get_drvdata(dev); void __iomem *base = priv->base; unsigned int depth; if (kstrtouint(buf, 0, &depth)) return -ERANGE; histb_rng_init(base, depth); return count; } static DEVICE_ATTR_RW(depth); static struct attribute *histb_rng_attrs[] = { &dev_attr_depth.attr, NULL, }; ATTRIBUTE_GROUPS(histb_rng); static int histb_rng_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct histb_rng_priv *priv; void __iomem *base; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); histb_rng_init(base, 144); if (histb_rng_wait(base)) { dev_err(dev, "cannot bring up device\n"); return -ENODEV; } priv->base = base; priv->rng.name = pdev->name; priv->rng.read = histb_rng_read; ret = devm_hwrng_register(dev, &priv->rng); if (ret) { dev_err(dev, "failed to register hwrng: %d\n", ret); return ret; } platform_set_drvdata(pdev, priv); dev_set_drvdata(dev, priv); return 0; } static const struct of_device_id histb_rng_of_match[] = { { .compatible = "hisilicon,histb-rng", }, { } }; MODULE_DEVICE_TABLE(of, histb_rng_of_match); static struct platform_driver histb_rng_driver = { .probe = histb_rng_probe, .driver = { .name = "histb-rng", .of_match_table = histb_rng_of_match, .dev_groups = histb_rng_groups, }, }; module_platform_driver(histb_rng_driver); 
MODULE_DESCRIPTION("Hisilicon STB random number generator driver"); MODULE_LICENSE("Dual MIT/GPL"); MODULE_AUTHOR("David Yang <[email protected]>");
linux-master
drivers/char/hw_random/histb-rng.c
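histb_rng_init() packs the source select, post-processing depth, and enable bits into one RNG_CTRL word. The sketch below recomputes that packing in userspace for the probe-time default depth of 144; the expected result is 0x000090a2:

#include <stdio.h>
#include <stdint.h>

#define RNG_SOURCE_MASK		0x3u		/* GENMASK(1, 0) */
#define DROP_ENABLE		(1u << 5)
#define POST_PROCESS_ENABLE	(1u << 7)
#define POST_PROCESS_DEPTH_MASK	(0xffu << 8)	/* GENMASK(15, 8) */

static uint32_t pack_ctrl(uint32_t old, unsigned int depth)
{
	uint32_t val = old;

	val &= ~RNG_SOURCE_MASK;
	val |= 2;				/* ring-oscillator source */
	val &= ~POST_PROCESS_DEPTH_MASK;
	val |= (depth > 0xff ? 0xffu : depth) << 8;	/* min(depth, 0xff) */
	val |= POST_PROCESS_ENABLE | DROP_ENABLE;
	return val;
}

int main(void)
{
	printf("RNG_CTRL = 0x%08x\n", pack_ctrl(0, 144));	/* 0x000090a2 */
	return 0;
}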
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2010-2012 Broadcom. All rights reserved. * Copyright (c) 2013 Lubomir Rintel */ #include <linux/hw_random.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/printk.h> #include <linux/clk.h> #include <linux/reset.h> #define RNG_CTRL 0x0 #define RNG_STATUS 0x4 #define RNG_DATA 0x8 #define RNG_INT_MASK 0x10 /* enable rng */ #define RNG_RBGEN 0x1 /* the initial numbers generated are "less random" so will be discarded */ #define RNG_WARMUP_COUNT 0x40000 #define RNG_INT_OFF 0x1 struct bcm2835_rng_priv { struct hwrng rng; void __iomem *base; bool mask_interrupts; struct clk *clk; struct reset_control *reset; }; static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng) { return container_of(rng, struct bcm2835_rng_priv, rng); } static inline u32 rng_readl(struct bcm2835_rng_priv *priv, u32 offset) { /* MIPS chips strapped for BE will automagically configure the * peripheral registers for CPU-native byte order. */ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) return __raw_readl(priv->base + offset); else return readl(priv->base + offset); } static inline void rng_writel(struct bcm2835_rng_priv *priv, u32 val, u32 offset) { if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) __raw_writel(val, priv->base + offset); else writel(val, priv->base + offset); } static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct bcm2835_rng_priv *priv = to_rng_priv(rng); u32 max_words = max / sizeof(u32); u32 num_words, count; while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) { if (!wait) return 0; hwrng_msleep(rng, 1000); } num_words = rng_readl(priv, RNG_STATUS) >> 24; if (num_words > max_words) num_words = max_words; for (count = 0; count < num_words; count++) ((u32 *)buf)[count] = rng_readl(priv, RNG_DATA); return num_words * sizeof(u32); } static int bcm2835_rng_init(struct hwrng *rng) { struct bcm2835_rng_priv *priv = to_rng_priv(rng); int ret = 0; u32 val; ret = clk_prepare_enable(priv->clk); if (ret) return ret; ret = reset_control_reset(priv->reset); if (ret) return ret; if (priv->mask_interrupts) { /* mask the interrupt */ val = rng_readl(priv, RNG_INT_MASK); val |= RNG_INT_OFF; rng_writel(priv, val, RNG_INT_MASK); } /* set warm-up count & enable */ rng_writel(priv, RNG_WARMUP_COUNT, RNG_STATUS); rng_writel(priv, RNG_RBGEN, RNG_CTRL); return ret; } static void bcm2835_rng_cleanup(struct hwrng *rng) { struct bcm2835_rng_priv *priv = to_rng_priv(rng); /* disable rng hardware */ rng_writel(priv, 0, RNG_CTRL); clk_disable_unprepare(priv->clk); } struct bcm2835_rng_of_data { bool mask_interrupts; }; static const struct bcm2835_rng_of_data nsp_rng_of_data = { .mask_interrupts = true, }; static const struct of_device_id bcm2835_rng_of_match[] = { { .compatible = "brcm,bcm2835-rng"}, { .compatible = "brcm,bcm-nsp-rng", .data = &nsp_rng_of_data }, { .compatible = "brcm,bcm5301x-rng", .data = &nsp_rng_of_data }, { .compatible = "brcm,bcm6368-rng"}, {}, }; static int bcm2835_rng_probe(struct platform_device *pdev) { const struct bcm2835_rng_of_data *of_data; struct device *dev = &pdev->dev; const struct of_device_id *rng_id; struct bcm2835_rng_priv *priv; int err; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; platform_set_drvdata(pdev, priv); /* map peripheral */ priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return 
PTR_ERR(priv->base); /* Clock is optional on most platforms */ priv->clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL); if (IS_ERR(priv->reset)) return PTR_ERR(priv->reset); priv->rng.name = pdev->name; priv->rng.init = bcm2835_rng_init; priv->rng.read = bcm2835_rng_read; priv->rng.cleanup = bcm2835_rng_cleanup; if (dev_of_node(dev)) { rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node); if (!rng_id) return -EINVAL; /* Check for rng init function, execute it */ of_data = rng_id->data; if (of_data) priv->mask_interrupts = of_data->mask_interrupts; } /* register driver */ err = devm_hwrng_register(dev, &priv->rng); if (err) dev_err(dev, "hwrng registration failed\n"); else dev_info(dev, "hwrng registered\n"); return err; } MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); static const struct platform_device_id bcm2835_rng_devtype[] = { { .name = "bcm2835-rng" }, { .name = "bcm63xx-rng" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, bcm2835_rng_devtype); static struct platform_driver bcm2835_rng_driver = { .driver = { .name = "bcm2835-rng", .of_match_table = bcm2835_rng_of_match, }, .probe = bcm2835_rng_probe, .id_table = bcm2835_rng_devtype, }; module_platform_driver(bcm2835_rng_driver); MODULE_AUTHOR("Lubomir Rintel <[email protected]>"); MODULE_DESCRIPTION("BCM2835 Random Number Generator (RNG) driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/char/hw_random/bcm2835-rng.c
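bcm2835_rng_read() treats the top byte of RNG_STATUS as the count of FIFO words available and clamps it to the caller's buffer before copying whole u32 words. A small demonstration of that bookkeeping with an invented status value:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
	uint32_t status = 0x07000000;		/* pretend 7 words are ready */
	size_t max = 16;			/* caller buffer size, bytes */
	uint32_t num_words = status >> 24;
	uint32_t max_words = max / sizeof(uint32_t);

	if (num_words > max_words)
		num_words = max_words;		/* clamp to the buffer */
	printf("copy %u words (%zu bytes)\n", num_words,
	       num_words * sizeof(uint32_t));	/* copy 4 words (16 bytes) */
	return 0;
}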
/* * hw_random/core.c: HWRNG core API * * Copyright 2006 Michael Buesch <[email protected]> * Copyright 2005 (c) MontaVista Software, Inc. * * Please read Documentation/admin-guide/hw_random.rst for details on use. * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. */ #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/hw_random.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/uaccess.h> #define RNG_MODULE_NAME "hw_random" static struct hwrng *current_rng; /* the current rng has been explicitly chosen by user via sysfs */ static int cur_rng_set_by_user; static struct task_struct *hwrng_fill; /* list of registered rngs */ static LIST_HEAD(rng_list); /* Protects rng_list and current_rng */ static DEFINE_MUTEX(rng_mutex); /* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */ static DEFINE_MUTEX(reading_mutex); static int data_avail; static u8 *rng_buffer, *rng_fillbuf; static unsigned short current_quality; static unsigned short default_quality = 1024; /* default to maximum */ module_param(current_quality, ushort, 0644); MODULE_PARM_DESC(current_quality, "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead"); module_param(default_quality, ushort, 0644); MODULE_PARM_DESC(default_quality, "default maximum entropy content of hwrng per 1024 bits of input"); static void drop_current_rng(void); static int hwrng_init(struct hwrng *rng); static int hwrng_fillfn(void *unused); static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, int wait); static size_t rng_buffer_size(void) { return SMP_CACHE_BYTES < 32 ? 
32 : SMP_CACHE_BYTES; } static void add_early_randomness(struct hwrng *rng) { int bytes_read; mutex_lock(&reading_mutex); bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0); mutex_unlock(&reading_mutex); if (bytes_read > 0) { size_t entropy = bytes_read * 8 * rng->quality / 1024; add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false); } } static inline void cleanup_rng(struct kref *kref) { struct hwrng *rng = container_of(kref, struct hwrng, ref); if (rng->cleanup) rng->cleanup(rng); complete(&rng->cleanup_done); } static int set_current_rng(struct hwrng *rng) { int err; BUG_ON(!mutex_is_locked(&rng_mutex)); err = hwrng_init(rng); if (err) return err; drop_current_rng(); current_rng = rng; /* if necessary, start hwrng thread */ if (!hwrng_fill) { hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); if (IS_ERR(hwrng_fill)) { pr_err("hwrng_fill thread creation failed\n"); hwrng_fill = NULL; } } return 0; } static void drop_current_rng(void) { BUG_ON(!mutex_is_locked(&rng_mutex)); if (!current_rng) return; /* decrease last reference for triggering the cleanup */ kref_put(&current_rng->ref, cleanup_rng); current_rng = NULL; } /* Returns ERR_PTR(), NULL or refcounted hwrng */ static struct hwrng *get_current_rng_nolock(void) { if (current_rng) kref_get(&current_rng->ref); return current_rng; } static struct hwrng *get_current_rng(void) { struct hwrng *rng; if (mutex_lock_interruptible(&rng_mutex)) return ERR_PTR(-ERESTARTSYS); rng = get_current_rng_nolock(); mutex_unlock(&rng_mutex); return rng; } static void put_rng(struct hwrng *rng) { /* * Hold rng_mutex here so we serialize in case they set_current_rng * on rng again immediately. */ mutex_lock(&rng_mutex); if (rng) kref_put(&rng->ref, cleanup_rng); mutex_unlock(&rng_mutex); } static int hwrng_init(struct hwrng *rng) { if (kref_get_unless_zero(&rng->ref)) goto skip_init; if (rng->init) { int ret; ret = rng->init(rng); if (ret) return ret; } kref_init(&rng->ref); reinit_completion(&rng->cleanup_done); skip_init: rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); current_quality = rng->quality; /* obsolete */ return 0; } static int rng_dev_open(struct inode *inode, struct file *filp) { /* enforce read-only access to this chrdev */ if ((filp->f_mode & FMODE_READ) == 0) return -EINVAL; if (filp->f_mode & FMODE_WRITE) return -EINVAL; return 0; } static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, int wait) { int present; BUG_ON(!mutex_is_locked(&reading_mutex)); if (rng->read) return rng->read(rng, (void *)buffer, size, wait); if (rng->data_present) present = rng->data_present(rng, wait); else present = 1; if (present) return rng->data_read(rng, (u32 *)buffer); return 0; } static ssize_t rng_dev_read(struct file *filp, char __user *buf, size_t size, loff_t *offp) { ssize_t ret = 0; int err = 0; int bytes_read, len; struct hwrng *rng; while (size) { rng = get_current_rng(); if (IS_ERR(rng)) { err = PTR_ERR(rng); goto out; } if (!rng) { err = -ENODEV; goto out; } if (mutex_lock_interruptible(&reading_mutex)) { err = -ERESTARTSYS; goto out_put; } if (!data_avail) { bytes_read = rng_get_data(rng, rng_buffer, rng_buffer_size(), !(filp->f_flags & O_NONBLOCK)); if (bytes_read < 0) { err = bytes_read; goto out_unlock_reading; } data_avail = bytes_read; } if (!data_avail) { if (filp->f_flags & O_NONBLOCK) { err = -EAGAIN; goto out_unlock_reading; } } else { len = data_avail; if (len > size) len = size; data_avail -= len; if (copy_to_user(buf + ret, rng_buffer + data_avail, len)) { err 
= -EFAULT; goto out_unlock_reading; } size -= len; ret += len; } mutex_unlock(&reading_mutex); put_rng(rng); if (need_resched()) schedule_timeout_interruptible(1); if (signal_pending(current)) { err = -ERESTARTSYS; goto out; } } out: return ret ? : err; out_unlock_reading: mutex_unlock(&reading_mutex); out_put: put_rng(rng); goto out; } static const struct file_operations rng_chrdev_ops = { .owner = THIS_MODULE, .open = rng_dev_open, .read = rng_dev_read, .llseek = noop_llseek, }; static const struct attribute_group *rng_dev_groups[]; static struct miscdevice rng_miscdev = { .minor = HWRNG_MINOR, .name = RNG_MODULE_NAME, .nodename = "hwrng", .fops = &rng_chrdev_ops, .groups = rng_dev_groups, }; static int enable_best_rng(void) { struct hwrng *rng, *new_rng = NULL; int ret = -ENODEV; BUG_ON(!mutex_is_locked(&rng_mutex)); /* no rng to use? */ if (list_empty(&rng_list)) { drop_current_rng(); cur_rng_set_by_user = 0; return 0; } /* use the rng which offers the best quality */ list_for_each_entry(rng, &rng_list, list) { if (!new_rng || rng->quality > new_rng->quality) new_rng = rng; } ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng)); if (!ret) cur_rng_set_by_user = 0; return ret; } static ssize_t rng_current_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { int err; struct hwrng *rng, *old_rng, *new_rng; err = mutex_lock_interruptible(&rng_mutex); if (err) return -ERESTARTSYS; old_rng = current_rng; if (sysfs_streq(buf, "")) { err = enable_best_rng(); } else { list_for_each_entry(rng, &rng_list, list) { if (sysfs_streq(rng->name, buf)) { err = set_current_rng(rng); if (!err) cur_rng_set_by_user = 1; break; } } } new_rng = get_current_rng_nolock(); mutex_unlock(&rng_mutex); if (new_rng) { if (new_rng != old_rng) add_early_randomness(new_rng); put_rng(new_rng); } return err ? : len; } static ssize_t rng_current_show(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret; struct hwrng *rng; rng = get_current_rng(); if (IS_ERR(rng)) return PTR_ERR(rng); ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? 
rng->name : "none"); put_rng(rng); return ret; } static ssize_t rng_available_show(struct device *dev, struct device_attribute *attr, char *buf) { int err; struct hwrng *rng; err = mutex_lock_interruptible(&rng_mutex); if (err) return -ERESTARTSYS; buf[0] = '\0'; list_for_each_entry(rng, &rng_list, list) { strlcat(buf, rng->name, PAGE_SIZE); strlcat(buf, " ", PAGE_SIZE); } strlcat(buf, "\n", PAGE_SIZE); mutex_unlock(&rng_mutex); return strlen(buf); } static ssize_t rng_selected_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", cur_rng_set_by_user); } static ssize_t rng_quality_show(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret; struct hwrng *rng; rng = get_current_rng(); if (IS_ERR(rng)) return PTR_ERR(rng); if (!rng) /* no need to put_rng */ return -ENODEV; ret = sysfs_emit(buf, "%hu\n", rng->quality); put_rng(rng); return ret; } static ssize_t rng_quality_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { u16 quality; int ret = -EINVAL; if (len < 2) return -EINVAL; ret = mutex_lock_interruptible(&rng_mutex); if (ret) return -ERESTARTSYS; ret = kstrtou16(buf, 0, &quality); if (ret || quality > 1024) { ret = -EINVAL; goto out; } if (!current_rng) { ret = -ENODEV; goto out; } current_rng->quality = quality; current_quality = quality; /* obsolete */ /* the best available RNG may have changed */ ret = enable_best_rng(); out: mutex_unlock(&rng_mutex); return ret ? ret : len; } static DEVICE_ATTR_RW(rng_current); static DEVICE_ATTR_RO(rng_available); static DEVICE_ATTR_RO(rng_selected); static DEVICE_ATTR_RW(rng_quality); static struct attribute *rng_dev_attrs[] = { &dev_attr_rng_current.attr, &dev_attr_rng_available.attr, &dev_attr_rng_selected.attr, &dev_attr_rng_quality.attr, NULL }; ATTRIBUTE_GROUPS(rng_dev); static void __exit unregister_miscdev(void) { misc_deregister(&rng_miscdev); } static int __init register_miscdev(void) { return misc_register(&rng_miscdev); } static int hwrng_fillfn(void *unused) { size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */ long rc; while (!kthread_should_stop()) { unsigned short quality; struct hwrng *rng; rng = get_current_rng(); if (IS_ERR(rng) || !rng) break; mutex_lock(&reading_mutex); rc = rng_get_data(rng, rng_fillbuf, rng_buffer_size(), 1); if (current_quality != rng->quality) rng->quality = current_quality; /* obsolete */ quality = rng->quality; mutex_unlock(&reading_mutex); if (rc <= 0) hwrng_msleep(rng, 10000); put_rng(rng); if (rc <= 0) continue; /* If we cannot credit at least one bit of entropy, * keep track of the remainder for the next iteration */ entropy = rc * quality * 8 + entropy_credit; if ((entropy >> 10) == 0) entropy_credit = entropy; /* Outside lock, sure, but y'know: randomness. */ add_hwgenerator_randomness((void *)rng_fillbuf, rc, entropy >> 10, true); } hwrng_fill = NULL; return 0; } int hwrng_register(struct hwrng *rng) { int err = -EINVAL; struct hwrng *tmp; bool is_new_current = false; if (!rng->name || (!rng->data_read && !rng->read)) goto out; mutex_lock(&rng_mutex); /* Must not register two RNGs with the same name. 
*/ err = -EEXIST; list_for_each_entry(tmp, &rng_list, list) { if (strcmp(tmp->name, rng->name) == 0) goto out_unlock; } list_add_tail(&rng->list, &rng_list); init_completion(&rng->cleanup_done); complete(&rng->cleanup_done); init_completion(&rng->dying); if (!current_rng || (!cur_rng_set_by_user && rng->quality > current_rng->quality)) { /* * Set new rng as current as the new rng source * provides better entropy quality and was not * chosen by userspace. */ err = set_current_rng(rng); if (err) goto out_unlock; /* to use current_rng in add_early_randomness() we need * to take a ref */ is_new_current = true; kref_get(&rng->ref); } mutex_unlock(&rng_mutex); if (is_new_current || !rng->init) { /* * Use a new device's input to add some randomness to * the system. If this rng device isn't going to be * used right away, its init function hasn't been * called yet by set_current_rng(); so only use the * randomness from devices that don't need an init callback */ add_early_randomness(rng); } if (is_new_current) put_rng(rng); return 0; out_unlock: mutex_unlock(&rng_mutex); out: return err; } EXPORT_SYMBOL_GPL(hwrng_register); void hwrng_unregister(struct hwrng *rng) { struct hwrng *old_rng, *new_rng; int err; mutex_lock(&rng_mutex); old_rng = current_rng; list_del(&rng->list); complete_all(&rng->dying); if (current_rng == rng) { err = enable_best_rng(); if (err) { drop_current_rng(); cur_rng_set_by_user = 0; } } new_rng = get_current_rng_nolock(); if (list_empty(&rng_list)) { mutex_unlock(&rng_mutex); if (hwrng_fill) kthread_stop(hwrng_fill); } else mutex_unlock(&rng_mutex); if (new_rng) { if (old_rng != new_rng) add_early_randomness(new_rng); put_rng(new_rng); } wait_for_completion(&rng->cleanup_done); } EXPORT_SYMBOL_GPL(hwrng_unregister); static void devm_hwrng_release(struct device *dev, void *res) { hwrng_unregister(*(struct hwrng **)res); } static int devm_hwrng_match(struct device *dev, void *res, void *data) { struct hwrng **r = res; if (WARN_ON(!r || !*r)) return 0; return *r == data; } int devm_hwrng_register(struct device *dev, struct hwrng *rng) { struct hwrng **ptr; int error; ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; error = hwrng_register(rng); if (error) { devres_free(ptr); return error; } *ptr = rng; devres_add(dev, ptr); return 0; } EXPORT_SYMBOL_GPL(devm_hwrng_register); void devm_hwrng_unregister(struct device *dev, struct hwrng *rng) { devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng); } EXPORT_SYMBOL_GPL(devm_hwrng_unregister); long hwrng_msleep(struct hwrng *rng, unsigned int msecs) { unsigned long timeout = msecs_to_jiffies(msecs) + 1; return wait_for_completion_interruptible_timeout(&rng->dying, timeout); } EXPORT_SYMBOL_GPL(hwrng_msleep); static int __init hwrng_modinit(void) { int ret; /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); if (!rng_buffer) return -ENOMEM; rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL); if (!rng_fillbuf) { kfree(rng_buffer); return -ENOMEM; } ret = register_miscdev(); if (ret) { kfree(rng_fillbuf); kfree(rng_buffer); } return ret; } static void __exit hwrng_modexit(void) { mutex_lock(&rng_mutex); BUG_ON(current_rng); kfree(rng_buffer); kfree(rng_fillbuf); mutex_unlock(&rng_mutex); unregister_miscdev(); } fs_initcall(hwrng_modinit); /* depends on misc_register() */ module_exit(hwrng_modexit); MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/core.c
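The fixed-point entropy accounting in hwrng_fillfn() above is easy to misread, so here is a standalone sketch of just that arithmetic: quality is expressed in 1/1024ths of a bit of entropy per bit of data, the credited amount is the accumulated product shifted right by 10, and sub-bit remainders are carried into the next iteration. The function mirrors the kernel loop's logic; the inputs in main() are made-up examples, not values from the driver.

#include <stdio.h>
#include <stddef.h>

static size_t entropy_credit;   /* carry-over, in 1/1024 of a bit */

/* Mirror of the crediting step in hwrng_fillfn(): returns whole bits. */
static size_t credited_bits(size_t bytes_read, unsigned short quality)
{
        size_t entropy = bytes_read * 8 * quality + entropy_credit;

        /* Below one whole bit: keep the remainder for next time. */
        if ((entropy >> 10) == 0)
                entropy_credit = entropy;

        return entropy >> 10;
}

int main(void)
{
        /* 32 bytes at quality 700/1024 -> 175 whole bits credited. */
        printf("%zu bits\n", credited_bits(32, 700));
        /* 32 bytes at quality 1/1024 -> 0 bits, remainder carried. */
        printf("%zu bits (carry now %zu/1024)\n",
               credited_bits(32, 1), entropy_credit);
        return 0;
}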
/* * RNG driver for AMD Geode RNGs * * Copyright 2005 (c) MontaVista Software, Inc. * * with the majority of the code coming from: * * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) * (c) Copyright 2003 Red Hat Inc <[email protected]> * * derived from * * Hardware driver for the AMD 768 Random Number Generator (RNG) * (c) Copyright 2001 Red Hat Inc * * derived from * * Hardware driver for Intel i810 Random Number Generator (RNG) * Copyright 2000,2001 Jeff Garzik <[email protected]> * Copyright 2000,2001 Philipp Rumpf <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #define PFX KBUILD_MODNAME ": " #define GEODE_RNG_DATA_REG 0x50 #define GEODE_RNG_STATUS_REG 0x54 /* * Data for PCI driver interface * * This data only exists for exporting the supported * PCI ids via MODULE_DEVICE_TABLE. We do not actually * register a pci_driver, because someone else might one day * want to register another driver on the same PCI id. */ static const struct pci_device_id pci_tbl[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, }, { 0, }, /* terminate list */ }; MODULE_DEVICE_TABLE(pci, pci_tbl); struct amd_geode_priv { struct pci_dev *pcidev; void __iomem *membase; }; static int geode_rng_data_read(struct hwrng *rng, u32 *data) { void __iomem *mem = (void __iomem *)rng->priv; *data = readl(mem + GEODE_RNG_DATA_REG); return 4; } static int geode_rng_data_present(struct hwrng *rng, int wait) { void __iomem *mem = (void __iomem *)rng->priv; int data, i; for (i = 0; i < 20; i++) { data = !!(readl(mem + GEODE_RNG_STATUS_REG)); if (data || !wait) break; udelay(10); } return data; } static struct hwrng geode_rng = { .name = "geode", .data_present = geode_rng_data_present, .data_read = geode_rng_data_read, }; static int __init geode_rng_init(void) { int err = -ENODEV; struct pci_dev *pdev = NULL; const struct pci_device_id *ent; void __iomem *mem; unsigned long rng_base; struct amd_geode_priv *priv; for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); if (ent) goto found; } /* Device not found. */ return err; found: priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { err = -ENOMEM; goto put_dev; } rng_base = pci_resource_start(pdev, 0); if (rng_base == 0) goto free_priv; err = -ENOMEM; mem = ioremap(rng_base, 0x58); if (!mem) goto free_priv; geode_rng.priv = (unsigned long)priv; priv->membase = mem; priv->pcidev = pdev; pr_info("AMD Geode RNG detected\n"); err = hwrng_register(&geode_rng); if (err) { pr_err(PFX "RNG registering failed (%d)\n", err); goto err_unmap; } return err; err_unmap: iounmap(mem); free_priv: kfree(priv); put_dev: pci_dev_put(pdev); return err; } static void __exit geode_rng_exit(void) { struct amd_geode_priv *priv; priv = (struct amd_geode_priv *)geode_rng.priv; hwrng_unregister(&geode_rng); iounmap(priv->membase); pci_dev_put(priv->pcidev); kfree(priv); } module_init(geode_rng_init); module_exit(geode_rng_exit); MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/geode-rng.c
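As a rough userspace model of the contract geode_rng_data_present() and geode_rng_data_read() implement for the hwrng core: poll a status flag a bounded number of times with a short delay, then pull one 32-bit word. status_ready() and read_word() below are hypothetical stand-ins for the driver's two readl() calls; the 20-iteration/10us bounds mirror the driver's loop.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-ins for the MMIO status/data reads. */
static int status_ready(void) { return 1; }
static uint32_t read_word(void) { return 0xdeadbeef; }

/* Poll up to 20 times, 10us apart, as geode_rng_data_present() does. */
static int data_present(int wait)
{
        int data = 0, i;

        for (i = 0; i < 20; i++) {
                data = status_ready();
                if (data || !wait)
                        break;
                usleep(10);
        }
        return data;
}

int main(void)
{
        if (data_present(1))
                printf("0x%08x\n", read_word());
        return 0;
}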
// SPDX-License-Identifier: GPL-2.0 /* * Ingenic True Random Number Generator driver * Copyright (c) 2019 漆鹏振 (Qi Pengzhen) <[email protected]> * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <[email protected]> */ #include <linux/clk.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> /* DTRNG register offsets */ #define TRNG_REG_CFG_OFFSET 0x00 #define TRNG_REG_RANDOMNUM_OFFSET 0x04 #define TRNG_REG_STATUS_OFFSET 0x08 /* bits within the CFG register */ #define CFG_GEN_EN BIT(0) /* bits within the STATUS register */ #define STATUS_RANDOM_RDY BIT(0) struct ingenic_trng { void __iomem *base; struct hwrng rng; }; static int ingenic_trng_init(struct hwrng *rng) { struct ingenic_trng *trng = container_of(rng, struct ingenic_trng, rng); unsigned int ctrl; ctrl = readl(trng->base + TRNG_REG_CFG_OFFSET); ctrl |= CFG_GEN_EN; writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET); return 0; } static void ingenic_trng_cleanup(struct hwrng *rng) { struct ingenic_trng *trng = container_of(rng, struct ingenic_trng, rng); unsigned int ctrl; ctrl = readl(trng->base + TRNG_REG_CFG_OFFSET); ctrl &= ~CFG_GEN_EN; writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET); } static int ingenic_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct ingenic_trng *trng = container_of(rng, struct ingenic_trng, rng); u32 *data = buf; u32 status; int ret; ret = readl_poll_timeout(trng->base + TRNG_REG_STATUS_OFFSET, status, status & STATUS_RANDOM_RDY, 10, 1000); if (ret == -ETIMEDOUT) { pr_err("%s: Wait for DTRNG data ready timeout\n", __func__); return ret; } *data = readl(trng->base + TRNG_REG_RANDOMNUM_OFFSET); return 4; } static int ingenic_trng_probe(struct platform_device *pdev) { struct ingenic_trng *trng; struct clk *clk; int ret; trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); if (!trng) return -ENOMEM; trng->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(trng->base)) return dev_err_probe(&pdev->dev, PTR_ERR(trng->base), "%s: Failed to map DTRNG registers\n", __func__); clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) return dev_err_probe(&pdev->dev, PTR_ERR(clk), "%s: Cannot get and enable DTRNG clock\n", __func__); trng->rng.name = pdev->name; trng->rng.init = ingenic_trng_init; trng->rng.cleanup = ingenic_trng_cleanup; trng->rng.read = ingenic_trng_read; ret = devm_hwrng_register(&pdev->dev, &trng->rng); if (ret) return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n"); platform_set_drvdata(pdev, trng); dev_info(&pdev->dev, "Ingenic DTRNG driver registered\n"); return 0; } static const struct of_device_id ingenic_trng_of_match[] = { { .compatible = "ingenic,x1830-dtrng" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ingenic_trng_of_match); static struct platform_driver ingenic_trng_driver = { .probe = ingenic_trng_probe, .driver = { .name = "ingenic-trng", .of_match_table = ingenic_trng_of_match, }, }; module_platform_driver(ingenic_trng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("漆鹏振 (Qi Pengzhen) <[email protected]>"); MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <[email protected]>"); MODULE_DESCRIPTION("Ingenic True Random Number Generator driver");
linux-master
drivers/char/hw_random/ingenic-trng.c
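The readl_poll_timeout() call in ingenic_trng_read() expands to roughly the loop below. This plain-C rendering, with a hypothetical ready() predicate standing in for the STATUS register read, shows the 10us poll interval and 1000us budget the driver passes in; the real kernel helper also rechecks the condition once after the timeout, which is elided here for brevity.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int ready(void) { return 1; }    /* stand-in for STATUS & RDY */

/* Roughly what readl_poll_timeout(addr, val, cond, 10, 1000) does. */
static int poll_timeout(int (*cond)(void),
                        unsigned int sleep_us, unsigned int timeout_us)
{
        unsigned int waited = 0;

        while (!cond()) {
                if (waited >= timeout_us)
                        return -ETIMEDOUT;
                usleep(sleep_us);
                waited += sleep_us;
        }
        return 0;
}

int main(void)
{
        printf("poll: %d\n", poll_timeout(ready, 10, 1000));
        return 0;
}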
// SPDX-License-Identifier: GPL-2.0-only /* n2-drv.c: Niagara-2 RNG driver. * * Copyright (C) 2008, 2011 David S. Miller <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/preempt.h> #include <linux/hw_random.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/hypervisor.h> #include "n2rng.h" #define DRV_MODULE_NAME "n2rng" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "0.3" #define DRV_MODULE_RELDATE "Jan 7, 2017" static char version[] = DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("David S. Miller ([email protected])"); MODULE_DESCRIPTION("Niagara2 RNG driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); /* The Niagara2 RNG provides a 64-bit read-only random number * register, plus a control register. Access to the RNG is * virtualized through the hypervisor so that both guests and control * nodes can access the device. * * The entropy source consists of raw entropy sources, each * constructed from a voltage controlled oscillator whose phase is * jittered by thermal noise sources. * * The oscillator in each of the three raw entropy sources runs at * a different frequency. Normally, all three generator outputs are * gathered, xored together, and fed into a CRC circuit, the output of * which is the 64-bit read-only register. * * Some time is necessary for enough entropy to build up * such that a full 64 bits of entropy are available in the register. * In normal operating mode (RNG_CTL_LFSR is set), the chip implements * an interlock which blocks register reads until sufficient entropy * is available. * * A control register is provided for adjusting various aspects of RNG * operation, and to enable diagnostic modes. Each of the three raw * entropy sources has an enable bit (RNG_CTL_ES{1,2,3}). Also * provided are fields for controlling the minimum time in cycles * between read accesses to the register (RNG_CTL_WAIT, this controls * the interlock described in the previous paragraph). * * The standard setting is to have the mode bit (RNG_CTL_LFSR) set, * all three entropy sources enabled, and the interlock time set * appropriately. * * The CRC polynomial used by the chip is: * * P(X) = x^64 + x^61 + x^57 + x^56 + x^52 + x^51 + x^50 + x^48 + x^47 + x^46 + * x^43 + x^42 + x^41 + x^39 + x^38 + x^37 + x^35 + x^32 + x^28 + x^25 + * x^22 + x^21 + x^17 + x^15 + x^13 + x^12 + x^11 + x^7 + x^5 + x + 1 * * The RNG_CTL_VCO value of each noise cell must be programmed * separately. This is why 4 control register values must be provided * to the hypervisor. During a write, the hypervisor writes them all, * one at a time, to the actual RNG_CTL register. The first three * values are used to setup the desired RNG_CTL_VCO for each entropy * source, for example: * * control 0: (1 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES1 * control 1: (2 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES2 * control 2: (3 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES3 * * And then the fourth value sets the final chip state and enables the * desired entropy sources.
*/ static int n2rng_hv_err_trans(unsigned long hv_err) { switch (hv_err) { case HV_EOK: return 0; case HV_EWOULDBLOCK: return -EAGAIN; case HV_ENOACCESS: return -EPERM; case HV_EIO: return -EIO; case HV_EBUSY: return -EBUSY; case HV_EBADALIGN: case HV_ENORADDR: return -EFAULT; default: return -EINVAL; } } static unsigned long n2rng_generic_read_control_v2(unsigned long ra, unsigned long unit) { unsigned long hv_err, state, ticks, watchdog_delta, watchdog_status; int block = 0, busy = 0; while (1) { hv_err = sun4v_rng_ctl_read_v2(ra, unit, &state, &ticks, &watchdog_delta, &watchdog_status); if (hv_err == HV_EOK) break; if (hv_err == HV_EBUSY) { if (++busy >= N2RNG_BUSY_LIMIT) break; udelay(1); } else if (hv_err == HV_EWOULDBLOCK) { if (++block >= N2RNG_BLOCK_LIMIT) break; __delay(ticks); } else break; } return hv_err; } /* In multi-socket situations, the hypervisor might need to * queue up the RNG control register write if it's for a unit * that is on a cpu socket other than the one we are executing on. * * We poll here waiting for a successful read of that control * register to make sure the write has been actually performed. */ static unsigned long n2rng_control_settle_v2(struct n2rng *np, int unit) { unsigned long ra = __pa(&np->scratch_control[0]); return n2rng_generic_read_control_v2(ra, unit); } static unsigned long n2rng_write_ctl_one(struct n2rng *np, int unit, unsigned long state, unsigned long control_ra, unsigned long watchdog_timeout, unsigned long *ticks) { unsigned long hv_err; if (np->hvapi_major == 1) { hv_err = sun4v_rng_ctl_write_v1(control_ra, state, watchdog_timeout, ticks); } else { hv_err = sun4v_rng_ctl_write_v2(control_ra, state, watchdog_timeout, unit); if (hv_err == HV_EOK) hv_err = n2rng_control_settle_v2(np, unit); *ticks = N2RNG_ACCUM_CYCLES_DEFAULT; } return hv_err; } static int n2rng_generic_read_data(unsigned long data_ra) { unsigned long ticks, hv_err; int block = 0, hcheck = 0; while (1) { hv_err = sun4v_rng_data_read(data_ra, &ticks); if (hv_err == HV_EOK) return 0; if (hv_err == HV_EWOULDBLOCK) { if (++block >= N2RNG_BLOCK_LIMIT) return -EWOULDBLOCK; __delay(ticks); } else if (hv_err == HV_ENOACCESS) { return -EPERM; } else if (hv_err == HV_EIO) { if (++hcheck >= N2RNG_HCHECK_LIMIT) return -EIO; udelay(10000); } else return -ENODEV; } } static unsigned long n2rng_read_diag_data_one(struct n2rng *np, unsigned long unit, unsigned long data_ra, unsigned long data_len, unsigned long *ticks) { unsigned long hv_err; if (np->hvapi_major == 1) { hv_err = sun4v_rng_data_read_diag_v1(data_ra, data_len, ticks); } else { hv_err = sun4v_rng_data_read_diag_v2(data_ra, data_len, unit, ticks); if (!*ticks) *ticks = N2RNG_ACCUM_CYCLES_DEFAULT; } return hv_err; } static int n2rng_generic_read_diag_data(struct n2rng *np, unsigned long unit, unsigned long data_ra, unsigned long data_len) { unsigned long ticks, hv_err; int block = 0; while (1) { hv_err = n2rng_read_diag_data_one(np, unit, data_ra, data_len, &ticks); if (hv_err == HV_EOK) return 0; if (hv_err == HV_EWOULDBLOCK) { if (++block >= N2RNG_BLOCK_LIMIT) return -EWOULDBLOCK; __delay(ticks); } else if (hv_err == HV_ENOACCESS) { return -EPERM; } else if (hv_err == HV_EIO) { return -EIO; } else return -ENODEV; } } static int n2rng_generic_write_control(struct n2rng *np, unsigned long control_ra, unsigned long unit, unsigned long state) { unsigned long hv_err, ticks; int block = 0, busy = 0; while (1) { hv_err = n2rng_write_ctl_one(np, unit, state, control_ra, np->wd_timeo, &ticks); if (hv_err == HV_EOK) return 0; if 
(hv_err == HV_EWOULDBLOCK) { if (++block >= N2RNG_BLOCK_LIMIT) return -EWOULDBLOCK; __delay(ticks); } else if (hv_err == HV_EBUSY) { if (++busy >= N2RNG_BUSY_LIMIT) return -EBUSY; udelay(1); } else return -ENODEV; } } /* Just try to see if we can successfully access the control register * of the RNG on the domain on which we are currently executing. */ static int n2rng_try_read_ctl(struct n2rng *np) { unsigned long hv_err; unsigned long x; if (np->hvapi_major == 1) { hv_err = sun4v_rng_get_diag_ctl(); } else { /* We purposefully give invalid arguments, HV_NOACCESS * is higher priority than the errors we'd get from * these other cases, and that's the error we are * truly interested in. */ hv_err = sun4v_rng_ctl_read_v2(0UL, ~0UL, &x, &x, &x, &x); switch (hv_err) { case HV_EWOULDBLOCK: case HV_ENOACCESS: break; default: hv_err = HV_EOK; break; } } return n2rng_hv_err_trans(hv_err); } static u64 n2rng_control_default(struct n2rng *np, int ctl) { u64 val = 0; if (np->data->chip_version == 1) { val = ((2 << RNG_v1_CTL_ASEL_SHIFT) | (N2RNG_ACCUM_CYCLES_DEFAULT << RNG_v1_CTL_WAIT_SHIFT) | RNG_CTL_LFSR); switch (ctl) { case 0: val |= (1 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES1; break; case 1: val |= (2 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES2; break; case 2: val |= (3 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES3; break; case 3: val |= RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3; break; default: break; } } else { val = ((2 << RNG_v2_CTL_ASEL_SHIFT) | (N2RNG_ACCUM_CYCLES_DEFAULT << RNG_v2_CTL_WAIT_SHIFT) | RNG_CTL_LFSR); switch (ctl) { case 0: val |= (1 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES1; break; case 1: val |= (2 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES2; break; case 2: val |= (3 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES3; break; case 3: val |= RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3; break; default: break; } } return val; } static void n2rng_control_swstate_init(struct n2rng *np) { int i; np->flags |= N2RNG_FLAG_CONTROL; np->health_check_sec = N2RNG_HEALTH_CHECK_SEC_DEFAULT; np->accum_cycles = N2RNG_ACCUM_CYCLES_DEFAULT; np->wd_timeo = N2RNG_WD_TIMEO_DEFAULT; for (i = 0; i < np->num_units; i++) { struct n2rng_unit *up = &np->units[i]; up->control[0] = n2rng_control_default(np, 0); up->control[1] = n2rng_control_default(np, 1); up->control[2] = n2rng_control_default(np, 2); up->control[3] = n2rng_control_default(np, 3); } np->hv_state = HV_RNG_STATE_UNCONFIGURED; } static int n2rng_grab_diag_control(struct n2rng *np) { int i, busy_count, err = -ENODEV; busy_count = 0; for (i = 0; i < 100; i++) { err = n2rng_try_read_ctl(np); if (err != -EAGAIN) break; if (++busy_count > 100) { dev_err(&np->op->dev, "Grab diag control timeout.\n"); return -ENODEV; } udelay(1); } return err; } static int n2rng_init_control(struct n2rng *np) { int err = n2rng_grab_diag_control(np); /* Not in the control domain, that's OK we are only a consumer * of the RNG data, we don't setup and program it. 
*/ if (err == -EPERM) return 0; if (err) return err; n2rng_control_swstate_init(np); return 0; } static int n2rng_data_read(struct hwrng *rng, u32 *data) { struct n2rng *np = (struct n2rng *) rng->priv; unsigned long ra = __pa(&np->test_data); int len; if (!(np->flags & N2RNG_FLAG_READY)) { len = 0; } else if (np->flags & N2RNG_FLAG_BUFFER_VALID) { np->flags &= ~N2RNG_FLAG_BUFFER_VALID; *data = np->buffer; len = 4; } else { int err = n2rng_generic_read_data(ra); if (!err) { np->flags |= N2RNG_FLAG_BUFFER_VALID; np->buffer = np->test_data >> 32; *data = np->test_data & 0xffffffff; len = 4; } else { dev_err(&np->op->dev, "RNG error, retesting\n"); np->flags &= ~N2RNG_FLAG_READY; if (!(np->flags & N2RNG_FLAG_SHUTDOWN)) schedule_delayed_work(&np->work, 0); len = 0; } } return len; } /* On a guest node, just make sure we can read random data properly. * If a control node reboots or reloads its n2rng driver, this won't * work during that time. So we have to keep probing until the device * becomes usable. */ static int n2rng_guest_check(struct n2rng *np) { unsigned long ra = __pa(&np->test_data); return n2rng_generic_read_data(ra); } static int n2rng_entropy_diag_read(struct n2rng *np, unsigned long unit, u64 *pre_control, u64 pre_state, u64 *buffer, unsigned long buf_len, u64 *post_control, u64 post_state) { unsigned long post_ctl_ra = __pa(post_control); unsigned long pre_ctl_ra = __pa(pre_control); unsigned long buffer_ra = __pa(buffer); int err; err = n2rng_generic_write_control(np, pre_ctl_ra, unit, pre_state); if (err) return err; err = n2rng_generic_read_diag_data(np, unit, buffer_ra, buf_len); (void) n2rng_generic_write_control(np, post_ctl_ra, unit, post_state); return err; } static u64 advance_polynomial(u64 poly, u64 val, int count) { int i; for (i = 0; i < count; i++) { int highbit_set = ((s64)val < 0); val <<= 1; if (highbit_set) val ^= poly; } return val; } static int n2rng_test_buffer_find(struct n2rng *np, u64 val) { int i, count = 0; /* Purposefully skip over the first word.
*/ for (i = 1; i < SELFTEST_BUFFER_WORDS; i++) { if (np->test_buffer[i] == val) count++; } return count; } static void n2rng_dump_test_buffer(struct n2rng *np) { int i; for (i = 0; i < SELFTEST_BUFFER_WORDS; i++) dev_err(&np->op->dev, "Test buffer slot %d [0x%016llx]\n", i, np->test_buffer[i]); } static int n2rng_check_selftest_buffer(struct n2rng *np, unsigned long unit) { u64 val; int err, matches, limit; switch (np->data->id) { case N2_n2_rng: case N2_vf_rng: case N2_kt_rng: case N2_m4_rng: /* yes, m4 uses the old value */ val = RNG_v1_SELFTEST_VAL; break; default: val = RNG_v2_SELFTEST_VAL; break; } matches = 0; for (limit = 0; limit < SELFTEST_LOOPS_MAX; limit++) { matches += n2rng_test_buffer_find(np, val); if (matches >= SELFTEST_MATCH_GOAL) break; val = advance_polynomial(SELFTEST_POLY, val, 1); } err = 0; if (limit >= SELFTEST_LOOPS_MAX) { err = -ENODEV; dev_err(&np->op->dev, "Selftest failed on unit %lu\n", unit); n2rng_dump_test_buffer(np); } else dev_info(&np->op->dev, "Selftest passed on unit %lu\n", unit); return err; } static int n2rng_control_selftest(struct n2rng *np, unsigned long unit) { int err; u64 base, base3; switch (np->data->id) { case N2_n2_rng: case N2_vf_rng: case N2_kt_rng: base = RNG_v1_CTL_ASEL_NOOUT << RNG_v1_CTL_ASEL_SHIFT; base3 = base | RNG_CTL_LFSR | ((RNG_v1_SELFTEST_TICKS - 2) << RNG_v1_CTL_WAIT_SHIFT); break; case N2_m4_rng: base = RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT; base3 = base | RNG_CTL_LFSR | ((RNG_v1_SELFTEST_TICKS - 2) << RNG_v2_CTL_WAIT_SHIFT); break; default: base = RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT; base3 = base | RNG_CTL_LFSR | (RNG_v2_SELFTEST_TICKS << RNG_v2_CTL_WAIT_SHIFT); break; } np->test_control[0] = base; np->test_control[1] = base; np->test_control[2] = base; np->test_control[3] = base3; err = n2rng_entropy_diag_read(np, unit, np->test_control, HV_RNG_STATE_HEALTHCHECK, np->test_buffer, sizeof(np->test_buffer), &np->units[unit].control[0], np->hv_state); if (err) return err; return n2rng_check_selftest_buffer(np, unit); } static int n2rng_control_check(struct n2rng *np) { int i; for (i = 0; i < np->num_units; i++) { int err = n2rng_control_selftest(np, i); if (err) return err; } return 0; } /* The sanity checks passed, install the final configuration into the * chip, it's ready to use. */ static int n2rng_control_configure_units(struct n2rng *np) { int unit, err; err = 0; for (unit = 0; unit < np->num_units; unit++) { struct n2rng_unit *up = &np->units[unit]; unsigned long ctl_ra = __pa(&up->control[0]); int esrc; u64 base, shift; if (np->data->chip_version == 1) { base = ((np->accum_cycles << RNG_v1_CTL_WAIT_SHIFT) | (RNG_v1_CTL_ASEL_NOOUT << RNG_v1_CTL_ASEL_SHIFT) | RNG_CTL_LFSR); shift = RNG_v1_CTL_VCO_SHIFT; } else { base = ((np->accum_cycles << RNG_v2_CTL_WAIT_SHIFT) | (RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT) | RNG_CTL_LFSR); shift = RNG_v2_CTL_VCO_SHIFT; } /* XXX This isn't the best. We should fetch a bunch * XXX of words using each entropy source combined XXX * with each VCO setting, and see which combinations * XXX give the best random data. 
*/ for (esrc = 0; esrc < 3; esrc++) up->control[esrc] = base | (esrc << shift) | (RNG_CTL_ES1 << esrc); up->control[3] = base | (RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3); err = n2rng_generic_write_control(np, ctl_ra, unit, HV_RNG_STATE_CONFIGURED); if (err) break; } return err; } static void n2rng_work(struct work_struct *work) { struct n2rng *np = container_of(work, struct n2rng, work.work); int err = 0; static int retries = 4; if (!(np->flags & N2RNG_FLAG_CONTROL)) { err = n2rng_guest_check(np); } else { preempt_disable(); err = n2rng_control_check(np); preempt_enable(); if (!err) err = n2rng_control_configure_units(np); } if (!err) { np->flags |= N2RNG_FLAG_READY; dev_info(&np->op->dev, "RNG ready\n"); } if (--retries == 0) dev_err(&np->op->dev, "Self-test retries failed, RNG not ready\n"); else if (err && !(np->flags & N2RNG_FLAG_SHUTDOWN)) schedule_delayed_work(&np->work, HZ * 2); } static void n2rng_driver_version(void) { static int n2rng_version_printed; if (n2rng_version_printed++ == 0) pr_info("%s", version); } static const struct of_device_id n2rng_match[]; static int n2rng_probe(struct platform_device *op) { const struct of_device_id *match; int err = -ENOMEM; struct n2rng *np; match = of_match_device(n2rng_match, &op->dev); if (!match) return -EINVAL; n2rng_driver_version(); np = devm_kzalloc(&op->dev, sizeof(*np), GFP_KERNEL); if (!np) goto out; np->op = op; np->data = (struct n2rng_template *)match->data; INIT_DELAYED_WORK(&np->work, n2rng_work); if (np->data->multi_capable) np->flags |= N2RNG_FLAG_MULTI; err = -ENODEV; np->hvapi_major = 2; if (sun4v_hvapi_register(HV_GRP_RNG, np->hvapi_major, &np->hvapi_minor)) { np->hvapi_major = 1; if (sun4v_hvapi_register(HV_GRP_RNG, np->hvapi_major, &np->hvapi_minor)) { dev_err(&op->dev, "Cannot register suitable " "HVAPI version.\n"); goto out; } } if (np->flags & N2RNG_FLAG_MULTI) { if (np->hvapi_major < 2) { dev_err(&op->dev, "multi-unit-capable RNG requires " "HVAPI major version 2 or later, got %lu\n", np->hvapi_major); goto out_hvapi_unregister; } np->num_units = of_getintprop_default(op->dev.of_node, "rng-#units", 0); if (!np->num_units) { dev_err(&op->dev, "VF RNG lacks rng-#units property\n"); goto out_hvapi_unregister; } } else { np->num_units = 1; } dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n", np->hvapi_major, np->hvapi_minor); np->units = devm_kcalloc(&op->dev, np->num_units, sizeof(*np->units), GFP_KERNEL); err = -ENOMEM; if (!np->units) goto out_hvapi_unregister; err = n2rng_init_control(np); if (err) goto out_hvapi_unregister; dev_info(&op->dev, "Found %s RNG, units: %d\n", ((np->flags & N2RNG_FLAG_MULTI) ? 
"multi-unit-capable" : "single-unit"), np->num_units); np->hwrng.name = DRV_MODULE_NAME; np->hwrng.data_read = n2rng_data_read; np->hwrng.priv = (unsigned long) np; err = devm_hwrng_register(&op->dev, &np->hwrng); if (err) goto out_hvapi_unregister; platform_set_drvdata(op, np); schedule_delayed_work(&np->work, 0); return 0; out_hvapi_unregister: sun4v_hvapi_unregister(HV_GRP_RNG); out: return err; } static int n2rng_remove(struct platform_device *op) { struct n2rng *np = platform_get_drvdata(op); np->flags |= N2RNG_FLAG_SHUTDOWN; cancel_delayed_work_sync(&np->work); sun4v_hvapi_unregister(HV_GRP_RNG); return 0; } static struct n2rng_template n2_template = { .id = N2_n2_rng, .multi_capable = 0, .chip_version = 1, }; static struct n2rng_template vf_template = { .id = N2_vf_rng, .multi_capable = 1, .chip_version = 1, }; static struct n2rng_template kt_template = { .id = N2_kt_rng, .multi_capable = 1, .chip_version = 1, }; static struct n2rng_template m4_template = { .id = N2_m4_rng, .multi_capable = 1, .chip_version = 2, }; static struct n2rng_template m7_template = { .id = N2_m7_rng, .multi_capable = 1, .chip_version = 2, }; static const struct of_device_id n2rng_match[] = { { .name = "random-number-generator", .compatible = "SUNW,n2-rng", .data = &n2_template, }, { .name = "random-number-generator", .compatible = "SUNW,vf-rng", .data = &vf_template, }, { .name = "random-number-generator", .compatible = "SUNW,kt-rng", .data = &kt_template, }, { .name = "random-number-generator", .compatible = "ORCL,m4-rng", .data = &m4_template, }, { .name = "random-number-generator", .compatible = "ORCL,m7-rng", .data = &m7_template, }, {}, }; MODULE_DEVICE_TABLE(of, n2rng_match); static struct platform_driver n2rng_driver = { .driver = { .name = "n2rng", .of_match_table = n2rng_match, }, .probe = n2rng_probe, .remove = n2rng_remove, }; module_platform_driver(n2rng_driver);
linux-master
drivers/char/hw_random/n2-drv.c
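The self-test machinery above leans on advance_polynomial(), a Galois-configuration LFSR step: shift left and fold the polynomial back in whenever the bit shifted out was set. The standalone copy below reproduces the driver's logic verbatim; only the poly value in main() is an arbitrary example, since the real SELFTEST_POLY lives in n2rng.h and is not shown in this file.

#include <stdint.h>
#include <stdio.h>

static uint64_t advance_polynomial(uint64_t poly, uint64_t val, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                int highbit_set = ((int64_t)val < 0);

                val <<= 1;
                if (highbit_set)
                        val ^= poly;    /* fold the polynomial back in */
        }
        return val;
}

int main(void)
{
        uint64_t poly = 0x42f0e1eba9ea3693ULL;  /* example, not SELFTEST_POLY */
        uint64_t v = 1;
        int i;

        for (i = 0; i < 4; i++) {
                printf("step %d: 0x%016llx\n", i, (unsigned long long)v);
                v = advance_polynomial(poly, v, 1);
        }
        return 0;
}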
// SPDX-License-Identifier: GPL-2.0-only /* * PIC32 RNG driver * * Joshua Henderson <[email protected]> * Copyright (C) 2016 Microchip Technology Inc. All rights reserved. */ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/err.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #define RNGCON 0x04 #define TRNGEN BIT(8) #define TRNGMOD BIT(11) #define RNGSEED1 0x18 #define RNGSEED2 0x1C #define RNGRCNT 0x20 #define RCNT_MASK 0x7F struct pic32_rng { void __iomem *base; struct hwrng rng; }; /* * The TRNG can generate up to 24Mbps. This is a timeout that should be safe * enough given the instructions in the loop and that the TRNG may not always * be at maximum rate. */ #define RNG_TIMEOUT 500 static int pic32_rng_init(struct hwrng *rng) { struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng); /* enable TRNG in enhanced mode */ writel(TRNGEN | TRNGMOD, priv->base + RNGCON); return 0; } static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng); u64 *data = buf; u32 t; unsigned int timeout = RNG_TIMEOUT; do { t = readl(priv->base + RNGRCNT) & RCNT_MASK; if (t == 64) { /* TRNG value comes through the seed registers */ *data = ((u64)readl(priv->base + RNGSEED2) << 32) + readl(priv->base + RNGSEED1); return 8; } } while (wait && --timeout); return -EIO; } static void pic32_rng_cleanup(struct hwrng *rng) { struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng); writel(0, priv->base + RNGCON); } static int pic32_rng_probe(struct platform_device *pdev) { struct pic32_rng *priv; struct clk *clk; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); priv->rng.name = pdev->name; priv->rng.init = pic32_rng_init; priv->rng.read = pic32_rng_read; priv->rng.cleanup = pic32_rng_cleanup; return devm_hwrng_register(&pdev->dev, &priv->rng); } static const struct of_device_id pic32_rng_of_match[] __maybe_unused = { { .compatible = "microchip,pic32mzda-rng", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, pic32_rng_of_match); static struct platform_driver pic32_rng_driver = { .probe = pic32_rng_probe, .driver = { .name = "pic32-rng", .of_match_table = pic32_rng_of_match, }, }; module_platform_driver(pic32_rng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Joshua Henderson <[email protected]>"); MODULE_DESCRIPTION("Microchip PIC32 RNG Driver");
linux-master
drivers/char/hw_random/pic32-rng.c
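What pic32_rng_read() does once RNGRCNT reports 64 valid bits is just a widening shift-and-or of the two seed registers. The sketch below shows that assembly step in isolation; rd_seed1() and rd_seed2() are hypothetical stand-ins for the readl() calls on RNGSEED1 and RNGSEED2, returning fabricated values.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for readl(base + RNGSEED1/RNGSEED2). */
static uint32_t rd_seed1(void) { return 0x11223344; }
static uint32_t rd_seed2(void) { return 0xaabbccdd; }

int main(void)
{
        /* High word from SEED2, low word from SEED1, as in the driver. */
        uint64_t sample = ((uint64_t)rd_seed2() << 32) + rd_seed1();

        printf("0x%016llx\n", (unsigned long long)sample);
        return 0;
}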
// SPDX-License-Identifier: GPL-2.0-only /* * s390 TRNG device driver * * Driver for the TRNG (true random number generation) command * available via CPACF extension MSA 7 on the s390 arch. * Copyright IBM Corp. 2017 * Author(s): Harald Freudenberger <[email protected]> */ #define KMSG_COMPONENT "trng" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/hw_random.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/cpufeature.h> #include <linux/miscdevice.h> #include <linux/debugfs.h> #include <linux/atomic.h> #include <linux/random.h> #include <linux/sched/signal.h> #include <asm/debug.h> #include <asm/cpacf.h> #include <asm/archrandom.h> MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("s390 CPACF TRNG device driver"); /* trng related debug feature things */ static debug_info_t *debug_info; #define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__) #define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__) #define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__) #define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__) /* trng helpers */ static atomic64_t trng_dev_counter = ATOMIC64_INIT(0); static atomic64_t trng_hwrng_counter = ATOMIC64_INIT(0); /* file io functions */ static int trng_open(struct inode *inode, struct file *file) { return nonseekable_open(inode, file); } static ssize_t trng_read(struct file *file, char __user *ubuf, size_t nbytes, loff_t *ppos) { u8 buf[32]; u8 *p = buf; unsigned int n; ssize_t ret = 0; /* * use buf for requests <= sizeof(buf), * otherwise allocate one page and fetch * pagewise. */ if (nbytes > sizeof(buf)) { p = (u8 *) __get_free_page(GFP_KERNEL); if (!p) return -ENOMEM; } while (nbytes) { if (need_resched()) { if (signal_pending(current)) { if (ret == 0) ret = -ERESTARTSYS; break; } schedule(); } n = nbytes > PAGE_SIZE ? 
PAGE_SIZE : nbytes; cpacf_trng(NULL, 0, p, n); atomic64_add(n, &trng_dev_counter); if (copy_to_user(ubuf, p, n)) { ret = -EFAULT; break; } nbytes -= n; ubuf += n; ret += n; } if (p != buf) free_page((unsigned long) p); DEBUG_DBG("trng_read()=%zd\n", ret); return ret; } /* sysfs */ static ssize_t trng_counter_show(struct device *dev, struct device_attribute *attr, char *buf) { u64 dev_counter = atomic64_read(&trng_dev_counter); u64 hwrng_counter = atomic64_read(&trng_hwrng_counter); u64 arch_counter = atomic64_read(&s390_arch_random_counter); return sysfs_emit(buf, "trng: %llu\n" "hwrng: %llu\n" "arch: %llu\n" "total: %llu\n", dev_counter, hwrng_counter, arch_counter, dev_counter + hwrng_counter + arch_counter); } static DEVICE_ATTR(byte_counter, 0444, trng_counter_show, NULL); static struct attribute *trng_dev_attrs[] = { &dev_attr_byte_counter.attr, NULL }; static const struct attribute_group trng_dev_attr_group = { .attrs = trng_dev_attrs }; static const struct attribute_group *trng_dev_attr_groups[] = { &trng_dev_attr_group, NULL }; static const struct file_operations trng_fops = { .owner = THIS_MODULE, .open = &trng_open, .release = NULL, .read = &trng_read, .llseek = noop_llseek, }; static struct miscdevice trng_dev = { .name = "trng", .minor = MISC_DYNAMIC_MINOR, .mode = 0444, .fops = &trng_fops, .groups = trng_dev_attr_groups, }; /* hwrng_register */ static inline void _trng_hwrng_read(u8 *buf, size_t len) { cpacf_trng(NULL, 0, buf, len); atomic64_add(len, &trng_hwrng_counter); } static int trng_hwrng_data_read(struct hwrng *rng, u32 *data) { size_t len = sizeof(*data); _trng_hwrng_read((u8 *) data, len); DEBUG_DBG("trng_hwrng_data_read()=%zu\n", len); return len; } static int trng_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { size_t len = max <= PAGE_SIZE ? max : PAGE_SIZE; _trng_hwrng_read((u8 *) data, len); DEBUG_DBG("trng_hwrng_read()=%zu\n", len); return len; } /* * hwrng register struct * The trng is supposed to have 100% entropy. No quality value is set here; * the hwrng core treats an unset quality as the maximum (capped only by the * default_quality module parameter). */ static struct hwrng trng_hwrng_dev = { .name = "s390-trng", .data_read = trng_hwrng_data_read, .read = trng_hwrng_read, }; /* init and exit */ static void __init trng_debug_init(void) { debug_info = debug_register("trng", 1, 1, 4 * sizeof(long)); debug_register_view(debug_info, &debug_sprintf_view); debug_set_level(debug_info, 3); } static void trng_debug_exit(void) { debug_unregister(debug_info); } static int __init trng_init(void) { int ret; trng_debug_init(); /* check if subfunction CPACF_PRNO_TRNG is available */ if (!cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) { DEBUG_INFO("trng_init CPACF_PRNO_TRNG not available\n"); ret = -ENODEV; goto out_dbg; } ret = misc_register(&trng_dev); if (ret) { DEBUG_WARN("trng_init misc_register() failed rc=%d\n", ret); goto out_dbg; } ret = hwrng_register(&trng_hwrng_dev); if (ret) { DEBUG_WARN("trng_init hwrng_register() failed rc=%d\n", ret); goto out_misc; } DEBUG_DBG("trng_init successful\n"); return 0; out_misc: misc_deregister(&trng_dev); out_dbg: trng_debug_exit(); return ret; } static void __exit trng_exit(void) { hwrng_unregister(&trng_hwrng_dev); misc_deregister(&trng_dev); trng_debug_exit(); } module_cpu_feature_match(S390_CPU_FEATURE_MSA, trng_init); module_exit(trng_exit);
linux-master
drivers/char/hw_random/s390-trng.c
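The buffering policy in trng_read() above (a small stack buffer for requests up to 32 bytes, one page at a time for anything larger) can be modelled in plain C as below, so that arbitrarily large reads never need a large allocation. fill_random() is a hypothetical stand-in for cpacf_trng(), and PAGE_SIZE is hard-coded to the usual 4096 for the sketch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Stand-in for cpacf_trng(): fill n bytes with "random" data. */
static void fill_random(unsigned char *p, size_t n) { memset(p, 0x5a, n); }

static long read_random(unsigned char *out, size_t nbytes)
{
        unsigned char buf[32];
        unsigned char *p = buf;
        long ret = 0;

        /* Small requests use the stack buffer, larger ones one page. */
        if (nbytes > sizeof(buf)) {
                p = malloc(PAGE_SIZE);
                if (!p)
                        return -1;
        }
        while (nbytes) {
                size_t n = nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes;

                fill_random(p, n);
                memcpy(out, p, n);
                nbytes -= n;
                out += n;
                ret += n;
        }
        if (p != buf)
                free(p);
        return ret;
}

int main(void)
{
        unsigned char out[100];

        printf("%ld bytes\n", read_random(out, sizeof(out)));
        return 0;
}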
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (c) 2016 BayLibre, SAS. * Author: Neil Armstrong <[email protected]> * Copyright (C) 2014 Amlogic, Inc. */ #include <linux/err.h> #include <linux/module.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/hw_random.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/of.h> #include <linux/clk.h> #define RNG_DATA 0x00 struct meson_rng_data { void __iomem *base; struct hwrng rng; }; static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct meson_rng_data *data = container_of(rng, struct meson_rng_data, rng); *(u32 *)buf = readl_relaxed(data->base + RNG_DATA); return sizeof(u32); } static int meson_rng_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct meson_rng_data *data; struct clk *core_clk; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->base)) return PTR_ERR(data->base); core_clk = devm_clk_get_optional_enabled(dev, "core"); if (IS_ERR(core_clk)) return dev_err_probe(dev, PTR_ERR(core_clk), "Failed to get core clock\n"); data->rng.name = pdev->name; data->rng.read = meson_rng_read; return devm_hwrng_register(dev, &data->rng); } static const struct of_device_id meson_rng_of_match[] = { { .compatible = "amlogic,meson-rng", }, {}, }; MODULE_DEVICE_TABLE(of, meson_rng_of_match); static struct platform_driver meson_rng_driver = { .probe = meson_rng_probe, .driver = { .name = "meson-rng", .of_match_table = meson_rng_of_match, }, }; module_platform_driver(meson_rng_driver); MODULE_DESCRIPTION("Meson H/W Random Number Generator driver"); MODULE_AUTHOR("Lawrence Mok <[email protected]>"); MODULE_AUTHOR("Neil Armstrong <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL");
linux-master
drivers/char/hw_random/meson-rng.c
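meson_rng_read(), like most read callbacks in this directory, recovers its private state with container_of() from the embedded struct hwrng the core hands back. The standalone illustration below shows that pointer arithmetic with no kernel headers; the struct layout and the base value are fabricated for the example.

#include <stddef.h>
#include <stdio.h>

struct hwrng { const char *name; };

struct meson_rng_data {
        unsigned int base;      /* stands in for void __iomem *base */
        struct hwrng rng;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct meson_rng_data data = { .base = 0xc8834000, .rng = { "meson" } };
        struct hwrng *rng = &data.rng;  /* what the core passes around */
        struct meson_rng_data *back =
                container_of(rng, struct meson_rng_data, rng);

        printf("recovered base: 0x%x (%s)\n", back->base, back->rng.name);
        return 0;
}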
// SPDX-License-Identifier: GPL-2.0 /* * drivers/char/hw_random/ixp4xx-rng.c * * RNG driver for Intel IXP4xx family of NPUs * * Author: Deepak Saxena <[email protected]> * * Copyright 2005 (c) MontaVista Software, Inc. * * Fixes by Michael Buesch */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/hw_random.h> #include <linux/of.h> #include <linux/soc/ixp4xx/cpu.h> #include <asm/io.h> static int ixp4xx_rng_data_read(struct hwrng *rng, u32 *buffer) { void __iomem * rng_base = (void __iomem *)rng->priv; *buffer = __raw_readl(rng_base); return 4; } static struct hwrng ixp4xx_rng_ops = { .name = "ixp4xx", .data_read = ixp4xx_rng_data_read, }; static int ixp4xx_rng_probe(struct platform_device *pdev) { void __iomem * rng_base; struct device *dev = &pdev->dev; if (!cpu_is_ixp46x()) /* includes IXP455 */ return -ENOSYS; rng_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rng_base)) return PTR_ERR(rng_base); ixp4xx_rng_ops.priv = (unsigned long)rng_base; return devm_hwrng_register(dev, &ixp4xx_rng_ops); } static const struct of_device_id ixp4xx_rng_of_match[] = { { .compatible = "intel,ixp46x-rng", }, {}, }; MODULE_DEVICE_TABLE(of, ixp4xx_rng_of_match); static struct platform_driver ixp4xx_rng_driver = { .driver = { .name = "ixp4xx-hwrandom", .of_match_table = ixp4xx_rng_of_match, }, .probe = ixp4xx_rng_probe, }; module_platform_driver(ixp4xx_rng_driver); MODULE_AUTHOR("Deepak Saxena <[email protected]>"); MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/ixp4xx-rng.c
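struct hwrng carries driver state only as an unsigned long priv field, so ixp4xx (like geode and nomadik in this directory) casts its mapped register base in at probe time and back out in the callback. The sketch below shows that round trip with an ordinary pointer to a fake register standing in for the __iomem mapping.

#include <stdint.h>
#include <stdio.h>

struct hwrng {
        const char *name;
        unsigned long priv;     /* opaque driver state, as in the kernel */
};

static uint32_t fake_reg = 0x12345678;  /* stands in for the MMIO word */

static int data_read(struct hwrng *rng, uint32_t *buffer)
{
        uint32_t *rng_base = (uint32_t *)rng->priv;     /* cast back out */

        *buffer = *rng_base;
        return 4;
}

int main(void)
{
        struct hwrng rng = { .name = "sketch" };
        uint32_t word;

        rng.priv = (unsigned long)&fake_reg;            /* cast in at probe */
        data_read(&rng, &word);
        printf("%s: 0x%08x\n", rng.name, word);
        return 0;
}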
// SPDX-License-Identifier: GPL-2.0-or-later /* * Nomadik RNG support * Copyright 2009 Alessandro Rubini */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/amba/bus.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { void __iomem *base = (void __iomem *)rng->priv; /* * The register is 32 bits and gives 16 random bits (low half). * A subsequent read will delay the core for 400ns, so we just read * once and accept the very unlikely very small delay, even if wait==0. */ *(u16 *)data = __raw_readl(base + 8) & 0xffff; return 2; } /* we have at most one RNG per machine, granted */ static struct hwrng nmk_rng = { .name = "nomadik", .read = nmk_rng_read, }; static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) { struct clk *rng_clk; void __iomem *base; int ret; rng_clk = devm_clk_get_enabled(&dev->dev, NULL); if (IS_ERR(rng_clk)) return dev_err_probe(&dev->dev, PTR_ERR(rng_clk), "could not get rng clock\n"); ret = amba_request_regions(dev, dev->dev.init_name); if (ret) return ret; ret = -ENOMEM; base = devm_ioremap(&dev->dev, dev->res.start, resource_size(&dev->res)); if (!base) goto out_release; nmk_rng.priv = (unsigned long)base; ret = devm_hwrng_register(&dev->dev, &nmk_rng); if (ret) goto out_release; return 0; out_release: amba_release_regions(dev); return ret; } static void nmk_rng_remove(struct amba_device *dev) { amba_release_regions(dev); } static const struct amba_id nmk_rng_ids[] = { { .id = 0x000805e1, .mask = 0x000fffff, /* top bits are rev and cfg: accept all */ }, {0, 0}, }; MODULE_DEVICE_TABLE(amba, nmk_rng_ids); static struct amba_driver nmk_rng_driver = { .drv = { .owner = THIS_MODULE, .name = "rng", }, .probe = nmk_rng_probe, .remove = nmk_rng_remove, .id_table = nmk_rng_ids, }; module_amba_driver(nmk_rng_driver); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/nomadik-rng.c
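nmk_rng_read() returns two bytes per register read because only the low half of the 32-bit word carries entropy. The fragment below isolates that masking step; reg_read() is a hypothetical stand-in for __raw_readl(base + 8), returning a fabricated value.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t reg_read(void) { return 0xdead7357; }   /* fake register */

static int rng_read(void *data)
{
        uint16_t half = reg_read() & 0xffff;    /* low 16 bits are random */

        memcpy(data, &half, sizeof(half));
        return 2;                               /* bytes produced */
}

int main(void)
{
        uint16_t out;

        printf("%d bytes: 0x%04x\n", rng_read(&out), out);
        return 0;
}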
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2020 Silex Insight #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #define BA431_RESET_DELAY 1 /* usec */ #define BA431_RESET_READ_STATUS_TIMEOUT 1000 /* usec */ #define BA431_RESET_READ_STATUS_INTERVAL 10 /* usec */ #define BA431_READ_RETRY_INTERVAL 1 /* usec */ #define BA431_REG_CTRL 0x00 #define BA431_REG_FIFO_LEVEL 0x04 #define BA431_REG_STATUS 0x30 #define BA431_REG_FIFODATA 0x80 #define BA431_CTRL_ENABLE BIT(0) #define BA431_CTRL_SOFTRESET BIT(8) #define BA431_STATUS_STATE_MASK (BIT(1) | BIT(2) | BIT(3)) #define BA431_STATUS_STATE_OFFSET 1 enum ba431_state { BA431_STATE_RESET, BA431_STATE_STARTUP, BA431_STATE_FIFOFULLON, BA431_STATE_FIFOFULLOFF, BA431_STATE_RUNNING, BA431_STATE_ERROR }; struct ba431_trng { struct device *dev; void __iomem *base; struct hwrng rng; atomic_t reset_pending; struct work_struct reset_work; }; static inline u32 ba431_trng_read_reg(struct ba431_trng *ba431, u32 reg) { return ioread32(ba431->base + reg); } static inline void ba431_trng_write_reg(struct ba431_trng *ba431, u32 reg, u32 val) { iowrite32(val, ba431->base + reg); } static inline enum ba431_state ba431_trng_get_state(struct ba431_trng *ba431) { u32 status = ba431_trng_read_reg(ba431, BA431_REG_STATUS); return (status & BA431_STATUS_STATE_MASK) >> BA431_STATUS_STATE_OFFSET; } static int ba431_trng_is_in_error(struct ba431_trng *ba431) { enum ba431_state state = ba431_trng_get_state(ba431); if ((state < BA431_STATE_STARTUP) || (state >= BA431_STATE_ERROR)) return 1; return 0; } static int ba431_trng_reset(struct ba431_trng *ba431) { int ret; /* Disable interrupts, random generation and enable the softreset */ ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_SOFTRESET); udelay(BA431_RESET_DELAY); ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_ENABLE); /* Wait until the state changed */ if (readx_poll_timeout(ba431_trng_is_in_error, ba431, ret, !ret, BA431_RESET_READ_STATUS_INTERVAL, BA431_RESET_READ_STATUS_TIMEOUT)) { dev_err(ba431->dev, "reset failed (state: %d)\n", ba431_trng_get_state(ba431)); return -ETIMEDOUT; } dev_info(ba431->dev, "reset done\n"); return 0; } static void ba431_trng_reset_work(struct work_struct *work) { struct ba431_trng *ba431 = container_of(work, struct ba431_trng, reset_work); ba431_trng_reset(ba431); atomic_set(&ba431->reset_pending, 0); } static void ba431_trng_schedule_reset(struct ba431_trng *ba431) { if (atomic_cmpxchg(&ba431->reset_pending, 0, 1)) return; schedule_work(&ba431->reset_work); } static int ba431_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng); u32 *data = buf; unsigned int level, i; int n = 0; while (max > 0) { level = ba431_trng_read_reg(ba431, BA431_REG_FIFO_LEVEL); if (!level) { if (ba431_trng_is_in_error(ba431)) { ba431_trng_schedule_reset(ba431); break; } if (!wait) break; udelay(BA431_READ_RETRY_INTERVAL); continue; } i = level; do { data[n++] = ba431_trng_read_reg(ba431, BA431_REG_FIFODATA); max -= sizeof(*data); } while (--i && (max > 0)); if (ba431_trng_is_in_error(ba431)) { n -= (level - i); ba431_trng_schedule_reset(ba431); break; } } n *= sizeof(*data); return (n || !wait) ?
n : -EIO; } static void ba431_trng_cleanup(struct hwrng *rng) { struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng); ba431_trng_write_reg(ba431, BA431_REG_CTRL, 0); cancel_work_sync(&ba431->reset_work); } static int ba431_trng_init(struct hwrng *rng) { struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng); return ba431_trng_reset(ba431); } static int ba431_trng_probe(struct platform_device *pdev) { struct ba431_trng *ba431; int ret; ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL); if (!ba431) return -ENOMEM; ba431->dev = &pdev->dev; ba431->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ba431->base)) return PTR_ERR(ba431->base); atomic_set(&ba431->reset_pending, 0); INIT_WORK(&ba431->reset_work, ba431_trng_reset_work); ba431->rng.name = pdev->name; ba431->rng.init = ba431_trng_init; ba431->rng.cleanup = ba431_trng_cleanup; ba431->rng.read = ba431_trng_read; ret = devm_hwrng_register(&pdev->dev, &ba431->rng); if (ret) return dev_err_probe(&pdev->dev, ret, "BA431 registration failed\n"); dev_info(&pdev->dev, "BA431 TRNG registered\n"); return 0; } static const struct of_device_id ba431_trng_dt_ids[] = { { .compatible = "silex-insight,ba431-rng" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ba431_trng_dt_ids); static struct platform_driver ba431_trng_driver = { .driver = { .name = "ba431-rng", .of_match_table = ba431_trng_dt_ids, }, .probe = ba431_trng_probe, }; module_platform_driver(ba431_trng_driver); MODULE_AUTHOR("Olivier Sobrie <[email protected]>"); MODULE_DESCRIPTION("TRNG driver for Silex Insight BA431"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/ba431-rng.c
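A userspace model of the FIFO drain in ba431_trng_read() above: n counts 32-bit words popped from the FIFO and is converted to bytes at the end with sizeof(*data), so a partial drain still reports the right byte count. fifo_level() and fifo_pop() are hypothetical stand-ins for the two register reads, and the error/reset path is omitted to keep the sketch short.

#include <stdint.h>
#include <stdio.h>

static unsigned int words_left = 5;     /* pretend FIFO content */

static unsigned int fifo_level(void) { return words_left; }
static uint32_t fifo_pop(void) { words_left--; return 0xabad1dea; }

static int fifo_read(void *buf, size_t max)
{
        uint32_t *data = buf;
        int n = 0;

        while (max > 0) {
                unsigned int level = fifo_level();

                if (!level)
                        break;          /* error/reset handling omitted */
                do {
                        data[n++] = fifo_pop();
                        max -= sizeof(*data);
                } while (--level && max > 0);
        }
        return n * sizeof(*data);       /* words -> bytes */
}

int main(void)
{
        uint32_t buf[8];

        printf("%d bytes\n", fifo_read(buf, sizeof(buf)));
        return 0;
}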
// SPDX-License-Identifier: GPL-2.0 /* * Ingenic Random Number Generator driver * Copyright (c) 2017 PrasannaKumar Muralidharan <[email protected]> * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <[email protected]> */ #include <linux/err.h> #include <linux/kernel.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/slab.h> /* RNG register offsets */ #define RNG_REG_ERNG_OFFSET 0x0 #define RNG_REG_RNG_OFFSET 0x4 /* bits within the ERND register */ #define ERNG_READY BIT(31) #define ERNG_ENABLE BIT(0) enum ingenic_rng_version { ID_JZ4780, ID_X1000, }; /* Device associated memory */ struct ingenic_rng { enum ingenic_rng_version version; void __iomem *base; struct hwrng rng; }; static int ingenic_rng_init(struct hwrng *rng) { struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng); writel(ERNG_ENABLE, priv->base + RNG_REG_ERNG_OFFSET); return 0; } static void ingenic_rng_cleanup(struct hwrng *rng) { struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng); writel(0, priv->base + RNG_REG_ERNG_OFFSET); } static int ingenic_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng); u32 *data = buf; u32 status; int ret; if (priv->version >= ID_X1000) { ret = readl_poll_timeout(priv->base + RNG_REG_ERNG_OFFSET, status, status & ERNG_READY, 10, 1000); if (ret == -ETIMEDOUT) { pr_err("%s: Wait for RNG data ready timeout\n", __func__); return ret; } } else { /* * A delay is required so that the current RNG data is not bit shifted * version of previous RNG data which could happen if random data is * read continuously from this device. */ udelay(20); } *data = readl(priv->base + RNG_REG_RNG_OFFSET); return 4; } static int ingenic_rng_probe(struct platform_device *pdev) { struct ingenic_rng *priv; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { pr_err("%s: Failed to map RNG registers\n", __func__); return PTR_ERR(priv->base); } priv->version = (enum ingenic_rng_version)(uintptr_t)of_device_get_match_data(&pdev->dev); priv->rng.name = pdev->name; priv->rng.init = ingenic_rng_init; priv->rng.cleanup = ingenic_rng_cleanup; priv->rng.read = ingenic_rng_read; ret = hwrng_register(&priv->rng); if (ret) { dev_err(&pdev->dev, "Failed to register hwrng\n"); return ret; } platform_set_drvdata(pdev, priv); dev_info(&pdev->dev, "Ingenic RNG driver registered\n"); return 0; } static int ingenic_rng_remove(struct platform_device *pdev) { struct ingenic_rng *priv = platform_get_drvdata(pdev); hwrng_unregister(&priv->rng); writel(0, priv->base + RNG_REG_ERNG_OFFSET); return 0; } static const struct of_device_id ingenic_rng_of_match[] = { { .compatible = "ingenic,jz4780-rng", .data = (void *) ID_JZ4780 }, { .compatible = "ingenic,x1000-rng", .data = (void *) ID_X1000 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ingenic_rng_of_match); static struct platform_driver ingenic_rng_driver = { .probe = ingenic_rng_probe, .remove = ingenic_rng_remove, .driver = { .name = "ingenic-rng", .of_match_table = ingenic_rng_of_match, }, }; module_platform_driver(ingenic_rng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("PrasannaKumar Muralidharan <[email protected]>"); MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <[email protected]>"); MODULE_DESCRIPTION("Ingenic Random Number Generator driver");
linux-master
drivers/char/hw_random/ingenic-rng.c
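A quick way to observe data produced by hwrng drivers like the one above is the hwrng core's character device. A minimal userspace sketch, assuming the kernel exposes /dev/hwrng (CONFIG_HW_RANDOM) with a driver such as ingenic-rng bound; reading the device typically requires root:

/* Read a few bytes of hardware entropy through /dev/hwrng and print
 * them in hex. Purely illustrative; not part of the driver above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint8_t buf[16];
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	ssize_t n = read(fd, buf, sizeof(buf));
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	for (ssize_t i = 0; i < n; i++)
		printf("%02x", buf[i]);
	putchar('\n');
	close(fd);
	return 0;
}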
// SPDX-License-Identifier: GPL-2.0 /* * Randomness driver for the ARM SMCCC TRNG Firmware Interface * https://developer.arm.com/documentation/den0098/latest/ * * Copyright (C) 2020 Arm Ltd. * * The ARM TRNG firmware interface specifies a protocol to read entropy * from a higher exception level, to abstract from any machine specific * implemenations and allow easier use in hypervisors. * * The firmware interface is realised using the SMCCC specification. */ #include <linux/bits.h> #include <linux/device.h> #include <linux/hw_random.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/arm-smccc.h> #ifdef CONFIG_ARM64 #define ARM_SMCCC_TRNG_RND ARM_SMCCC_TRNG_RND64 #define MAX_BITS_PER_CALL (3 * 64UL) #else #define ARM_SMCCC_TRNG_RND ARM_SMCCC_TRNG_RND32 #define MAX_BITS_PER_CALL (3 * 32UL) #endif /* We don't want to allow the firmware to stall us forever. */ #define SMCCC_TRNG_MAX_TRIES 20 #define SMCCC_RET_TRNG_INVALID_PARAMETER -2 #define SMCCC_RET_TRNG_NO_ENTROPY -3 static int copy_from_registers(char *buf, struct arm_smccc_res *res, size_t bytes) { unsigned int chunk, copied; if (bytes == 0) return 0; chunk = min(bytes, sizeof(long)); memcpy(buf, &res->a3, chunk); copied = chunk; if (copied >= bytes) return copied; chunk = min((bytes - copied), sizeof(long)); memcpy(&buf[copied], &res->a2, chunk); copied += chunk; if (copied >= bytes) return copied; chunk = min((bytes - copied), sizeof(long)); memcpy(&buf[copied], &res->a1, chunk); return copied + chunk; } static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct arm_smccc_res res; u8 *buf = data; unsigned int copied = 0; int tries = 0; while (copied < max) { size_t bits = min_t(size_t, (max - copied) * BITS_PER_BYTE, MAX_BITS_PER_CALL); arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res); switch ((int)res.a0) { case SMCCC_RET_SUCCESS: copied += copy_from_registers(buf + copied, &res, bits / BITS_PER_BYTE); tries = 0; break; case SMCCC_RET_TRNG_NO_ENTROPY: if (!wait) return copied; tries++; if (tries >= SMCCC_TRNG_MAX_TRIES) return copied; cond_resched(); break; default: return -EIO; } } return copied; } static int smccc_trng_probe(struct platform_device *pdev) { struct hwrng *trng; trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); if (!trng) return -ENOMEM; trng->name = "smccc_trng"; trng->read = smccc_trng_read; return devm_hwrng_register(&pdev->dev, trng); } static struct platform_driver smccc_trng_driver = { .driver = { .name = "smccc_trng", }, .probe = smccc_trng_probe, }; module_platform_driver(smccc_trng_driver); MODULE_ALIAS("platform:smccc_trng"); MODULE_AUTHOR("Andre Przywara"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/arm_smccc_trng.c
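The register-unpacking order in copy_from_registers() above (a3 first, then a2, then a1, with a possibly partial final chunk) can be exercised standalone. A sketch using a stand-in struct in place of the kernel's arm_smccc_res; the names fake_res and min_sz are illustrative only:

/* Mirror the SMCCC TRNG unpacking: entropy words land in a3/a2/a1 and
 * are copied lowest-register-first, honouring a short final chunk.
 */
#include <stdio.h>
#include <string.h>

struct fake_res { unsigned long a0, a1, a2, a3; };

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

static size_t copy_from_registers(char *buf, const struct fake_res *res,
				  size_t bytes)
{
	const unsigned long regs[] = { res->a3, res->a2, res->a1 };
	size_t chunk, copied = 0;

	for (int i = 0; i < 3 && copied < bytes; i++) {
		chunk = min_sz(bytes - copied, sizeof(unsigned long));
		memcpy(buf + copied, &regs[i], chunk);
		copied += chunk;
	}
	return copied;
}

int main(void)
{
	/* stand-in values, truncated on 32-bit longs */
	struct fake_res res = { 0, 0x1111111111111111UL,
				0x2222222222222222UL, 0x3333333333333333UL };
	char buf[20];
	size_t n = copy_from_registers(buf, &res, sizeof(buf));

	printf("copied %zu bytes\n", n);	/* 20 on LP64: 8 + 8 + 4 */
	return 0;
}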
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Broadcom Corporation * */ /* * DESCRIPTION: The Broadcom iProc RNG200 Driver */ #include <linux/hw_random.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/delay.h> /* Registers */ #define RNG_CTRL_OFFSET 0x00 #define RNG_CTRL_RNG_RBGEN_MASK 0x00001FFF #define RNG_CTRL_RNG_RBGEN_ENABLE 0x00000001 #define RNG_SOFT_RESET_OFFSET 0x04 #define RNG_SOFT_RESET 0x00000001 #define RBG_SOFT_RESET_OFFSET 0x08 #define RBG_SOFT_RESET 0x00000001 #define RNG_INT_STATUS_OFFSET 0x18 #define RNG_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK 0x80000000 #define RNG_INT_STATUS_STARTUP_TRANSITIONS_MET_IRQ_MASK 0x00020000 #define RNG_INT_STATUS_NIST_FAIL_IRQ_MASK 0x00000020 #define RNG_INT_STATUS_TOTAL_BITS_COUNT_IRQ_MASK 0x00000001 #define RNG_FIFO_DATA_OFFSET 0x20 #define RNG_FIFO_COUNT_OFFSET 0x24 #define RNG_FIFO_COUNT_RNG_FIFO_COUNT_MASK 0x000000FF struct iproc_rng200_dev { struct hwrng rng; void __iomem *base; }; #define to_rng_priv(rng) container_of(rng, struct iproc_rng200_dev, rng) static void iproc_rng200_enable_set(void __iomem *rng_base, bool enable) { u32 val; val = ioread32(rng_base + RNG_CTRL_OFFSET); val &= ~RNG_CTRL_RNG_RBGEN_MASK; if (enable) val |= RNG_CTRL_RNG_RBGEN_ENABLE; iowrite32(val, rng_base + RNG_CTRL_OFFSET); } static void iproc_rng200_restart(void __iomem *rng_base) { uint32_t val; iproc_rng200_enable_set(rng_base, false); /* Clear all interrupt status */ iowrite32(0xFFFFFFFFUL, rng_base + RNG_INT_STATUS_OFFSET); /* Reset RNG and RBG */ val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET); val |= RBG_SOFT_RESET; iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET); val = ioread32(rng_base + RNG_SOFT_RESET_OFFSET); val |= RNG_SOFT_RESET; iowrite32(val, rng_base + RNG_SOFT_RESET_OFFSET); val = ioread32(rng_base + RNG_SOFT_RESET_OFFSET); val &= ~RNG_SOFT_RESET; iowrite32(val, rng_base + RNG_SOFT_RESET_OFFSET); val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET); val &= ~RBG_SOFT_RESET; iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET); iproc_rng200_enable_set(rng_base, true); } static int iproc_rng200_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct iproc_rng200_dev *priv = to_rng_priv(rng); uint32_t num_remaining = max; uint32_t status; #define MAX_RESETS_PER_READ 1 uint32_t num_resets = 0; #define MAX_IDLE_TIME (1 * HZ) unsigned long idle_endtime = jiffies + MAX_IDLE_TIME; while ((num_remaining > 0) && time_before(jiffies, idle_endtime)) { /* Is RNG sane? If not, reset it. */ status = ioread32(priv->base + RNG_INT_STATUS_OFFSET); if ((status & (RNG_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK | RNG_INT_STATUS_NIST_FAIL_IRQ_MASK)) != 0) { if (num_resets >= MAX_RESETS_PER_READ) return max - num_remaining; iproc_rng200_restart(priv->base); num_resets++; } /* Are there any random numbers available? 
*/ if ((ioread32(priv->base + RNG_FIFO_COUNT_OFFSET) & RNG_FIFO_COUNT_RNG_FIFO_COUNT_MASK) > 0) { if (num_remaining >= sizeof(uint32_t)) { /* Buffer has room to store entire word */ *(uint32_t *)buf = ioread32(priv->base + RNG_FIFO_DATA_OFFSET); buf += sizeof(uint32_t); num_remaining -= sizeof(uint32_t); } else { /* Buffer can only store partial word */ uint32_t rnd_number = ioread32(priv->base + RNG_FIFO_DATA_OFFSET); memcpy(buf, &rnd_number, num_remaining); buf += num_remaining; num_remaining = 0; } /* Reset the IDLE timeout */ idle_endtime = jiffies + MAX_IDLE_TIME; } else { if (!wait) /* Cannot wait, return immediately */ return max - num_remaining; /* Can wait, give others chance to run */ usleep_range(min(num_remaining * 10, 500U), 500); } } return max - num_remaining; } static int iproc_rng200_init(struct hwrng *rng) { struct iproc_rng200_dev *priv = to_rng_priv(rng); iproc_rng200_enable_set(priv->base, true); return 0; } static void iproc_rng200_cleanup(struct hwrng *rng) { struct iproc_rng200_dev *priv = to_rng_priv(rng); iproc_rng200_enable_set(priv->base, false); } static int iproc_rng200_probe(struct platform_device *pdev) { struct iproc_rng200_dev *priv; struct device *dev = &pdev->dev; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; /* Map peripheral */ priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { dev_err(dev, "failed to remap rng regs\n"); return PTR_ERR(priv->base); } dev_set_drvdata(dev, priv); priv->rng.name = "iproc-rng200"; priv->rng.read = iproc_rng200_read; priv->rng.init = iproc_rng200_init; priv->rng.cleanup = iproc_rng200_cleanup; /* Register driver */ ret = devm_hwrng_register(dev, &priv->rng); if (ret) { dev_err(dev, "hwrng registration failed\n"); return ret; } dev_info(dev, "hwrng registered\n"); return 0; } static int __maybe_unused iproc_rng200_suspend(struct device *dev) { struct iproc_rng200_dev *priv = dev_get_drvdata(dev); iproc_rng200_cleanup(&priv->rng); return 0; } static int __maybe_unused iproc_rng200_resume(struct device *dev) { struct iproc_rng200_dev *priv = dev_get_drvdata(dev); iproc_rng200_init(&priv->rng); return 0; } static const struct dev_pm_ops iproc_rng200_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(iproc_rng200_suspend, iproc_rng200_resume) }; static const struct of_device_id iproc_rng200_of_match[] = { { .compatible = "brcm,bcm2711-rng200", }, { .compatible = "brcm,bcm7211-rng200", }, { .compatible = "brcm,bcm7278-rng200", }, { .compatible = "brcm,iproc-rng200", }, {}, }; MODULE_DEVICE_TABLE(of, iproc_rng200_of_match); static struct platform_driver iproc_rng200_driver = { .driver = { .name = "iproc-rng200", .of_match_table = iproc_rng200_of_match, .pm = &iproc_rng200_pm_ops, }, .probe = iproc_rng200_probe, }; module_platform_driver(iproc_rng200_driver); MODULE_AUTHOR("Broadcom"); MODULE_DESCRIPTION("iProc RNG200 Random Number Generator driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/char/hw_random/iproc-rng200.c
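The read loop in iproc_rng200_read() above copies whole 32-bit words while the caller's buffer has room and memcpy()s a partial word at the tail. A standalone sketch of that pattern; next_word() is a stand-in for reading RNG_FIFO_DATA_OFFSET and uses an LCG purely as deterministic demo data:

/* Drain a simulated 32-bit FIFO into a byte buffer, word-at-a-time
 * with a partial tail copy, as the iproc read loop does.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t next_word(void)
{
	static uint32_t x = 0x12345678;

	x = x * 1664525u + 1013904223u;	/* LCG step, illustration only */
	return x;
}

static size_t fifo_read(uint8_t *buf, size_t max)
{
	size_t remaining = max;

	while (remaining > 0) {
		uint32_t word = next_word();

		if (remaining >= sizeof(uint32_t)) {
			/* buffer has room to store an entire word */
			memcpy(buf, &word, sizeof(uint32_t));
			buf += sizeof(uint32_t);
			remaining -= sizeof(uint32_t);
		} else {
			/* buffer can only store a partial word */
			memcpy(buf, &word, remaining);
			remaining = 0;
		}
	}
	return max - remaining;
}

int main(void)
{
	uint8_t out[7];

	printf("read %zu bytes\n", fifo_read(out, sizeof(out)));
	return 0;
}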
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 HiSilicon Co., Ltd. */ #include <linux/err.h> #include <linux/kernel.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/random.h> #define RNG_SEED 0x0 #define RNG_CTRL 0x4 #define RNG_SEED_SEL BIT(2) #define RNG_RING_EN BIT(1) #define RNG_EN BIT(0) #define RNG_RAN_NUM 0x10 #define RNG_PHY_SEED 0x14 #define to_hisi_rng(p) container_of(p, struct hisi_rng, rng) static int seed_sel; module_param(seed_sel, int, S_IRUGO); MODULE_PARM_DESC(seed_sel, "Auto reload seed. 0, use LFSR(default); 1, use ring oscillator."); struct hisi_rng { void __iomem *base; struct hwrng rng; }; static int hisi_rng_init(struct hwrng *rng) { struct hisi_rng *hrng = to_hisi_rng(rng); int val = RNG_EN; u32 seed; /* get a random number as initial seed */ get_random_bytes(&seed, sizeof(seed)); writel_relaxed(seed, hrng->base + RNG_SEED); /* * The seed is reloaded periodically; there are two choices * of seed: the default uses the value from the LFSR, or a * seed generated by the ring oscillator can be selected. */ if (seed_sel == 1) val |= RNG_RING_EN | RNG_SEED_SEL; writel_relaxed(val, hrng->base + RNG_CTRL); return 0; } static void hisi_rng_cleanup(struct hwrng *rng) { struct hisi_rng *hrng = to_hisi_rng(rng); writel_relaxed(0, hrng->base + RNG_CTRL); } static int hisi_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct hisi_rng *hrng = to_hisi_rng(rng); u32 *data = buf; *data = readl_relaxed(hrng->base + RNG_RAN_NUM); return 4; } static int hisi_rng_probe(struct platform_device *pdev) { struct hisi_rng *rng; int ret; rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); if (!rng) return -ENOMEM; platform_set_drvdata(pdev, rng); rng->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rng->base)) return PTR_ERR(rng->base); rng->rng.name = pdev->name; rng->rng.init = hisi_rng_init; rng->rng.cleanup = hisi_rng_cleanup; rng->rng.read = hisi_rng_read; ret = devm_hwrng_register(&pdev->dev, &rng->rng); if (ret) { dev_err(&pdev->dev, "failed to register hwrng\n"); return ret; } return 0; } static const struct of_device_id hisi_rng_dt_ids[] __maybe_unused = { { .compatible = "hisilicon,hip04-rng" }, { .compatible = "hisilicon,hip05-rng" }, { } }; MODULE_DEVICE_TABLE(of, hisi_rng_dt_ids); static struct platform_driver hisi_rng_driver = { .probe = hisi_rng_probe, .driver = { .name = "hisi-rng", .of_match_table = of_match_ptr(hisi_rng_dt_ids), }, }; module_platform_driver(hisi_rng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kefeng Wang <wangkefeng.wang@huawei>"); MODULE_DESCRIPTION("Hisilicon random number generator driver");
linux-master
drivers/char/hw_random/hisi-rng.c
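hisi_rng_init() above composes the control word from the seed_sel module parameter: RNG_EN is always set, and seed_sel == 1 additionally selects the ring-oscillator seed path. A tiny standalone sketch of just that computation, reusing the driver's bit values:

/* Compose the hisi-rng control word for each seed_sel setting. */
#include <stdio.h>

#define RNG_SEED_SEL	(1u << 2)
#define RNG_RING_EN	(1u << 1)
#define RNG_EN		(1u << 0)

static unsigned int hisi_ctrl_val(int seed_sel)
{
	unsigned int val = RNG_EN;	/* generator always enabled */

	if (seed_sel == 1)
		val |= RNG_RING_EN | RNG_SEED_SEL;
	return val;
}

int main(void)
{
	printf("lfsr seed: 0x%x\n", hisi_ctrl_val(0));	/* 0x1 */
	printf("ring seed: 0x%x\n", hisi_ctrl_val(1));	/* 0x7 */
	return 0;
}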
// SPDX-License-Identifier: GPL-2.0-or-later /* * RNG driver for Freescale RNGA * * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved. * Author: Alan Carvalho de Assis <[email protected]> */ /* * * This driver is based on other RNG drivers. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> /* RNGA Registers */ #define RNGA_CONTROL 0x00 #define RNGA_STATUS 0x04 #define RNGA_ENTROPY 0x08 #define RNGA_OUTPUT_FIFO 0x0c #define RNGA_MODE 0x10 #define RNGA_VERIFICATION_CONTROL 0x14 #define RNGA_OSC_CONTROL_COUNTER 0x18 #define RNGA_OSC1_COUNTER 0x1c #define RNGA_OSC2_COUNTER 0x20 #define RNGA_OSC_COUNTER_STATUS 0x24 /* RNGA Registers Range */ #define RNG_ADDR_RANGE 0x28 /* RNGA Control Register */ #define RNGA_CONTROL_SLEEP 0x00000010 #define RNGA_CONTROL_CLEAR_INT 0x00000008 #define RNGA_CONTROL_MASK_INTS 0x00000004 #define RNGA_CONTROL_HIGH_ASSURANCE 0x00000002 #define RNGA_CONTROL_GO 0x00000001 #define RNGA_STATUS_LEVEL_MASK 0x0000ff00 /* RNGA Status Register */ #define RNGA_STATUS_OSC_DEAD 0x80000000 #define RNGA_STATUS_SLEEP 0x00000010 #define RNGA_STATUS_ERROR_INT 0x00000008 #define RNGA_STATUS_FIFO_UNDERFLOW 0x00000004 #define RNGA_STATUS_LAST_READ_STATUS 0x00000002 #define RNGA_STATUS_SECURITY_VIOLATION 0x00000001 struct mxc_rng { struct device *dev; struct hwrng rng; void __iomem *mem; struct clk *clk; }; static int mxc_rnga_data_present(struct hwrng *rng, int wait) { int i; struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); for (i = 0; i < 20; i++) { /* how many random numbers are in FIFO? [0-16] */ int level = (__raw_readl(mxc_rng->mem + RNGA_STATUS) & RNGA_STATUS_LEVEL_MASK) >> 8; if (level || !wait) return !!level; udelay(10); } return 0; } static int mxc_rnga_data_read(struct hwrng *rng, u32 * data) { int err; u32 ctrl; struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); /* retrieve a random number from FIFO */ *data = __raw_readl(mxc_rng->mem + RNGA_OUTPUT_FIFO); /* some error while reading this random number? 
*/ err = __raw_readl(mxc_rng->mem + RNGA_STATUS) & RNGA_STATUS_ERROR_INT; /* if error: clear the error interrupt, but don't return a random number */ if (err) { dev_dbg(mxc_rng->dev, "Error while reading random number!\n"); ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); __raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT, mxc_rng->mem + RNGA_CONTROL); return 0; } else return 4; } static int mxc_rnga_init(struct hwrng *rng) { u32 ctrl, osc; struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); /* wake up */ ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); __raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, mxc_rng->mem + RNGA_CONTROL); /* verify if oscillator is working */ osc = __raw_readl(mxc_rng->mem + RNGA_STATUS); if (osc & RNGA_STATUS_OSC_DEAD) { dev_err(mxc_rng->dev, "RNGA Oscillator is dead!\n"); return -ENODEV; } /* go running */ ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); __raw_writel(ctrl | RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL); return 0; } static void mxc_rnga_cleanup(struct hwrng *rng) { u32 ctrl; struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng); ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL); /* stop rnga */ __raw_writel(ctrl & ~RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL); } static int __init mxc_rnga_probe(struct platform_device *pdev) { int err; struct mxc_rng *mxc_rng; mxc_rng = devm_kzalloc(&pdev->dev, sizeof(*mxc_rng), GFP_KERNEL); if (!mxc_rng) return -ENOMEM; mxc_rng->dev = &pdev->dev; mxc_rng->rng.name = "mxc-rnga"; mxc_rng->rng.init = mxc_rnga_init; mxc_rng->rng.cleanup = mxc_rnga_cleanup; mxc_rng->rng.data_present = mxc_rnga_data_present; mxc_rng->rng.data_read = mxc_rnga_data_read; mxc_rng->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(mxc_rng->clk)) { dev_err(&pdev->dev, "Could not get rng_clk!\n"); return PTR_ERR(mxc_rng->clk); } err = clk_prepare_enable(mxc_rng->clk); if (err) return err; mxc_rng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mxc_rng->mem)) { err = PTR_ERR(mxc_rng->mem); goto err_ioremap; } err = hwrng_register(&mxc_rng->rng); if (err) { dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err); goto err_ioremap; } return 0; err_ioremap: clk_disable_unprepare(mxc_rng->clk); return err; } static int __exit mxc_rnga_remove(struct platform_device *pdev) { struct mxc_rng *mxc_rng = platform_get_drvdata(pdev); hwrng_unregister(&mxc_rng->rng); clk_disable_unprepare(mxc_rng->clk); return 0; } static const struct of_device_id mxc_rnga_of_match[] = { { .compatible = "fsl,imx21-rnga", }, { .compatible = "fsl,imx31-rnga", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, mxc_rnga_of_match); static struct platform_driver mxc_rnga_driver = { .driver = { .name = "mxc_rnga", .of_match_table = mxc_rnga_of_match, }, .remove = __exit_p(mxc_rnga_remove), }; module_platform_driver_probe(mxc_rnga_driver, mxc_rnga_probe); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("H/W RNGA driver for i.MX"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/mxc-rnga.c
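mxc_rnga_data_present() above implements the legacy hwrng data_present() contract: poll a FIFO level indicator up to 20 times, 10 microseconds apart, honouring the wait flag. A userspace sketch of the same bounded poll; fifo_level() is a stand-in for reading RNGA_STATUS:

/* Bounded poll: report data availability within at most 20 tries. */
#include <stdio.h>
#include <unistd.h>

static int fifo_level(void)
{
	static int calls;

	return ++calls >= 3;	/* pretend data shows up on the 3rd poll */
}

static int data_present(int wait)
{
	for (int i = 0; i < 20; i++) {
		int level = fifo_level();

		if (level || !wait)
			return !!level;
		usleep(10);	/* userspace stand-in for udelay(10) */
	}
	return 0;
}

int main(void)
{
	printf("nowait: %d\n", data_present(0));
	printf("wait:   %d\n", data_present(1));
	return 0;
}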
/* * RNG driver for Intel RNGs * * Copyright 2005 (c) MontaVista Software, Inc. * * with the majority of the code coming from: * * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) * (c) Copyright 2003 Red Hat Inc <[email protected]> * * derived from * * Hardware driver for the AMD 768 Random Number Generator (RNG) * (c) Copyright 2001 Red Hat Inc * * derived from * * Hardware driver for Intel i810 Random Number Generator (RNG) * Copyright 2000,2001 Jeff Garzik <[email protected]> * Copyright 2000,2001 Philipp Rumpf <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/hw_random.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/stop_machine.h> #include <linux/delay.h> #include <linux/slab.h> #define PFX KBUILD_MODNAME ": " /* * RNG registers */ #define INTEL_RNG_HW_STATUS 0 #define INTEL_RNG_PRESENT 0x40 #define INTEL_RNG_ENABLED 0x01 #define INTEL_RNG_STATUS 1 #define INTEL_RNG_DATA_PRESENT 0x01 #define INTEL_RNG_DATA 2 /* * Magic address at which Intel PCI bridges locate the RNG */ #define INTEL_RNG_ADDR 0xFFBC015F #define INTEL_RNG_ADDR_LEN 3 /* * LPC bridge PCI config space registers */ #define FWH_DEC_EN1_REG_OLD 0xe3 #define FWH_DEC_EN1_REG_NEW 0xd9 /* high byte of 16-bit register */ #define FWH_F8_EN_MASK 0x80 #define BIOS_CNTL_REG_OLD 0x4e #define BIOS_CNTL_REG_NEW 0xdc #define BIOS_CNTL_WRITE_ENABLE_MASK 0x01 #define BIOS_CNTL_LOCK_ENABLE_MASK 0x02 /* * Magic address at which Intel Firmware Hubs get accessed */ #define INTEL_FWH_ADDR 0xffff0000 #define INTEL_FWH_ADDR_LEN 2 /* * Intel Firmware Hub command codes (write to any address inside the device) */ #define INTEL_FWH_RESET_CMD 0xff /* aka READ_ARRAY */ #define INTEL_FWH_READ_ID_CMD 0x90 /* * Intel Firmware Hub Read ID command result addresses */ #define INTEL_FWH_MANUFACTURER_CODE_ADDRESS 0x000000 #define INTEL_FWH_DEVICE_CODE_ADDRESS 0x000001 /* * Intel Firmware Hub Read ID command result values */ #define INTEL_FWH_MANUFACTURER_CODE 0x89 #define INTEL_FWH_DEVICE_CODE_8M 0xac #define INTEL_FWH_DEVICE_CODE_4M 0xad /* * Data for PCI driver interface * * This data only exists for exporting the supported * PCI ids via MODULE_DEVICE_TABLE. We do not actually * register a pci_driver, because someone else might one day * want to register another driver on the same PCI id. */ static const struct pci_device_id pci_tbl[] = { /* AA { PCI_DEVICE(0x8086, 0x2418) }, */ { PCI_DEVICE(0x8086, 0x2410) }, /* AA */ /* AB { PCI_DEVICE(0x8086, 0x2428) }, */ { PCI_DEVICE(0x8086, 0x2420) }, /* AB */ /* ?? 
{ PCI_DEVICE(0x8086, 0x2430) }, */ /* BAM, CAM, DBM, FBM, GxM { PCI_DEVICE(0x8086, 0x2448) }, */ { PCI_DEVICE(0x8086, 0x244c) }, /* BAM */ { PCI_DEVICE(0x8086, 0x248c) }, /* CAM */ { PCI_DEVICE(0x8086, 0x24cc) }, /* DBM */ { PCI_DEVICE(0x8086, 0x2641) }, /* FBM */ { PCI_DEVICE(0x8086, 0x27b9) }, /* GxM */ { PCI_DEVICE(0x8086, 0x27bd) }, /* GxM DH */ /* BA, CA, DB, Ex, 6300, Fx, 631x/632x, Gx { PCI_DEVICE(0x8086, 0x244e) }, */ { PCI_DEVICE(0x8086, 0x2440) }, /* BA */ { PCI_DEVICE(0x8086, 0x2480) }, /* CA */ { PCI_DEVICE(0x8086, 0x24c0) }, /* DB */ { PCI_DEVICE(0x8086, 0x24d0) }, /* Ex */ { PCI_DEVICE(0x8086, 0x25a1) }, /* 6300 */ { PCI_DEVICE(0x8086, 0x2640) }, /* Fx */ { PCI_DEVICE(0x8086, 0x2670) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2671) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2672) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2673) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2674) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2675) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2676) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2677) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2678) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2679) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x267a) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x267b) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x267c) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x267d) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x267e) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x267f) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x27b8) }, /* Gx */ /* E { PCI_DEVICE(0x8086, 0x245e) }, */ { PCI_DEVICE(0x8086, 0x2450) }, /* E */ { 0, }, /* terminate list */ }; MODULE_DEVICE_TABLE(pci, pci_tbl); static __initdata int no_fwh_detect; module_param(no_fwh_detect, int, 0); MODULE_PARM_DESC(no_fwh_detect, "Skip FWH detection:\n" " positive value - skip if FWH space locked read-only\n" " negative value - skip always"); static inline u8 hwstatus_get(void __iomem *mem) { return readb(mem + INTEL_RNG_HW_STATUS); } static inline u8 hwstatus_set(void __iomem *mem, u8 hw_status) { writeb(hw_status, mem + INTEL_RNG_HW_STATUS); return hwstatus_get(mem); } static int intel_rng_data_present(struct hwrng *rng, int wait) { void __iomem *mem = (void __iomem *)rng->priv; int data, i; for (i = 0; i < 20; i++) { data = !!(readb(mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT); if (data || !wait) break; udelay(10); } return data; } static int intel_rng_data_read(struct hwrng *rng, u32 *data) { void __iomem *mem = (void __iomem *)rng->priv; *data = readb(mem + INTEL_RNG_DATA); return 1; } static int intel_rng_init(struct hwrng *rng) { void __iomem *mem = (void __iomem *)rng->priv; u8 hw_status; int err = -EIO; hw_status = hwstatus_get(mem); /* turn RNG h/w on, if it's off */ if ((hw_status & INTEL_RNG_ENABLED) == 0) hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED); if ((hw_status & INTEL_RNG_ENABLED) == 0) { pr_err(PFX "cannot enable RNG, aborting\n"); goto out; } err = 0; out: return err; } static void intel_rng_cleanup(struct hwrng *rng) { void __iomem *mem = (void __iomem *)rng->priv; u8 hw_status; hw_status = hwstatus_get(mem); if (hw_status & INTEL_RNG_ENABLED) hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED); else pr_warn(PFX "unusual: RNG already disabled\n"); } static struct hwrng intel_rng = { .name = "intel", .init = intel_rng_init, .cleanup = intel_rng_cleanup, .data_present = intel_rng_data_present, .data_read = intel_rng_data_read, }; struct intel_rng_hw { struct pci_dev *dev; void __iomem *mem; u8 bios_cntl_off; u8 bios_cntl_val; u8 fwh_dec_en1_off; u8 fwh_dec_en1_val; }; 
static int __init intel_rng_hw_init(void *_intel_rng_hw) { struct intel_rng_hw *intel_rng_hw = _intel_rng_hw; u8 mfc, dvc; /* interrupts disabled in stop_machine call */ if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK)) pci_write_config_byte(intel_rng_hw->dev, intel_rng_hw->fwh_dec_en1_off, intel_rng_hw->fwh_dec_en1_val | FWH_F8_EN_MASK); if (!(intel_rng_hw->bios_cntl_val & BIOS_CNTL_WRITE_ENABLE_MASK)) pci_write_config_byte(intel_rng_hw->dev, intel_rng_hw->bios_cntl_off, intel_rng_hw->bios_cntl_val | BIOS_CNTL_WRITE_ENABLE_MASK); writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem); writeb(INTEL_FWH_READ_ID_CMD, intel_rng_hw->mem); mfc = readb(intel_rng_hw->mem + INTEL_FWH_MANUFACTURER_CODE_ADDRESS); dvc = readb(intel_rng_hw->mem + INTEL_FWH_DEVICE_CODE_ADDRESS); writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem); if (!(intel_rng_hw->bios_cntl_val & (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))) pci_write_config_byte(intel_rng_hw->dev, intel_rng_hw->bios_cntl_off, intel_rng_hw->bios_cntl_val); if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK)) pci_write_config_byte(intel_rng_hw->dev, intel_rng_hw->fwh_dec_en1_off, intel_rng_hw->fwh_dec_en1_val); if (mfc != INTEL_FWH_MANUFACTURER_CODE || (dvc != INTEL_FWH_DEVICE_CODE_8M && dvc != INTEL_FWH_DEVICE_CODE_4M)) { pr_notice(PFX "FWH not detected\n"); return -ENODEV; } return 0; } static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw, struct pci_dev *dev) { intel_rng_hw->bios_cntl_val = 0xff; intel_rng_hw->fwh_dec_en1_val = 0xff; intel_rng_hw->dev = dev; /* Check for Intel 82802 */ if (dev->device < 0x2640) { intel_rng_hw->fwh_dec_en1_off = FWH_DEC_EN1_REG_OLD; intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_OLD; } else { intel_rng_hw->fwh_dec_en1_off = FWH_DEC_EN1_REG_NEW; intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_NEW; } pci_read_config_byte(dev, intel_rng_hw->fwh_dec_en1_off, &intel_rng_hw->fwh_dec_en1_val); pci_read_config_byte(dev, intel_rng_hw->bios_cntl_off, &intel_rng_hw->bios_cntl_val); if ((intel_rng_hw->bios_cntl_val & (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK)) == BIOS_CNTL_LOCK_ENABLE_MASK) { static __initdata /*const*/ char warning[] = PFX "Firmware space is locked read-only. If you can't or\n" PFX "don't want to disable this in firmware setup, and if\n" PFX "you are certain that your system has a functional\n" PFX "RNG, try using the 'no_fwh_detect' option.\n"; if (no_fwh_detect) return -ENODEV; pr_warn("%s", warning); return -EBUSY; } intel_rng_hw->mem = ioremap(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN); if (intel_rng_hw->mem == NULL) return -EBUSY; return 0; } static int __init intel_rng_mod_init(void) { int err = -ENODEV; int i; struct pci_dev *dev = NULL; void __iomem *mem; u8 hw_status; struct intel_rng_hw *intel_rng_hw; for (i = 0; !dev && pci_tbl[i].vendor; ++i) dev = pci_get_device(pci_tbl[i].vendor, pci_tbl[i].device, NULL); if (!dev) goto out; /* Device not found. */ if (no_fwh_detect < 0) { pci_dev_put(dev); goto fwh_done; } intel_rng_hw = kmalloc(sizeof(*intel_rng_hw), GFP_KERNEL); if (!intel_rng_hw) { pci_dev_put(dev); goto out; } err = intel_init_hw_struct(intel_rng_hw, dev); if (err) { pci_dev_put(dev); kfree(intel_rng_hw); if (err == -ENODEV) goto fwh_done; goto out; } /* * Since the BIOS code/data is going to disappear from its normal * location with the Read ID command, all activity on the system * must be stopped until the state is back to normal. * * Use stop_machine because IPIs can be blocked by disabling * interrupts. 
*/ err = stop_machine(intel_rng_hw_init, intel_rng_hw, NULL); pci_dev_put(dev); iounmap(intel_rng_hw->mem); kfree(intel_rng_hw); if (err) goto out; fwh_done: err = -ENOMEM; mem = ioremap(INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN); if (!mem) goto out; intel_rng.priv = (unsigned long)mem; /* Check for Random Number Generator */ err = -ENODEV; hw_status = hwstatus_get(mem); if ((hw_status & INTEL_RNG_PRESENT) == 0) { iounmap(mem); goto out; } pr_info("Intel 82802 RNG detected\n"); err = hwrng_register(&intel_rng); if (err) { pr_err(PFX "RNG registering failed (%d)\n", err); iounmap(mem); } out: return err; } static void __exit intel_rng_mod_exit(void) { void __iomem *mem = (void __iomem *)intel_rng.priv; hwrng_unregister(&intel_rng); iounmap(mem); } module_init(intel_rng_mod_init); module_exit(intel_rng_mod_exit); MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/intel-rng.c
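intel_rng_init() above uses a write-then-read-back check: it sets the enable bit, re-reads the status byte, and fails with -EIO if the bit did not latch. A standalone sketch of that verify pattern; fake_reg is a plain variable standing in for the memory-mapped INTEL_RNG_HW_STATUS byte:

/* Set an enable bit and verify it actually latched. */
#include <stdio.h>

#define RNG_ENABLED 0x01

static unsigned char fake_reg;	/* stand-in for the readb()/writeb() target */

static unsigned char hwstatus_get(void) { return fake_reg; }

static unsigned char hwstatus_set(unsigned char v)
{
	fake_reg = v;
	return hwstatus_get();	/* read back what actually latched */
}

static int rng_enable(void)
{
	unsigned char hw = hwstatus_get();

	if (!(hw & RNG_ENABLED))
		hw = hwstatus_set(hw | RNG_ENABLED);
	if (!(hw & RNG_ENABLED))
		return -5;	/* -EIO: enable bit failed to stick */
	return 0;
}

int main(void)
{
	printf("enable: %d\n", rng_enable());
	return 0;
}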
// SPDX-License-Identifier: GPL-2.0 /* * RNG driver for Exynos TRNGs * * Author: Łukasz Stelmach <[email protected]> * * Copyright 2017 (c) Samsung Electronics Software, Inc. * * Based on the Exynos PRNG driver drivers/crypto/exynos-rng by * Krzysztof Kozłowski <[email protected]> */ #include <linux/clk.h> #include <linux/crypto.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #define EXYNOS_TRNG_CLKDIV (0x0) #define EXYNOS_TRNG_CTRL (0x20) #define EXYNOS_TRNG_CTRL_RNGEN BIT(31) #define EXYNOS_TRNG_POST_CTRL (0x30) #define EXYNOS_TRNG_ONLINE_CTRL (0x40) #define EXYNOS_TRNG_ONLINE_STAT (0x44) #define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48) #define EXYNOS_TRNG_FIFO_CTRL (0x50) #define EXYNOS_TRNG_FIFO_0 (0x80) #define EXYNOS_TRNG_FIFO_1 (0x84) #define EXYNOS_TRNG_FIFO_2 (0x88) #define EXYNOS_TRNG_FIFO_3 (0x8c) #define EXYNOS_TRNG_FIFO_4 (0x90) #define EXYNOS_TRNG_FIFO_5 (0x94) #define EXYNOS_TRNG_FIFO_6 (0x98) #define EXYNOS_TRNG_FIFO_7 (0x9c) #define EXYNOS_TRNG_FIFO_LEN (8) #define EXYNOS_TRNG_CLOCK_RATE (500000) struct exynos_trng_dev { struct device *dev; void __iomem *mem; struct clk *clk; struct hwrng rng; }; static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct exynos_trng_dev *trng; int val; max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4)); trng = (struct exynos_trng_dev *)rng->priv; writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL); val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val, val == 0, 200, 1000000); if (val < 0) return val; memcpy_fromio(data, trng->mem + EXYNOS_TRNG_FIFO_0, max); return max; } static int exynos_trng_init(struct hwrng *rng) { struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv; unsigned long sss_rate; u32 val; sss_rate = clk_get_rate(trng->clk); /* * For most TRNG circuits the clock frequency of under 500 kHz * is safe. */ val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2); if (val > 0x7fff) { dev_err(trng->dev, "clock divider too large: %d", val); return -ERANGE; } val = val << 1; writel_relaxed(val, trng->mem + EXYNOS_TRNG_CLKDIV); /* Enable the generator. */ val = EXYNOS_TRNG_CTRL_RNGEN; writel_relaxed(val, trng->mem + EXYNOS_TRNG_CTRL); /* * Disable post-processing. /dev/hwrng is supposed to deliver * unprocessed data. 
*/ writel_relaxed(0, trng->mem + EXYNOS_TRNG_POST_CTRL); return 0; } static int exynos_trng_probe(struct platform_device *pdev) { struct exynos_trng_dev *trng; int ret = -ENOMEM; trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); if (!trng) return ret; trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev), GFP_KERNEL); if (!trng->rng.name) return ret; trng->rng.init = exynos_trng_init; trng->rng.read = exynos_trng_do_read; trng->rng.priv = (unsigned long) trng; platform_set_drvdata(pdev, trng); trng->dev = &pdev->dev; trng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(trng->mem)) return PTR_ERR(trng->mem); pm_runtime_enable(&pdev->dev); ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "Could not get runtime PM.\n"); goto err_pm_get; } trng->clk = devm_clk_get(&pdev->dev, "secss"); if (IS_ERR(trng->clk)) { ret = PTR_ERR(trng->clk); dev_err(&pdev->dev, "Could not get clock.\n"); goto err_clock; } ret = clk_prepare_enable(trng->clk); if (ret) { dev_err(&pdev->dev, "Could not enable the clk.\n"); goto err_clock; } ret = devm_hwrng_register(&pdev->dev, &trng->rng); if (ret) { dev_err(&pdev->dev, "Could not register hwrng device.\n"); goto err_register; } dev_info(&pdev->dev, "Exynos True Random Number Generator.\n"); return 0; err_register: clk_disable_unprepare(trng->clk); err_clock: pm_runtime_put_noidle(&pdev->dev); err_pm_get: pm_runtime_disable(&pdev->dev); return ret; } static int exynos_trng_remove(struct platform_device *pdev) { struct exynos_trng_dev *trng = platform_get_drvdata(pdev); clk_disable_unprepare(trng->clk); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } static int exynos_trng_suspend(struct device *dev) { pm_runtime_put_sync(dev); return 0; } static int exynos_trng_resume(struct device *dev) { int ret; ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(dev, "Could not get runtime PM.\n"); return ret; } return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend, exynos_trng_resume); static const struct of_device_id exynos_trng_dt_match[] = { { .compatible = "samsung,exynos5250-trng", }, { }, }; MODULE_DEVICE_TABLE(of, exynos_trng_dt_match); static struct platform_driver exynos_trng_driver = { .driver = { .name = "exynos-trng", .pm = pm_sleep_ptr(&exynos_trng_pm_ops), .of_match_table = exynos_trng_dt_match, }, .probe = exynos_trng_probe, .remove = exynos_trng_remove, }; module_platform_driver(exynos_trng_driver); MODULE_AUTHOR("Łukasz Stelmach"); MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips"); MODULE_LICENSE("GPL v2");
linux-master
drivers/char/hw_random/exynos-trng.c
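exynos_trng_do_read() above clamps each request to the 8-word FIFO and programs the FIFO controller with a bit count (max * 8). A small sketch of that sizing step, reusing the driver's FIFO length; the "register write" is just a print here:

/* Clamp a read request to the FIFO and convert it to bits. */
#include <stdio.h>

#define FIFO_LEN 8	/* EXYNOS_TRNG_FIFO_LEN, in 32-bit words */

static size_t clamp_request(size_t max)
{
	size_t limit = FIFO_LEN * 4;	/* FIFO capacity in bytes */

	return max < limit ? max : limit;
}

int main(void)
{
	size_t max = clamp_request(100);

	printf("requesting %zu bytes = %zu bits\n", max, max * 8);
	return 0;
}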
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for Mediatek Hardware Random Number Generator * * Copyright (C) 2017 Sean Wang <[email protected]> */ #define MTK_RNG_DEV KBUILD_MODNAME #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> /* Runtime PM autosuspend timeout: */ #define RNG_AUTOSUSPEND_TIMEOUT 100 #define USEC_POLL 2 #define TIMEOUT_POLL 60 #define RNG_CTRL 0x00 #define RNG_EN BIT(0) #define RNG_READY BIT(31) #define RNG_DATA 0x08 #define to_mtk_rng(p) container_of(p, struct mtk_rng, rng) struct mtk_rng { void __iomem *base; struct clk *clk; struct hwrng rng; }; static int mtk_rng_init(struct hwrng *rng) { struct mtk_rng *priv = to_mtk_rng(rng); u32 val; int err; err = clk_prepare_enable(priv->clk); if (err) return err; val = readl(priv->base + RNG_CTRL); val |= RNG_EN; writel(val, priv->base + RNG_CTRL); return 0; } static void mtk_rng_cleanup(struct hwrng *rng) { struct mtk_rng *priv = to_mtk_rng(rng); u32 val; val = readl(priv->base + RNG_CTRL); val &= ~RNG_EN; writel(val, priv->base + RNG_CTRL); clk_disable_unprepare(priv->clk); } static bool mtk_rng_wait_ready(struct hwrng *rng, bool wait) { struct mtk_rng *priv = to_mtk_rng(rng); int ready; ready = readl(priv->base + RNG_CTRL) & RNG_READY; if (!ready && wait) readl_poll_timeout_atomic(priv->base + RNG_CTRL, ready, ready & RNG_READY, USEC_POLL, TIMEOUT_POLL); return !!(ready & RNG_READY); } static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct mtk_rng *priv = to_mtk_rng(rng); int retval = 0; pm_runtime_get_sync((struct device *)priv->rng.priv); while (max >= sizeof(u32)) { if (!mtk_rng_wait_ready(rng, wait)) break; *(u32 *)buf = readl(priv->base + RNG_DATA); retval += sizeof(u32); buf += sizeof(u32); max -= sizeof(u32); } pm_runtime_mark_last_busy((struct device *)priv->rng.priv); pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv); return retval || !wait ? 
retval : -EIO; } static int mtk_rng_probe(struct platform_device *pdev) { int ret; struct mtk_rng *priv; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->rng.name = pdev->name; #ifndef CONFIG_PM priv->rng.init = mtk_rng_init; priv->rng.cleanup = mtk_rng_cleanup; #endif priv->rng.read = mtk_rng_read; priv->rng.priv = (unsigned long)&pdev->dev; priv->rng.quality = 900; priv->clk = devm_clk_get(&pdev->dev, "rng"); if (IS_ERR(priv->clk)) { ret = PTR_ERR(priv->clk); dev_err(&pdev->dev, "no clock for device: %d\n", ret); return ret; } priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); ret = devm_hwrng_register(&pdev->dev, &priv->rng); if (ret) { dev_err(&pdev->dev, "failed to register rng device: %d\n", ret); return ret; } dev_set_drvdata(&pdev->dev, priv); pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_enable(&pdev->dev); dev_info(&pdev->dev, "registered RNG driver\n"); return 0; } #ifdef CONFIG_PM static int mtk_rng_runtime_suspend(struct device *dev) { struct mtk_rng *priv = dev_get_drvdata(dev); mtk_rng_cleanup(&priv->rng); return 0; } static int mtk_rng_runtime_resume(struct device *dev) { struct mtk_rng *priv = dev_get_drvdata(dev); return mtk_rng_init(&priv->rng); } static const struct dev_pm_ops mtk_rng_pm_ops = { SET_RUNTIME_PM_OPS(mtk_rng_runtime_suspend, mtk_rng_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; #define MTK_RNG_PM_OPS (&mtk_rng_pm_ops) #else /* CONFIG_PM */ #define MTK_RNG_PM_OPS NULL #endif /* CONFIG_PM */ static const struct of_device_id mtk_rng_match[] = { { .compatible = "mediatek,mt7986-rng" }, { .compatible = "mediatek,mt7623-rng" }, {}, }; MODULE_DEVICE_TABLE(of, mtk_rng_match); static struct platform_driver mtk_rng_driver = { .probe = mtk_rng_probe, .driver = { .name = MTK_RNG_DEV, .pm = MTK_RNG_PM_OPS, .of_match_table = mtk_rng_match, }, }; module_platform_driver(mtk_rng_driver); MODULE_DESCRIPTION("Mediatek Random Number Generator Driver"); MODULE_AUTHOR("Sean Wang <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/mtk-rng.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2013 Michael Ellerman, Guo Chao, IBM Corp. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/random.h> #include <linux/hw_random.h> #include <asm/archrandom.h> static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { unsigned long *buf; int i, len; /* We rely on rng_buffer_size() being >= sizeof(unsigned long) */ len = max / sizeof(unsigned long); buf = (unsigned long *)data; for (i = 0; i < len; i++) pnv_get_random_long(buf++); return len * sizeof(unsigned long); } static struct hwrng powernv_hwrng = { .name = "powernv-rng", .read = powernv_rng_read, }; static int powernv_rng_probe(struct platform_device *pdev) { int rc; rc = devm_hwrng_register(&pdev->dev, &powernv_hwrng); if (rc) { /* We only register one device, ignore any others */ if (rc == -EEXIST) rc = -ENODEV; return rc; } pr_info("Registered powernv hwrng.\n"); return 0; } static const struct of_device_id powernv_rng_match[] = { { .compatible = "ibm,power-rng",}, {}, }; MODULE_DEVICE_TABLE(of, powernv_rng_match); static struct platform_driver powernv_rng_driver = { .driver = { .name = "powernv_rng", .of_match_table = powernv_rng_match, }, .probe = powernv_rng_probe, }; module_platform_driver(powernv_rng_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Bare metal HWRNG driver for POWER7+ and above");
linux-master
drivers/char/hw_random/powernv-rng.c
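powernv_rng_read() above fills the buffer one unsigned long at a time and never produces a sub-word tail, relying on rng_buffer_size() being at least word-sized. A sketch of that word-granular fill; get_word() is a stand-in for pnv_get_random_long(), using xorshift purely as demo data:

/* Fill a buffer in whole machine words, dropping any sub-word tail. */
#include <stddef.h>
#include <stdio.h>

static void get_word(unsigned long *v)
{
	static unsigned long x = 88172645463325252UL;

	x ^= x << 13; x ^= x >> 7; x ^= x << 17;	/* xorshift, demo only */
	*v = x;
}

static int rng_read(void *data, size_t max)
{
	unsigned long *buf = data;
	int len = max / sizeof(unsigned long);	/* whole words only */

	for (int i = 0; i < len; i++)
		get_word(buf++);
	return len * sizeof(unsigned long);
}

int main(void)
{
	unsigned long out[4];

	/* 32 bytes on LP64, 16 on 32-bit longs */
	printf("produced %d bytes\n", rng_read(out, sizeof(out)));
	return 0;
}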
// SPDX-License-Identifier: GPL-2.0-or-later /* * Randomness driver for virtio * Copyright (C) 2007, 2008 Rusty Russell IBM Corporation */ #include <asm/barrier.h> #include <linux/err.h> #include <linux/hw_random.h> #include <linux/scatterlist.h> #include <linux/spinlock.h> #include <linux/virtio.h> #include <linux/virtio_rng.h> #include <linux/module.h> #include <linux/slab.h> static DEFINE_IDA(rng_index_ida); struct virtrng_info { struct hwrng hwrng; struct virtqueue *vq; char name[25]; int index; bool hwrng_register_done; bool hwrng_removed; /* data transfer */ struct completion have_data; unsigned int data_avail; unsigned int data_idx; /* minimal size returned by rng_buffer_size() */ #if SMP_CACHE_BYTES < 32 u8 data[32]; #else u8 data[SMP_CACHE_BYTES]; #endif }; static void random_recv_done(struct virtqueue *vq) { struct virtrng_info *vi = vq->vdev->priv; unsigned int len; /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ if (!virtqueue_get_buf(vi->vq, &len)) return; smp_store_release(&vi->data_avail, len); complete(&vi->have_data); } static void request_entropy(struct virtrng_info *vi) { struct scatterlist sg; reinit_completion(&vi->have_data); vi->data_idx = 0; sg_init_one(&sg, vi->data, sizeof(vi->data)); /* There should always be room for one buffer. */ virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL); virtqueue_kick(vi->vq); } static unsigned int copy_data(struct virtrng_info *vi, void *buf, unsigned int size) { size = min_t(unsigned int, size, vi->data_avail); memcpy(buf, vi->data + vi->data_idx, size); vi->data_idx += size; vi->data_avail -= size; if (vi->data_avail == 0) request_entropy(vi); return size; } static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) { int ret; struct virtrng_info *vi = (struct virtrng_info *)rng->priv; unsigned int chunk; size_t read; if (vi->hwrng_removed) return -ENODEV; read = 0; /* copy available data */ if (smp_load_acquire(&vi->data_avail)) { chunk = copy_data(vi, buf, size); size -= chunk; read += chunk; } if (!wait) return read; /* We have already copied available entropy, * so either size is 0 or data_avail is 0 */ while (size != 0) { /* data_avail is 0 but a request is pending */ ret = wait_for_completion_killable(&vi->have_data); if (ret < 0) return ret; /* if vi->data_avail is 0, we have been interrupted * by a cleanup, but buffer stays in the queue */ if (vi->data_avail == 0) return read; chunk = copy_data(vi, buf + read, size); size -= chunk; read += chunk; } return read; } static void virtio_cleanup(struct hwrng *rng) { struct virtrng_info *vi = (struct virtrng_info *)rng->priv; complete(&vi->have_data); } static int probe_common(struct virtio_device *vdev) { int err, index; struct virtrng_info *vi = NULL; vi = kzalloc(sizeof(struct virtrng_info), GFP_KERNEL); if (!vi) return -ENOMEM; vi->index = index = ida_simple_get(&rng_index_ida, 0, 0, GFP_KERNEL); if (index < 0) { err = index; goto err_ida; } sprintf(vi->name, "virtio_rng.%d", index); init_completion(&vi->have_data); vi->hwrng = (struct hwrng) { .read = virtio_read, .cleanup = virtio_cleanup, .priv = (unsigned long)vi, .name = vi->name, }; vdev->priv = vi; /* We expect a single virtqueue. 
*/ vi->vq = virtio_find_single_vq(vdev, random_recv_done, "input"); if (IS_ERR(vi->vq)) { err = PTR_ERR(vi->vq); goto err_find; } virtio_device_ready(vdev); /* we always have a pending entropy request */ request_entropy(vi); return 0; err_find: ida_simple_remove(&rng_index_ida, index); err_ida: kfree(vi); return err; } static void remove_common(struct virtio_device *vdev) { struct virtrng_info *vi = vdev->priv; vi->hwrng_removed = true; vi->data_avail = 0; vi->data_idx = 0; complete(&vi->have_data); if (vi->hwrng_register_done) hwrng_unregister(&vi->hwrng); virtio_reset_device(vdev); vdev->config->del_vqs(vdev); ida_simple_remove(&rng_index_ida, vi->index); kfree(vi); } static int virtrng_probe(struct virtio_device *vdev) { return probe_common(vdev); } static void virtrng_remove(struct virtio_device *vdev) { remove_common(vdev); } static void virtrng_scan(struct virtio_device *vdev) { struct virtrng_info *vi = vdev->priv; int err; err = hwrng_register(&vi->hwrng); if (!err) vi->hwrng_register_done = true; } #ifdef CONFIG_PM_SLEEP static int virtrng_freeze(struct virtio_device *vdev) { remove_common(vdev); return 0; } static int virtrng_restore(struct virtio_device *vdev) { int err; err = probe_common(vdev); if (!err) { struct virtrng_info *vi = vdev->priv; /* * Set hwrng_removed to ensure that virtio_read() * does not block waiting for data before the * registration is complete. */ vi->hwrng_removed = true; err = hwrng_register(&vi->hwrng); if (!err) { vi->hwrng_register_done = true; vi->hwrng_removed = false; } } return err; } #endif static const struct virtio_device_id id_table[] = { { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID }, { 0 }, }; static struct virtio_driver virtio_rng_driver = { .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtrng_probe, .remove = virtrng_remove, .scan = virtrng_scan, #ifdef CONFIG_PM_SLEEP .freeze = virtrng_freeze, .restore = virtrng_restore, #endif }; module_virtio_driver(virtio_rng_driver); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio random number driver"); MODULE_LICENSE("GPL");
linux-master
drivers/char/hw_random/virtio-rng.c
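virtio-rng above satisfies reads from a cached buffer tracked by data_idx/data_avail and posts a new entropy request the moment the cache drains, so one request is always in flight. A standalone sketch of that bookkeeping; refill() stands in for request_entropy() posting a virtqueue buffer:

/* Serve reads from a local cache; refill as soon as it empties. */
#include <stdio.h>
#include <string.h>

static unsigned char data[32];
static unsigned int data_idx, data_avail;

static void refill(void)
{
	memset(data, 0xab, sizeof(data));	/* pretend the host filled it */
	data_idx = 0;
	data_avail = sizeof(data);
}

static unsigned int copy_data(void *buf, unsigned int size)
{
	if (size > data_avail)
		size = data_avail;
	memcpy(buf, data + data_idx, size);
	data_idx += size;
	data_avail -= size;
	if (data_avail == 0)
		refill();	/* keep one request always in flight */
	return size;
}

int main(void)
{
	unsigned char out[20];
	unsigned int a, b;

	refill();
	a = copy_data(out, 20);
	b = copy_data(out, 20);	/* clamped to the 12 bytes left, then refilled */
	printf("%u then %u bytes\n", a, b);
	return 0;
}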
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2020 Xiphera Ltd. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/err.h> #include <linux/io.h> #include <linux/hw_random.h> #include <linux/platform_device.h> #include <linux/delay.h> #define CONTROL_REG 0x00000000 #define STATUS_REG 0x00000004 #define RAND_REG 0x00000000 #define HOST_TO_TRNG_RESET 0x00000001 #define HOST_TO_TRNG_RELEASE_RESET 0x00000002 #define HOST_TO_TRNG_ENABLE 0x80000000 #define HOST_TO_TRNG_ZEROIZE 0x80000004 #define HOST_TO_TRNG_ACK_ZEROIZE 0x80000008 #define HOST_TO_TRNG_READ 0x8000000F /* trng statuses */ #define TRNG_ACK_RESET 0x000000AC #define TRNG_SUCCESSFUL_STARTUP 0x00000057 #define TRNG_FAILED_STARTUP 0x000000FA #define TRNG_NEW_RAND_AVAILABLE 0x000000ED struct xiphera_trng { void __iomem *mem; struct hwrng rng; }; static int xiphera_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct xiphera_trng *trng = container_of(rng, struct xiphera_trng, rng); int ret = 0; while (max >= sizeof(u32)) { /* check for data */ if (readl(trng->mem + STATUS_REG) == TRNG_NEW_RAND_AVAILABLE) { *(u32 *)buf = readl(trng->mem + RAND_REG); /* * Inform the trng of the read * and re-enable it to produce a new random number */ writel(HOST_TO_TRNG_READ, trng->mem + CONTROL_REG); writel(HOST_TO_TRNG_ENABLE, trng->mem + CONTROL_REG); ret += sizeof(u32); buf += sizeof(u32); max -= sizeof(u32); } else { break; } } return ret; } static int xiphera_trng_probe(struct platform_device *pdev) { int ret; struct xiphera_trng *trng; struct device *dev = &pdev->dev; trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL); if (!trng) return -ENOMEM; trng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(trng->mem)) return PTR_ERR(trng->mem); /* * the trng needs to be reset first which might not happen in time, * hence we incorporate a small delay to ensure proper behaviour */ writel(HOST_TO_TRNG_RESET, trng->mem + CONTROL_REG); usleep_range(100, 200); if (readl(trng->mem + STATUS_REG) != TRNG_ACK_RESET) { /* * there is a small chance the trng is just not ready yet, * so we try one more time. 
If the second time fails, we give up */ usleep_range(100, 200); if (readl(trng->mem + STATUS_REG) != TRNG_ACK_RESET) { dev_err(dev, "failed to reset the trng ip\n"); return -ENODEV; } } /* * once again, to ensure proper behaviour we sleep * for a while after zeroizing the trng */ writel(HOST_TO_TRNG_RELEASE_RESET, trng->mem + CONTROL_REG); writel(HOST_TO_TRNG_ENABLE, trng->mem + CONTROL_REG); writel(HOST_TO_TRNG_ZEROIZE, trng->mem + CONTROL_REG); msleep(20); if (readl(trng->mem + STATUS_REG) != TRNG_SUCCESSFUL_STARTUP) { /* diagnose the reason for the failure */ if (readl(trng->mem + STATUS_REG) == TRNG_FAILED_STARTUP) { dev_err(dev, "trng ip startup-tests failed\n"); return -ENODEV; } dev_err(dev, "startup-tests yielded no response\n"); return -ENODEV; } writel(HOST_TO_TRNG_ACK_ZEROIZE, trng->mem + CONTROL_REG); trng->rng.name = pdev->name; trng->rng.read = xiphera_trng_read; trng->rng.quality = 900; ret = devm_hwrng_register(dev, &trng->rng); if (ret) { dev_err(dev, "failed to register rng device: %d\n", ret); return ret; } platform_set_drvdata(pdev, trng); return 0; } static const struct of_device_id xiphera_trng_of_match[] = { { .compatible = "xiphera,xip8001b-trng", }, {}, }; MODULE_DEVICE_TABLE(of, xiphera_trng_of_match); static struct platform_driver xiphera_trng_driver = { .driver = { .name = "xiphera-trng", .of_match_table = xiphera_trng_of_match, }, .probe = xiphera_trng_probe, }; module_platform_driver(xiphera_trng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Atte Tommiska"); MODULE_DESCRIPTION("Xiphera FPGA-based true random number generator driver");
linux-master
drivers/char/hw_random/xiphera-trng.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2004 IBM Corporation * Copyright (C) 2014 Intel Corporation * * Authors: * Jarkko Sakkinen <[email protected]> * Leendert van Doorn <[email protected]> * Dave Safford <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * * Maintained by: <[email protected]> * * TPM chip management routines. */ #include <linux/poll.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/freezer.h> #include <linux/major.h> #include <linux/tpm_eventlog.h> #include <linux/hw_random.h> #include "tpm.h" DEFINE_IDR(dev_nums_idr); static DEFINE_MUTEX(idr_lock); const struct class tpm_class = { .name = "tpm", .shutdown_pre = tpm_class_shutdown, }; const struct class tpmrm_class = { .name = "tpmrm", }; dev_t tpm_devt; static int tpm_request_locality(struct tpm_chip *chip) { int rc; if (!chip->ops->request_locality) return 0; rc = chip->ops->request_locality(chip, 0); if (rc < 0) return rc; chip->locality = rc; return 0; } static void tpm_relinquish_locality(struct tpm_chip *chip) { int rc; if (!chip->ops->relinquish_locality) return; rc = chip->ops->relinquish_locality(chip, chip->locality); if (rc) dev_err(&chip->dev, "%s: : error %d\n", __func__, rc); chip->locality = -1; } static int tpm_cmd_ready(struct tpm_chip *chip) { if (!chip->ops->cmd_ready) return 0; return chip->ops->cmd_ready(chip); } static int tpm_go_idle(struct tpm_chip *chip) { if (!chip->ops->go_idle) return 0; return chip->ops->go_idle(chip); } static void tpm_clk_enable(struct tpm_chip *chip) { if (chip->ops->clk_enable) chip->ops->clk_enable(chip, true); } static void tpm_clk_disable(struct tpm_chip *chip) { if (chip->ops->clk_enable) chip->ops->clk_enable(chip, false); } /** * tpm_chip_start() - power on the TPM * @chip: a TPM chip to use * * Return: * * The response length - OK * * -errno - A system error */ int tpm_chip_start(struct tpm_chip *chip) { int ret; tpm_clk_enable(chip); if (chip->locality == -1) { ret = tpm_request_locality(chip); if (ret) { tpm_clk_disable(chip); return ret; } } ret = tpm_cmd_ready(chip); if (ret) { tpm_relinquish_locality(chip); tpm_clk_disable(chip); return ret; } return 0; } EXPORT_SYMBOL_GPL(tpm_chip_start); /** * tpm_chip_stop() - power off the TPM * @chip: a TPM chip to use * * Return: * * The response length - OK * * -errno - A system error */ void tpm_chip_stop(struct tpm_chip *chip) { tpm_go_idle(chip); tpm_relinquish_locality(chip); tpm_clk_disable(chip); } EXPORT_SYMBOL_GPL(tpm_chip_stop); /** * tpm_try_get_ops() - Get a ref to the tpm_chip * @chip: Chip to ref * * The caller must already have some kind of locking to ensure that chip is * valid. This function will lock the chip so that the ops member can be * accessed safely. The locking prevents tpm_chip_unregister from * completing, so it should not be held for long periods. * * Returns -ERRNO if the chip could not be got. */ int tpm_try_get_ops(struct tpm_chip *chip) { int rc = -EIO; get_device(&chip->dev); down_read(&chip->ops_sem); if (!chip->ops) goto out_ops; mutex_lock(&chip->tpm_mutex); rc = tpm_chip_start(chip); if (rc) goto out_lock; return 0; out_lock: mutex_unlock(&chip->tpm_mutex); out_ops: up_read(&chip->ops_sem); put_device(&chip->dev); return rc; } EXPORT_SYMBOL_GPL(tpm_try_get_ops); /** * tpm_put_ops() - Release a ref to the tpm_chip * @chip: Chip to put * * This is the opposite pair to tpm_try_get_ops(). After this returns chip may * be kfree'd. 
*/ void tpm_put_ops(struct tpm_chip *chip) { tpm_chip_stop(chip); mutex_unlock(&chip->tpm_mutex); up_read(&chip->ops_sem); put_device(&chip->dev); } EXPORT_SYMBOL_GPL(tpm_put_ops); /** * tpm_default_chip() - find a TPM chip and get a reference to it */ struct tpm_chip *tpm_default_chip(void) { struct tpm_chip *chip, *res = NULL; int chip_num = 0; int chip_prev; mutex_lock(&idr_lock); do { chip_prev = chip_num; chip = idr_get_next(&dev_nums_idr, &chip_num); if (chip) { get_device(&chip->dev); res = chip; break; } } while (chip_prev != chip_num); mutex_unlock(&idr_lock); return res; } EXPORT_SYMBOL_GPL(tpm_default_chip); /** * tpm_find_get_ops() - find and reserve a TPM chip * @chip: a &struct tpm_chip instance, %NULL for the default chip * * Finds a TPM chip and reserves its class device and operations. The chip must * be released with tpm_put_ops() after use. * This function is for internal use only. It supports existing TPM callers * by accepting NULL, but those callers should be converted to pass in a chip * directly. * * Return: * A reserved &struct tpm_chip instance. * %NULL if a chip is not found. * %NULL if the chip is not available. */ struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip) { int rc; if (chip) { if (!tpm_try_get_ops(chip)) return chip; return NULL; } chip = tpm_default_chip(); if (!chip) return NULL; rc = tpm_try_get_ops(chip); /* release additional reference we got from tpm_default_chip() */ put_device(&chip->dev); if (rc) return NULL; return chip; } /** * tpm_dev_release() - free chip memory and the device number * @dev: the character device for the TPM chip * * This is used as the release function for the character device. */ static void tpm_dev_release(struct device *dev) { struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); mutex_lock(&idr_lock); idr_remove(&dev_nums_idr, chip->dev_num); mutex_unlock(&idr_lock); kfree(chip->work_space.context_buf); kfree(chip->work_space.session_buf); kfree(chip->allocated_banks); kfree(chip); } /** * tpm_class_shutdown() - prepare the TPM device for loss of power. * @dev: device to which the chip is associated. * * Issues a TPM2_Shutdown command prior to loss of power, as required by the * TPM 2.0 spec. Then, calls bus- and device-specific shutdown code. * * Return: always 0 (i.e. success) */ int tpm_class_shutdown(struct device *dev) { struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); down_write(&chip->ops_sem); if (chip->flags & TPM_CHIP_FLAG_TPM2) { if (!tpm_chip_start(chip)) { tpm2_shutdown(chip, TPM2_SU_CLEAR); tpm_chip_stop(chip); } } chip->ops = NULL; up_write(&chip->ops_sem); return 0; } /** * tpm_chip_alloc() - allocate a new struct tpm_chip instance * @pdev: device to which the chip is associated * At this point pdev must be initialized, but does not have to * be registered * @ops: struct tpm_class_ops instance * * Allocates a new struct tpm_chip instance and assigns a free * device number for it. Must be paired with put_device(&chip->dev).
*/ struct tpm_chip *tpm_chip_alloc(struct device *pdev, const struct tpm_class_ops *ops) { struct tpm_chip *chip; int rc; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) return ERR_PTR(-ENOMEM); mutex_init(&chip->tpm_mutex); init_rwsem(&chip->ops_sem); chip->ops = ops; mutex_lock(&idr_lock); rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL); mutex_unlock(&idr_lock); if (rc < 0) { dev_err(pdev, "No available tpm device numbers\n"); kfree(chip); return ERR_PTR(rc); } chip->dev_num = rc; device_initialize(&chip->dev); chip->dev.class = &tpm_class; chip->dev.release = tpm_dev_release; chip->dev.parent = pdev; chip->dev.groups = chip->groups; if (chip->dev_num == 0) chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); else chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num); if (rc) goto out; if (!pdev) chip->flags |= TPM_CHIP_FLAG_VIRTUAL; cdev_init(&chip->cdev, &tpm_fops); chip->cdev.owner = THIS_MODULE; rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); if (rc) { rc = -ENOMEM; goto out; } chip->locality = -1; return chip; out: put_device(&chip->dev); return ERR_PTR(rc); } EXPORT_SYMBOL_GPL(tpm_chip_alloc); static void tpm_put_device(void *dev) { put_device(dev); } /** * tpmm_chip_alloc() - allocate a new struct tpm_chip instance * @pdev: parent device to which the chip is associated * @ops: struct tpm_class_ops instance * * Same as tpm_chip_alloc except devm is used to do the put_device */ struct tpm_chip *tpmm_chip_alloc(struct device *pdev, const struct tpm_class_ops *ops) { struct tpm_chip *chip; int rc; chip = tpm_chip_alloc(pdev, ops); if (IS_ERR(chip)) return chip; rc = devm_add_action_or_reset(pdev, tpm_put_device, &chip->dev); if (rc) return ERR_PTR(rc); dev_set_drvdata(pdev, chip); return chip; } EXPORT_SYMBOL_GPL(tpmm_chip_alloc); static int tpm_add_char_device(struct tpm_chip *chip) { int rc; rc = cdev_device_add(&chip->cdev, &chip->dev); if (rc) { dev_err(&chip->dev, "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", dev_name(&chip->dev), MAJOR(chip->dev.devt), MINOR(chip->dev.devt), rc); return rc; } if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) { rc = tpm_devs_add(chip); if (rc) goto err_del_cdev; } /* Make the chip available. */ mutex_lock(&idr_lock); idr_replace(&dev_nums_idr, chip, chip->dev_num); mutex_unlock(&idr_lock); return 0; err_del_cdev: cdev_device_del(&chip->cdev, &chip->dev); return rc; } static void tpm_del_char_device(struct tpm_chip *chip) { cdev_device_del(&chip->cdev, &chip->dev); /* Make the chip unavailable. */ mutex_lock(&idr_lock); idr_replace(&dev_nums_idr, NULL, chip->dev_num); mutex_unlock(&idr_lock); /* Make the driver uncallable. */ down_write(&chip->ops_sem); /* * Check if chip->ops is still valid: in case the controller * driver's shutdown handler unregisters the controller, we are * called twice and chip->ops has already been set to NULL.
*/ if (chip->ops) { if (chip->flags & TPM_CHIP_FLAG_TPM2) { if (!tpm_chip_start(chip)) { tpm2_shutdown(chip, TPM2_SU_CLEAR); tpm_chip_stop(chip); } } chip->ops = NULL; } up_write(&chip->ops_sem); } static void tpm_del_legacy_sysfs(struct tpm_chip *chip) { struct attribute **i; if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL) || tpm_is_firmware_upgrade(chip)) return; sysfs_remove_link(&chip->dev.parent->kobj, "ppi"); for (i = chip->groups[0]->attrs; *i != NULL; ++i) sysfs_remove_link(&chip->dev.parent->kobj, (*i)->name); } /* For compatibility with legacy sysfs paths we provide symlinks from the * parent dev directory to selected names within the tpm chip directory. Old * kernel versions created these files directly under the parent. */ static int tpm_add_legacy_sysfs(struct tpm_chip *chip) { struct attribute **i; int rc; if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL) || tpm_is_firmware_upgrade(chip)) return 0; rc = compat_only_sysfs_link_entry_to_kobj( &chip->dev.parent->kobj, &chip->dev.kobj, "ppi", NULL); if (rc && rc != -ENOENT) return rc; /* All the names from tpm-sysfs */ for (i = chip->groups[0]->attrs; *i != NULL; ++i) { rc = compat_only_sysfs_link_entry_to_kobj( &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name, NULL); if (rc) { tpm_del_legacy_sysfs(chip); return rc; } } return 0; } static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng); /* Give back zero bytes, as TPM chip has not yet fully resumed: */ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) return 0; return tpm_get_random(chip, data, max); } static bool tpm_is_hwrng_enabled(struct tpm_chip *chip) { if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM)) return false; if (tpm_is_firmware_upgrade(chip)) return false; if (chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED) return false; return true; } static int tpm_add_hwrng(struct tpm_chip *chip) { if (!tpm_is_hwrng_enabled(chip)) return 0; snprintf(chip->hwrng_name, sizeof(chip->hwrng_name), "tpm-rng-%d", chip->dev_num); chip->hwrng.name = chip->hwrng_name; chip->hwrng.read = tpm_hwrng_read; return hwrng_register(&chip->hwrng); } static int tpm_get_pcr_allocation(struct tpm_chip *chip) { int rc; if (tpm_is_firmware_upgrade(chip)) return 0; rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ? tpm2_get_pcr_allocation(chip) : tpm1_get_pcr_allocation(chip); if (rc > 0) return -ENODEV; return rc; } /* * tpm_chip_bootstrap() - Bootstrap TPM chip after power on * @chip: TPM chip to use. * * Initialize TPM chip after power on. This is a one-shot function: subsequent * calls will have no effect. */ int tpm_chip_bootstrap(struct tpm_chip *chip) { int rc; if (chip->flags & TPM_CHIP_FLAG_BOOTSTRAPPED) return 0; rc = tpm_chip_start(chip); if (rc) return rc; rc = tpm_auto_startup(chip); if (rc) goto stop; rc = tpm_get_pcr_allocation(chip); stop: tpm_chip_stop(chip); /* * Unconditionally set, as driver initialization should cease when the * bootstrapping process fails. */ chip->flags |= TPM_CHIP_FLAG_BOOTSTRAPPED; return rc; } EXPORT_SYMBOL_GPL(tpm_chip_bootstrap); /* * tpm_chip_register() - create a character device for the TPM chip * @chip: TPM chip to use. * * Creates a character device for the TPM chip and adds sysfs attributes for * the device. As the last step this function adds the chip to the list of TPM * chips available for in-kernel use. * * This function should only be called after the chip initialization is * complete. 
*/ int tpm_chip_register(struct tpm_chip *chip) { int rc; rc = tpm_chip_bootstrap(chip); if (rc) return rc; tpm_sysfs_add_device(chip); tpm_bios_log_setup(chip); tpm_add_ppi(chip); rc = tpm_add_hwrng(chip); if (rc) goto out_ppi; rc = tpm_add_char_device(chip); if (rc) goto out_hwrng; rc = tpm_add_legacy_sysfs(chip); if (rc) { tpm_chip_unregister(chip); return rc; } return 0; out_hwrng: if (tpm_is_hwrng_enabled(chip)) hwrng_unregister(&chip->hwrng); out_ppi: tpm_bios_log_teardown(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_chip_register); /* * tpm_chip_unregister() - release the TPM driver * @chip: TPM chip to use. * * Takes the chip first away from the list of available TPM chips and then * cleans up all the resources reserved by tpm_chip_register(). * * Once this function returns the driver call backs in 'op's will not be * running and will no longer start. * * NOTE: This function should be only called before deinitializing chip * resources. */ void tpm_chip_unregister(struct tpm_chip *chip) { tpm_del_legacy_sysfs(chip); if (tpm_is_hwrng_enabled(chip)) hwrng_unregister(&chip->hwrng); tpm_bios_log_teardown(chip); if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) tpm_devs_remove(chip); tpm_del_char_device(chip); } EXPORT_SYMBOL_GPL(tpm_chip_unregister);
linux-master
drivers/char/tpm/tpm-chip.c
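/*
 * Illustrative sketch, not part of tpm-chip.c above: how a bus driver would
 * typically pair tpmm_chip_alloc() with tpm_chip_register()/tpm_chip_unregister().
 * The "example_tpm" names, the platform bus, and the empty ops table are
 * hypothetical placeholders; a real driver must also fill in .status/.recv/
 * .send/.cancel before registering, or command transmission will fail.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/tpm.h>

static const struct tpm_class_ops example_tpm_ops = {
	.flags = TPM_OPS_AUTO_STARTUP,
	/* .status, .recv, .send, .cancel omitted in this sketch */
};

static int example_tpm_probe(struct platform_device *pdev)
{
	struct tpm_chip *chip;

	/* devm variant: put_device(&chip->dev) happens on driver unbind */
	chip = tpmm_chip_alloc(&pdev->dev, &example_tpm_ops);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	/* bootstraps the TPM and exposes /dev/tpm%d */
	return tpm_chip_register(chip);
}

static void example_tpm_remove(struct platform_device *pdev)
{
	/* tpmm_chip_alloc() stored the chip as drvdata */
	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);

	/* must run before the bus resources behind chip->ops go away */
	tpm_chip_unregister(chip);
}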
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2004 IBM Corporation * Authors: * Leendert van Doorn <[email protected]> * Dave Safford <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * * Copyright (C) 2013 Obsidian Research Corp * Jason Gunthorpe <[email protected]> * * Device file system interface to the TPM */ #include <linux/poll.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/workqueue.h> #include "tpm.h" #include "tpm-dev.h" static struct workqueue_struct *tpm_dev_wq; static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space, u8 *buf, size_t bufsiz) { struct tpm_header *header = (void *)buf; ssize_t ret, len; ret = tpm2_prepare_space(chip, space, buf, bufsiz); /* If the command is not implemented by the TPM, synthesize a * response with a TPM2_RC_COMMAND_CODE return for user-space. */ if (ret == -EOPNOTSUPP) { header->length = cpu_to_be32(sizeof(*header)); header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE | TSS2_RESMGR_TPM_RC_LAYER); ret = sizeof(*header); } if (ret) goto out_rc; len = tpm_transmit(chip, buf, bufsiz); if (len < 0) ret = len; if (!ret) ret = tpm2_commit_space(chip, space, buf, &len); out_rc: return ret ? ret : len; } static void tpm_dev_async_work(struct work_struct *work) { struct file_priv *priv = container_of(work, struct file_priv, async_work); ssize_t ret; mutex_lock(&priv->buffer_mutex); priv->command_enqueued = false; ret = tpm_try_get_ops(priv->chip); if (ret) { priv->response_length = ret; goto out; } ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); /* * If ret is > 0 then tpm_dev_transmit returned the size of the * response. If ret is < 0 then tpm_dev_transmit failed and * returned an error code. 
*/ if (ret != 0) { priv->response_length = ret; mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); } out: mutex_unlock(&priv->buffer_mutex); wake_up_interruptible(&priv->async_wait); } static void user_reader_timeout(struct timer_list *t) { struct file_priv *priv = from_timer(priv, t, user_read_timer); pr_warn("TPM user space timeout is deprecated (pid=%d)\n", task_tgid_nr(current)); schedule_work(&priv->timeout_work); } static void tpm_timeout_work(struct work_struct *work) { struct file_priv *priv = container_of(work, struct file_priv, timeout_work); mutex_lock(&priv->buffer_mutex); priv->response_read = true; priv->response_length = 0; memset(priv->data_buffer, 0, sizeof(priv->data_buffer)); mutex_unlock(&priv->buffer_mutex); wake_up_interruptible(&priv->async_wait); } void tpm_common_open(struct file *file, struct tpm_chip *chip, struct file_priv *priv, struct tpm_space *space) { priv->chip = chip; priv->space = space; priv->response_read = true; mutex_init(&priv->buffer_mutex); timer_setup(&priv->user_read_timer, user_reader_timeout, 0); INIT_WORK(&priv->timeout_work, tpm_timeout_work); INIT_WORK(&priv->async_work, tpm_dev_async_work); init_waitqueue_head(&priv->async_wait); file->private_data = priv; } ssize_t tpm_common_read(struct file *file, char __user *buf, size_t size, loff_t *off) { struct file_priv *priv = file->private_data; ssize_t ret_size = 0; int rc; mutex_lock(&priv->buffer_mutex); if (priv->response_length) { priv->response_read = true; ret_size = min_t(ssize_t, size, priv->response_length); if (ret_size <= 0) { priv->response_length = 0; goto out; } rc = copy_to_user(buf, priv->data_buffer + *off, ret_size); if (rc) { memset(priv->data_buffer, 0, TPM_BUFSIZE); priv->response_length = 0; ret_size = -EFAULT; } else { memset(priv->data_buffer + *off, 0, ret_size); priv->response_length -= ret_size; *off += ret_size; } } out: if (!priv->response_length) { *off = 0; del_timer_sync(&priv->user_read_timer); flush_work(&priv->timeout_work); } mutex_unlock(&priv->buffer_mutex); return ret_size; } ssize_t tpm_common_write(struct file *file, const char __user *buf, size_t size, loff_t *off) { struct file_priv *priv = file->private_data; int ret = 0; if (size > TPM_BUFSIZE) return -E2BIG; mutex_lock(&priv->buffer_mutex); /* Cannot perform a write until the read has cleared either via * tpm_read or a user_read_timer timeout. This also prevents split * buffered writes from blocking here. */ if ((!priv->response_read && priv->response_length) || priv->command_enqueued) { ret = -EBUSY; goto out; } if (copy_from_user(priv->data_buffer, buf, size)) { ret = -EFAULT; goto out; } if (size < 6 || size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) { ret = -EINVAL; goto out; } priv->response_length = 0; priv->response_read = false; *off = 0; /* * If in nonblocking mode, schedule an async job to send the command * and return the size. In case of error the error code will be * returned in the subsequent read call. */ if (file->f_flags & O_NONBLOCK) { priv->command_enqueued = true; queue_work(tpm_dev_wq, &priv->async_work); mutex_unlock(&priv->buffer_mutex); return size; } /* atomic tpm command send and result receive. We only hold the ops * lock during this period so that the tpm can be unregistered even if * the char dev is held open. 
*/ if (tpm_try_get_ops(priv->chip)) { ret = -EPIPE; goto out; } ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); if (ret > 0) { priv->response_length = ret; mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); ret = size; } out: mutex_unlock(&priv->buffer_mutex); return ret; } __poll_t tpm_common_poll(struct file *file, poll_table *wait) { struct file_priv *priv = file->private_data; __poll_t mask = 0; poll_wait(file, &priv->async_wait, wait); mutex_lock(&priv->buffer_mutex); /* * The response_length indicates if there is still response * (or part of it) to be consumed. Partial reads decrease it * by the number of bytes read, and a write resets it to zero. */ if (priv->response_length) mask = EPOLLIN | EPOLLRDNORM; else mask = EPOLLOUT | EPOLLWRNORM; mutex_unlock(&priv->buffer_mutex); return mask; } /* * Called on file close */ void tpm_common_release(struct file *file, struct file_priv *priv) { flush_work(&priv->async_work); del_timer_sync(&priv->user_read_timer); flush_work(&priv->timeout_work); file->private_data = NULL; priv->response_length = 0; } int __init tpm_dev_common_init(void) { tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0); return !tpm_dev_wq ? -ENOMEM : 0; } void __exit tpm_dev_common_exit(void) { if (tpm_dev_wq) { destroy_workqueue(tpm_dev_wq); tpm_dev_wq = NULL; } }
linux-master
drivers/char/tpm/tpm-dev-common.c
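/*
 * Hedged user-space sketch, not part of tpm-dev-common.c: exercising the
 * write()/read() contract implemented above. The command is a hand-built
 * TPM2_GetRandom (command code 0x17B) asking for 8 bytes; the big-endian
 * length field at offset 2 is exactly what tpm_common_write() validates.
 * The /dev/tpmrm0 path assumes a TPM 2.0 with the in-kernel resource manager.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* tag=TPM_ST_NO_SESSIONS, size=12, cc=GetRandom, bytesRequested=8 */
	unsigned char cmd[12] = {
		0x80, 0x01, 0x00, 0x00, 0x00, 0x0c,
		0x00, 0x00, 0x01, 0x7b, 0x00, 0x08,
	};
	unsigned char rsp[64];
	ssize_t n;
	int fd = open("/dev/tpmrm0", O_RDWR);

	if (fd < 0)
		return 1;
	if (write(fd, cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd))
		return 1;
	n = read(fd, rsp, sizeof(rsp)); /* blocks until the response is queued */
	if (n > 0)
		printf("got %zd response bytes\n", n);
	close(fd);
	return 0;
}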
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012,2013 Infineon Technologies * * Authors: * Peter Huewe <[email protected]> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org * * This device driver implements the TPM interface as defined in * the TCG TPM Interface Spec version 1.2, revision 1.0 and the * Infineon I2C Protocol Stack Specification v0.20. * * It is based on the original tpm_tis device driver from Leendert van * Doorn and Kylene Hall. */ #include <linux/i2c.h> #include <linux/module.h> #include <linux/wait.h> #include "tpm.h" #define TPM_I2C_INFINEON_BUFSIZE 1260 /* max. number of iterations after I2C NAK */ #define MAX_COUNT 3 #define SLEEP_DURATION_LOW 55 #define SLEEP_DURATION_HI 65 /* max. number of iterations after I2C NAK for 'long' commands * we need this especially for sending TPM_READY, since the cleanup after the * transition to the ready state may take some time, but it is unpredictable * how long it will take. */ #define MAX_COUNT_LONG 50 #define SLEEP_DURATION_LONG_LOW 200 #define SLEEP_DURATION_LONG_HI 220 /* After sending TPM_READY to 'reset' the TPM we have to sleep even longer */ #define SLEEP_DURATION_RESET_LOW 2400 #define SLEEP_DURATION_RESET_HI 2600 /* we want to use usleep_range instead of msleep for the 5ms TPM_TIMEOUT */ #define TPM_TIMEOUT_US_LOW (TPM_TIMEOUT * 1000) #define TPM_TIMEOUT_US_HI (TPM_TIMEOUT_US_LOW + 2000) /* expected value for DIDVID register */ #define TPM_TIS_I2C_DID_VID_9635 0xd1150b00L #define TPM_TIS_I2C_DID_VID_9645 0x001a15d1L enum i2c_chip_type { SLB9635, SLB9645, UNKNOWN, }; struct tpm_inf_dev { struct i2c_client *client; int locality; /* In addition to the data itself, the buffer must fit the 7-bit I2C * address and the direction bit. */ u8 buf[TPM_I2C_INFINEON_BUFSIZE + 1]; struct tpm_chip *chip; enum i2c_chip_type chip_type; unsigned int adapterlimit; }; static struct tpm_inf_dev tpm_dev; /* * iic_tpm_read() - read from TPM register * @addr: register address to read from * @buffer: provided by caller * @len: number of bytes to read * * Read len bytes from TPM register and put them into * buffer (little-endian format, i.e. first byte is put into buffer[0]). * * NOTE: TPM is big-endian for multi-byte values. Multi-byte * values have to be swapped. * * NOTE: Unfortunately we cannot use the combined read/write functions * provided by the i2c core, as the TPM currently does not support the * repeated start condition and has other special requirements. * The i2c_smbus* functions do not work for this chip. * * Return -EIO on error, 0 on success. */ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len) { struct i2c_msg msg1 = { .addr = tpm_dev.client->addr, .len = 1, .buf = &addr }; struct i2c_msg msg2 = { .addr = tpm_dev.client->addr, .flags = I2C_M_RD, .len = len, .buf = buffer }; struct i2c_msg msgs[] = {msg1, msg2}; int rc = 0; int count; unsigned int msglen = len; /* Lock the adapter for the duration of the whole sequence. */ if (!tpm_dev.client->adapter->algo->master_xfer) return -EOPNOTSUPP; i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT); if (tpm_dev.chip_type == SLB9645) { /* use a combined read for newer chips * unfortunately the smbus functions are not suitable due to * the 32 byte limit of the smbus. * retries should usually not be needed, but are kept just to * be on the safe side. 
*/ for (count = 0; count < MAX_COUNT; count++) { rc = __i2c_transfer(tpm_dev.client->adapter, msgs, 2); if (rc > 0) break; /* break here to skip sleep */ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); } } else { /* Expect to send one command message and one data message, but * support looping over each or both if necessary. */ while (len > 0) { /* slb9635 protocol should work in all cases */ for (count = 0; count < MAX_COUNT; count++) { rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1); if (rc > 0) break; /* break here to skip sleep */ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); } if (rc <= 0) goto out; /* After the TPM has successfully received the register * address it needs some time, thus we're sleeping here * again, before retrieving the data */ for (count = 0; count < MAX_COUNT; count++) { if (tpm_dev.adapterlimit) { msglen = min_t(unsigned int, tpm_dev.adapterlimit, len); msg2.len = msglen; } usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); rc = __i2c_transfer(tpm_dev.client->adapter, &msg2, 1); if (rc > 0) { /* Since len is unsigned, make doubly * sure we do not underflow it. */ if (msglen > len) len = 0; else len -= msglen; msg2.buf += msglen; break; } /* If the I2C adapter rejected the request (e.g * when the quirk read_max_len < len) fall back * to a sane minimum value and try again. */ if (rc == -EOPNOTSUPP) tpm_dev.adapterlimit = I2C_SMBUS_BLOCK_MAX; } if (rc <= 0) goto out; } } out: i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT); /* take care of 'guard time' */ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); /* __i2c_transfer returns the number of successfully transferred * messages. * So rc should be greater than 0 here otherwise we have an error. */ if (rc <= 0) return -EIO; return 0; } static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len, unsigned int sleep_low, unsigned int sleep_hi, u8 max_count) { int rc = -EIO; int count; struct i2c_msg msg1 = { .addr = tpm_dev.client->addr, .len = len + 1, .buf = tpm_dev.buf }; if (len > TPM_I2C_INFINEON_BUFSIZE) return -EINVAL; if (!tpm_dev.client->adapter->algo->master_xfer) return -EOPNOTSUPP; i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT); /* prepend the 'register address' to the buffer */ tpm_dev.buf[0] = addr; memcpy(&(tpm_dev.buf[1]), buffer, len); /* * NOTE: We have to use these special mechanisms here and unfortunately * cannot rely on the standard behavior of i2c_transfer. * Even for newer chips the smbus functions are not * suitable due to the 32 byte limit of the smbus. */ for (count = 0; count < max_count; count++) { rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1); if (rc > 0) break; usleep_range(sleep_low, sleep_hi); } i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT); /* take care of 'guard time' */ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); /* __i2c_transfer returns the number of successfully transferred * messages. * So rc should be greater than 0 here otherwise we have an error. */ if (rc <= 0) return -EIO; return 0; } /* * iic_tpm_write() - write to TPM register * @addr: register address to write to * @buffer: containing data to be written * @len: number of bytes to write * * Write len bytes from provided buffer to TPM register (little * endian format, i.e. buffer[0] is written as first byte). * * NOTE: TPM is big-endian for multi-byte values. Multi-byte * values have to be swapped. * * NOTE: use this function instead of the iic_tpm_write_generic function. 
* * Return -EIO on error, 0 on success */ static int iic_tpm_write(u8 addr, u8 *buffer, size_t len) { return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LOW, SLEEP_DURATION_HI, MAX_COUNT); } /* * This function is needed especially for the cleanup situation after * sending TPM_READY * */ static int iic_tpm_write_long(u8 addr, u8 *buffer, size_t len) { return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LONG_LOW, SLEEP_DURATION_LONG_HI, MAX_COUNT_LONG); } enum tis_access { TPM_ACCESS_VALID = 0x80, TPM_ACCESS_ACTIVE_LOCALITY = 0x20, TPM_ACCESS_REQUEST_PENDING = 0x04, TPM_ACCESS_REQUEST_USE = 0x02, }; enum tis_status { TPM_STS_VALID = 0x80, TPM_STS_COMMAND_READY = 0x40, TPM_STS_GO = 0x20, TPM_STS_DATA_AVAIL = 0x10, TPM_STS_DATA_EXPECT = 0x08, }; enum tis_defaults { TIS_SHORT_TIMEOUT = 750, /* ms */ TIS_LONG_TIMEOUT = 2000, /* 2 sec */ }; #define TPM_ACCESS(l) (0x0000 | ((l) << 4)) #define TPM_STS(l) (0x0001 | ((l) << 4)) #define TPM_DATA_FIFO(l) (0x0005 | ((l) << 4)) #define TPM_DID_VID(l) (0x0006 | ((l) << 4)) static bool check_locality(struct tpm_chip *chip, int loc) { u8 buf; int rc; rc = iic_tpm_read(TPM_ACCESS(loc), &buf, 1); if (rc < 0) return false; if ((buf & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) { tpm_dev.locality = loc; return true; } return false; } /* implementation similar to tpm_tis */ static void release_locality(struct tpm_chip *chip, int loc, int force) { u8 buf; if (iic_tpm_read(TPM_ACCESS(loc), &buf, 1) < 0) return; if (force || (buf & (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) == (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) { buf = TPM_ACCESS_ACTIVE_LOCALITY; iic_tpm_write(TPM_ACCESS(loc), &buf, 1); } } static int request_locality(struct tpm_chip *chip, int loc) { unsigned long stop; u8 buf = TPM_ACCESS_REQUEST_USE; if (check_locality(chip, loc)) return loc; iic_tpm_write(TPM_ACCESS(loc), &buf, 1); /* wait for burstcount */ stop = jiffies + chip->timeout_a; do { if (check_locality(chip, loc)) return loc; usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI); } while (time_before(jiffies, stop)); return -ETIME; } static u8 tpm_tis_i2c_status(struct tpm_chip *chip) { /* NOTE: since I2C read may fail, return 0 in this case --> time-out */ u8 buf = 0xFF; u8 i = 0; do { if (iic_tpm_read(TPM_STS(tpm_dev.locality), &buf, 1) < 0) return 0; i++; /* if locallity is set STS should not be 0xFF */ } while ((buf == 0xFF) && i < 10); return buf; } static void tpm_tis_i2c_ready(struct tpm_chip *chip) { /* this causes the current command to be aborted */ u8 buf = TPM_STS_COMMAND_READY; iic_tpm_write_long(TPM_STS(tpm_dev.locality), &buf, 1); } static ssize_t get_burstcount(struct tpm_chip *chip) { unsigned long stop; ssize_t burstcnt; u8 buf[3]; /* wait for burstcount */ /* which timeout value, spec has 2 answers (c & d) */ stop = jiffies + chip->timeout_d; do { /* Note: STS is little endian */ if (iic_tpm_read(TPM_STS(tpm_dev.locality)+1, buf, 3) < 0) burstcnt = 0; else burstcnt = (buf[2] << 16) + (buf[1] << 8) + buf[0]; if (burstcnt) return burstcnt; usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI); } while (time_before(jiffies, stop)); return -EBUSY; } static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, int *status) { unsigned long stop; /* check current status */ *status = tpm_tis_i2c_status(chip); if ((*status != 0xFF) && (*status & mask) == mask) return 0; stop = jiffies + timeout; do { /* since we just checked the status, give the TPM some time */ 
usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI); *status = tpm_tis_i2c_status(chip); if ((*status & mask) == mask) return 0; } while (time_before(jiffies, stop)); return -ETIME; } static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) { size_t size = 0; ssize_t burstcnt; u8 retries = 0; int rc; while (size < count) { burstcnt = get_burstcount(chip); /* burstcnt < 0 = TPM is busy */ if (burstcnt < 0) return burstcnt; /* limit received data to max. left */ if (burstcnt > (count - size)) burstcnt = count - size; rc = iic_tpm_read(TPM_DATA_FIFO(tpm_dev.locality), &(buf[size]), burstcnt); if (rc == 0) size += burstcnt; else if (rc < 0) retries++; /* avoid endless loop in case of broken HW */ if (retries > MAX_COUNT_LONG) return -EIO; } return size; } static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) { int size = 0; int status; u32 expected; if (count < TPM_HEADER_SIZE) { size = -EIO; goto out; } /* read first 10 bytes, including tag, paramsize, and result */ size = recv_data(chip, buf, TPM_HEADER_SIZE); if (size < TPM_HEADER_SIZE) { dev_err(&chip->dev, "Unable to read header\n"); goto out; } expected = be32_to_cpu(*(__be32 *)(buf + 2)); if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) { size = -EIO; goto out; } size += recv_data(chip, &buf[TPM_HEADER_SIZE], expected - TPM_HEADER_SIZE); if (size < expected) { dev_err(&chip->dev, "Unable to read remainder of result\n"); size = -ETIME; goto out; } wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status); if (status & TPM_STS_DATA_AVAIL) { /* retry? */ dev_err(&chip->dev, "Error left over data\n"); size = -EIO; goto out; } out: tpm_tis_i2c_ready(chip); /* The TPM needs some time to clean up here, * so we sleep rather than keeping the bus busy */ usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI); release_locality(chip, tpm_dev.locality, 0); return size; } static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) { int rc, status; ssize_t burstcnt; size_t count = 0; u8 retries = 0; u8 sts = TPM_STS_GO; if (len > TPM_I2C_INFINEON_BUFSIZE) return -E2BIG; if (request_locality(chip, 0) < 0) return -EBUSY; status = tpm_tis_i2c_status(chip); if ((status & TPM_STS_COMMAND_READY) == 0) { tpm_tis_i2c_ready(chip); if (wait_for_stat (chip, TPM_STS_COMMAND_READY, chip->timeout_b, &status) < 0) { rc = -ETIME; goto out_err; } } while (count < len - 1) { burstcnt = get_burstcount(chip); /* burstcnt < 0 = TPM is busy */ if (burstcnt < 0) return burstcnt; if (burstcnt > (len - 1 - count)) burstcnt = len - 1 - count; rc = iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality), &(buf[count]), burstcnt); if (rc == 0) count += burstcnt; else if (rc < 0) retries++; /* avoid endless loop in case of broken HW */ if (retries > MAX_COUNT_LONG) { rc = -EIO; goto out_err; } wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status); if ((status & TPM_STS_DATA_EXPECT) == 0) { rc = -EIO; goto out_err; } } /* write last byte */ iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality), &(buf[count]), 1); wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status); if ((status & TPM_STS_DATA_EXPECT) != 0) { rc = -EIO; goto out_err; } /* go and do it */ iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1); return 0; out_err: tpm_tis_i2c_ready(chip); /* The TPM needs some time to clean up here, * so we sleep rather than keeping the bus busy */ usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI); release_locality(chip, tpm_dev.locality, 0); return rc; } static bool tpm_tis_i2c_req_canceled(struct tpm_chip 
*chip, u8 status) { return (status == TPM_STS_COMMAND_READY); } static const struct tpm_class_ops tpm_tis_i2c = { .flags = TPM_OPS_AUTO_STARTUP, .status = tpm_tis_i2c_status, .recv = tpm_tis_i2c_recv, .send = tpm_tis_i2c_send, .cancel = tpm_tis_i2c_ready, .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_canceled = tpm_tis_i2c_req_canceled, }; static int tpm_tis_i2c_init(struct device *dev) { u32 vendor; int rc = 0; struct tpm_chip *chip; chip = tpmm_chip_alloc(dev, &tpm_tis_i2c); if (IS_ERR(chip)) return PTR_ERR(chip); /* Default timeouts */ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); if (request_locality(chip, 0) != 0) { dev_err(dev, "could not request locality\n"); rc = -ENODEV; goto out_err; } /* read four bytes from DID_VID register */ if (iic_tpm_read(TPM_DID_VID(0), (u8 *)&vendor, 4) < 0) { dev_err(dev, "could not read vendor id\n"); rc = -EIO; goto out_release; } if (vendor == TPM_TIS_I2C_DID_VID_9645) { tpm_dev.chip_type = SLB9645; } else if (vendor == TPM_TIS_I2C_DID_VID_9635) { tpm_dev.chip_type = SLB9635; } else { dev_err(dev, "vendor id did not match! ID was %08x\n", vendor); rc = -ENODEV; goto out_release; } dev_info(dev, "1.2 TPM (device-id 0x%X)\n", vendor >> 16); tpm_dev.chip = chip; return tpm_chip_register(chip); out_release: release_locality(chip, tpm_dev.locality, 1); tpm_dev.client = NULL; out_err: return rc; } static const struct i2c_device_id tpm_tis_i2c_table[] = { {"tpm_i2c_infineon"}, {"slb9635tt"}, {"slb9645tt"}, {}, }; MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table); #ifdef CONFIG_OF static const struct of_device_id tpm_tis_i2c_of_match[] = { {.compatible = "infineon,tpm_i2c_infineon"}, {.compatible = "infineon,slb9635tt"}, {.compatible = "infineon,slb9645tt"}, {}, }; MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match); #endif static SIMPLE_DEV_PM_OPS(tpm_tis_i2c_ops, tpm_pm_suspend, tpm_pm_resume); static int tpm_tis_i2c_probe(struct i2c_client *client) { int rc; struct device *dev = &(client->dev); if (tpm_dev.client != NULL) { dev_err(dev, "This driver only supports one client at a time\n"); return -EBUSY; /* We only support one client */ } if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(dev, "no algorithms associated to the i2c bus\n"); return -ENODEV; } tpm_dev.client = client; rc = tpm_tis_i2c_init(&client->dev); if (rc != 0) { tpm_dev.client = NULL; rc = -ENODEV; } return rc; } static void tpm_tis_i2c_remove(struct i2c_client *client) { struct tpm_chip *chip = tpm_dev.chip; tpm_chip_unregister(chip); release_locality(chip, tpm_dev.locality, 1); tpm_dev.client = NULL; } static struct i2c_driver tpm_tis_i2c_driver = { .id_table = tpm_tis_i2c_table, .probe = tpm_tis_i2c_probe, .remove = tpm_tis_i2c_remove, .driver = { .name = "tpm_i2c_infineon", .pm = &tpm_tis_i2c_ops, .of_match_table = of_match_ptr(tpm_tis_i2c_of_match), }, }; module_i2c_driver(tpm_tis_i2c_driver); MODULE_AUTHOR("Peter Huewe <[email protected]>"); MODULE_DESCRIPTION("TPM TIS I2C Infineon Driver"); MODULE_VERSION("2.2.0"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_i2c_infineon.c
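/*
 * Illustrative board-code sketch, not part of the driver above: one way a
 * non-device-tree platform could instantiate the SLB9645 so that this
 * driver's probe runs against the "slb9645tt" id table entry. Bus number 0
 * and the 0x20 slave address are placeholder assumptions, not values taken
 * from the file; consult the board schematic for the real wiring.
 */
#include <linux/i2c.h>
#include <linux/init.h>

static struct i2c_board_info example_tpm_board_info __initdata = {
	I2C_BOARD_INFO("slb9645tt", 0x20),
};

static int __init example_register_tpm(void)
{
	/* matches the "slb9645tt" entry in tpm_tis_i2c_table above */
	return i2c_register_board_info(0, &example_tpm_board_info, 1);
}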
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2004 IBM Corporation * Copyright (C) 2014 Intel Corporation * * Authors: * Leendert van Doorn <[email protected]> * Dave Safford <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org */ #include <linux/poll.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/freezer.h> #include <linux/tpm_eventlog.h> #include "tpm.h" #define TPM_MAX_ORDINAL 243 /* * Array with one entry per ordinal defining the maximum amount * of time the chip could take to return the result. The ordinal * designation of short, medium or long is defined in a table in * TCG Specification TPM Main Part 2 TPM Structures Section 17. The * values of the SHORT, MEDIUM, and LONG durations are retrieved * from the chip during initialization with a call to tpm_get_timeouts. */ static const u8 tpm1_ordinal_duration[TPM_MAX_ORDINAL] = { TPM_UNDEFINED, /* 0 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 5 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 10 */ TPM_SHORT, TPM_MEDIUM, TPM_LONG, TPM_LONG, TPM_MEDIUM, /* 15 */ TPM_SHORT, TPM_SHORT, TPM_MEDIUM, TPM_LONG, TPM_SHORT, /* 20 */ TPM_SHORT, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, TPM_SHORT, /* 25 */ TPM_SHORT, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_MEDIUM, /* 30 */ TPM_LONG, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 35 */ TPM_MEDIUM, TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 40 */ TPM_LONG, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 45 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_LONG, TPM_MEDIUM, /* 50 */ TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 55 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 60 */ TPM_MEDIUM, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_MEDIUM, /* 65 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 70 */ TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 75 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_LONG, /* 80 */ TPM_UNDEFINED, TPM_MEDIUM, TPM_LONG, TPM_SHORT, TPM_UNDEFINED, /* 85 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 90 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, /* 95 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 100 */ TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 105 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 110 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 115 */ TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_LONG, /* 120 */ TPM_LONG, TPM_MEDIUM, TPM_UNDEFINED, TPM_SHORT, TPM_SHORT, /* 125 */ TPM_SHORT, TPM_LONG, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 130 */ TPM_MEDIUM, TPM_UNDEFINED, TPM_SHORT, TPM_MEDIUM, TPM_UNDEFINED, /* 135 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 140 */ TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 145 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 150 */ TPM_MEDIUM, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, /* 155 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 160 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, /* 165 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, 
TPM_UNDEFINED, TPM_LONG, /* 170 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 175 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 180 */ TPM_SHORT, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, /* 185 */ TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 190 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 195 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 200 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, TPM_SHORT, /* 205 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_MEDIUM, /* 210 */ TPM_UNDEFINED, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, TPM_UNDEFINED, /* 215 */ TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, TPM_SHORT, /* 220 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, /* 225 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 230 */ TPM_LONG, TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 235 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 240 */ TPM_UNDEFINED, TPM_MEDIUM, }; /** * tpm1_calc_ordinal_duration() - calculate the maximum command duration * @chip: TPM chip to use. * @ordinal: TPM command ordinal. * * The function returns the maximum amount of time the chip could take * to return the result for a particular ordinal in jiffies. * * Return: A maximal duration time for an ordinal in jiffies. */ unsigned long tpm1_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) { int duration_idx = TPM_UNDEFINED; int duration = 0; /* * We only have a duration table for protected commands, where the upper * 16 bits are 0. For the few other ordinals the fallback will be used. */ if (ordinal < TPM_MAX_ORDINAL) duration_idx = tpm1_ordinal_duration[ordinal]; if (duration_idx != TPM_UNDEFINED) duration = chip->duration[duration_idx]; if (duration <= 0) return 2 * 60 * HZ; else return duration; } #define TPM_ORD_STARTUP 153 #define TPM_ST_CLEAR 1 /** * tpm1_startup() - turn on the TPM * @chip: TPM chip to use * * Normally the firmware should start the TPM. This function is provided as a * workaround if this does not happen. A legal case for this could be for * example when a TPM emulator is used. 
* * Return: same as tpm_transmit_cmd() */ static int tpm1_startup(struct tpm_chip *chip) { struct tpm_buf buf; int rc; dev_info(&chip->dev, "starting up the TPM manually\n"); rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_STARTUP); if (rc < 0) return rc; tpm_buf_append_u16(&buf, TPM_ST_CLEAR); rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to start the TPM"); tpm_buf_destroy(&buf); return rc; } int tpm1_get_timeouts(struct tpm_chip *chip) { cap_t cap; unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4]; unsigned long durations[3]; ssize_t rc; rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, NULL, sizeof(cap.timeout)); if (rc == TPM_ERR_INVALID_POSTINIT) { if (tpm1_startup(chip)) return rc; rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, "attempting to determine the timeouts", sizeof(cap.timeout)); } if (rc) { dev_err(&chip->dev, "A TPM error (%zd) occurred attempting to determine the timeouts\n", rc); return rc; } timeout_old[0] = jiffies_to_usecs(chip->timeout_a); timeout_old[1] = jiffies_to_usecs(chip->timeout_b); timeout_old[2] = jiffies_to_usecs(chip->timeout_c); timeout_old[3] = jiffies_to_usecs(chip->timeout_d); timeout_chip[0] = be32_to_cpu(cap.timeout.a); timeout_chip[1] = be32_to_cpu(cap.timeout.b); timeout_chip[2] = be32_to_cpu(cap.timeout.c); timeout_chip[3] = be32_to_cpu(cap.timeout.d); memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff)); /* * Provide ability for vendor overrides of timeout values in case * of misreporting. */ if (chip->ops->update_timeouts) chip->ops->update_timeouts(chip, timeout_eff); if (!chip->timeout_adjusted) { /* Restore default if chip reported 0 */ unsigned int i; for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) { if (timeout_eff[i]) continue; timeout_eff[i] = timeout_old[i]; chip->timeout_adjusted = true; } if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) { /* timeouts in msec rather usec */ for (i = 0; i != ARRAY_SIZE(timeout_eff); i++) timeout_eff[i] *= 1000; chip->timeout_adjusted = true; } } /* Report adjusted timeouts */ if (chip->timeout_adjusted) { dev_info(&chip->dev, HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n", timeout_chip[0], timeout_eff[0], timeout_chip[1], timeout_eff[1], timeout_chip[2], timeout_eff[2], timeout_chip[3], timeout_eff[3]); } chip->timeout_a = usecs_to_jiffies(timeout_eff[0]); chip->timeout_b = usecs_to_jiffies(timeout_eff[1]); chip->timeout_c = usecs_to_jiffies(timeout_eff[2]); chip->timeout_d = usecs_to_jiffies(timeout_eff[3]); rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap, "attempting to determine the durations", sizeof(cap.duration)); if (rc) return rc; chip->duration[TPM_SHORT] = usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_short)); chip->duration[TPM_MEDIUM] = usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_medium)); chip->duration[TPM_LONG] = usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long)); chip->duration[TPM_LONG_LONG] = 0; /* not used under 1.2 */ /* * Provide the ability for vendor overrides of duration values in case * of misreporting. */ if (chip->ops->update_durations) chip->ops->update_durations(chip, durations); if (chip->duration_adjusted) { dev_info(&chip->dev, HW_ERR "Adjusting reported durations."); chip->duration[TPM_SHORT] = durations[0]; chip->duration[TPM_MEDIUM] = durations[1]; chip->duration[TPM_LONG] = durations[2]; } /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above * value wrong and apparently reports msecs rather than usecs. 
So we * fix up the resulting too-small TPM_SHORT value to make things work. * We also scale the TPM_MEDIUM and -_LONG values by 1000. */ if (chip->duration[TPM_SHORT] < (HZ / 100)) { chip->duration[TPM_SHORT] = HZ; chip->duration[TPM_MEDIUM] *= 1000; chip->duration[TPM_LONG] *= 1000; chip->duration_adjusted = true; dev_info(&chip->dev, "Adjusting TPM timeout parameters."); } chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS; return 0; } #define TPM_ORD_PCR_EXTEND 20 int tpm1_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash, const char *log_msg) { struct tpm_buf buf; int rc; rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCR_EXTEND); if (rc) return rc; tpm_buf_append_u32(&buf, pcr_idx); tpm_buf_append(&buf, hash, TPM_DIGEST_SIZE); rc = tpm_transmit_cmd(chip, &buf, TPM_DIGEST_SIZE, log_msg); tpm_buf_destroy(&buf); return rc; } #define TPM_ORD_GET_CAP 101 ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, const char *desc, size_t min_cap_length) { struct tpm_buf buf; int rc; rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_CAP); if (rc) return rc; if (subcap_id == TPM_CAP_VERSION_1_1 || subcap_id == TPM_CAP_VERSION_1_2) { tpm_buf_append_u32(&buf, subcap_id); tpm_buf_append_u32(&buf, 0); } else { if (subcap_id == TPM_CAP_FLAG_PERM || subcap_id == TPM_CAP_FLAG_VOL) tpm_buf_append_u32(&buf, TPM_CAP_FLAG); else tpm_buf_append_u32(&buf, TPM_CAP_PROP); tpm_buf_append_u32(&buf, 4); tpm_buf_append_u32(&buf, subcap_id); } rc = tpm_transmit_cmd(chip, &buf, min_cap_length, desc); if (!rc) *cap = *(cap_t *)&buf.data[TPM_HEADER_SIZE + 4]; tpm_buf_destroy(&buf); return rc; } EXPORT_SYMBOL_GPL(tpm1_getcap); #define TPM_ORD_GET_RANDOM 70 struct tpm1_get_random_out { __be32 rng_data_len; u8 rng_data[TPM_MAX_RNG_DATA]; } __packed; /** * tpm1_get_random() - get random bytes from the TPM's RNG * @chip: a &struct tpm_chip instance * @dest: destination buffer for the random bytes * @max: the maximum number of bytes to write to @dest * * Return: * * number of bytes read * * -errno (positive TPM return codes are masked to -EIO) */ int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max) { struct tpm1_get_random_out *out; u32 num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA); struct tpm_buf buf; u32 total = 0; int retries = 5; u32 recd; int rc; rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_RANDOM); if (rc) return rc; do { tpm_buf_append_u32(&buf, num_bytes); rc = tpm_transmit_cmd(chip, &buf, sizeof(out->rng_data_len), "attempting get random"); if (rc) { if (rc > 0) rc = -EIO; goto out; } out = (struct tpm1_get_random_out *)&buf.data[TPM_HEADER_SIZE]; recd = be32_to_cpu(out->rng_data_len); if (recd > num_bytes) { rc = -EFAULT; goto out; } if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + sizeof(out->rng_data_len) + recd) { rc = -EFAULT; goto out; } memcpy(dest, out->rng_data, recd); dest += recd; total += recd; num_bytes -= recd; tpm_buf_reset(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_RANDOM); } while (retries-- && total < max); rc = total ? 
(int)total : -EIO; out: tpm_buf_destroy(&buf); return rc; } #define TPM_ORD_PCRREAD 21 int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) { struct tpm_buf buf; int rc; rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCRREAD); if (rc) return rc; tpm_buf_append_u32(&buf, pcr_idx); rc = tpm_transmit_cmd(chip, &buf, TPM_DIGEST_SIZE, "attempting to read a pcr value"); if (rc) goto out; if (tpm_buf_length(&buf) < TPM_DIGEST_SIZE) { rc = -EFAULT; goto out; } memcpy(res_buf, &buf.data[TPM_HEADER_SIZE], TPM_DIGEST_SIZE); out: tpm_buf_destroy(&buf); return rc; } #define TPM_ORD_CONTINUE_SELFTEST 83 /** * tpm1_continue_selftest() - run TPM's selftest * @chip: TPM chip to use * * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing * a TPM error code. */ static int tpm1_continue_selftest(struct tpm_chip *chip) { struct tpm_buf buf; int rc; rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_CONTINUE_SELFTEST); if (rc) return rc; rc = tpm_transmit_cmd(chip, &buf, 0, "continue selftest"); tpm_buf_destroy(&buf); return rc; } /** * tpm1_do_selftest - have the TPM continue its selftest and wait until it * can receive further commands * @chip: TPM chip to use * * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing * a TPM error code. */ int tpm1_do_selftest(struct tpm_chip *chip) { int rc; unsigned int loops; unsigned int delay_msec = 100; unsigned long duration; u8 dummy[TPM_DIGEST_SIZE]; duration = tpm1_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST); loops = jiffies_to_msecs(duration) / delay_msec; rc = tpm1_continue_selftest(chip); if (rc == TPM_ERR_INVALID_POSTINIT) { chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED; dev_info(&chip->dev, "TPM not ready (%d)\n", rc); } /* This may fail if there was no TPM driver during a suspend/resume * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST) */ if (rc) return rc; do { /* Attempt to read a PCR value */ rc = tpm1_pcr_read(chip, 0, dummy); /* Some buggy TPMs will not respond to tpm_tis_ready() for * around 300ms while the self test is ongoing, keep trying * until the self test duration expires. */ if (rc == -ETIME) { dev_info(&chip->dev, HW_ERR "TPM command timed out during continue self test"); tpm_msleep(delay_msec); continue; } if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { dev_info(&chip->dev, "TPM is disabled/deactivated (0x%X)\n", rc); /* TPM is disabled and/or deactivated; driver can * proceed and TPM does handle commands for * suspend/resume correctly */ return 0; } if (rc != TPM_WARN_DOING_SELFTEST) return rc; tpm_msleep(delay_msec); } while (--loops > 0); return rc; } EXPORT_SYMBOL_GPL(tpm1_do_selftest); /** * tpm1_auto_startup - Perform the standard automatic TPM initialization * sequence * @chip: TPM chip to use * * Returns 0 on success, < 0 in case of fatal error. */ int tpm1_auto_startup(struct tpm_chip *chip) { int rc; rc = tpm1_get_timeouts(chip); if (rc) goto out; rc = tpm1_do_selftest(chip); if (rc == TPM_ERR_FAILEDSELFTEST) { dev_warn(&chip->dev, "TPM self test failed, switching to the firmware upgrade mode\n"); /* A TPM in this state possibly allows or needs a firmware upgrade */ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE; return 0; } else if (rc) { dev_err(&chip->dev, "TPM self test failed\n"); goto out; } return rc; out: if (rc > 0) rc = -ENODEV; return rc; } #define TPM_ORD_SAVESTATE 152 /** * tpm1_pm_suspend() - pm suspend handler * @chip: TPM chip to use. * @tpm_suspend_pcr: flush pcr for buggy TPM chips. 
* * The functions saves the TPM state to be restored on resume. * * Return: * * 0 on success, * * < 0 on error. */ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr) { u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 }; struct tpm_buf buf; unsigned int try; int rc; /* for buggy tpm, flush pcrs with extend to selected dummy */ if (tpm_suspend_pcr) rc = tpm1_pcr_extend(chip, tpm_suspend_pcr, dummy_hash, "extending dummy pcr before suspend"); rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_SAVESTATE); if (rc) return rc; /* now do the actual savestate */ for (try = 0; try < TPM_RETRY; try++) { rc = tpm_transmit_cmd(chip, &buf, 0, NULL); /* * If the TPM indicates that it is too busy to respond to * this command then retry before giving up. It can take * several seconds for this TPM to be ready. * * This can happen if the TPM has already been sent the * SaveState command before the driver has loaded. TCG 1.2 * specification states that any communication after SaveState * may cause the TPM to invalidate previously saved state. */ if (rc != TPM_WARN_RETRY) break; tpm_msleep(TPM_TIMEOUT_RETRY); tpm_buf_reset(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_SAVESTATE); } if (rc) dev_err(&chip->dev, "Error (%d) sending savestate before suspend\n", rc); else if (try > 0) dev_warn(&chip->dev, "TPM savestate took %dms\n", try * TPM_TIMEOUT_RETRY); tpm_buf_destroy(&buf); return rc; } /** * tpm1_get_pcr_allocation() - initialize the allocated bank * @chip: TPM chip to use. * * The function initializes the SHA1 allocated bank to extend PCR * * Return: * * 0 on success, * * < 0 on error. */ int tpm1_get_pcr_allocation(struct tpm_chip *chip) { chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks), GFP_KERNEL); if (!chip->allocated_banks) return -ENOMEM; chip->allocated_banks[0].alg_id = TPM_ALG_SHA1; chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1]; chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1; chip->nr_allocated_banks = 1; return 0; }
linux-master
drivers/char/tpm/tpm1-cmd.c
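/*
 * Minimal in-kernel consumer sketch (hypothetical module, not from the file
 * above): pulling entropy through the exported tpm_get_random(), which for
 * TPM 1.2 parts ends up in tpm1_get_random() as implemented in tpm1-cmd.c.
 * Passing NULL selects the default chip via tpm_default_chip().
 */
#include <linux/module.h>
#include <linux/tpm.h>

static int __init example_entropy_init(void)
{
	u8 bytes[16];
	int n;

	n = tpm_get_random(NULL, bytes, sizeof(bytes));
	if (n < 0)
		return n; /* no TPM present or the command failed */
	pr_info("read %d random bytes from the TPM\n", n);
	return 0;
}
module_init(example_entropy_init);
MODULE_LICENSE("GPL");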
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 [email protected] */ #include <linux/slab.h> #include "tpm-dev.h" struct tpmrm_priv { struct file_priv priv; struct tpm_space space; }; static int tpmrm_open(struct inode *inode, struct file *file) { struct tpm_chip *chip; struct tpmrm_priv *priv; int rc; chip = container_of(inode->i_cdev, struct tpm_chip, cdevs); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) return -ENOMEM; rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE); if (rc) { kfree(priv); return -ENOMEM; } tpm_common_open(file, chip, &priv->priv, &priv->space); return 0; } static int tpmrm_release(struct inode *inode, struct file *file) { struct file_priv *fpriv = file->private_data; struct tpmrm_priv *priv = container_of(fpriv, struct tpmrm_priv, priv); tpm_common_release(file, fpriv); tpm2_del_space(fpriv->chip, &priv->space); kfree(priv); return 0; } const struct file_operations tpmrm_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .open = tpmrm_open, .read = tpm_common_read, .write = tpm_common_write, .poll = tpm_common_poll, .release = tpmrm_release, };
linux-master
drivers/char/tpm/tpmrm-dev.c
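/*
 * Hedged user-space sketch of the async path that tpmrm-dev.c inherits from
 * tpm-dev-common.c: open with O_NONBLOCK, queue a command with write(), then
 * poll() until POLLIN signals that the worker has queued the response. The
 * caller supplies a marshalled TPM command (see the GetRandom example shown
 * earlier); error handling is deliberately trimmed, and the helper name is
 * illustrative only.
 */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int tpm_send_async(const unsigned char *cmd, size_t len,
			  unsigned char *rsp, size_t rsp_len)
{
	struct pollfd pfd;
	int fd = open("/dev/tpmrm0", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return -1;
	if (write(fd, cmd, len) != (ssize_t)len) /* queues the async work */
		goto err;
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 2000) <= 0) /* wait up to 2s for the response */
		goto err;
	if (read(fd, rsp, rsp_len) <= 0)
		goto err;
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}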
// SPDX-License-Identifier: GPL-2.0-or-later /****************************************************************************** * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501/NPCT6XX, * based on the TCG TPM Interface Spec version 1.2. * Specifications at www.trustedcomputinggroup.org * * Copyright (C) 2011, Nuvoton Technology Corporation. * Dan Morav <[email protected]> * Copyright (C) 2013, Obsidian Research Corp. * Jason Gunthorpe <[email protected]> * * Nuvoton contact information: [email protected] *****************************************************************************/ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/i2c.h> #include <linux/of_device.h> #include "tpm.h" /* I2C interface offsets */ #define TPM_STS 0x00 #define TPM_BURST_COUNT 0x01 #define TPM_DATA_FIFO_W 0x20 #define TPM_DATA_FIFO_R 0x40 #define TPM_VID_DID_RID 0x60 #define TPM_I2C_RETRIES 5 /* * I2C bus device maximum buffer size w/o counting I2C address or command * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data */ #define TPM_I2C_MAX_BUF_SIZE 32 #define TPM_I2C_RETRY_COUNT 32 #define TPM_I2C_BUS_DELAY 1000 /* usec */ #define TPM_I2C_RETRY_DELAY_SHORT (2 * 1000) /* usec */ #define TPM_I2C_RETRY_DELAY_LONG (10 * 1000) /* usec */ #define TPM_I2C_DELAY_RANGE 300 /* usec */ #define OF_IS_TPM2 ((void *)1) #define I2C_IS_TPM2 1 struct priv_data { int irq; unsigned int intrs; wait_queue_head_t read_queue; }; static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size, u8 *data) { s32 status; status = i2c_smbus_read_i2c_block_data(client, offset, size, data); dev_dbg(&client->dev, "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__, offset, size, (int)size, data, status); return status; } static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size, u8 *data) { s32 status; status = i2c_smbus_write_i2c_block_data(client, offset, size, data); dev_dbg(&client->dev, "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__, offset, size, (int)size, data, status); return status; } #define TPM_STS_VALID 0x80 #define TPM_STS_COMMAND_READY 0x40 #define TPM_STS_GO 0x20 #define TPM_STS_DATA_AVAIL 0x10 #define TPM_STS_EXPECT 0x08 #define TPM_STS_RESPONSE_RETRY 0x02 #define TPM_STS_ERR_VAL 0x07 /* bit2...bit0 reads always 0 */ #define TPM_I2C_SHORT_TIMEOUT 750 /* ms */ #define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */ /* read TPM_STS register */ static u8 i2c_nuvoton_read_status(struct tpm_chip *chip) { struct i2c_client *client = to_i2c_client(chip->dev.parent); s32 status; u8 data; status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data); if (status <= 0) { dev_err(&chip->dev, "%s() error return %d\n", __func__, status); data = TPM_STS_ERR_VAL; } return data; } /* write byte to TPM_STS register */ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data) { s32 status; int i; /* this causes the current command to be aborted */ for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) { status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data); if (status < 0) usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY + TPM_I2C_DELAY_RANGE); } return status; } /* write commandReady to TPM_STS register */ static void i2c_nuvoton_ready(struct tpm_chip *chip) { struct i2c_client *client = to_i2c_client(chip->dev.parent); s32 status; /* this causes the current command to be aborted */ status = i2c_nuvoton_write_status(client, 
TPM_STS_COMMAND_READY); if (status < 0) dev_err(&chip->dev, "%s() fail to write TPM_STS.commandReady\n", __func__); } /* read burstCount field from TPM_STS register * return -1 on fail to read */ static int i2c_nuvoton_get_burstcount(struct i2c_client *client, struct tpm_chip *chip) { unsigned long stop = jiffies + chip->timeout_d; s32 status; int burst_count = -1; u8 data; /* wait for burstcount to be non-zero */ do { /* in I2C burstCount is 1 byte */ status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1, &data); if (status > 0 && data > 0) { burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data); break; } usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY + TPM_I2C_DELAY_RANGE); } while (time_before(jiffies, stop)); return burst_count; } /* * WPCT301/NPCT501/NPCT6XX SINT# supports only dataAvail * any call to this function which is not waiting for dataAvail will * set queue to NULL to avoid waiting for interrupt */ static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value) { u8 status = i2c_nuvoton_read_status(chip); return (status != TPM_STS_ERR_VAL) && ((status & mask) == value); } static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, u32 timeout, wait_queue_head_t *queue) { if ((chip->flags & TPM_CHIP_FLAG_IRQ) && queue) { s32 rc; struct priv_data *priv = dev_get_drvdata(&chip->dev); unsigned int cur_intrs = priv->intrs; enable_irq(priv->irq); rc = wait_event_interruptible_timeout(*queue, cur_intrs != priv->intrs, timeout); if (rc > 0) return 0; /* At this point we know that the SINT pin is asserted, so we * do not need to do i2c_nuvoton_check_status */ } else { unsigned long ten_msec, stop; bool status_valid; /* check current status */ status_valid = i2c_nuvoton_check_status(chip, mask, value); if (status_valid) return 0; /* use polling to wait for the event */ ten_msec = jiffies + usecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG); stop = jiffies + timeout; do { if (time_before(jiffies, ten_msec)) usleep_range(TPM_I2C_RETRY_DELAY_SHORT, TPM_I2C_RETRY_DELAY_SHORT + TPM_I2C_DELAY_RANGE); else usleep_range(TPM_I2C_RETRY_DELAY_LONG, TPM_I2C_RETRY_DELAY_LONG + TPM_I2C_DELAY_RANGE); status_valid = i2c_nuvoton_check_status(chip, mask, value); if (status_valid) return 0; } while (time_before(jiffies, stop)); } dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask, value); return -ETIMEDOUT; } /* wait for dataAvail field to be set in the TPM_STS register */ static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout, wait_queue_head_t *queue) { return i2c_nuvoton_wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, TPM_STS_DATA_AVAIL | TPM_STS_VALID, timeout, queue); } /* Read @count bytes into @buf from TPM_RD_FIFO register */ static int i2c_nuvoton_recv_data(struct i2c_client *client, struct tpm_chip *chip, u8 *buf, size_t count) { struct priv_data *priv = dev_get_drvdata(&chip->dev); s32 rc; int burst_count, bytes2read, size = 0; while (size < count && i2c_nuvoton_wait_for_data_avail(chip, chip->timeout_c, &priv->read_queue) == 0) { burst_count = i2c_nuvoton_get_burstcount(client, chip); if (burst_count < 0) { dev_err(&chip->dev, "%s() fail to read burstCount=%d\n", __func__, burst_count); return -EIO; } bytes2read = min_t(size_t, burst_count, count - size); rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R, bytes2read, &buf[size]); if (rc < 0) { dev_err(&chip->dev, "%s() fail on i2c_nuvoton_read_buf()=%d\n", __func__, rc); return -EIO; } dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read); size += bytes2read; } return 
size; } /* Read TPM command results */ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct priv_data *priv = dev_get_drvdata(&chip->dev); struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); s32 rc; int status; int burst_count; int retries; int size = 0; u32 expected; if (count < TPM_HEADER_SIZE) { i2c_nuvoton_ready(chip); /* return to idle */ dev_err(dev, "%s() count < header size\n", __func__); return -EIO; } for (retries = 0; retries < TPM_I2C_RETRIES; retries++) { if (retries > 0) { /* if this is not the first trial, set responseRetry */ i2c_nuvoton_write_status(client, TPM_STS_RESPONSE_RETRY); } /* * read first available (> 10 bytes), including: * tag, paramsize, and result */ status = i2c_nuvoton_wait_for_data_avail( chip, chip->timeout_c, &priv->read_queue); if (status != 0) { dev_err(dev, "%s() timeout on dataAvail\n", __func__); size = -ETIMEDOUT; continue; } burst_count = i2c_nuvoton_get_burstcount(client, chip); if (burst_count < 0) { dev_err(dev, "%s() fail to get burstCount\n", __func__); size = -EIO; continue; } size = i2c_nuvoton_recv_data(client, chip, buf, burst_count); if (size < TPM_HEADER_SIZE) { dev_err(dev, "%s() fail to read header\n", __func__); size = -EIO; continue; } /* * convert number of expected bytes field from big endian 32 bit * to machine native */ expected = be32_to_cpu(*(__be32 *) (buf + 2)); if (expected > count || expected < size) { dev_err(dev, "%s() expected > count\n", __func__); size = -EIO; continue; } rc = i2c_nuvoton_recv_data(client, chip, &buf[size], expected - size); size += rc; if (rc < 0 || size < expected) { dev_err(dev, "%s() fail to read remainder of result\n", __func__); size = -EIO; continue; } if (i2c_nuvoton_wait_for_stat( chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL, TPM_STS_VALID, chip->timeout_c, NULL)) { dev_err(dev, "%s() error left over data\n", __func__); size = -ETIMEDOUT; continue; } break; } i2c_nuvoton_ready(chip); dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size); return size; } /* * Send TPM command. 
* * If interrupts are used (signaled by an irq set in the vendor structure) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) { struct priv_data *priv = dev_get_drvdata(&chip->dev); struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); u32 ordinal; unsigned long duration; size_t count = 0; int burst_count, bytes2write, retries, rc = -EIO; for (retries = 0; retries < TPM_RETRY; retries++) { i2c_nuvoton_ready(chip); if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY, TPM_STS_COMMAND_READY, chip->timeout_b, NULL)) { dev_err(dev, "%s() timeout on commandReady\n", __func__); rc = -EIO; continue; } rc = 0; while (count < len - 1) { burst_count = i2c_nuvoton_get_burstcount(client, chip); if (burst_count < 0) { dev_err(dev, "%s() fail get burstCount\n", __func__); rc = -EIO; break; } bytes2write = min_t(size_t, burst_count, len - 1 - count); rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, bytes2write, &buf[count]); if (rc < 0) { dev_err(dev, "%s() fail i2cWriteBuf\n", __func__); break; } dev_dbg(dev, "%s(%d):", __func__, bytes2write); count += bytes2write; rc = i2c_nuvoton_wait_for_stat(chip, TPM_STS_VALID | TPM_STS_EXPECT, TPM_STS_VALID | TPM_STS_EXPECT, chip->timeout_c, NULL); if (rc < 0) { dev_err(dev, "%s() timeout on Expect\n", __func__); rc = -ETIMEDOUT; break; } } if (rc < 0) continue; /* write last byte */ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1, &buf[count]); if (rc < 0) { dev_err(dev, "%s() fail to write last byte\n", __func__); rc = -EIO; continue; } dev_dbg(dev, "%s(last): %02x", __func__, buf[count]); rc = i2c_nuvoton_wait_for_stat(chip, TPM_STS_VALID | TPM_STS_EXPECT, TPM_STS_VALID, chip->timeout_c, NULL); if (rc) { dev_err(dev, "%s() timeout on Expect to clear\n", __func__); rc = -ETIMEDOUT; continue; } break; } if (rc < 0) { /* retries == TPM_RETRY */ i2c_nuvoton_ready(chip); return rc; } /* execute the TPM command */ rc = i2c_nuvoton_write_status(client, TPM_STS_GO); if (rc < 0) { dev_err(dev, "%s() fail to write Go\n", __func__); i2c_nuvoton_ready(chip); return rc; } ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); duration = tpm_calc_ordinal_duration(chip, ordinal); rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue); if (rc) { dev_err(dev, "%s() timeout command duration %ld\n", __func__, duration); i2c_nuvoton_ready(chip); return rc; } dev_dbg(dev, "%s() -> %zd\n", __func__, len); return 0; } static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status) { return (status == TPM_STS_COMMAND_READY); } static const struct tpm_class_ops tpm_i2c = { .flags = TPM_OPS_AUTO_STARTUP, .status = i2c_nuvoton_read_status, .recv = i2c_nuvoton_recv, .send = i2c_nuvoton_send, .cancel = i2c_nuvoton_ready, .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_canceled = i2c_nuvoton_req_canceled, }; /* The only purpose for the handler is to signal to any waiting threads that * the interrupt is currently being asserted. The driver does not do any * processing triggered by interrupts, and the chip provides no way to mask at * the source (plus that would be slow over I2C). Run the IRQ as a one-shot, * this means it cannot be shared. 
*/ static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id) { struct tpm_chip *chip = dev_id; struct priv_data *priv = dev_get_drvdata(&chip->dev); priv->intrs++; wake_up(&priv->read_queue); disable_irq_nosync(priv->irq); return IRQ_HANDLED; } static int get_vid(struct i2c_client *client, u32 *res) { static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe }; u32 temp; s32 rc; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp); if (rc < 0) return rc; /* check WPCT301 values - ignore RID */ if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) { /* * f/w rev 2.81 has an issue where the VID_DID_RID is not * reporting the right value, so give it another chance at * offset 0x20 (FIFO_W). */ rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4, (u8 *) (&temp)); if (rc < 0) return rc; /* check WPCT301 values - ignore RID */ if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) return -ENODEV; } *res = temp; return 0; } static int i2c_nuvoton_probe(struct i2c_client *client) { const struct i2c_device_id *id = i2c_client_get_device_id(client); int rc; struct tpm_chip *chip; struct device *dev = &client->dev; struct priv_data *priv; u32 vid = 0; rc = get_vid(client, &vid); if (rc) return rc; dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid, (u8) (vid >> 16), (u8) (vid >> 24)); chip = tpmm_chip_alloc(dev, &tpm_i2c); if (IS_ERR(chip)) return PTR_ERR(chip); priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); if (!priv) return -ENOMEM; if (dev->of_node) { const struct of_device_id *of_id; of_id = of_match_device(dev->driver->of_match_table, dev); if (of_id && of_id->data == OF_IS_TPM2) chip->flags |= TPM_CHIP_FLAG_TPM2; } else if (id->driver_data == I2C_IS_TPM2) chip->flags |= TPM_CHIP_FLAG_TPM2; init_waitqueue_head(&priv->read_queue); /* Default timeouts */ chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); dev_set_drvdata(&chip->dev, priv); /* * I2C intfcaps (interrupt capabilities) in the chip are hard coded to: * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT * The IRQ should be set in the i2c_board_info (which is done * automatically in of_i2c_register_devices for device tree users). */ priv->irq = client->irq; if (client->irq) { dev_dbg(dev, "%s() priv->irq\n", __func__); rc = devm_request_irq(dev, client->irq, i2c_nuvoton_int_handler, IRQF_TRIGGER_LOW, dev_name(&chip->dev), chip); if (rc) { dev_err(dev, "%s() Unable to request irq: %d for use\n", __func__, priv->irq); priv->irq = 0; } else { chip->flags |= TPM_CHIP_FLAG_IRQ; /* Clear any pending interrupt */ i2c_nuvoton_ready(chip); /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */ rc = i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY, TPM_STS_COMMAND_READY, chip->timeout_b, NULL); if (rc == 0) { /* * TIS is in ready state * write dummy byte to enter reception state * TPM_DATA_FIFO_W <- rc (0) */ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1, (u8 *) (&rc)); if (rc < 0) return rc; /* TPM_STS <- 0x40 (commandReady) */ i2c_nuvoton_ready(chip); } else { /* * timeout_b reached - command was * aborted.
TIS should now be in idle state - * only TPM_STS_VALID should be set */ if (i2c_nuvoton_read_status(chip) != TPM_STS_VALID) return -EIO; } } } return tpm_chip_register(chip); } static void i2c_nuvoton_remove(struct i2c_client *client) { struct tpm_chip *chip = i2c_get_clientdata(client); tpm_chip_unregister(chip); } static const struct i2c_device_id i2c_nuvoton_id[] = { {"tpm_i2c_nuvoton"}, {"tpm2_i2c_nuvoton", .driver_data = I2C_IS_TPM2}, {} }; MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id); #ifdef CONFIG_OF static const struct of_device_id i2c_nuvoton_of_match[] = { {.compatible = "nuvoton,npct501"}, {.compatible = "winbond,wpct301"}, {.compatible = "nuvoton,npct601", .data = OF_IS_TPM2}, {}, }; MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match); #endif static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume); static struct i2c_driver i2c_nuvoton_driver = { .id_table = i2c_nuvoton_id, .probe = i2c_nuvoton_probe, .remove = i2c_nuvoton_remove, .driver = { .name = "tpm_i2c_nuvoton", .pm = &i2c_nuvoton_pm_ops, .of_match_table = of_match_ptr(i2c_nuvoton_of_match), }, }; module_i2c_driver(i2c_nuvoton_driver); MODULE_AUTHOR("Dan Morav ([email protected])"); MODULE_DESCRIPTION("Nuvoton TPM I2C Driver"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_i2c_nuvoton.c
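The receive path in the Nuvoton driver above hinges on the 4-byte big-endian length field at offset 2 of every TPM response header: the driver reads a first burst, decodes that field, and rejects responses that do not fit the caller's buffer or are shorter than what was already read. The following is a minimal host-side sketch of that header check, not kernel code; the helper names and the sample response bytes are invented for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TPM_HEADER_SIZE 10	/* tag(2) + length(4) + return code(4) */

/* Decode the big-endian 32-bit total length stored at buf[2..5]. */
static uint32_t tpm_expected_len(const uint8_t *buf)
{
	return ((uint32_t)buf[2] << 24) | ((uint32_t)buf[3] << 16) |
	       ((uint32_t)buf[4] << 8) | (uint32_t)buf[5];
}

/* Return 0 when the header is plausible for a caller buffer of @count bytes. */
static int tpm_check_header(const uint8_t *buf, size_t already_read,
			    size_t count)
{
	uint32_t expected;

	if (already_read < TPM_HEADER_SIZE)
		return -1;	/* the header itself is incomplete */
	expected = tpm_expected_len(buf);
	if (expected > count || expected < already_read)
		return -1;	/* mirrors the driver's "expected > count" check */
	return 0;
}

int main(void)
{
	/* 80 01 | 00 00 00 0a | 00 00 00 00: a 10-byte success response */
	const uint8_t rsp[] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0a,
				0x00, 0x00, 0x00, 0x00 };

	printf("expected=%u ok=%d\n", (unsigned)tpm_expected_len(rsp),
	       tpm_check_header(rsp, sizeof(rsp), sizeof(rsp)) == 0);
	return 0;
}

The same decode appears in the driver as be32_to_cpu(*(__be32 *)(buf + 2)); the sketch just spells out the byte shuffling that macro performs on a little-endian host.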
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2014-2021 Nuvoton Technology corporation * Copyright (C) 2019-2022 Infineon Technologies AG * * This device driver implements the TPM interface as defined in the TCG PC * Client Platform TPM Profile (PTP) Specification for TPM 2.0 v1.04 * Revision 14. * * It is based on the tpm_tis_spi device driver. */ #include <linux/i2c.h> #include <linux/crc-ccitt.h> #include "tpm_tis_core.h" /* TPM registers */ #define TPM_I2C_LOC_SEL 0x00 #define TPM_I2C_ACCESS 0x04 #define TPM_I2C_INTERFACE_CAPABILITY 0x30 #define TPM_I2C_DEVICE_ADDRESS 0x38 #define TPM_I2C_DATA_CSUM_ENABLE 0x40 #define TPM_DATA_CSUM 0x44 #define TPM_I2C_DID_VID 0x48 #define TPM_I2C_RID 0x4C /* TIS-compatible register address to avoid clash with TPM_ACCESS (0x00) */ #define TPM_LOC_SEL 0x0FFF /* Mask to extract the I2C register from TIS register addresses */ #define TPM_TIS_REGISTER_MASK 0x0FFF /* Default Guard Time of 250µs until interface capability register is read */ #define GUARD_TIME_DEFAULT_MIN 250 #define GUARD_TIME_DEFAULT_MAX 300 /* Guard Time of 250µs after I2C slave NACK */ #define GUARD_TIME_ERR_MIN 250 #define GUARD_TIME_ERR_MAX 300 /* Guard Time bit masks; SR is repeated start, RW is read then write, etc. */ #define TPM_GUARD_TIME_SR_MASK 0x40000000 #define TPM_GUARD_TIME_RR_MASK 0x00100000 #define TPM_GUARD_TIME_RW_MASK 0x00080000 #define TPM_GUARD_TIME_WR_MASK 0x00040000 #define TPM_GUARD_TIME_WW_MASK 0x00020000 #define TPM_GUARD_TIME_MIN_MASK 0x0001FE00 #define TPM_GUARD_TIME_MIN_SHIFT 9 /* Masks with bits that must be read zero */ #define TPM_ACCESS_READ_ZERO 0x48 #define TPM_INT_ENABLE_ZERO 0x7FFFFF60 #define TPM_STS_READ_ZERO 0x23 #define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000 #define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000 struct tpm_tis_i2c_phy { struct tpm_tis_data priv; struct i2c_client *i2c_client; bool guard_time_read; bool guard_time_write; u16 guard_time_min; u16 guard_time_max; u8 *io_buf; }; static inline struct tpm_tis_i2c_phy * to_tpm_tis_i2c_phy(struct tpm_tis_data *data) { return container_of(data, struct tpm_tis_i2c_phy, priv); } /* * tpm_tis_core uses the register addresses as defined in Table 19 "Allocation * of Register Space for FIFO TPM Access" of the TCG PC Client PTP * Specification. In order for this code to work together with tpm_tis_core, * those addresses need to be mapped to the registers defined for I2C TPMs in * Table 51 "I2C-TPM Register Overview". * * For most addresses this can be done by simply stripping off the locality * information from the address. A few addresses need to be mapped explicitly, * since the corresponding I2C registers have been moved around. TPM_LOC_SEL is * only defined for I2C TPMs and is also mapped explicitly here to distinguish * it from TPM_ACCESS(0). * * Locality information is ignored, since this driver assumes exclusive access * to the TPM and always uses locality 0.
*/ static u8 tpm_tis_i2c_address_to_register(u32 addr) { addr &= TPM_TIS_REGISTER_MASK; switch (addr) { case TPM_ACCESS(0): return TPM_I2C_ACCESS; case TPM_LOC_SEL: return TPM_I2C_LOC_SEL; case TPM_DID_VID(0): return TPM_I2C_DID_VID; case TPM_RID(0): return TPM_I2C_RID; default: return addr; } } static int tpm_tis_i2c_retry_transfer_until_ack(struct tpm_tis_data *data, struct i2c_msg *msg) { struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data); bool guard_time; int i = 0; int ret; if (msg->flags & I2C_M_RD) guard_time = phy->guard_time_read; else guard_time = phy->guard_time_write; do { ret = i2c_transfer(phy->i2c_client->adapter, msg, 1); if (ret < 0) usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX); else if (guard_time) usleep_range(phy->guard_time_min, phy->guard_time_max); /* retry on TPM NACK */ } while (ret < 0 && i++ < TPM_RETRY); return ret; } /* Check that bits which must be read zero are not set */ static int tpm_tis_i2c_sanity_check_read(u8 reg, u16 len, u8 *buf) { u32 zero_mask; u32 value; switch (len) { case sizeof(u8): value = buf[0]; break; case sizeof(u16): value = le16_to_cpup((__le16 *)buf); break; case sizeof(u32): value = le32_to_cpup((__le32 *)buf); break; default: /* unknown length, skip check */ return 0; } switch (reg) { case TPM_I2C_ACCESS: zero_mask = TPM_ACCESS_READ_ZERO; break; case TPM_INT_ENABLE(0) & TPM_TIS_REGISTER_MASK: zero_mask = TPM_INT_ENABLE_ZERO; break; case TPM_STS(0) & TPM_TIS_REGISTER_MASK: zero_mask = TPM_STS_READ_ZERO; break; case TPM_INTF_CAPS(0) & TPM_TIS_REGISTER_MASK: zero_mask = TPM_INTF_CAPABILITY_ZERO; break; case TPM_I2C_INTERFACE_CAPABILITY: zero_mask = TPM_I2C_INTERFACE_CAPABILITY_ZERO; break; default: /* unknown register, skip check */ return 0; } if (unlikely((value & zero_mask) != 0x00)) { pr_debug("TPM I2C read of register 0x%02x failed sanity check: 0x%x\n", reg, value); return -EIO; } return 0; } static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result, enum tpm_tis_io_mode io_mode) { struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data); struct i2c_msg msg = { .addr = phy->i2c_client->addr }; u8 reg = tpm_tis_i2c_address_to_register(addr); int i; int ret; for (i = 0; i < TPM_RETRY; i++) { u16 read = 0; while (read < len) { /* write register */ msg.len = sizeof(reg); msg.buf = &reg; msg.flags = 0; ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg); if (ret < 0) return ret; /* read data */ msg.buf = result + read; msg.len = len - read; msg.flags = I2C_M_RD; if (msg.len > I2C_SMBUS_BLOCK_MAX) msg.len = I2C_SMBUS_BLOCK_MAX; ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg); if (ret < 0) return ret; read += msg.len; } ret = tpm_tis_i2c_sanity_check_read(reg, len, result); if (ret == 0) return 0; usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX); } return ret; } static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, const u8 *value, enum tpm_tis_io_mode io_mode) { struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data); struct i2c_msg msg = { .addr = phy->i2c_client->addr }; u8 reg = tpm_tis_i2c_address_to_register(addr); int ret; u16 wrote = 0; if (len > TPM_BUFSIZE - 1) return -EIO; phy->io_buf[0] = reg; msg.buf = phy->io_buf; while (wrote < len) { /* write register and data in one go */ msg.len = sizeof(reg) + len - wrote; if (msg.len > I2C_SMBUS_BLOCK_MAX) msg.len = I2C_SMBUS_BLOCK_MAX; memcpy(phy->io_buf + sizeof(reg), value + wrote, msg.len - sizeof(reg)); ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg); if (ret < 0) return ret; wrote += msg.len 
- sizeof(reg); } return 0; } static int tpm_tis_i2c_verify_crc(struct tpm_tis_data *data, size_t len, const u8 *value) { u16 crc_tpm, crc_host; int rc; rc = tpm_tis_read16(data, TPM_DATA_CSUM, &crc_tpm); if (rc < 0) return rc; /* reflect crc result, regardless of host endianness */ crc_host = swab16(crc_ccitt(0, value, len)); if (crc_tpm != crc_host) return -EIO; return 0; } /* * Guard Time: * After each I2C operation, the TPM might require the master to wait. * The time period is vendor-specific and must be read from the * TPM_I2C_INTERFACE_CAPABILITY register. * * Before the Guard Time is read (or after the TPM failed to send an I2C NACK), * a Guard Time of 250µs applies. * * Various flags in the same register indicate if a guard time is needed: * - SR: <I2C read with repeated start> <guard time> <I2C read> * - RR: <I2C read> <guard time> <I2C read> * - RW: <I2C read> <guard time> <I2C write> * - WR: <I2C write> <guard time> <I2C read> * - WW: <I2C write> <guard time> <I2C write> * * See TCG PC Client PTP Specification v1.04, 8.1.10 GUARD_TIME */ static int tpm_tis_i2c_init_guard_time(struct tpm_tis_i2c_phy *phy) { u32 i2c_caps; int ret; phy->guard_time_read = true; phy->guard_time_write = true; phy->guard_time_min = GUARD_TIME_DEFAULT_MIN; phy->guard_time_max = GUARD_TIME_DEFAULT_MAX; ret = tpm_tis_i2c_read_bytes(&phy->priv, TPM_I2C_INTERFACE_CAPABILITY, sizeof(i2c_caps), (u8 *)&i2c_caps, TPM_TIS_PHYS_32); if (ret) return ret; phy->guard_time_read = (i2c_caps & TPM_GUARD_TIME_RR_MASK) || (i2c_caps & TPM_GUARD_TIME_RW_MASK); phy->guard_time_write = (i2c_caps & TPM_GUARD_TIME_WR_MASK) || (i2c_caps & TPM_GUARD_TIME_WW_MASK); phy->guard_time_min = (i2c_caps & TPM_GUARD_TIME_MIN_MASK) >> TPM_GUARD_TIME_MIN_SHIFT; /* guard_time_max = guard_time_min * 1.2 */ phy->guard_time_max = phy->guard_time_min + phy->guard_time_min / 5; return 0; } static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); static const struct tpm_tis_phy_ops tpm_i2c_phy_ops = { .read_bytes = tpm_tis_i2c_read_bytes, .write_bytes = tpm_tis_i2c_write_bytes, .verify_crc = tpm_tis_i2c_verify_crc, }; static int tpm_tis_i2c_probe(struct i2c_client *dev) { struct tpm_tis_i2c_phy *phy; const u8 crc_enable = 1; const u8 locality = 0; int ret; phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_i2c_phy), GFP_KERNEL); if (!phy) return -ENOMEM; phy->io_buf = devm_kzalloc(&dev->dev, TPM_BUFSIZE, GFP_KERNEL); if (!phy->io_buf) return -ENOMEM; set_bit(TPM_TIS_DEFAULT_CANCELLATION, &phy->priv.flags); phy->i2c_client = dev; /* must precede all communication with the tpm */ ret = tpm_tis_i2c_init_guard_time(phy); if (ret) return ret; ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_LOC_SEL, sizeof(locality), &locality, TPM_TIS_PHYS_8); if (ret) return ret; ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_I2C_DATA_CSUM_ENABLE, sizeof(crc_enable), &crc_enable, TPM_TIS_PHYS_8); if (ret) return ret; return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_i2c_phy_ops, NULL); } static void tpm_tis_i2c_remove(struct i2c_client *client) { struct tpm_chip *chip = i2c_get_clientdata(client); tpm_chip_unregister(chip); tpm_tis_remove(chip); } static const struct i2c_device_id tpm_tis_i2c_id[] = { { "tpm_tis_i2c", 0 }, {} }; MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id); #ifdef CONFIG_OF static const struct of_device_id of_tis_i2c_match[] = { { .compatible = "infineon,slb9673", }, {} }; MODULE_DEVICE_TABLE(of, of_tis_i2c_match); #endif static struct i2c_driver tpm_tis_i2c_driver = { .driver = { .name = "tpm_tis_i2c", .pm = &tpm_tis_pm, 
.of_match_table = of_match_ptr(of_tis_i2c_match), }, .probe = tpm_tis_i2c_probe, .remove = tpm_tis_i2c_remove, .id_table = tpm_tis_i2c_id, }; module_i2c_driver(tpm_tis_i2c_driver); MODULE_DESCRIPTION("TPM Driver for native I2C access"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_tis_i2c.c
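The guard-time handling in tpm_tis_i2c_init_guard_time() above is pure mask arithmetic on the TPM_I2C_INTERFACE_CAPABILITY word. The standalone sketch below reuses the driver's mask values but feeds them an invented sample register value, so the printed numbers are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mask values copied from the driver above */
#define TPM_GUARD_TIME_RR_MASK   0x00100000u
#define TPM_GUARD_TIME_RW_MASK   0x00080000u
#define TPM_GUARD_TIME_WR_MASK   0x00040000u
#define TPM_GUARD_TIME_WW_MASK   0x00020000u
#define TPM_GUARD_TIME_MIN_MASK  0x0001FE00u
#define TPM_GUARD_TIME_MIN_SHIFT 9

int main(void)
{
	uint32_t i2c_caps = 0x00124400u;	/* hypothetical register value */

	/* a guard time is needed after reads if RR or RW is set */
	bool guard_read = (i2c_caps & TPM_GUARD_TIME_RR_MASK) ||
			  (i2c_caps & TPM_GUARD_TIME_RW_MASK);
	/* ... and after writes if WR or WW is set */
	bool guard_write = (i2c_caps & TPM_GUARD_TIME_WR_MASK) ||
			   (i2c_caps & TPM_GUARD_TIME_WW_MASK);
	/* minimum guard time lives in bits 9..16 */
	uint16_t min = (i2c_caps & TPM_GUARD_TIME_MIN_MASK) >>
		       TPM_GUARD_TIME_MIN_SHIFT;
	/* guard_time_max = guard_time_min * 1.2, as in the driver */
	uint16_t max = min + min / 5;

	printf("read=%d write=%d min=%uus max=%uus\n",
	       guard_read, guard_write, (unsigned)min, (unsigned)max);
	return 0;
}

For the sample value this prints read=1 write=1 min=34us max=40us; until the capability register has been read successfully, the driver falls back to the 250-300µs defaults.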
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2004 IBM Corporation * Authors: * Leendert van Doorn <[email protected]> * Dave Safford <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * * Copyright (C) 2013 Obsidian Research Corp * Jason Gunthorpe <[email protected]> * * sysfs filesystem inspection interface to the TPM */ #include <linux/device.h> #include "tpm.h" struct tpm_readpubek_out { u8 algorithm[4]; u8 encscheme[2]; u8 sigscheme[2]; __be32 paramsize; u8 parameters[12]; __be32 keysize; u8 modulus[256]; u8 checksum[20]; } __packed; #define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256) #define TPM_ORD_READPUBEK 124 static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_buf tpm_buf; struct tpm_readpubek_out *out; int i; char *str = buf; struct tpm_chip *chip = to_tpm_chip(dev); char anti_replay[20]; memset(&anti_replay, 0, sizeof(anti_replay)); if (tpm_try_get_ops(chip)) return 0; if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK)) goto out_ops; tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay)); if (tpm_transmit_cmd(chip, &tpm_buf, READ_PUBEK_RESULT_MIN_BODY_SIZE, "attempting to read the PUBEK")) goto out_buf; out = (struct tpm_readpubek_out *)&tpm_buf.data[10]; str += sprintf(str, "Algorithm: %4ph\n" "Encscheme: %2ph\n" "Sigscheme: %2ph\n" "Parameters: %12ph\n" "Modulus length: %d\n" "Modulus:\n", out->algorithm, out->encscheme, out->sigscheme, out->parameters, be32_to_cpu(out->keysize)); for (i = 0; i < 256; i += 16) str += sprintf(str, "%16ph\n", &out->modulus[i]); out_buf: tpm_buf_destroy(&tpm_buf); out_ops: tpm_put_ops(chip); return str - buf; } static DEVICE_ATTR_RO(pubek); static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; u8 digest[TPM_DIGEST_SIZE]; u32 i, j, num_pcrs; char *str = buf; struct tpm_chip *chip = to_tpm_chip(dev); if (tpm_try_get_ops(chip)) return 0; if (tpm1_getcap(chip, TPM_CAP_PROP_PCR, &cap, "attempting to determine the number of PCRS", sizeof(cap.num_pcrs))) { tpm_put_ops(chip); return 0; } num_pcrs = be32_to_cpu(cap.num_pcrs); for (i = 0; i < num_pcrs; i++) { if (tpm1_pcr_read(chip, i, digest)) { str = buf; break; } str += sprintf(str, "PCR-%02d: ", i); for (j = 0; j < TPM_DIGEST_SIZE; j++) str += sprintf(str, "%02X ", digest[j]); str += sprintf(str, "\n"); } tpm_put_ops(chip); return str - buf; } static DEVICE_ATTR_RO(pcrs); static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); ssize_t rc = 0; cap_t cap; if (tpm_try_get_ops(chip)) return 0; if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap, "attempting to determine the permanent enabled state", sizeof(cap.perm_flags))) goto out_ops; rc = sprintf(buf, "%d\n", !cap.perm_flags.disable); out_ops: tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(enabled); static ssize_t active_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); ssize_t rc = 0; cap_t cap; if (tpm_try_get_ops(chip)) return 0; if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap, "attempting to determine the permanent active state", sizeof(cap.perm_flags))) goto out_ops; rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated); out_ops: tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(active); static ssize_t owned_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); ssize_t rc = 0; cap_t 
cap; if (tpm_try_get_ops(chip)) return 0; if (tpm1_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap, "attempting to determine the owner state", sizeof(cap.owned))) goto out_ops; rc = sprintf(buf, "%d\n", cap.owned); out_ops: tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(owned); static ssize_t temp_deactivated_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); ssize_t rc = 0; cap_t cap; if (tpm_try_get_ops(chip)) return 0; if (tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap, "attempting to determine the temporary state", sizeof(cap.stclear_flags))) goto out_ops; rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated); out_ops: tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(temp_deactivated); static ssize_t caps_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); struct tpm1_version *version; ssize_t rc = 0; char *str = buf; cap_t cap; if (tpm_try_get_ops(chip)) return 0; if (tpm1_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap, "attempting to determine the manufacturer", sizeof(cap.manufacturer_id))) goto out_ops; str += sprintf(str, "Manufacturer: 0x%x\n", be32_to_cpu(cap.manufacturer_id)); /* TPM 1.2 */ if (!tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap, "attempting to determine the 1.2 version", sizeof(cap.version2))) { version = &cap.version2.version; goto out_print; } /* TPM 1.1 */ if (tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap, "attempting to determine the 1.1 version", sizeof(cap.version1))) { goto out_ops; } version = &cap.version1; out_print: str += sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n", version->major, version->minor, version->rev_major, version->rev_minor); rc = str - buf; out_ops: tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(caps); static ssize_t cancel_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct tpm_chip *chip = to_tpm_chip(dev); if (tpm_try_get_ops(chip)) return 0; chip->ops->cancel(chip); tpm_put_ops(chip); return count; } static DEVICE_ATTR_WO(cancel); static ssize_t durations_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); if (chip->duration[TPM_LONG] == 0) return 0; return sprintf(buf, "%d %d %d [%s]\n", jiffies_to_usecs(chip->duration[TPM_SHORT]), jiffies_to_usecs(chip->duration[TPM_MEDIUM]), jiffies_to_usecs(chip->duration[TPM_LONG]), chip->duration_adjusted ? "adjusted" : "original"); } static DEVICE_ATTR_RO(durations); static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); return sprintf(buf, "%d %d %d %d [%s]\n", jiffies_to_usecs(chip->timeout_a), jiffies_to_usecs(chip->timeout_b), jiffies_to_usecs(chip->timeout_c), jiffies_to_usecs(chip->timeout_d), chip->timeout_adjusted ? "adjusted" : "original"); } static DEVICE_ATTR_RO(timeouts); static ssize_t tpm_version_major_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2 ? 
"2" : "1"); } static DEVICE_ATTR_RO(tpm_version_major); static struct attribute *tpm1_dev_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, &dev_attr_enabled.attr, &dev_attr_active.attr, &dev_attr_owned.attr, &dev_attr_temp_deactivated.attr, &dev_attr_caps.attr, &dev_attr_cancel.attr, &dev_attr_durations.attr, &dev_attr_timeouts.attr, &dev_attr_tpm_version_major.attr, NULL, }; static struct attribute *tpm2_dev_attrs[] = { &dev_attr_tpm_version_major.attr, NULL }; static const struct attribute_group tpm1_dev_group = { .attrs = tpm1_dev_attrs, }; static const struct attribute_group tpm2_dev_group = { .attrs = tpm2_dev_attrs, }; struct tpm_pcr_attr { int alg_id; int pcr; struct device_attribute attr; }; #define to_tpm_pcr_attr(a) container_of(a, struct tpm_pcr_attr, attr) static ssize_t pcr_value_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_pcr_attr *ha = to_tpm_pcr_attr(attr); struct tpm_chip *chip = to_tpm_chip(dev); struct tpm_digest digest; int i; int digest_size = 0; int rc; char *str = buf; for (i = 0; i < chip->nr_allocated_banks; i++) if (ha->alg_id == chip->allocated_banks[i].alg_id) digest_size = chip->allocated_banks[i].digest_size; /* should never happen */ if (!digest_size) return -EINVAL; digest.alg_id = ha->alg_id; rc = tpm_pcr_read(chip, ha->pcr, &digest); if (rc) return rc; for (i = 0; i < digest_size; i++) str += sprintf(str, "%02X", digest.digest[i]); str += sprintf(str, "\n"); return str - buf; } /* * The following set of defines represents all the magic to build * the per hash attribute groups for displaying each bank of PCRs. * The only slight problem with this approach is that every PCR is * hard coded to be present, so you don't know if an PCR is missing * until a cat of the file returns -EINVAL * * Also note you must ignore checkpatch warnings in this macro * code. This is deep macro magic that checkpatch.pl doesn't * understand. */ /* Note, this must match TPM2_PLATFORM_PCR which is fixed at 24. */ #define _TPM_HELPER(_alg, _hash, F) \ F(_alg, _hash, 0) \ F(_alg, _hash, 1) \ F(_alg, _hash, 2) \ F(_alg, _hash, 3) \ F(_alg, _hash, 4) \ F(_alg, _hash, 5) \ F(_alg, _hash, 6) \ F(_alg, _hash, 7) \ F(_alg, _hash, 8) \ F(_alg, _hash, 9) \ F(_alg, _hash, 10) \ F(_alg, _hash, 11) \ F(_alg, _hash, 12) \ F(_alg, _hash, 13) \ F(_alg, _hash, 14) \ F(_alg, _hash, 15) \ F(_alg, _hash, 16) \ F(_alg, _hash, 17) \ F(_alg, _hash, 18) \ F(_alg, _hash, 19) \ F(_alg, _hash, 20) \ F(_alg, _hash, 21) \ F(_alg, _hash, 22) \ F(_alg, _hash, 23) /* ignore checkpatch warning about trailing ; in macro. */ #define PCR_ATTR(_alg, _hash, _pcr) \ static struct tpm_pcr_attr dev_attr_pcr_##_hash##_##_pcr = { \ .alg_id = _alg, \ .pcr = _pcr, \ .attr = { \ .attr = { \ .name = __stringify(_pcr), \ .mode = 0444 \ }, \ .show = pcr_value_show \ } \ }; #define PCR_ATTRS(_alg, _hash) \ _TPM_HELPER(_alg, _hash, PCR_ATTR) /* ignore checkpatch warning about trailing , in macro. 
*/ #define PCR_ATTR_VAL(_alg, _hash, _pcr) \ &dev_attr_pcr_##_hash##_##_pcr.attr.attr, #define PCR_ATTR_GROUP_ARRAY(_alg, _hash) \ static struct attribute *pcr_group_attrs_##_hash[] = { \ _TPM_HELPER(_alg, _hash, PCR_ATTR_VAL) \ NULL \ } #define PCR_ATTR_GROUP(_alg, _hash) \ static struct attribute_group pcr_group_##_hash = { \ .name = "pcr-" __stringify(_hash), \ .attrs = pcr_group_attrs_##_hash \ } #define PCR_ATTR_BUILD(_alg, _hash) \ PCR_ATTRS(_alg, _hash) \ PCR_ATTR_GROUP_ARRAY(_alg, _hash); \ PCR_ATTR_GROUP(_alg, _hash) /* * End of macro structure to build an attribute group containing 24 * PCR value files for each supported hash algorithm */ /* * The next set of macros implements the cleverness for each hash to * build a static attribute group called pcr_group_<hash> which can be * added to chip->groups[]. * * The first argument is the TPM algorithm id and the second is the * hash used as both the suffix and the group name. Note: the group * name is a directory in the top level tpm class with the name * pcr-<hash>, so it must not clash with any other names already * in the sysfs directory. */ PCR_ATTR_BUILD(TPM_ALG_SHA1, sha1); PCR_ATTR_BUILD(TPM_ALG_SHA256, sha256); PCR_ATTR_BUILD(TPM_ALG_SHA384, sha384); PCR_ATTR_BUILD(TPM_ALG_SHA512, sha512); PCR_ATTR_BUILD(TPM_ALG_SM3_256, sm3); void tpm_sysfs_add_device(struct tpm_chip *chip) { int i; WARN_ON(chip->groups_cnt != 0); if (tpm_is_firmware_upgrade(chip)) return; if (chip->flags & TPM_CHIP_FLAG_TPM2) chip->groups[chip->groups_cnt++] = &tpm2_dev_group; else chip->groups[chip->groups_cnt++] = &tpm1_dev_group; /* add one group for each bank hash */ for (i = 0; i < chip->nr_allocated_banks; i++) { switch (chip->allocated_banks[i].alg_id) { case TPM_ALG_SHA1: chip->groups[chip->groups_cnt++] = &pcr_group_sha1; break; case TPM_ALG_SHA256: chip->groups[chip->groups_cnt++] = &pcr_group_sha256; break; case TPM_ALG_SHA384: chip->groups[chip->groups_cnt++] = &pcr_group_sha384; break; case TPM_ALG_SHA512: chip->groups[chip->groups_cnt++] = &pcr_group_sha512; break; case TPM_ALG_SM3_256: chip->groups[chip->groups_cnt++] = &pcr_group_sm3; break; default: /* * If this triggers, send a patch to add both a * PCR_ATTR_BUILD() macro above for the * missing algorithm as well as an additional * case in this switch statement. */ dev_err(&chip->dev, "TPM with unsupported bank algorithm 0x%04x", chip->allocated_banks[i].alg_id); break; } } /* * This will only trigger if someone has added an additional * hash to the tpm_algorithms enum without incrementing * TPM_MAX_HASHES. */ WARN_ON(chip->groups_cnt > TPM_MAX_HASHES + 1); }
linux-master
drivers/char/tpm/tpm-sysfs.c
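The PCR_ATTR/_TPM_HELPER layering above is ordinary X-macro token pasting: a repeated F(...) helper stamps out one uniquely named struct per PCR index, and a second pass collects pointers to them into an array. The following is a small, self-contained demo of the same technique with invented names and only three indices instead of 24; it is not text from the kernel file.

#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

struct demo_attr {
	const char *name;
	int pcr;
};

/* Expand F() once per PCR index; the kernel's helper goes up to 23. */
#define DEMO_HELPER(_hash, F) F(_hash, 0) F(_hash, 1) F(_hash, 2)

/* Each expansion defines one uniquely named struct, as PCR_ATTR() does. */
#define DEMO_ATTR(_hash, _pcr) \
	static struct demo_attr attr_##_hash##_##_pcr = { \
		.name = __stringify(_pcr), .pcr = _pcr };

DEMO_HELPER(sha256, DEMO_ATTR)

/* Collect pointers to the generated structs, as PCR_ATTR_GROUP_ARRAY() does. */
#define DEMO_ATTR_PTR(_hash, _pcr) &attr_##_hash##_##_pcr,
static struct demo_attr *demo_group_sha256[] = {
	DEMO_HELPER(sha256, DEMO_ATTR_PTR)
	NULL
};

int main(void)
{
	for (struct demo_attr **a = demo_group_sha256; *a; a++)
		printf("pcr file \"%s\" -> index %d\n", (*a)->name, (*a)->pcr);
	return 0;
}

In the kernel file the generated array is then wrapped in an attribute_group named "pcr-<hash>", which tpm_sysfs_add_device() appends to chip->groups[] for each allocated bank.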
// SPDX-License-Identifier: GPL-2.0-or-later /* * ATMEL I2C TPM AT97SC3204T * * Copyright (C) 2012 V Lab Technologies * Teddy Reed <[email protected]> * Copyright (C) 2013, Obsidian Research Corp. * Jason Gunthorpe <[email protected]> * Device driver for ATMEL I2C TPMs. * * Teddy Reed determined the basic I2C command flow; unlike other I2C TPM * devices, the raw TCG formatted TPM command data is written via I2C and then * the raw TCG formatted TPM response data is returned via I2C. * * TCG status/locality/etc functions seen in the LPC implementation do not * seem to be present. */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/i2c.h> #include "tpm.h" #define I2C_DRIVER_NAME "tpm_i2c_atmel" #define TPM_I2C_SHORT_TIMEOUT 750 /* ms */ #define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */ #define ATMEL_STS_OK 1 struct priv_data { size_t len; /* This is the amount we read on the first try. 25 was chosen to fit a * fair number of read responses in the buffer so a 2nd retry can be * avoided in small message cases. */ u8 buffer[sizeof(struct tpm_header) + 25]; }; static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) { struct priv_data *priv = dev_get_drvdata(&chip->dev); struct i2c_client *client = to_i2c_client(chip->dev.parent); s32 status; priv->len = 0; if (len <= 2) return -EIO; status = i2c_master_send(client, buf, len); dev_dbg(&chip->dev, "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, (int)min_t(size_t, 64, len), buf, len, status); if (status < 0) return status; /* The upper layer does not support incomplete sends. */ if (status != len) return -E2BIG; return 0; } static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct priv_data *priv = dev_get_drvdata(&chip->dev); struct i2c_client *client = to_i2c_client(chip->dev.parent); struct tpm_header *hdr = (struct tpm_header *)priv->buffer; u32 expected_len; int rc; if (priv->len == 0) return -EIO; /* Get the message size from the message header; if we didn't get the * whole message in read_status then we need to re-read the * message. */ expected_len = be32_to_cpu(hdr->length); if (expected_len > count) return -ENOMEM; if (priv->len >= expected_len) { dev_dbg(&chip->dev, "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__, (int)min_t(size_t, 64, expected_len), buf, count, expected_len); memcpy(buf, priv->buffer, expected_len); return expected_len; } rc = i2c_master_recv(client, buf, expected_len); dev_dbg(&chip->dev, "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__, (int)min_t(size_t, 64, expected_len), buf, count, expected_len); return rc; } static void i2c_atmel_cancel(struct tpm_chip *chip) { dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported"); } static u8 i2c_atmel_read_status(struct tpm_chip *chip) { struct priv_data *priv = dev_get_drvdata(&chip->dev); struct i2c_client *client = to_i2c_client(chip->dev.parent); int rc; /* The TPM fails the I2C read until it is ready, so we do the entire * transfer here and buffer it locally. This way the common code can * properly handle the timeouts. */ priv->len = 0; memset(priv->buffer, 0, sizeof(priv->buffer)); /* Once the TPM has completed the command the command remains readable * until another command is issued.
*/ rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer)); dev_dbg(&chip->dev, "%s: sts=%d", __func__, rc); if (rc <= 0) return 0; priv->len = rc; return ATMEL_STS_OK; } static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status) { return false; } static const struct tpm_class_ops i2c_atmel = { .flags = TPM_OPS_AUTO_STARTUP, .status = i2c_atmel_read_status, .recv = i2c_atmel_recv, .send = i2c_atmel_send, .cancel = i2c_atmel_cancel, .req_complete_mask = ATMEL_STS_OK, .req_complete_val = ATMEL_STS_OK, .req_canceled = i2c_atmel_req_canceled, }; static int i2c_atmel_probe(struct i2c_client *client) { struct tpm_chip *chip; struct device *dev = &client->dev; struct priv_data *priv; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; chip = tpmm_chip_alloc(dev, &i2c_atmel); if (IS_ERR(chip)) return PTR_ERR(chip); priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); if (!priv) return -ENOMEM; /* Default timeouts */ chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); dev_set_drvdata(&chip->dev, priv); /* There is no known way to probe for this device, and all version * information seems to be read via TPM commands. Thus we rely on the * TPM startup process in the common code to detect the device. */ return tpm_chip_register(chip); } static void i2c_atmel_remove(struct i2c_client *client) { struct device *dev = &(client->dev); struct tpm_chip *chip = dev_get_drvdata(dev); tpm_chip_unregister(chip); } static const struct i2c_device_id i2c_atmel_id[] = { {I2C_DRIVER_NAME, 0}, {} }; MODULE_DEVICE_TABLE(i2c, i2c_atmel_id); #ifdef CONFIG_OF static const struct of_device_id i2c_atmel_of_match[] = { {.compatible = "atmel,at97sc3204t"}, {}, }; MODULE_DEVICE_TABLE(of, i2c_atmel_of_match); #endif static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume); static struct i2c_driver i2c_atmel_driver = { .id_table = i2c_atmel_id, .probe = i2c_atmel_probe, .remove = i2c_atmel_remove, .driver = { .name = I2C_DRIVER_NAME, .pm = &i2c_atmel_pm_ops, .of_match_table = of_match_ptr(i2c_atmel_of_match), }, }; module_i2c_driver(i2c_atmel_driver); MODULE_AUTHOR("Jason Gunthorpe <[email protected]>"); MODULE_DESCRIPTION("Atmel TPM I2C Driver"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_i2c_atmel.c
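The Atmel driver's recv() above decides between serving the response from the buffer that read_status() filled and issuing a second i2c_master_recv() purely from the length announced in the response header. Below is a hedged, host-side model of that decision; the names and sample bytes are invented and no real I2C traffic is involved.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TPM_HEADER_LEN 10
#define FIRST_READ_LEN (TPM_HEADER_LEN + 25)	/* header + 25, as above */

static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Does the first-read buffer already hold the full response? */
static int response_complete(const uint8_t *buf, size_t buffered)
{
	if (buffered < TPM_HEADER_LEN)
		return 0;
	return buffered >= be32(buf + 2);	/* length field at offset 2 */
}

int main(void)
{
	uint8_t buf[FIRST_READ_LEN] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0a,
					0x00, 0x00, 0x00, 0x00 };

	/* a 10-byte response fits in the first read: no second transfer */
	printf("complete=%d\n", response_complete(buf, FIRST_READ_LEN));

	/* pretend the header announces 200 bytes: a re-read is needed */
	buf[4] = 0x00; buf[5] = 0xc8;
	printf("complete=%d\n", response_complete(buf, FIRST_READ_LEN));
	return 0;
}

Buffering the whole first read inside read_status() is what lets the common TPM code keep polling on timeouts even though this chip simply NACKs reads until a response is ready.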
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014, 2015 Intel Corporation * * Authors: * Jarkko Sakkinen <[email protected]> * * Maintained by: <[email protected]> * * This file contains TPM2 protocol implementations of the commands * used by the kernel internally. */ #include "tpm.h" #include <crypto/hash_info.h> static struct tpm2_hash tpm2_hash_map[] = { {HASH_ALGO_SHA1, TPM_ALG_SHA1}, {HASH_ALGO_SHA256, TPM_ALG_SHA256}, {HASH_ALGO_SHA384, TPM_ALG_SHA384}, {HASH_ALGO_SHA512, TPM_ALG_SHA512}, {HASH_ALGO_SM3_256, TPM_ALG_SM3_256}, }; int tpm2_get_timeouts(struct tpm_chip *chip) { /* Fixed timeouts for TPM2 */ chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A); chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B); chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C); chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D); /* PTP spec timeouts */ chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT); chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM); chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG); /* Key creation commands long timeouts */ chip->duration[TPM_LONG_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG_LONG); chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS; return 0; } /** * tpm2_ordinal_duration_index() - returns an index to the chip duration table * @ordinal: TPM command ordinal. * * The function returns an index to the chip duration table * (enum tpm_duration) that describes the maximum amount of * time the chip could take to return the result for a particular ordinal. * * The values of the MEDIUM and LONG durations are taken * from the PC Client Profile (PTP) specification (750 and 2000 msec) * * LONG_LONG is for commands that generate keys, which empirically take * a longer time on some systems. * * Return: * * TPM_MEDIUM * * TPM_LONG * * TPM_LONG_LONG * * TPM_UNDEFINED */ static u8 tpm2_ordinal_duration_index(u32 ordinal) { switch (ordinal) { /* Startup */ case TPM2_CC_STARTUP: /* 144 */ return TPM_MEDIUM; case TPM2_CC_SELF_TEST: /* 143 */ return TPM_LONG; case TPM2_CC_GET_RANDOM: /* 17B */ return TPM_LONG; case TPM2_CC_SEQUENCE_UPDATE: /* 15C */ return TPM_MEDIUM; case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */ return TPM_MEDIUM; case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */ return TPM_MEDIUM; case TPM2_CC_HASH_SEQUENCE_START: /* 186 */ return TPM_MEDIUM; case TPM2_CC_VERIFY_SIGNATURE: /* 177 */ return TPM_LONG_LONG; case TPM2_CC_PCR_EXTEND: /* 182 */ return TPM_MEDIUM; case TPM2_CC_HIERARCHY_CONTROL: /* 121 */ return TPM_LONG; case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */ return TPM_LONG; case TPM2_CC_GET_CAPABILITY: /* 17A */ return TPM_MEDIUM; case TPM2_CC_NV_READ: /* 14E */ return TPM_LONG; case TPM2_CC_CREATE_PRIMARY: /* 131 */ return TPM_LONG_LONG; case TPM2_CC_CREATE: /* 153 */ return TPM_LONG_LONG; case TPM2_CC_CREATE_LOADED: /* 191 */ return TPM_LONG_LONG; default: return TPM_UNDEFINED; } } /** * tpm2_calc_ordinal_duration() - calculate the maximum command duration * @chip: TPM chip to use. * @ordinal: TPM command ordinal. * * The function returns the maximum amount of time the chip could take * to return the result for a particular ordinal in jiffies. * * Return: A maximal duration time for an ordinal in jiffies.
*/ unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) { unsigned int index; index = tpm2_ordinal_duration_index(ordinal); if (index != TPM_UNDEFINED) return chip->duration[index]; else return msecs_to_jiffies(TPM2_DURATION_DEFAULT); } struct tpm2_pcr_read_out { __be32 update_cnt; __be32 pcr_selects_cnt; __be16 hash_alg; u8 pcr_select_size; u8 pcr_select[TPM2_PCR_SELECT_MIN]; __be32 digests_cnt; __be16 digest_size; u8 digest[]; } __packed; /** * tpm2_pcr_read() - read a PCR value * @chip: TPM chip to use. * @pcr_idx: index of the PCR to read. * @digest: PCR bank and buffer current PCR value is written to. * @digest_size_ptr: pointer to variable that stores the digest size. * * Return: Same as with tpm_transmit_cmd. */ int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digest, u16 *digest_size_ptr) { int i; int rc; struct tpm_buf buf; struct tpm2_pcr_read_out *out; u8 pcr_select[TPM2_PCR_SELECT_MIN] = {0}; u16 digest_size; u16 expected_digest_size = 0; if (pcr_idx >= TPM2_PLATFORM_PCR) return -EINVAL; if (!digest_size_ptr) { for (i = 0; i < chip->nr_allocated_banks && chip->allocated_banks[i].alg_id != digest->alg_id; i++) ; if (i == chip->nr_allocated_banks) return -EINVAL; expected_digest_size = chip->allocated_banks[i].digest_size; } rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_PCR_READ); if (rc) return rc; pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7); tpm_buf_append_u32(&buf, 1); tpm_buf_append_u16(&buf, digest->alg_id); tpm_buf_append_u8(&buf, TPM2_PCR_SELECT_MIN); tpm_buf_append(&buf, (const unsigned char *)pcr_select, sizeof(pcr_select)); rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to read a pcr value"); if (rc) goto out; out = (struct tpm2_pcr_read_out *)&buf.data[TPM_HEADER_SIZE]; digest_size = be16_to_cpu(out->digest_size); if (digest_size > sizeof(digest->digest) || (!digest_size_ptr && digest_size != expected_digest_size)) { rc = -EINVAL; goto out; } if (digest_size_ptr) *digest_size_ptr = digest_size; memcpy(digest->digest, out->digest, digest_size); out: tpm_buf_destroy(&buf); return rc; } struct tpm2_null_auth_area { __be32 handle; __be16 nonce_size; u8 attributes; __be16 auth_size; } __packed; /** * tpm2_pcr_extend() - extend a PCR value * * @chip: TPM chip to use. * @pcr_idx: index of the PCR. * @digests: list of pcr banks and corresponding digest values to extend. * * Return: Same as with tpm_transmit_cmd. 
*/ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digests) { struct tpm_buf buf; struct tpm2_null_auth_area auth_area; int rc; int i; rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND); if (rc) return rc; tpm_buf_append_u32(&buf, pcr_idx); auth_area.handle = cpu_to_be32(TPM2_RS_PW); auth_area.nonce_size = 0; auth_area.attributes = 0; auth_area.auth_size = 0; tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area)); tpm_buf_append(&buf, (const unsigned char *)&auth_area, sizeof(auth_area)); tpm_buf_append_u32(&buf, chip->nr_allocated_banks); for (i = 0; i < chip->nr_allocated_banks; i++) { tpm_buf_append_u16(&buf, digests[i].alg_id); tpm_buf_append(&buf, (const unsigned char *)&digests[i].digest, chip->allocated_banks[i].digest_size); } rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to extend a PCR value"); tpm_buf_destroy(&buf); return rc; } struct tpm2_get_random_out { __be16 size; u8 buffer[TPM_MAX_RNG_DATA]; } __packed; /** * tpm2_get_random() - get random bytes from the TPM RNG * * @chip: a &tpm_chip instance * @dest: destination buffer * @max: the max number of random bytes to pull * * Return: * size of the buffer on success, * -errno otherwise (positive TPM return codes are masked to -EIO) */ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) { struct tpm2_get_random_out *out; struct tpm_buf buf; u32 recd; u32 num_bytes = max; int err; int total = 0; int retries = 5; u8 *dest_ptr = dest; if (!num_bytes || max > TPM_MAX_RNG_DATA) return -EINVAL; err = tpm_buf_init(&buf, 0, 0); if (err) return err; do { tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM); tpm_buf_append_u16(&buf, num_bytes); err = tpm_transmit_cmd(chip, &buf, offsetof(struct tpm2_get_random_out, buffer), "attempting to get random"); if (err) { if (err > 0) err = -EIO; goto out; } out = (struct tpm2_get_random_out *) &buf.data[TPM_HEADER_SIZE]; recd = min_t(u32, be16_to_cpu(out->size), num_bytes); if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + offsetof(struct tpm2_get_random_out, buffer) + recd) { err = -EFAULT; goto out; } memcpy(dest_ptr, out->buffer, recd); dest_ptr += recd; total += recd; num_bytes -= recd; } while (retries-- && total < max); tpm_buf_destroy(&buf); return total ? total : -EIO; out: tpm_buf_destroy(&buf); return err; } /** * tpm2_flush_context() - execute a TPM2_FlushContext command * @chip: TPM chip to use * @handle: context handle */ void tpm2_flush_context(struct tpm_chip *chip, u32 handle) { struct tpm_buf buf; int rc; rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT); if (rc) { dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n", handle); return; } tpm_buf_append_u32(&buf, handle); tpm_transmit_cmd(chip, &buf, 0, "flushing context"); tpm_buf_destroy(&buf); } EXPORT_SYMBOL_GPL(tpm2_flush_context); struct tpm2_get_cap_out { u8 more_data; __be32 subcap_id; __be32 property_cnt; __be32 property_id; __be32 value; } __packed; /** * tpm2_get_tpm_pt() - get value of a TPM_CAP_TPM_PROPERTIES type property * @chip: a &tpm_chip instance * @property_id: property ID. * @value: output variable.
* @desc: passed to tpm_transmit_cmd() * * Return: * 0 on success, * -errno or a TPM return code otherwise */ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, const char *desc) { struct tpm2_get_cap_out *out; struct tpm_buf buf; int rc; rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY); if (rc) return rc; tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES); tpm_buf_append_u32(&buf, property_id); tpm_buf_append_u32(&buf, 1); rc = tpm_transmit_cmd(chip, &buf, 0, NULL); if (!rc) { out = (struct tpm2_get_cap_out *) &buf.data[TPM_HEADER_SIZE]; /* * To prevent failing boot up of some systems, Infineon TPM2.0 * returns SUCCESS on TPM2_Startup in field upgrade mode. Also * the TPM2_Getcapability command returns a zero length list * in field upgrade mode. */ if (be32_to_cpu(out->property_cnt) > 0) *value = be32_to_cpu(out->value); else rc = -ENODATA; } tpm_buf_destroy(&buf); return rc; } EXPORT_SYMBOL_GPL(tpm2_get_tpm_pt); /** * tpm2_shutdown() - send a TPM shutdown command * * Sends a TPM shutdown command. The shutdown command is used in call * sites where the system is going down. If it fails, there is not much * that can be done except print an error message. * * @chip: a &tpm_chip instance * @shutdown_type: TPM_SU_CLEAR or TPM_SU_STATE. */ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type) { struct tpm_buf buf; int rc; rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_SHUTDOWN); if (rc) return; tpm_buf_append_u16(&buf, shutdown_type); tpm_transmit_cmd(chip, &buf, 0, "stopping the TPM"); tpm_buf_destroy(&buf); } /** * tpm2_do_selftest() - ensure that all self tests have passed * * @chip: TPM chip to use * * Return: Same as with tpm_transmit_cmd. * * The TPM can either run all self tests synchronously and then return * RC_SUCCESS once all tests were successful. Or it can choose to run the tests * asynchronously and return RC_TESTING immediately while the self tests still * execute in the background. This function handles both cases and waits until * all tests have completed. */ static int tpm2_do_selftest(struct tpm_chip *chip) { struct tpm_buf buf; int full; int rc; for (full = 0; full < 2; full++) { rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_SELF_TEST); if (rc) return rc; tpm_buf_append_u8(&buf, full); rc = tpm_transmit_cmd(chip, &buf, 0, "attempting the self test"); tpm_buf_destroy(&buf); if (rc == TPM2_RC_TESTING) rc = TPM2_RC_SUCCESS; if (rc == TPM2_RC_INITIALIZE || rc == TPM2_RC_SUCCESS) return rc; } return rc; } /** * tpm2_probe() - probe for the TPM 2.0 protocol * @chip: a &tpm_chip instance * * Send an idempotent TPM 2.0 command and see whether there is TPM2 chip in the * other end based on the response tag. The flag TPM_CHIP_FLAG_TPM2 is set by * this function if this is the case. * * Return: * 0 on success, * -errno otherwise */ int tpm2_probe(struct tpm_chip *chip) { struct tpm_header *out; struct tpm_buf buf; int rc; rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY); if (rc) return rc; tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES); tpm_buf_append_u32(&buf, TPM_PT_TOTAL_COMMANDS); tpm_buf_append_u32(&buf, 1); rc = tpm_transmit_cmd(chip, &buf, 0, NULL); /* We ignore TPM return codes on purpose. 
*/ if (rc >= 0) { out = (struct tpm_header *)buf.data; if (be16_to_cpu(out->tag) == TPM2_ST_NO_SESSIONS) chip->flags |= TPM_CHIP_FLAG_TPM2; } tpm_buf_destroy(&buf); return 0; } EXPORT_SYMBOL_GPL(tpm2_probe); static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index) { struct tpm_bank_info *bank = chip->allocated_banks + bank_index; struct tpm_digest digest = { .alg_id = bank->alg_id }; int i; /* * Avoid unnecessary PCR read operations to reduce overhead * and obtain identifiers of the crypto subsystem. */ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) { enum hash_algo crypto_algo = tpm2_hash_map[i].crypto_id; if (bank->alg_id != tpm2_hash_map[i].tpm_id) continue; bank->digest_size = hash_digest_size[crypto_algo]; bank->crypto_id = crypto_algo; return 0; } bank->crypto_id = HASH_ALGO__LAST; return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size); } struct tpm2_pcr_selection { __be16 hash_alg; u8 size_of_select; u8 pcr_select[3]; } __packed; ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) { struct tpm2_pcr_selection pcr_selection; struct tpm_buf buf; void *marker; void *end; void *pcr_select_offset; u32 sizeof_pcr_selection; u32 nr_possible_banks; u32 nr_alloc_banks = 0; u16 hash_alg; u32 rsp_len; int rc; int i = 0; rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY); if (rc) return rc; tpm_buf_append_u32(&buf, TPM2_CAP_PCRS); tpm_buf_append_u32(&buf, 0); tpm_buf_append_u32(&buf, 1); rc = tpm_transmit_cmd(chip, &buf, 9, "get tpm pcr allocation"); if (rc) goto out; nr_possible_banks = be32_to_cpup( (__be32 *)&buf.data[TPM_HEADER_SIZE + 5]); chip->allocated_banks = kcalloc(nr_possible_banks, sizeof(*chip->allocated_banks), GFP_KERNEL); if (!chip->allocated_banks) { rc = -ENOMEM; goto out; } marker = &buf.data[TPM_HEADER_SIZE + 9]; rsp_len = be32_to_cpup((__be32 *)&buf.data[2]); end = &buf.data[rsp_len]; for (i = 0; i < nr_possible_banks; i++) { pcr_select_offset = marker + offsetof(struct tpm2_pcr_selection, size_of_select); if (pcr_select_offset >= end) { rc = -EFAULT; break; } memcpy(&pcr_selection, marker, sizeof(pcr_selection)); hash_alg = be16_to_cpu(pcr_selection.hash_alg); pcr_select_offset = memchr_inv(pcr_selection.pcr_select, 0, pcr_selection.size_of_select); if (pcr_select_offset) { chip->allocated_banks[nr_alloc_banks].alg_id = hash_alg; rc = tpm2_init_bank_info(chip, nr_alloc_banks); if (rc < 0) break; nr_alloc_banks++; } sizeof_pcr_selection = sizeof(pcr_selection.hash_alg) + sizeof(pcr_selection.size_of_select) + pcr_selection.size_of_select; marker = marker + sizeof_pcr_selection; } chip->nr_allocated_banks = nr_alloc_banks; out: tpm_buf_destroy(&buf); return rc; } int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip) { struct tpm_buf buf; u32 nr_commands; __be32 *attrs; u32 cc; int i; int rc; rc = tpm2_get_tpm_pt(chip, TPM_PT_TOTAL_COMMANDS, &nr_commands, NULL); if (rc) goto out; if (nr_commands > 0xFFFFF) { rc = -EFAULT; goto out; } chip->cc_attrs_tbl = devm_kcalloc(&chip->dev, 4, nr_commands, GFP_KERNEL); if (!chip->cc_attrs_tbl) { rc = -ENOMEM; goto out; } rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY); if (rc) goto out; tpm_buf_append_u32(&buf, TPM2_CAP_COMMANDS); tpm_buf_append_u32(&buf, TPM2_CC_FIRST); tpm_buf_append_u32(&buf, nr_commands); rc = tpm_transmit_cmd(chip, &buf, 9 + 4 * nr_commands, NULL); if (rc) { tpm_buf_destroy(&buf); goto out; } if (nr_commands != be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) { rc = -EFAULT; tpm_buf_destroy(&buf); goto out; } chip->nr_commands = nr_commands; attrs = 
(__be32 *)&buf.data[TPM_HEADER_SIZE + 9]; for (i = 0; i < nr_commands; i++, attrs++) { chip->cc_attrs_tbl[i] = be32_to_cpup(attrs); cc = chip->cc_attrs_tbl[i] & 0xFFFF; if (cc == TPM2_CC_CONTEXT_SAVE || cc == TPM2_CC_FLUSH_CONTEXT) { chip->cc_attrs_tbl[i] &= ~(GENMASK(2, 0) << TPM2_CC_ATTR_CHANDLES); chip->cc_attrs_tbl[i] |= 1 << TPM2_CC_ATTR_CHANDLES; } } tpm_buf_destroy(&buf); out: if (rc > 0) rc = -ENODEV; return rc; } EXPORT_SYMBOL_GPL(tpm2_get_cc_attrs_tbl); /** * tpm2_startup - turn on the TPM * @chip: TPM chip to use * * Normally the firmware should start the TPM. This function is provided as a * workaround if this does not happen. A legal case for this could be for * example when a TPM emulator is used. * * Return: same as tpm_transmit_cmd() */ static int tpm2_startup(struct tpm_chip *chip) { struct tpm_buf buf; int rc; dev_info(&chip->dev, "starting up the TPM manually\n"); rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_STARTUP); if (rc < 0) return rc; tpm_buf_append_u16(&buf, TPM2_SU_CLEAR); rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to start the TPM"); tpm_buf_destroy(&buf); return rc; } /** * tpm2_auto_startup - Perform the standard automatic TPM initialization * sequence * @chip: TPM chip to use * * Returns 0 on success, < 0 in case of fatal error. */ int tpm2_auto_startup(struct tpm_chip *chip) { int rc; rc = tpm2_get_timeouts(chip); if (rc) goto out; rc = tpm2_do_selftest(chip); if (rc && rc != TPM2_RC_INITIALIZE) goto out; if (rc == TPM2_RC_INITIALIZE) { rc = tpm2_startup(chip); if (rc) goto out; rc = tpm2_do_selftest(chip); if (rc) goto out; } rc = tpm2_get_cc_attrs_tbl(chip); if (rc == TPM2_RC_FAILURE || (rc < 0 && rc != -ENOMEM)) { dev_info(&chip->dev, "TPM in field failure mode, requires firmware upgrade\n"); chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE; rc = 0; } out: /* * Infineon TPM in field upgrade mode will return no data for the number * of supported commands. */ if (rc == TPM2_RC_UPGRADE || rc == -ENODATA) { dev_info(&chip->dev, "TPM in field upgrade mode, requires firmware upgrade\n"); chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE; rc = 0; } if (rc > 0) rc = -ENODEV; return rc; } int tpm2_find_cc(struct tpm_chip *chip, u32 cc) { u32 cc_mask; int i; cc_mask = 1 << TPM2_CC_ATTR_VENDOR | GENMASK(15, 0); for (i = 0; i < chip->nr_commands; i++) if (cc == (chip->cc_attrs_tbl[i] & cc_mask)) return i; return -1; }
linux-master
drivers/char/tpm/tpm2-cmd.c
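tpm2_pcr_read() above selects a single PCR with a 3-byte bitmap: byte index pcr_idx >> 3, bit value 1 << (pcr_idx & 0x7). A standalone check of that arithmetic for PCR 10, with the constant copied from the driver:

#include <stdint.h>
#include <stdio.h>

#define TPM2_PCR_SELECT_MIN 3	/* 3 bytes cover PCRs 0..23 */

int main(void)
{
	uint8_t pcr_select[TPM2_PCR_SELECT_MIN] = { 0 };
	uint32_t pcr_idx = 10;

	/* PCR 10: byte 10 >> 3 = 1, bit 10 & 7 = 2 -> pcr_select[1] = 0x04 */
	pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);

	printf("pcr_select = { %02x %02x %02x }\n",
	       pcr_select[0], pcr_select[1], pcr_select[2]);
	return 0;
}

This prints { 00 04 00 }, i.e. only bit 2 of byte 1 is set, which is exactly the TPML_PCR_SELECTION payload the driver appends after the count, algorithm id, and select size.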
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2004 IBM Corporation * Authors: * Leendert van Doorn <[email protected]> * Dave Safford <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * * Copyright (C) 2013 Obsidian Research Corp * Jason Gunthorpe <[email protected]> * * Device file system interface to the TPM */ #include <linux/slab.h> #include "tpm-dev.h" static int tpm_open(struct inode *inode, struct file *file) { struct tpm_chip *chip; struct file_priv *priv; chip = container_of(inode->i_cdev, struct tpm_chip, cdev); /* The atomic test_and_set_bit() on the is_open flag ensures that * the chip can be opened by only one process at a time. */ if (test_and_set_bit(0, &chip->is_open)) { dev_dbg(&chip->dev, "Another process owns this TPM\n"); return -EBUSY; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) goto out; tpm_common_open(file, chip, priv, NULL); return 0; out: clear_bit(0, &chip->is_open); return -ENOMEM; } /* * Called on file close */ static int tpm_release(struct inode *inode, struct file *file) { struct file_priv *priv = file->private_data; tpm_common_release(file, priv); clear_bit(0, &priv->chip->is_open); kfree(priv); return 0; } const struct file_operations tpm_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .open = tpm_open, .read = tpm_common_read, .write = tpm_common_write, .poll = tpm_common_poll, .release = tpm_release, };
linux-master
drivers/char/tpm/tpm-dev.c
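tpm_open() above enforces a single-open policy with test_and_set_bit() on is_open and a matching clear_bit() on release. The sketch below is a user-space model of the same pattern using C11 atomics; the names are invented for the demo and the kernel primitives differ.

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag is_open = ATOMIC_FLAG_INIT;

static int demo_open(void)
{
	/* test-and-set returns nonzero if the flag was already set */
	if (atomic_flag_test_and_set(&is_open))
		return -EBUSY;	/* someone already owns the device */
	return 0;
}

static void demo_release(void)
{
	atomic_flag_clear(&is_open);	/* like clear_bit() in tpm_release() */
}

int main(void)
{
	printf("first open:  %d\n", demo_open());	/* 0: acquired */
	printf("second open: %d\n", demo_open());	/* -EBUSY: refused */
	demo_release();
	printf("after close: %d\n", demo_open());	/* 0: acquired again */
	return 0;
}

The important property, in both the model and the driver, is that the check and the set are one atomic operation, so two racing openers can never both succeed.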
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005, 2006 IBM Corporation * Copyright (C) 2014, 2015 Intel Corporation * * Authors: * Leendert van Doorn <[email protected]> * Kylene Hall <[email protected]> * * Maintained by: <[email protected]> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org * * This device driver implements the TPM interface as defined in * the TCG TPM Interface Spec version 1.2, revision 1.0. */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pnp.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/acpi.h> #include <linux/freezer.h> #include <linux/dmi.h> #include "tpm.h" #include "tpm_tis_core.h" #define TPM_TIS_MAX_UNHANDLED_IRQS 1000 static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value); static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel, bool *canceled) { u8 status = chip->ops->status(chip); *canceled = false; if ((status & mask) == mask) return true; if (check_cancel && chip->ops->req_canceled(chip, status)) { *canceled = true; return true; } return false; } static u8 tpm_tis_filter_sts_mask(u8 int_mask, u8 sts_mask) { if (!(int_mask & TPM_INTF_STS_VALID_INT)) sts_mask &= ~TPM_STS_VALID; if (!(int_mask & TPM_INTF_DATA_AVAIL_INT)) sts_mask &= ~TPM_STS_DATA_AVAIL; if (!(int_mask & TPM_INTF_CMD_READY_INT)) sts_mask &= ~TPM_STS_COMMAND_READY; return sts_mask; } static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, wait_queue_head_t *queue, bool check_cancel) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); unsigned long stop; long rc; u8 status; bool canceled = false; u8 sts_mask; int ret = 0; /* check current status */ status = chip->ops->status(chip); if ((status & mask) == mask) return 0; sts_mask = mask & (TPM_STS_VALID | TPM_STS_DATA_AVAIL | TPM_STS_COMMAND_READY); /* check what status changes can be handled by irqs */ sts_mask = tpm_tis_filter_sts_mask(priv->int_mask, sts_mask); stop = jiffies + timeout; /* process status changes with irq support */ if (sts_mask) { ret = -ETIME; again: timeout = stop - jiffies; if ((long)timeout <= 0) return -ETIME; rc = wait_event_interruptible_timeout(*queue, wait_for_tpm_stat_cond(chip, sts_mask, check_cancel, &canceled), timeout); if (rc > 0) { if (canceled) return -ECANCELED; ret = 0; } if (rc == -ERESTARTSYS && freezing(current)) { clear_thread_flag(TIF_SIGPENDING); goto again; } } if (ret) return ret; mask &= ~sts_mask; if (!mask) /* all done */ return 0; /* process status changes without irq support */ do { status = chip->ops->status(chip); if ((status & mask) == mask) return 0; usleep_range(priv->timeout_min, priv->timeout_max); } while (time_before(jiffies, stop)); return -ETIME; } /* Before we attempt to access the TPM we must see that the valid bit is set. * The specification says that this bit is 0 at reset and remains 0 until the * 'TPM has gone through its self test and initialization and has established * correct values in the other bits.' 
*/ static int wait_startup(struct tpm_chip *chip, int l) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); unsigned long stop = jiffies + chip->timeout_a; do { int rc; u8 access; rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); if (rc < 0) return rc; if (access & TPM_ACCESS_VALID) return 0; tpm_msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); return -1; } static bool check_locality(struct tpm_chip *chip, int l) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc; u8 access; rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); if (rc < 0) return false; if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_USE)) == (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) { priv->locality = l; return true; } return false; } static int __tpm_tis_relinquish_locality(struct tpm_tis_data *priv, int l) { tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY); return 0; } static int tpm_tis_relinquish_locality(struct tpm_chip *chip, int l) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); mutex_lock(&priv->locality_count_mutex); priv->locality_count--; if (priv->locality_count == 0) __tpm_tis_relinquish_locality(priv, l); mutex_unlock(&priv->locality_count_mutex); return 0; } static int __tpm_tis_request_locality(struct tpm_chip *chip, int l) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); unsigned long stop, timeout; long rc; if (check_locality(chip, l)) return l; rc = tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_REQUEST_USE); if (rc < 0) return rc; stop = jiffies + chip->timeout_a; if (chip->flags & TPM_CHIP_FLAG_IRQ) { again: timeout = stop - jiffies; if ((long)timeout <= 0) return -1; rc = wait_event_interruptible_timeout(priv->int_queue, (check_locality (chip, l)), timeout); if (rc > 0) return l; if (rc == -ERESTARTSYS && freezing(current)) { clear_thread_flag(TIF_SIGPENDING); goto again; } } else { /* wait for burstcount */ do { if (check_locality(chip, l)) return l; tpm_msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); } return -1; } static int tpm_tis_request_locality(struct tpm_chip *chip, int l) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int ret = 0; mutex_lock(&priv->locality_count_mutex); if (priv->locality_count == 0) ret = __tpm_tis_request_locality(chip, l); if (!ret) priv->locality_count++; mutex_unlock(&priv->locality_count_mutex); return ret; } static u8 tpm_tis_status(struct tpm_chip *chip) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc; u8 status; rc = tpm_tis_read8(priv, TPM_STS(priv->locality), &status); if (rc < 0) return 0; if (unlikely((status & TPM_STS_READ_ZERO) != 0)) { if (!test_and_set_bit(TPM_TIS_INVALID_STATUS, &priv->flags)) { /* * If this trips, the chances are the read is * returning 0xff because the locality hasn't been * acquired. Usually because tpm_try_get_ops() hasn't * been called before doing a TPM operation. */ dev_err(&chip->dev, "invalid TPM_STS.x 0x%02x, dumping stack for forensics\n", status); /* * Dump stack for forensics, as invalid TPM_STS.x could be * potentially triggered by impaired tpm_try_get_ops() or * tpm_find_get_ops(). 
*/ dump_stack(); } return 0; } return status; } static void tpm_tis_ready(struct tpm_chip *chip) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); /* this causes the current command to be aborted */ tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_COMMAND_READY); } static int get_burstcount(struct tpm_chip *chip) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); unsigned long stop; int burstcnt, rc; u32 value; /* wait for burstcount */ if (chip->flags & TPM_CHIP_FLAG_TPM2) stop = jiffies + chip->timeout_a; else stop = jiffies + chip->timeout_d; do { rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value); if (rc < 0) return rc; burstcnt = (value >> 8) & 0xFFFF; if (burstcnt) return burstcnt; usleep_range(TPM_TIMEOUT_USECS_MIN, TPM_TIMEOUT_USECS_MAX); } while (time_before(jiffies, stop)); return -EBUSY; } static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int size = 0, burstcnt, rc; while (size < count) { rc = wait_for_tpm_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, chip->timeout_c, &priv->read_queue, true); if (rc < 0) return rc; burstcnt = get_burstcount(chip); if (burstcnt < 0) { dev_err(&chip->dev, "Unable to read burstcount\n"); return burstcnt; } burstcnt = min_t(int, burstcnt, count - size); rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality), burstcnt, buf + size); if (rc < 0) return rc; size += burstcnt; } return size; } static int tpm_tis_try_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int size = 0; int status; u32 expected; int rc; size = recv_data(chip, buf, TPM_HEADER_SIZE); /* read first 10 bytes, including tag, paramsize, and result */ if (size < TPM_HEADER_SIZE) { dev_err(&chip->dev, "Unable to read header\n"); goto out; } expected = be32_to_cpu(*(__be32 *) (buf + 2)); if (expected > count || expected < TPM_HEADER_SIZE) { size = -EIO; goto out; } rc = recv_data(chip, &buf[TPM_HEADER_SIZE], expected - TPM_HEADER_SIZE); if (rc < 0) { size = rc; goto out; } size += rc; if (size < expected) { dev_err(&chip->dev, "Unable to read remainder of result\n"); size = -ETIME; goto out; } if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, &priv->int_queue, false) < 0) { size = -ETIME; goto out; } status = tpm_tis_status(chip); if (status & TPM_STS_DATA_AVAIL) { dev_err(&chip->dev, "Error left over data\n"); size = -EIO; goto out; } rc = tpm_tis_verify_crc(priv, (size_t)size, buf); if (rc < 0) { dev_err(&chip->dev, "CRC mismatch for response.\n"); size = rc; goto out; } out: return size; } static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); unsigned int try; int rc = 0; if (count < TPM_HEADER_SIZE) return -EIO; for (try = 0; try < TPM_RETRY; try++) { rc = tpm_tis_try_recv(chip, buf, count); if (rc == -EIO) /* Data transfer errors, indicated by EIO, can be * recovered by rereading the response. 
*/ tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_RESPONSE_RETRY); else break; } tpm_tis_ready(chip); return rc; } /* * If interrupts are used (signaled by an irq set in the vendor structure) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc, status, burstcnt; size_t count = 0; bool itpm = test_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags); status = tpm_tis_status(chip); if ((status & TPM_STS_COMMAND_READY) == 0) { tpm_tis_ready(chip); if (wait_for_tpm_stat (chip, TPM_STS_COMMAND_READY, chip->timeout_b, &priv->int_queue, false) < 0) { rc = -ETIME; goto out_err; } } while (count < len - 1) { burstcnt = get_burstcount(chip); if (burstcnt < 0) { dev_err(&chip->dev, "Unable to read burstcount\n"); rc = burstcnt; goto out_err; } burstcnt = min_t(int, burstcnt, len - count - 1); rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality), burstcnt, buf + count); if (rc < 0) goto out_err; count += burstcnt; if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, &priv->int_queue, false) < 0) { rc = -ETIME; goto out_err; } status = tpm_tis_status(chip); if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) { rc = -EIO; goto out_err; } } /* write last byte */ rc = tpm_tis_write8(priv, TPM_DATA_FIFO(priv->locality), buf[count]); if (rc < 0) goto out_err; if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, &priv->int_queue, false) < 0) { rc = -ETIME; goto out_err; } status = tpm_tis_status(chip); if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) { rc = -EIO; goto out_err; } rc = tpm_tis_verify_crc(priv, len, buf); if (rc < 0) { dev_err(&chip->dev, "CRC mismatch for command.\n"); goto out_err; } return 0; out_err: tpm_tis_ready(chip); return rc; } static void __tpm_tis_disable_interrupts(struct tpm_chip *chip) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); u32 int_mask = 0; tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &int_mask); int_mask &= ~TPM_GLOBAL_INT_ENABLE; tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), int_mask); chip->flags &= ~TPM_CHIP_FLAG_IRQ; } static void tpm_tis_disable_interrupts(struct tpm_chip *chip) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); if (priv->irq == 0) return; __tpm_tis_disable_interrupts(chip); devm_free_irq(chip->dev.parent, priv->irq, chip); priv->irq = 0; } /* * If interrupts are used (signaled by an irq set in the vendor structure) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc; u32 ordinal; unsigned long dur; unsigned int try; for (try = 0; try < TPM_RETRY; try++) { rc = tpm_tis_send_data(chip, buf, len); if (rc >= 0) /* Data transfer done successfully */ break; else if (rc != -EIO) /* Data transfer failed, not recoverable */ return rc; } /* go and do it */ rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO); if (rc < 0) goto out_err; if (chip->flags & TPM_CHIP_FLAG_IRQ) { ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); dur = tpm_calc_ordinal_duration(chip, ordinal); if (wait_for_tpm_stat (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur, &priv->read_queue, false) < 0) { rc = -ETIME; goto out_err; } } return 0; out_err: tpm_tis_ready(chip); return rc; } static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) { int 
rc, irq; struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); if (!(chip->flags & TPM_CHIP_FLAG_IRQ) || test_bit(TPM_TIS_IRQ_TESTED, &priv->flags)) return tpm_tis_send_main(chip, buf, len); /* Verify receipt of the expected IRQ */ irq = priv->irq; priv->irq = 0; chip->flags &= ~TPM_CHIP_FLAG_IRQ; rc = tpm_tis_send_main(chip, buf, len); priv->irq = irq; chip->flags |= TPM_CHIP_FLAG_IRQ; if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags)) tpm_msleep(1); if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags)) tpm_tis_disable_interrupts(chip); set_bit(TPM_TIS_IRQ_TESTED, &priv->flags); return rc; } struct tis_vendor_durations_override { u32 did_vid; struct tpm1_version version; unsigned long durations[3]; }; static const struct tis_vendor_durations_override vendor_dur_overrides[] = { /* STMicroelectronics 0x104a */ { 0x0000104a, { 1, 2, 8, 28 }, { (2 * 60 * HZ), (2 * 60 * HZ), (2 * 60 * HZ) } }, }; static void tpm_tis_update_durations(struct tpm_chip *chip, unsigned long *duration_cap) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); struct tpm1_version *version; u32 did_vid; int i, rc; cap_t cap; chip->duration_adjusted = false; if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, true); rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid); if (rc < 0) { dev_warn(&chip->dev, "%s: failed to read did_vid. %d\n", __func__, rc); goto out; } /* Try to get a TPM version 1.2 or 1.1 TPM_CAP_VERSION_INFO */ rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap, "attempting to determine the 1.2 version", sizeof(cap.version2)); if (!rc) { version = &cap.version2.version; } else { rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap, "attempting to determine the 1.1 version", sizeof(cap.version1)); if (rc) goto out; version = &cap.version1; } for (i = 0; i != ARRAY_SIZE(vendor_dur_overrides); i++) { if (vendor_dur_overrides[i].did_vid != did_vid) continue; if ((version->major == vendor_dur_overrides[i].version.major) && (version->minor == vendor_dur_overrides[i].version.minor) && (version->rev_major == vendor_dur_overrides[i].version.rev_major) && (version->rev_minor == vendor_dur_overrides[i].version.rev_minor)) { memcpy(duration_cap, vendor_dur_overrides[i].durations, sizeof(vendor_dur_overrides[i].durations)); chip->duration_adjusted = true; goto out; } } out: if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, false); } struct tis_vendor_timeout_override { u32 did_vid; unsigned long timeout_us[4]; }; static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = { /* Atmel 3204 */ { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } }, }; static void tpm_tis_update_timeouts(struct tpm_chip *chip, unsigned long *timeout_cap) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int i, rc; u32 did_vid; chip->timeout_adjusted = false; if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, true); rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid); if (rc < 0) { dev_warn(&chip->dev, "%s: failed to read did_vid: %d\n", __func__, rc); goto out; } for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) { if (vendor_timeout_overrides[i].did_vid != did_vid) continue; memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us, sizeof(vendor_timeout_overrides[i].timeout_us)); chip->timeout_adjusted = true; } out: if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, false); return; } /* * Early probing for iTPM with STS_DATA_EXPECT flaw. 
* Try sending command without itpm flag set and if that * fails, repeat with itpm flag set. */ static int probe_itpm(struct tpm_chip *chip) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc = 0; static const u8 cmd_getticks[] = { 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0xf1 }; size_t len = sizeof(cmd_getticks); u16 vendor; if (test_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags)) return 0; rc = tpm_tis_read16(priv, TPM_DID_VID(0), &vendor); if (rc < 0) return rc; /* probe only iTPMS */ if (vendor != TPM_VID_INTEL) return 0; if (tpm_tis_request_locality(chip, 0) != 0) return -EBUSY; rc = tpm_tis_send_data(chip, cmd_getticks, len); if (rc == 0) goto out; tpm_tis_ready(chip); set_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags); rc = tpm_tis_send_data(chip, cmd_getticks, len); if (rc == 0) dev_info(&chip->dev, "Detected an iTPM.\n"); else { clear_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags); rc = -EFAULT; } out: tpm_tis_ready(chip); tpm_tis_relinquish_locality(chip, priv->locality); return rc; } static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); if (!test_bit(TPM_TIS_DEFAULT_CANCELLATION, &priv->flags)) { switch (priv->manufacturer_id) { case TPM_VID_WINBOND: return ((status == TPM_STS_VALID) || (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY))); case TPM_VID_STM: return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)); default: break; } } return status == TPM_STS_COMMAND_READY; } static irqreturn_t tpm_tis_revert_interrupts(struct tpm_chip *chip) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); const char *product; const char *vendor; dev_warn(&chip->dev, FW_BUG "TPM interrupt storm detected, polling instead\n"); vendor = dmi_get_system_info(DMI_SYS_VENDOR); product = dmi_get_system_info(DMI_PRODUCT_VERSION); if (vendor && product) { dev_info(&chip->dev, "Consider adding the following entry to tpm_tis_dmi_table:\n"); dev_info(&chip->dev, "\tDMI_SYS_VENDOR: %s\n", vendor); dev_info(&chip->dev, "\tDMI_PRODUCT_VERSION: %s\n", product); } if (tpm_tis_request_locality(chip, 0) != 0) return IRQ_NONE; __tpm_tis_disable_interrupts(chip); tpm_tis_relinquish_locality(chip, 0); schedule_work(&priv->free_irq_work); return IRQ_HANDLED; } static irqreturn_t tpm_tis_update_unhandled_irqs(struct tpm_chip *chip) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); irqreturn_t irqret = IRQ_HANDLED; if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) return IRQ_HANDLED; if (time_after(jiffies, priv->last_unhandled_irq + HZ/10)) priv->unhandled_irqs = 1; else priv->unhandled_irqs++; priv->last_unhandled_irq = jiffies; if (priv->unhandled_irqs > TPM_TIS_MAX_UNHANDLED_IRQS) irqret = tpm_tis_revert_interrupts(chip); return irqret; } static irqreturn_t tis_int_handler(int dummy, void *dev_id) { struct tpm_chip *chip = dev_id; struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); u32 interrupt; int rc; rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt); if (rc < 0) goto err; if (interrupt == 0) goto err; set_bit(TPM_TIS_IRQ_TESTED, &priv->flags); if (interrupt & TPM_INTF_DATA_AVAIL_INT) wake_up_interruptible(&priv->read_queue); if (interrupt & (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT | TPM_INTF_CMD_READY_INT)) wake_up_interruptible(&priv->int_queue); /* Clear interrupts handled with TPM_EOI */ tpm_tis_request_locality(chip, 0); rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt); tpm_tis_relinquish_locality(chip, 0); if (rc < 0) goto err; tpm_tis_read32(priv, 
TPM_INT_STATUS(priv->locality), &interrupt); return IRQ_HANDLED; err: return tpm_tis_update_unhandled_irqs(chip); } static void tpm_tis_gen_interrupt(struct tpm_chip *chip) { const char *desc = "attempting to generate an interrupt"; u32 cap2; cap_t cap; int ret; chip->flags |= TPM_CHIP_FLAG_IRQ; if (chip->flags & TPM_CHIP_FLAG_TPM2) ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc); else ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0); if (ret) chip->flags &= ~TPM_CHIP_FLAG_IRQ; } static void tpm_tis_free_irq_func(struct work_struct *work) { struct tpm_tis_data *priv = container_of(work, typeof(*priv), free_irq_work); struct tpm_chip *chip = priv->chip; devm_free_irq(chip->dev.parent, priv->irq, chip); priv->irq = 0; } /* Register the IRQ and issue a command that will cause an interrupt. If an * irq is seen then leave the chip setup for IRQ operation, otherwise reverse * everything and leave in polling mode. Returns 0 on success. */ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask, int flags, int irq) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); u8 original_int_vec; int rc; u32 int_status; INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func); rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL, tis_int_handler, IRQF_ONESHOT | flags, dev_name(&chip->dev), chip); if (rc) { dev_info(&chip->dev, "Unable to request irq: %d for probe\n", irq); return -1; } priv->irq = irq; rc = tpm_tis_request_locality(chip, 0); if (rc < 0) return rc; rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality), &original_int_vec); if (rc < 0) { tpm_tis_relinquish_locality(chip, priv->locality); return rc; } rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq); if (rc < 0) goto restore_irqs; rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status); if (rc < 0) goto restore_irqs; /* Clear all existing */ rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status); if (rc < 0) goto restore_irqs; /* Turn on */ rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask | TPM_GLOBAL_INT_ENABLE); if (rc < 0) goto restore_irqs; clear_bit(TPM_TIS_IRQ_TESTED, &priv->flags); /* Generate an interrupt by having the core call through to * tpm_tis_send */ tpm_tis_gen_interrupt(chip); restore_irqs: /* tpm_tis_send will either confirm the interrupt is working or it * will call disable_irq which undoes all of the above. */ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), original_int_vec); rc = -1; } tpm_tis_relinquish_locality(chip, priv->locality); return rc; } /* Try to find the IRQ the TPM is using. This is for legacy x86 systems that * do not have ACPI/etc. We typically expect the interrupt to be declared if * present.
*/ static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); u8 original_int_vec; int i, rc; rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality), &original_int_vec); if (rc < 0) return; if (!original_int_vec) { if (IS_ENABLED(CONFIG_X86)) for (i = 3; i <= 15; i++) if (!tpm_tis_probe_irq_single(chip, intmask, 0, i)) return; } else if (!tpm_tis_probe_irq_single(chip, intmask, 0, original_int_vec)) return; } void tpm_tis_remove(struct tpm_chip *chip) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); u32 reg = TPM_INT_ENABLE(priv->locality); u32 interrupt; int rc; tpm_tis_clkrun_enable(chip, true); rc = tpm_tis_read32(priv, reg, &interrupt); if (rc < 0) interrupt = 0; tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt); flush_work(&priv->free_irq_work); tpm_tis_clkrun_enable(chip, false); if (priv->ilb_base_addr) iounmap(priv->ilb_base_addr); } EXPORT_SYMBOL_GPL(tpm_tis_remove); /** * tpm_tis_clkrun_enable() - Keep clkrun protocol disabled for entire duration * of a single TPM command * @chip: TPM chip to use * @value: 1 - Disable CLKRUN protocol, so that clocks are free running * 0 - Enable CLKRUN protocol * Call this function directly in tpm_tis_remove() in error or driver removal * path, since the chip->ops is set to NULL in tpm_chip_unregister(). */ static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value) { struct tpm_tis_data *data = dev_get_drvdata(&chip->dev); u32 clkrun_val; if (!IS_ENABLED(CONFIG_X86) || !is_bsw() || !data->ilb_base_addr) return; if (value) { data->clkrun_enabled++; if (data->clkrun_enabled > 1) return; clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET); /* Disable LPC CLKRUN# */ clkrun_val &= ~LPC_CLKRUN_EN; iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); /* * Write any random value on port 0x80 which is on LPC, to make * sure LPC clock is running before sending any TPM command. */ outb(0xCC, 0x80); } else { data->clkrun_enabled--; if (data->clkrun_enabled) return; clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET); /* Enable LPC CLKRUN# */ clkrun_val |= LPC_CLKRUN_EN; iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); /* * Write any random value on port 0x80 which is on LPC, to make * sure LPC clock is running before sending any TPM command. 
*/ outb(0xCC, 0x80); } } static const struct tpm_class_ops tpm_tis = { .flags = TPM_OPS_AUTO_STARTUP, .status = tpm_tis_status, .recv = tpm_tis_recv, .send = tpm_tis_send, .cancel = tpm_tis_ready, .update_timeouts = tpm_tis_update_timeouts, .update_durations = tpm_tis_update_durations, .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_canceled = tpm_tis_req_canceled, .request_locality = tpm_tis_request_locality, .relinquish_locality = tpm_tis_relinquish_locality, .clk_enable = tpm_tis_clkrun_enable, }; int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, const struct tpm_tis_phy_ops *phy_ops, acpi_handle acpi_dev_handle) { u32 vendor; u32 intfcaps; u32 intmask; u32 clkrun_val; u8 rid; int rc, probe; struct tpm_chip *chip; chip = tpmm_chip_alloc(dev, &tpm_tis); if (IS_ERR(chip)) return PTR_ERR(chip); #ifdef CONFIG_ACPI chip->acpi_dev_handle = acpi_dev_handle; #endif chip->hwrng.quality = priv->rng_quality; /* Maximum timeouts */ chip->timeout_a = msecs_to_jiffies(TIS_TIMEOUT_A_MAX); chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX); chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX); chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX); priv->chip = chip; priv->timeout_min = TPM_TIMEOUT_USECS_MIN; priv->timeout_max = TPM_TIMEOUT_USECS_MAX; priv->phy_ops = phy_ops; priv->locality_count = 0; mutex_init(&priv->locality_count_mutex); dev_set_drvdata(&chip->dev, priv); rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor); if (rc < 0) return rc; priv->manufacturer_id = vendor; if (priv->manufacturer_id == TPM_VID_ATML && !(chip->flags & TPM_CHIP_FLAG_TPM2)) { priv->timeout_min = TIS_TIMEOUT_MIN_ATML; priv->timeout_max = TIS_TIMEOUT_MAX_ATML; } if (is_bsw()) { priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, ILB_REMAP_SIZE); if (!priv->ilb_base_addr) return -ENOMEM; clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET); /* Check if CLKRUN# is already not enabled in the LPC bus */ if (!(clkrun_val & LPC_CLKRUN_EN)) { iounmap(priv->ilb_base_addr); priv->ilb_base_addr = NULL; } } if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, true); if (wait_startup(chip, 0) != 0) { rc = -ENODEV; goto out_err; } /* Take control of the TPM's interrupt hardware and shut it off */ rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); if (rc < 0) goto out_err; /* Figure out the capabilities */ rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps); if (rc < 0) goto out_err; dev_dbg(dev, "TPM interface capabilities (0x%x):\n", intfcaps); if (intfcaps & TPM_INTF_BURST_COUNT_STATIC) dev_dbg(dev, "\tBurst Count Static\n"); if (intfcaps & TPM_INTF_CMD_READY_INT) { intmask |= TPM_INTF_CMD_READY_INT; dev_dbg(dev, "\tCommand Ready Int Support\n"); } if (intfcaps & TPM_INTF_INT_EDGE_FALLING) dev_dbg(dev, "\tInterrupt Edge Falling\n"); if (intfcaps & TPM_INTF_INT_EDGE_RISING) dev_dbg(dev, "\tInterrupt Edge Rising\n"); if (intfcaps & TPM_INTF_INT_LEVEL_LOW) dev_dbg(dev, "\tInterrupt Level Low\n"); if (intfcaps & TPM_INTF_INT_LEVEL_HIGH) dev_dbg(dev, "\tInterrupt Level High\n"); if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) { intmask |= TPM_INTF_LOCALITY_CHANGE_INT; dev_dbg(dev, "\tLocality Change Int Support\n"); } if (intfcaps & TPM_INTF_STS_VALID_INT) { intmask |= TPM_INTF_STS_VALID_INT; dev_dbg(dev, "\tSts Valid Int Support\n"); } if (intfcaps & TPM_INTF_DATA_AVAIL_INT) { intmask |= TPM_INTF_DATA_AVAIL_INT; dev_dbg(dev, "\tData Avail Int Support\n"); } intmask &= 
~TPM_GLOBAL_INT_ENABLE; rc = tpm_tis_request_locality(chip, 0); if (rc < 0) { rc = -ENODEV; goto out_err; } tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); tpm_tis_relinquish_locality(chip, 0); rc = tpm_chip_start(chip); if (rc) goto out_err; rc = tpm2_probe(chip); tpm_chip_stop(chip); if (rc) goto out_err; rc = tpm_tis_read8(priv, TPM_RID(0), &rid); if (rc < 0) goto out_err; dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n", (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2", vendor >> 16, rid); probe = probe_itpm(chip); if (probe < 0) { rc = -ENODEV; goto out_err; } /* INTERRUPT Setup */ init_waitqueue_head(&priv->read_queue); init_waitqueue_head(&priv->int_queue); rc = tpm_chip_bootstrap(chip); if (rc) goto out_err; if (irq != -1) { /* * Before doing irq testing issue a command to the TPM in polling mode * to make sure it works. May as well use that command to set the * proper timeouts for the driver. */ rc = tpm_tis_request_locality(chip, 0); if (rc < 0) goto out_err; rc = tpm_get_timeouts(chip); tpm_tis_relinquish_locality(chip, 0); if (rc) { dev_err(dev, "Could not get TPM timeouts and durations\n"); rc = -ENODEV; goto out_err; } if (irq) tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, irq); else tpm_tis_probe_irq(chip, intmask); if (chip->flags & TPM_CHIP_FLAG_IRQ) { priv->int_mask = intmask; } else { dev_err(&chip->dev, FW_BUG "TPM interrupt not working, polling instead\n"); rc = tpm_tis_request_locality(chip, 0); if (rc < 0) goto out_err; tpm_tis_disable_interrupts(chip); tpm_tis_relinquish_locality(chip, 0); } } rc = tpm_chip_register(chip); if (rc) goto out_err; if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, false); return 0; out_err: if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, false); tpm_tis_remove(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_tis_core_init); #ifdef CONFIG_PM_SLEEP static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); u32 intmask; int rc; /* * Re-enable interrupts that device may have lost or BIOS/firmware may * have disabled. */ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq); if (rc < 0) { dev_err(&chip->dev, "Setting IRQ failed.\n"); return; } intmask = priv->int_mask | TPM_GLOBAL_INT_ENABLE; rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); if (rc < 0) dev_err(&chip->dev, "Enabling interrupts failed.\n"); } int tpm_tis_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); int ret; ret = tpm_chip_start(chip); if (ret) return ret; if (chip->flags & TPM_CHIP_FLAG_IRQ) tpm_tis_reenable_interrupts(chip); /* * TPM 1.2 requires self-test on resume. This function actually returns * an error code but for unknown reason it isn't handled. */ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) tpm1_do_selftest(chip); tpm_chip_stop(chip); ret = tpm_pm_resume(dev); if (ret) return ret; return 0; } EXPORT_SYMBOL_GPL(tpm_tis_resume); #endif MODULE_AUTHOR("Leendert van Doorn ([email protected])"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_tis_core.c
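Almost every transfer path in tpm_tis_core.c reduces to the same idea: read TPM_STS, mask the bits of interest, then either sleep on a wait queue until tis_int_handler() wakes it or, when no interrupt covers those bits, poll with short sleeps until a deadline, as the tail of wait_for_tpm_stat() does. The sketch below is a reduced, kernel-free rendering of that polling fallback; read_status() is a stand-in that simulates the register read and is not part of the real driver, and the 400 us delay approximates the TPM_TIMEOUT_USECS_MIN/MAX range used upstream.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TPM_STS_VALID      0x80
#define TPM_STS_DATA_AVAIL 0x10

/* Simulated TPM_STS read: pretend the chip becomes ready after a few polls. */
static uint8_t read_status(void)
{
	static int polls;

	return ++polls < 5 ? 0 : (TPM_STS_VALID | TPM_STS_DATA_AVAIL);
}

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll until every bit in mask is set or timeout_ms elapses, mirroring the
 * no-IRQ loop at the end of wait_for_tpm_stat(). */
static bool wait_for_stat(uint8_t mask, unsigned int timeout_ms)
{
	uint64_t stop = now_ms() + timeout_ms;
	struct timespec delay = { .tv_nsec = 400 * 1000 };

	do {
		if ((read_status() & mask) == mask)
			return true;
		nanosleep(&delay, NULL);
	} while (now_ms() < stop);
	return false;
}

int main(void)
{
	bool ok = wait_for_stat(TPM_STS_VALID | TPM_STS_DATA_AVAIL, 750);

	printf("status %s\n", ok ? "ready" : "timed out");
	return !ok;
}

The real driver layers one refinement on top: tpm_tis_filter_sts_mask() first strips the bits that can be delivered by interrupt, so the polling loop only ever runs for status changes the hardware cannot signal.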
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2016 Google, Inc * * This device driver implements a TCG PTP FIFO interface over SPI for chips * with Cr50 firmware. * It is based on tpm_tis_spi driver by Peter Huewe and Christophe Ricard. */ #include <linux/completion.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pm.h> #include <linux/spi/spi.h> #include <linux/wait.h> #include "tpm_tis_core.h" #include "tpm_tis_spi.h" /* * Cr50 timing constants: * - can go to sleep not earlier than after CR50_SLEEP_DELAY_MSEC. * - needs up to CR50_WAKE_START_DELAY_USEC to wake after sleep. * - requires waiting for "ready" IRQ, if supported; or waiting for at least * CR50_NOIRQ_ACCESS_DELAY_MSEC between transactions, if IRQ is not supported. * - waits for up to CR50_FLOW_CONTROL for flow control 'ready' indication. */ #define CR50_SLEEP_DELAY_MSEC 1000 #define CR50_WAKE_START_DELAY_USEC 1000 #define CR50_NOIRQ_ACCESS_DELAY msecs_to_jiffies(2) #define CR50_READY_IRQ_TIMEOUT msecs_to_jiffies(TPM2_TIMEOUT_A) #define CR50_FLOW_CONTROL msecs_to_jiffies(TPM2_TIMEOUT_A) #define MAX_IRQ_CONFIRMATION_ATTEMPTS 3 #define TPM_CR50_FW_VER(l) (0x0f90 | ((l) << 12)) #define TPM_CR50_MAX_FW_VER_LEN 64 /* Default quality for hwrng. */ #define TPM_CR50_DEFAULT_RNG_QUALITY 700 struct cr50_spi_phy { struct tpm_tis_spi_phy spi_phy; struct mutex time_track_mutex; unsigned long last_access; unsigned long access_delay; unsigned int irq_confirmation_attempt; bool irq_needs_confirmation; bool irq_confirmed; }; static inline struct cr50_spi_phy *to_cr50_spi_phy(struct tpm_tis_spi_phy *phy) { return container_of(phy, struct cr50_spi_phy, spi_phy); } /* * The cr50 interrupt handler just signals waiting threads that the * interrupt was asserted. It does not do any processing triggered * by interrupts but is instead used to avoid fixed delays. */ static irqreturn_t cr50_spi_irq_handler(int dummy, void *dev_id) { struct cr50_spi_phy *cr50_phy = dev_id; cr50_phy->irq_confirmed = true; complete(&cr50_phy->spi_phy.ready); return IRQ_HANDLED; } /* * Cr50 needs to have at least some delay between consecutive * transactions. Make sure we wait. */ static void cr50_ensure_access_delay(struct cr50_spi_phy *phy) { unsigned long allowed_access = phy->last_access + phy->access_delay; unsigned long time_now = jiffies; struct device *dev = &phy->spi_phy.spi_device->dev; /* * Note: There is a small chance, if Cr50 is not accessed in a few days, * that time_in_range will not provide the correct result after the wrap * around for jiffies. In this case, we'll have an unneeded short delay, * which is fine. */ if (time_in_range_open(time_now, phy->last_access, allowed_access)) { unsigned long remaining, timeout = allowed_access - time_now; remaining = wait_for_completion_timeout(&phy->spi_phy.ready, timeout); if (!remaining && phy->irq_confirmed) dev_warn(dev, "Timeout waiting for TPM ready IRQ\n"); } if (phy->irq_needs_confirmation) { unsigned int attempt = ++phy->irq_confirmation_attempt; if (phy->irq_confirmed) { phy->irq_needs_confirmation = false; phy->access_delay = CR50_READY_IRQ_TIMEOUT; dev_info(dev, "TPM ready IRQ confirmed on attempt %u\n", attempt); } else if (attempt > MAX_IRQ_CONFIRMATION_ATTEMPTS) { phy->irq_needs_confirmation = false; dev_warn(dev, "IRQ not confirmed - will use delays\n"); } } } /* * Cr50 might go to sleep if there is no SPI activity for some time and * miss the first few bits/bytes on the bus. In such case, wake it up * by asserting CS and give it time to start up. 
*/ static bool cr50_needs_waking(struct cr50_spi_phy *phy) { /* * Note: There is a small chance, if Cr50 is not accessed in a few days, * that time_in_range will not provide the correct result after the wrap * around for jiffies. In this case, we'll probably timeout or read * incorrect value from TPM_STS and just retry the operation. */ return !time_in_range_open(jiffies, phy->last_access, phy->spi_phy.wake_after); } static void cr50_wake_if_needed(struct cr50_spi_phy *cr50_phy) { struct tpm_tis_spi_phy *phy = &cr50_phy->spi_phy; if (cr50_needs_waking(cr50_phy)) { /* Assert CS, wait 1 msec, deassert CS */ struct spi_transfer spi_cs_wake = { .delay = { .value = 1000, .unit = SPI_DELAY_UNIT_USECS } }; spi_sync_transfer(phy->spi_device, &spi_cs_wake, 1); /* Wait for it to fully wake */ usleep_range(CR50_WAKE_START_DELAY_USEC, CR50_WAKE_START_DELAY_USEC * 2); } /* Reset the time when we need to wake Cr50 again */ phy->wake_after = jiffies + msecs_to_jiffies(CR50_SLEEP_DELAY_MSEC); } /* * Flow control: clock the bus and wait for cr50 to set LSB before * sending/receiving data. TCG PTP spec allows it to happen during * the last byte of header, but cr50 never does that in practice, * and earlier versions had a bug when it was set too early, so don't * check for it during header transfer. */ static int cr50_spi_flow_control(struct tpm_tis_spi_phy *phy, struct spi_transfer *spi_xfer) { struct device *dev = &phy->spi_device->dev; unsigned long timeout = jiffies + CR50_FLOW_CONTROL; struct spi_message m; int ret; spi_xfer->len = 1; do { spi_message_init(&m); spi_message_add_tail(spi_xfer, &m); ret = spi_sync_locked(phy->spi_device, &m); if (ret < 0) return ret; if (time_after(jiffies, timeout)) { dev_warn(dev, "Timeout during flow control\n"); return -EBUSY; } } while (!(phy->iobuf[0] & 0x01)); return 0; } static bool tpm_cr50_spi_is_firmware_power_managed(struct device *dev) { u8 val; int ret; /* This flag should default true when the device property is not present */ ret = device_property_read_u8(dev, "firmware-power-managed", &val); if (ret) return true; return val; } static int tpm_tis_spi_cr50_transfer(struct tpm_tis_data *data, u32 addr, u16 len, u8 *in, const u8 *out) { struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); struct cr50_spi_phy *cr50_phy = to_cr50_spi_phy(phy); int ret; mutex_lock(&cr50_phy->time_track_mutex); /* * Do this outside of spi_bus_lock in case cr50 is not the * only device on that spi bus. 
*/ cr50_ensure_access_delay(cr50_phy); cr50_wake_if_needed(cr50_phy); ret = tpm_tis_spi_transfer(data, addr, len, in, out); cr50_phy->last_access = jiffies; mutex_unlock(&cr50_phy->time_track_mutex); return ret; } static int tpm_tis_spi_cr50_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result, enum tpm_tis_io_mode io_mode) { return tpm_tis_spi_cr50_transfer(data, addr, len, result, NULL); } static int tpm_tis_spi_cr50_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, const u8 *value, enum tpm_tis_io_mode io_mode) { return tpm_tis_spi_cr50_transfer(data, addr, len, NULL, value); } static const struct tpm_tis_phy_ops tpm_spi_cr50_phy_ops = { .read_bytes = tpm_tis_spi_cr50_read_bytes, .write_bytes = tpm_tis_spi_cr50_write_bytes, }; static void cr50_print_fw_version(struct tpm_tis_data *data) { struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); int i, len = 0; char fw_ver[TPM_CR50_MAX_FW_VER_LEN + 1]; char fw_ver_block[4]; /* * Write anything to TPM_CR50_FW_VER to start from the beginning * of the version string */ tpm_tis_write8(data, TPM_CR50_FW_VER(data->locality), 0); /* Read the string, 4 bytes at a time, until we get '\0' */ do { tpm_tis_read_bytes(data, TPM_CR50_FW_VER(data->locality), 4, fw_ver_block); for (i = 0; i < 4 && fw_ver_block[i]; ++len, ++i) fw_ver[len] = fw_ver_block[i]; } while (i == 4 && len < TPM_CR50_MAX_FW_VER_LEN); fw_ver[len] = '\0'; dev_info(&phy->spi_device->dev, "Cr50 firmware version: %s\n", fw_ver); } int cr50_spi_probe(struct spi_device *spi) { struct tpm_tis_spi_phy *phy; struct cr50_spi_phy *cr50_phy; int ret; struct tpm_chip *chip; cr50_phy = devm_kzalloc(&spi->dev, sizeof(*cr50_phy), GFP_KERNEL); if (!cr50_phy) return -ENOMEM; phy = &cr50_phy->spi_phy; phy->flow_control = cr50_spi_flow_control; phy->wake_after = jiffies; phy->priv.rng_quality = TPM_CR50_DEFAULT_RNG_QUALITY; init_completion(&phy->ready); cr50_phy->access_delay = CR50_NOIRQ_ACCESS_DELAY; cr50_phy->last_access = jiffies; mutex_init(&cr50_phy->time_track_mutex); if (spi->irq > 0) { ret = devm_request_irq(&spi->dev, spi->irq, cr50_spi_irq_handler, IRQF_TRIGGER_RISING | IRQF_ONESHOT, "cr50_spi", cr50_phy); if (ret < 0) { if (ret == -EPROBE_DEFER) return ret; dev_warn(&spi->dev, "Requesting IRQ %d failed: %d\n", spi->irq, ret); /* * This is not fatal, the driver will fall back to * delays automatically, since ready will never * be completed without a registered irq handler. * So, just fall through. */ } else { /* * IRQ requested, let's verify that it is actually * triggered, before relying on it. */ cr50_phy->irq_needs_confirmation = true; } } else { dev_warn(&spi->dev, "No IRQ - will use delays between transactions.\n"); } ret = tpm_tis_spi_init(spi, phy, -1, &tpm_spi_cr50_phy_ops); if (ret) return ret; cr50_print_fw_version(&phy->priv); chip = dev_get_drvdata(&spi->dev); if (tpm_cr50_spi_is_firmware_power_managed(&spi->dev)) chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED; return 0; } #ifdef CONFIG_PM_SLEEP int tpm_tis_spi_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); struct tpm_tis_data *data = dev_get_drvdata(&chip->dev); struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); /* * Jiffies not increased during suspend, so we need to reset * the time to wake Cr50 after resume. */ phy->wake_after = jiffies; return tpm_tis_resume(dev); } #endif
linux-master
drivers/char/tpm/tpm_tis_spi_cr50.c
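Most of the Cr50-specific glue above is bookkeeping around two timestamps: last_access enforces a minimum quiet period between SPI transactions, and wake_after decides when the chip may have gone to sleep and needs a chip-select pulse before the next transfer. Below is a hypothetical host-side sketch of that bookkeeping. It uses CLOCK_MONOTONIC, which does not wrap, so it sidesteps the jiffies wrap-around caveat the driver comments mention; pulse_chip_select() is a stub standing in for the real assert-CS SPI transfer, and the constants mirror the driver's.

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CR50_SLEEP_DELAY_MS        1000 /* chip may doze after this much idle time */
#define CR50_NOIRQ_ACCESS_DELAY_MS 2    /* minimum gap between transactions */
#define CR50_WAKE_START_DELAY_US   1000 /* start-up time after the wake pulse */

struct cr50_timing {
	uint64_t last_access_ms;
	uint64_t wake_after_ms;
};

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Stub for the 1 ms assert-CS/deassert-CS transfer the driver issues. */
static void pulse_chip_select(void)
{
	usleep(1000);
}

static void cr50_before_transfer(struct cr50_timing *t)
{
	uint64_t now = now_ms();
	uint64_t allowed = t->last_access_ms + CR50_NOIRQ_ACCESS_DELAY_MS;

	/* Honor the mandatory quiet period between transactions. */
	if (now < allowed)
		usleep((allowed - now) * 1000);

	/* Wake the chip if it has been idle long enough to fall asleep. */
	if (now_ms() > t->wake_after_ms) {
		pulse_chip_select();
		usleep(CR50_WAKE_START_DELAY_US);
	}
	/* Reset the point at which the chip must be woken again. */
	t->wake_after_ms = now_ms() + CR50_SLEEP_DELAY_MS;
}

static void cr50_after_transfer(struct cr50_timing *t)
{
	t->last_access_ms = now_ms();
}

int main(void)
{
	struct cr50_timing t = { .last_access_ms = 0, .wake_after_ms = 0 };

	for (int i = 0; i < 3; i++) {
		cr50_before_transfer(&t);
		/* the actual SPI transfer would run here */
		cr50_after_transfer(&t);
		printf("transfer %d complete\n", i);
	}
	return 0;
}

The driver performs these same two steps in tpm_tis_spi_cr50_transfer(), under time_track_mutex, and additionally shortens access_delay once the "ready" IRQ has been confirmed.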
// SPDX-License-Identifier: GPL-2.0-only /* * Description: * Device Driver for the Infineon Technologies * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module * Specifications at www.trustedcomputinggroup.org * * Copyright (C) 2005, Marcel Selhorst <[email protected]> * Sirrix AG - security technologies <[email protected]> and * Applied Data Security Group, Ruhr-University Bochum, Germany * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/ */ #include <linux/init.h> #include <linux/pnp.h> #include "tpm.h" /* Infineon specific definitions */ /* maximum number of WTX-packages */ #define TPM_MAX_WTX_PACKAGES 50 /* msleep-Time for WTX-packages */ #define TPM_WTX_MSLEEP_TIME 20 /* msleep-Time --> Interval to check status register */ #define TPM_MSLEEP_TIME 3 /* gives number of max. msleep()-calls before throwing timeout */ #define TPM_MAX_TRIES 5000 #define TPM_INFINEON_DEV_VEN_VALUE 0x15D1 #define TPM_INF_IO_PORT 0x0 #define TPM_INF_IO_MEM 0x1 #define TPM_INF_ADDR 0x0 #define TPM_INF_DATA 0x1 struct tpm_inf_dev { int iotype; void __iomem *mem_base; /* MMIO ioremap'd addr */ unsigned long map_base; /* phys MMIO base */ unsigned long map_size; /* MMIO region size */ unsigned int index_off; /* index register offset */ unsigned int data_regs; /* Data registers */ unsigned int data_size; unsigned int config_port; /* IO Port config index reg */ unsigned int config_size; }; static struct tpm_inf_dev tpm_dev; static inline void tpm_data_out(unsigned char data, unsigned char offset) { if (tpm_dev.iotype == TPM_INF_IO_PORT) outb(data, tpm_dev.data_regs + offset); else writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset); } static inline unsigned char tpm_data_in(unsigned char offset) { if (tpm_dev.iotype == TPM_INF_IO_PORT) return inb(tpm_dev.data_regs + offset); else return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset); } static inline void tpm_config_out(unsigned char data, unsigned char offset) { if (tpm_dev.iotype == TPM_INF_IO_PORT) outb(data, tpm_dev.config_port + offset); else writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset); } static inline unsigned char tpm_config_in(unsigned char offset) { if (tpm_dev.iotype == TPM_INF_IO_PORT) return inb(tpm_dev.config_port + offset); else return readb(tpm_dev.mem_base + tpm_dev.index_off + offset); } /* TPM header definitions */ enum infineon_tpm_header { TPM_VL_VER = 0x01, TPM_VL_CHANNEL_CONTROL = 0x07, TPM_VL_CHANNEL_PERSONALISATION = 0x0A, TPM_VL_CHANNEL_TPM = 0x0B, TPM_VL_CONTROL = 0x00, TPM_INF_NAK = 0x15, TPM_CTRL_WTX = 0x10, TPM_CTRL_WTX_ABORT = 0x18, TPM_CTRL_WTX_ABORT_ACK = 0x18, TPM_CTRL_ERROR = 0x20, TPM_CTRL_CHAININGACK = 0x40, TPM_CTRL_CHAINING = 0x80, TPM_CTRL_DATA = 0x04, TPM_CTRL_DATA_CHA = 0x84, TPM_CTRL_DATA_CHA_ACK = 0xC4 }; enum infineon_tpm_register { WRFIFO = 0x00, RDFIFO = 0x01, STAT = 0x02, CMD = 0x03 }; enum infineon_tpm_command_bits { CMD_DIS = 0x00, CMD_LP = 0x01, CMD_RES = 0x02, CMD_IRQC = 0x06 }; enum infineon_tpm_status_bits { STAT_XFE = 0x00, STAT_LPA = 0x01, STAT_FOK = 0x02, STAT_TOK = 0x03, STAT_IRQA = 0x06, STAT_RDA = 0x07 }; /* some outgoing values */ enum infineon_tpm_values { CHIP_ID1 = 0x20, CHIP_ID2 = 0x21, TPM_DAR = 0x30, RESET_LP_IRQC_DISABLE = 0x41, ENABLE_REGISTER_PAIR = 0x55, IOLIMH = 0x60, IOLIML = 0x61, DISABLE_REGISTER_PAIR = 0xAA, IDVENL = 0xF1, IDVENH = 0xF2, IDPDL = 0xF3, IDPDH = 0xF4 }; static int number_of_wtx; static int empty_fifo(struct tpm_chip *chip, int clear_wrfifo) { int status; int check = 0; int i; if (clear_wrfifo) { for (i = 0; 
i < 4096; i++) { status = tpm_data_in(WRFIFO); if (status == 0xff) { if (check == 5) break; else check++; } } } /* Note: The values which are currently in the FIFO of the TPM are thrown away since there is no usage for them. Usually, this has nothing to say, since the TPM will give its answer immediately or will be aborted anyway, so the data here is usually garbage and useless. We have to clean this, because the next communication with the TPM would be rubbish, if there is still some old data in the Read FIFO. */ i = 0; do { status = tpm_data_in(RDFIFO); status = tpm_data_in(STAT); i++; if (i == TPM_MAX_TRIES) return -EIO; } while ((status & (1 << STAT_RDA)) != 0); return 0; } static int wait(struct tpm_chip *chip, int wait_for_bit) { int status; int i; for (i = 0; i < TPM_MAX_TRIES; i++) { status = tpm_data_in(STAT); /* check the status-register if wait_for_bit is set */ if (status & 1 << wait_for_bit) break; tpm_msleep(TPM_MSLEEP_TIME); } if (i == TPM_MAX_TRIES) { /* timeout occurs */ if (wait_for_bit == STAT_XFE) dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n"); if (wait_for_bit == STAT_RDA) dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n"); return -EIO; } return 0; }; static void wait_and_send(struct tpm_chip *chip, u8 sendbyte) { wait(chip, STAT_XFE); tpm_data_out(sendbyte, WRFIFO); } /* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more calculation time, it sends a WTX-package, which has to be acknowledged or aborted. This usually occurs if you are hammering the TPM with key creation. Set the maximum number of WTX-packages in the definitions above, if the number is reached, the waiting-time will be denied and the TPM command has to be resend. */ static void tpm_wtx(struct tpm_chip *chip) { number_of_wtx++; dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n", number_of_wtx, TPM_MAX_WTX_PACKAGES); wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_WTX); wait_and_send(chip, 0x00); wait_and_send(chip, 0x00); tpm_msleep(TPM_WTX_MSLEEP_TIME); } static void tpm_wtx_abort(struct tpm_chip *chip) { dev_info(&chip->dev, "Aborting WTX\n"); wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_WTX_ABORT); wait_and_send(chip, 0x00); wait_and_send(chip, 0x00); number_of_wtx = 0; tpm_msleep(TPM_WTX_MSLEEP_TIME); } static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count) { int i; int ret; u32 size = 0; number_of_wtx = 0; recv_begin: /* start receiving header */ for (i = 0; i < 4; i++) { ret = wait(chip, STAT_RDA); if (ret) return -EIO; buf[i] = tpm_data_in(RDFIFO); } if (buf[0] != TPM_VL_VER) { dev_err(&chip->dev, "Wrong transport protocol implementation!\n"); return -EIO; } if (buf[1] == TPM_CTRL_DATA) { /* size of the data received */ size = ((buf[2] << 8) | buf[3]); for (i = 0; i < size; i++) { wait(chip, STAT_RDA); buf[i] = tpm_data_in(RDFIFO); } if ((size == 0x6D00) && (buf[1] == 0x80)) { dev_err(&chip->dev, "Error handling on vendor layer!\n"); return -EIO; } for (i = 0; i < size; i++) buf[i] = buf[i + 6]; size = size - 6; return size; } if (buf[1] == TPM_CTRL_WTX) { dev_info(&chip->dev, "WTX-package received\n"); if (number_of_wtx < TPM_MAX_WTX_PACKAGES) { tpm_wtx(chip); goto recv_begin; } else { tpm_wtx_abort(chip); goto recv_begin; } } if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) { dev_info(&chip->dev, "WTX-abort acknowledged\n"); return size; } if (buf[1] == TPM_CTRL_ERROR) { dev_err(&chip->dev, "ERROR-package received:\n"); if (buf[4] == TPM_INF_NAK) dev_err(&chip->dev, "-> Negative acknowledgement" " - retransmit command!\n"); return -EIO; } 
return -EIO; } static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) { int i; int ret; u8 count_high, count_low, count_4, count_3, count_2, count_1; /* Disabling Reset, LP and IRQC */ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); ret = empty_fifo(chip, 1); if (ret) { dev_err(&chip->dev, "Timeout while clearing FIFO\n"); return -EIO; } ret = wait(chip, STAT_XFE); if (ret) return -EIO; count_4 = (count & 0xff000000) >> 24; count_3 = (count & 0x00ff0000) >> 16; count_2 = (count & 0x0000ff00) >> 8; count_1 = (count & 0x000000ff); count_high = ((count + 6) & 0xffffff00) >> 8; count_low = ((count + 6) & 0x000000ff); /* Sending Header */ wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_DATA); wait_and_send(chip, count_high); wait_and_send(chip, count_low); /* Sending Data Header */ wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_VL_CHANNEL_TPM); wait_and_send(chip, count_4); wait_and_send(chip, count_3); wait_and_send(chip, count_2); wait_and_send(chip, count_1); /* Sending Data */ for (i = 0; i < count; i++) { wait_and_send(chip, buf[i]); } return 0; } static void tpm_inf_cancel(struct tpm_chip *chip) { /* Since we are using the legacy mode to communicate with the TPM, we have no cancel functions, but have a workaround for interrupting the TPM through WTX. */ } static u8 tpm_inf_status(struct tpm_chip *chip) { return tpm_data_in(STAT); } static const struct tpm_class_ops tpm_inf = { .recv = tpm_inf_recv, .send = tpm_inf_send, .cancel = tpm_inf_cancel, .status = tpm_inf_status, .req_complete_mask = 0, .req_complete_val = 0, }; static const struct pnp_device_id tpm_inf_pnp_tbl[] = { /* Infineon TPMs */ {"IFX0101", 0}, {"IFX0102", 0}, {"", 0} }; MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl); static int tpm_inf_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) { int rc = 0; u8 iol, ioh; int vendorid[2]; int version[2]; int productid[2]; const char *chipname; struct tpm_chip *chip; /* read IO-ports through PnP */ if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { tpm_dev.iotype = TPM_INF_IO_PORT; tpm_dev.config_port = pnp_port_start(dev, 0); tpm_dev.config_size = pnp_port_len(dev, 0); tpm_dev.data_regs = pnp_port_start(dev, 1); tpm_dev.data_size = pnp_port_len(dev, 1); if ((tpm_dev.data_size < 4) || (tpm_dev.config_size < 2)) { rc = -EINVAL; goto err_last; } dev_info(&dev->dev, "Found %s with ID %s\n", dev->name, dev_id->id); if (!((tpm_dev.data_regs >> 8) & 0xff)) { rc = -EINVAL; goto err_last; } /* publish my base address and request region */ if (request_region(tpm_dev.data_regs, tpm_dev.data_size, "tpm_infineon0") == NULL) { rc = -EINVAL; goto err_last; } if (request_region(tpm_dev.config_port, tpm_dev.config_size, "tpm_infineon0") == NULL) { release_region(tpm_dev.data_regs, tpm_dev.data_size); rc = -EINVAL; goto err_last; } } else if (pnp_mem_valid(dev, 0) && !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { tpm_dev.iotype = TPM_INF_IO_MEM; tpm_dev.map_base = pnp_mem_start(dev, 0); tpm_dev.map_size = pnp_mem_len(dev, 0); dev_info(&dev->dev, "Found %s with ID %s\n", dev->name, dev_id->id); /* publish my base address and request region */ if (request_mem_region(tpm_dev.map_base, tpm_dev.map_size, "tpm_infineon0") == NULL) { rc = -EINVAL; goto err_last; } tpm_dev.mem_base = ioremap(tpm_dev.map_base, tpm_dev.map_size); if (tpm_dev.mem_base == NULL) { release_mem_region(tpm_dev.map_base, tpm_dev.map_size); rc = -EINVAL; goto err_last; } /* * The only known MMIO based Infineon TPM system provides * 
a single large mem region with the device config * registers at the default TPM_ADDR. The data registers * seem like they could be placed anywhere within the MMIO * region, but lets just put them at zero offset. */ tpm_dev.index_off = TPM_ADDR; tpm_dev.data_regs = 0x0; } else { rc = -EINVAL; goto err_last; } /* query chip for its vendor, its version number a.s.o. */ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR); tpm_config_out(IDVENL, TPM_INF_ADDR); vendorid[1] = tpm_config_in(TPM_INF_DATA); tpm_config_out(IDVENH, TPM_INF_ADDR); vendorid[0] = tpm_config_in(TPM_INF_DATA); tpm_config_out(IDPDL, TPM_INF_ADDR); productid[1] = tpm_config_in(TPM_INF_DATA); tpm_config_out(IDPDH, TPM_INF_ADDR); productid[0] = tpm_config_in(TPM_INF_DATA); tpm_config_out(CHIP_ID1, TPM_INF_ADDR); version[1] = tpm_config_in(TPM_INF_DATA); tpm_config_out(CHIP_ID2, TPM_INF_ADDR); version[0] = tpm_config_in(TPM_INF_DATA); switch ((productid[0] << 8) | productid[1]) { case 6: chipname = " (SLD 9630 TT 1.1)"; break; case 11: chipname = " (SLB 9635 TT 1.2)"; break; default: chipname = " (unknown chip)"; break; } if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) { /* configure TPM with IO-ports */ tpm_config_out(IOLIMH, TPM_INF_ADDR); tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA); tpm_config_out(IOLIML, TPM_INF_ADDR); tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA); /* control if IO-ports are set correctly */ tpm_config_out(IOLIMH, TPM_INF_ADDR); ioh = tpm_config_in(TPM_INF_DATA); tpm_config_out(IOLIML, TPM_INF_ADDR); iol = tpm_config_in(TPM_INF_DATA); if ((ioh << 8 | iol) != tpm_dev.data_regs) { dev_err(&dev->dev, "Could not set IO-data registers to 0x%x\n", tpm_dev.data_regs); rc = -EIO; goto err_release_region; } /* activate register */ tpm_config_out(TPM_DAR, TPM_INF_ADDR); tpm_config_out(0x01, TPM_INF_DATA); tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR); /* disable RESET, LP and IRQC */ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); /* Finally, we're done, print some infos */ dev_info(&dev->dev, "TPM found: " "config base 0x%lx, " "data base 0x%lx, " "chip version 0x%02x%02x, " "vendor id 0x%x%x (Infineon), " "product id 0x%02x%02x" "%s\n", tpm_dev.iotype == TPM_INF_IO_PORT ? tpm_dev.config_port : tpm_dev.map_base + tpm_dev.index_off, tpm_dev.iotype == TPM_INF_IO_PORT ? 
tpm_dev.data_regs : tpm_dev.map_base + tpm_dev.data_regs, version[0], version[1], vendorid[0], vendorid[1], productid[0], productid[1], chipname); chip = tpmm_chip_alloc(&dev->dev, &tpm_inf); if (IS_ERR(chip)) { rc = PTR_ERR(chip); goto err_release_region; } rc = tpm_chip_register(chip); if (rc) goto err_release_region; return 0; } else { rc = -ENODEV; goto err_release_region; } err_release_region: if (tpm_dev.iotype == TPM_INF_IO_PORT) { release_region(tpm_dev.data_regs, tpm_dev.data_size); release_region(tpm_dev.config_port, tpm_dev.config_size); } else { iounmap(tpm_dev.mem_base); release_mem_region(tpm_dev.map_base, tpm_dev.map_size); } err_last: return rc; } static void tpm_inf_pnp_remove(struct pnp_dev *dev) { struct tpm_chip *chip = pnp_get_drvdata(dev); tpm_chip_unregister(chip); if (tpm_dev.iotype == TPM_INF_IO_PORT) { release_region(tpm_dev.data_regs, tpm_dev.data_size); release_region(tpm_dev.config_port, tpm_dev.config_size); } else { iounmap(tpm_dev.mem_base); release_mem_region(tpm_dev.map_base, tpm_dev.map_size); } } #ifdef CONFIG_PM_SLEEP static int tpm_inf_resume(struct device *dev) { /* Re-configure TPM after suspending */ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR); tpm_config_out(IOLIMH, TPM_INF_ADDR); tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA); tpm_config_out(IOLIML, TPM_INF_ADDR); tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA); /* activate register */ tpm_config_out(TPM_DAR, TPM_INF_ADDR); tpm_config_out(0x01, TPM_INF_DATA); tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR); /* disable RESET, LP and IRQC */ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); return tpm_pm_resume(dev); } #endif static SIMPLE_DEV_PM_OPS(tpm_inf_pm, tpm_pm_suspend, tpm_inf_resume); static struct pnp_driver tpm_inf_pnp_driver = { .name = "tpm_inf_pnp", .id_table = tpm_inf_pnp_tbl, .probe = tpm_inf_pnp_probe, .remove = tpm_inf_pnp_remove, .driver = { .pm = &tpm_inf_pm, } }; module_pnp_driver(tpm_inf_pnp_driver); MODULE_AUTHOR("Marcel Selhorst <[email protected]>"); MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); MODULE_VERSION("1.9.2"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_infineon.c
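tpm_inf_send() above wraps each command in two vendor-layer headers before pushing it byte by byte into the WRFIFO: a 4-byte transport header whose 16-bit length field counts the 6-byte data header plus the payload, and the data header itself carrying the raw payload length as a 32-bit big-endian value. The sketch below reproduces just that framing step on the host side, using the same constants; it only builds the buffer layout and performs no FIFO I/O.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TPM_VL_VER         0x01
#define TPM_VL_CHANNEL_TPM 0x0B
#define TPM_CTRL_DATA      0x04

/* Build the Infineon vendor-layer frame around a TPM command; returns the
 * total frame length (10 header bytes plus the payload). The driver emits
 * exactly these bytes, one at a time, through wait_and_send(). */
static size_t tpm_inf_frame(uint8_t *out, const uint8_t *cmd, uint32_t count)
{
	uint32_t inner = count + 6; /* data header + payload */

	/* transport header */
	out[0] = TPM_VL_VER;
	out[1] = TPM_CTRL_DATA;
	out[2] = (inner >> 8) & 0xff; /* count_high */
	out[3] = inner & 0xff;        /* count_low */
	/* data header */
	out[4] = TPM_VL_VER;
	out[5] = TPM_VL_CHANNEL_TPM;
	out[6] = (count >> 24) & 0xff; /* count_4 */
	out[7] = (count >> 16) & 0xff; /* count_3 */
	out[8] = (count >> 8) & 0xff;  /* count_2 */
	out[9] = count & 0xff;         /* count_1 */
	memcpy(out + 10, cmd, count);
	return 10 + count;
}

int main(void)
{
	/* dummy 14-byte payload; the contents are irrelevant to the framing */
	static const uint8_t cmd[14] = { 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0e };
	uint8_t frame[64];
	size_t n = tpm_inf_frame(frame, cmd, sizeof(cmd));

	for (size_t i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}

On the receive side, tpm_inf_recv() strips the same 6-byte data header again, which is why it shifts the buffer down by six bytes and subtracts six from the size before returning.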
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005, 2006 IBM Corporation * Copyright (C) 2014, 2015 Intel Corporation * * Authors: * Leendert van Doorn <[email protected]> * Kylene Hall <[email protected]> * * Maintained by: <[email protected]> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org * * This device driver implements the TPM interface as defined in * the TCG TPM Interface Spec version 1.2, revision 1.0. */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pnp.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/acpi.h> #include <linux/freezer.h> #include <linux/of.h> #include <linux/kernel.h> #include "tpm.h" #include "tpm_tis_core.h" struct tpm_info { struct resource res; /* irq > 0 means: use irq $irq; * irq = 0 means: autoprobe for an irq; * irq = -1 means: no irq support */ int irq; }; struct tpm_tis_tcg_phy { struct tpm_tis_data priv; void __iomem *iobase; }; static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *data) { return container_of(data, struct tpm_tis_tcg_phy, priv); } #ifdef CONFIG_PREEMPT_RT /* * Flush previous write operations with a dummy read operation to the * TPM MMIO base address. */ static inline void tpm_tis_flush(void __iomem *iobase) { ioread8(iobase + TPM_ACCESS(0)); } #else #define tpm_tis_flush(iobase) do { } while (0) #endif /* * Write a byte word to the TPM MMIO address, and flush the write queue. * The flush ensures that the data is sent immediately over the bus and not * aggregated with further requests and transferred later in a batch. The large * write requests can lead to unwanted latency spikes by blocking the CPU until * the complete batch has been transferred. */ static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr) { iowrite8(b, iobase + addr); tpm_tis_flush(iobase); } /* * Write a 32-bit word to the TPM MMIO address, and flush the write queue. * The flush ensures that the data is sent immediately over the bus and not * aggregated with further requests and transferred later in a batch. The large * write requests can lead to unwanted latency spikes by blocking the CPU until * the complete batch has been transferred. 
*/ static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr) { iowrite32(b, iobase + addr); tpm_tis_flush(iobase); } static bool interrupts; module_param(interrupts, bool, 0444); MODULE_PARM_DESC(interrupts, "Enable interrupts"); static bool itpm; module_param(itpm, bool, 0444); MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)"); static bool force; #ifdef CONFIG_X86 module_param(force, bool, 0444); MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); #endif #if defined(CONFIG_PNP) && defined(CONFIG_ACPI) static int has_hid(struct acpi_device *dev, const char *hid) { struct acpi_hardware_id *id; list_for_each_entry(id, &dev->pnp.ids, list) if (!strcmp(hid, id->id)) return 1; return 0; } static inline int is_itpm(struct acpi_device *dev) { if (!dev) return 0; return has_hid(dev, "INTC0102"); } #else static inline int is_itpm(struct acpi_device *dev) { return 0; } #endif #if defined(CONFIG_ACPI) #define DEVICE_IS_TPM2 1 static const struct acpi_device_id tpm_acpi_tbl[] = { {"MSFT0101", DEVICE_IS_TPM2}, {}, }; MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl); static int check_acpi_tpm2(struct device *dev) { const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev); struct acpi_table_tpm2 *tbl; acpi_status st; int ret = 0; if (!aid || aid->driver_data != DEVICE_IS_TPM2) return 0; /* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2 * table is mandatory */ st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl); if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) { dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n"); return -EINVAL; } /* The tpm2_crb driver handles this device */ if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED) ret = -ENODEV; acpi_put_table((struct acpi_table_header *)tbl); return ret; } #else static int check_acpi_tpm2(struct device *dev) { return 0; } #endif static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result, enum tpm_tis_io_mode io_mode) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); __le16 result_le16; __le32 result_le32; switch (io_mode) { case TPM_TIS_PHYS_8: while (len--) *result++ = ioread8(phy->iobase + addr); break; case TPM_TIS_PHYS_16: result_le16 = cpu_to_le16(ioread16(phy->iobase + addr)); memcpy(result, &result_le16, sizeof(u16)); break; case TPM_TIS_PHYS_32: result_le32 = cpu_to_le32(ioread32(phy->iobase + addr)); memcpy(result, &result_le32, sizeof(u32)); break; } return 0; } static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, const u8 *value, enum tpm_tis_io_mode io_mode) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); switch (io_mode) { case TPM_TIS_PHYS_8: while (len--) tpm_tis_iowrite8(*value++, phy->iobase, addr); break; case TPM_TIS_PHYS_16: return -EINVAL; case TPM_TIS_PHYS_32: tpm_tis_iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase, addr); break; } return 0; } static const struct tpm_tis_phy_ops tpm_tcg = { .read_bytes = tpm_tcg_read_bytes, .write_bytes = tpm_tcg_write_bytes, }; static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info) { struct tpm_tis_tcg_phy *phy; int irq = -1; int rc; rc = check_acpi_tpm2(dev); if (rc) return rc; phy = devm_kzalloc(dev, sizeof(struct tpm_tis_tcg_phy), GFP_KERNEL); if (phy == NULL) return -ENOMEM; phy->iobase = devm_ioremap_resource(dev, &tpm_info->res); if (IS_ERR(phy->iobase)) return PTR_ERR(phy->iobase); if (interrupts) irq = tpm_info->irq; if (itpm || is_itpm(ACPI_COMPANION(dev))) 
set_bit(TPM_TIS_ITPM_WORKAROUND, &phy->priv.flags); return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg, ACPI_HANDLE(dev)); } static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev, const struct pnp_device_id *pnp_id) { struct tpm_info tpm_info = {}; struct resource *res; res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; tpm_info.res = *res; if (pnp_irq_valid(pnp_dev, 0)) tpm_info.irq = pnp_irq(pnp_dev, 0); else tpm_info.irq = -1; return tpm_tis_init(&pnp_dev->dev, &tpm_info); } /* * There is a known bug caused by 93e1b7d42e1e ("[PATCH] tpm: add HID module * parameter"). This commit added the IFX0102 device ID, which is also used by * tpm_infineon, but neglected to add a quirk for deciding which driver ought * to be used. */ static struct pnp_device_id tpm_pnp_tbl[] = { {"PNP0C31", 0}, /* TPM */ {"ATM1200", 0}, /* Atmel */ {"IFX0102", 0}, /* Infineon */ {"BCM0101", 0}, /* Broadcom */ {"BCM0102", 0}, /* Broadcom */ {"NSC1200", 0}, /* National */ {"ICO0102", 0}, /* Intel */ /* Add new here */ {"", 0}, /* User Specified */ {"", 0} /* Terminator */ }; MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); static void tpm_tis_pnp_remove(struct pnp_dev *dev) { struct tpm_chip *chip = pnp_get_drvdata(dev); tpm_chip_unregister(chip); tpm_tis_remove(chip); } static struct pnp_driver tis_pnp_driver = { .name = "tpm_tis", .id_table = tpm_pnp_tbl, .probe = tpm_tis_pnp_init, .remove = tpm_tis_pnp_remove, .driver = { .pm = &tpm_tis_pm, }, }; #define TIS_HID_USR_IDX (ARRAY_SIZE(tpm_pnp_tbl) - 2) module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); static struct platform_device *force_pdev; static int tpm_tis_plat_probe(struct platform_device *pdev) { struct tpm_info tpm_info = {}; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no memory resource defined\n"); return -ENODEV; } tpm_info.res = *res; tpm_info.irq = platform_get_irq_optional(pdev, 0); if (tpm_info.irq <= 0) { if (pdev != force_pdev) tpm_info.irq = -1; else /* When forcing auto probe the IRQ */ tpm_info.irq = 0; } return tpm_tis_init(&pdev->dev, &tpm_info); } static void tpm_tis_plat_remove(struct platform_device *pdev) { struct tpm_chip *chip = dev_get_drvdata(&pdev->dev); tpm_chip_unregister(chip); tpm_tis_remove(chip); } #ifdef CONFIG_OF static const struct of_device_id tis_of_platform_match[] = { {.compatible = "tcg,tpm-tis-mmio"}, {}, }; MODULE_DEVICE_TABLE(of, tis_of_platform_match); #endif static struct platform_driver tis_drv = { .probe = tpm_tis_plat_probe, .remove_new = tpm_tis_plat_remove, .driver = { .name = "tpm_tis", .pm = &tpm_tis_pm, .of_match_table = of_match_ptr(tis_of_platform_match), .acpi_match_table = ACPI_PTR(tpm_acpi_tbl), }, }; static int tpm_tis_force_device(void) { struct platform_device *pdev; static const struct resource x86_resources[] = { DEFINE_RES_MEM(0xFED40000, TIS_MEM_LEN) }; if (!force) return 0; /* The driver core will match the name tpm_tis of the device to * the tpm_tis platform driver and complete the setup via * tpm_tis_plat_probe */ pdev = platform_device_register_simple("tpm_tis", -1, x86_resources, ARRAY_SIZE(x86_resources)); if (IS_ERR(pdev)) return PTR_ERR(pdev); force_pdev = pdev; return 0; } static int __init init_tis(void) { int rc; rc = tpm_tis_force_device(); if (rc) goto err_force; rc = platform_driver_register(&tis_drv); if 
(rc) goto err_platform; if (IS_ENABLED(CONFIG_PNP)) { rc = pnp_register_driver(&tis_pnp_driver); if (rc) goto err_pnp; } return 0; err_pnp: platform_driver_unregister(&tis_drv); err_platform: if (force_pdev) platform_device_unregister(force_pdev); err_force: return rc; } static void __exit cleanup_tis(void) { pnp_unregister_driver(&tis_pnp_driver); platform_driver_unregister(&tis_drv); if (force_pdev) platform_device_unregister(force_pdev); } module_init(init_tis); module_exit(cleanup_tis); MODULE_AUTHOR("Leendert van Doorn ([email protected])"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_tis.c
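The force=1 path in tpm_tis.c comes down to registering a bare platform device named "tpm_tis" and letting the driver core bind it to the platform driver of the same name. The sketch below mirrors that trick from a hypothetical out-of-tree module; it is illustrative only — the tis_demo naming is made up, and the 0xFED40000/0x5000 window simply restates the x86 TIS defaults that tpm_tis_force_device() uses via TIS_MEM_LEN.

/* Sketch: hand-register a "tpm_tis" platform device, as force=1 does. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/err.h>

static struct platform_device *tis_demo_pdev;

static int __init tis_demo_init(void)
{
	/* Conventional x86 TIS MMIO window (TIS_MEM_LEN is 0x5000). */
	static const struct resource res[] = {
		DEFINE_RES_MEM(0xFED40000, 0x5000),
	};

	/* The driver core matches the device name "tpm_tis" against the
	 * platform driver of the same name and calls tpm_tis_plat_probe(). */
	tis_demo_pdev = platform_device_register_simple("tpm_tis", -1,
							res, ARRAY_SIZE(res));
	return PTR_ERR_OR_ZERO(tis_demo_pdev);
}

static void __exit tis_demo_exit(void)
{
	platform_device_unregister(tis_demo_pdev);
}

module_init(tis_demo_init);
module_exit(tis_demo_exit);
MODULE_LICENSE("GPL");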
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2020 Google Inc. * * Based on Infineon TPM driver by Peter Huewe. * * cr50 is a firmware for H1 secure modules that requires special * handling for the I2C interface. * * - Use an interrupt for transaction status instead of hardcoded delays. * - Must use write+wait+read read protocol. * - All 4 bytes of status register must be read/written at once. * - Burst count max is 63 bytes, and burst count behaves slightly differently * than other I2C TPMs. * - When reading from FIFO the full burstcnt must be read instead of just * reading header and determining the remainder. */ #include <linux/acpi.h> #include <linux/completion.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/pm.h> #include <linux/slab.h> #include <linux/wait.h> #include "tpm_tis_core.h" #define TPM_CR50_MAX_BUFSIZE 64 #define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */ #define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */ #define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */ #define TPM_TI50_I2C_DID_VID 0x504a6666L /* Device and vendor ID reg value */ #define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */ #define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */ #define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */ #define TPM_I2C_ACCESS(l) (0x0000 | ((l) << 4)) #define TPM_I2C_STS(l) (0x0001 | ((l) << 4)) #define TPM_I2C_DATA_FIFO(l) (0x0005 | ((l) << 4)) #define TPM_I2C_DID_VID(l) (0x0006 | ((l) << 4)) /** * struct tpm_i2c_cr50_priv_data - Driver private data. * @irq: Irq number used for this chip. * If irq <= 0, then a fixed timeout is used instead of waiting for irq. * @tpm_ready: Struct used by irq handler to signal R/W readiness. * @buf: Buffer used for i2c writes, with i2c address prepended to content. * * Private driver struct used by kernel threads and interrupt context. */ struct tpm_i2c_cr50_priv_data { int irq; struct completion tpm_ready; u8 buf[TPM_CR50_MAX_BUFSIZE]; }; /** * tpm_cr50_i2c_int_handler() - cr50 interrupt handler. * @dummy: Unused parameter. * @tpm_info: TPM chip information. * * The cr50 interrupt handler signals waiting threads that the * interrupt has been asserted. It does not do any interrupt triggered * processing but is instead used to avoid fixed delays. * * Return: * IRQ_HANDLED signifies irq was handled by this device. */ static irqreturn_t tpm_cr50_i2c_int_handler(int dummy, void *tpm_info) { struct tpm_chip *chip = tpm_info; struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev); complete(&priv->tpm_ready); return IRQ_HANDLED; } /** * tpm_cr50_i2c_wait_tpm_ready() - Wait for tpm to signal ready. * @chip: A TPM chip. * * Wait for completion interrupt if available, otherwise use a fixed * delay for the TPM to be ready. * * Return: * - 0: Success. * - -errno: A POSIX error code. */ static int tpm_cr50_i2c_wait_tpm_ready(struct tpm_chip *chip) { struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev); /* Use a safe fixed delay if interrupt is not supported */ if (priv->irq <= 0) { msleep(TPM_CR50_TIMEOUT_NOIRQ_MS); return 0; } /* Wait for interrupt to indicate TPM is ready to respond */ if (!wait_for_completion_timeout(&priv->tpm_ready, chip->timeout_a)) { dev_warn(&chip->dev, "Timeout waiting for TPM ready\n"); return -ETIMEDOUT; } return 0; } /** * tpm_cr50_i2c_enable_tpm_irq() - Enable TPM irq. * @chip: A TPM chip. 
*/ static void tpm_cr50_i2c_enable_tpm_irq(struct tpm_chip *chip) { struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev); if (priv->irq > 0) { reinit_completion(&priv->tpm_ready); enable_irq(priv->irq); } } /** * tpm_cr50_i2c_disable_tpm_irq() - Disable TPM irq. * @chip: A TPM chip. */ static void tpm_cr50_i2c_disable_tpm_irq(struct tpm_chip *chip) { struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev); if (priv->irq > 0) disable_irq(priv->irq); } /** * tpm_cr50_i2c_transfer_message() - Transfer a message over i2c. * @dev: Device information. * @adapter: I2C adapter. * @msg: Message to transfer. * * Call unlocked i2c transfer routine with the provided parameters and * retry in case of bus errors. * * Return: * - 0: Success. * - -errno: A POSIX error code. */ static int tpm_cr50_i2c_transfer_message(struct device *dev, struct i2c_adapter *adapter, struct i2c_msg *msg) { unsigned int try; int rc; for (try = 0; try < TPM_CR50_I2C_MAX_RETRIES; try++) { rc = __i2c_transfer(adapter, msg, 1); if (rc == 1) return 0; /* Successfully transferred the message */ if (try) dev_warn(dev, "i2c transfer failed (attempt %d/%d): %d\n", try + 1, TPM_CR50_I2C_MAX_RETRIES, rc); usleep_range(TPM_CR50_I2C_RETRY_DELAY_LO, TPM_CR50_I2C_RETRY_DELAY_HI); } /* No i2c message transferred */ return -EIO; } /** * tpm_cr50_i2c_read() - Read from TPM register. * @chip: A TPM chip. * @addr: Register address to read from. * @buffer: Read destination, provided by caller. * @len: Number of bytes to read. * * Sends the register address byte to the TPM, then waits until TPM * is ready via interrupt signal or timeout expiration, then 'len' * bytes are read from TPM response into the provided 'buffer'. * * Return: * - 0: Success. * - -errno: A POSIX error code. */ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t len) { struct i2c_client *client = to_i2c_client(chip->dev.parent); struct i2c_msg msg_reg_addr = { .addr = client->addr, .len = 1, .buf = &addr }; struct i2c_msg msg_response = { .addr = client->addr, .flags = I2C_M_RD, .len = len, .buf = buffer }; int rc; i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT); /* Prepare for completion interrupt */ tpm_cr50_i2c_enable_tpm_irq(chip); /* Send the register address byte to the TPM */ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg_reg_addr); if (rc < 0) goto out; /* Wait for TPM to be ready with response data */ rc = tpm_cr50_i2c_wait_tpm_ready(chip); if (rc < 0) goto out; /* Read response data from the TPM */ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg_response); out: tpm_cr50_i2c_disable_tpm_irq(chip); i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); if (rc < 0) return rc; return 0; } /** * tpm_cr50_i2c_write() - Write to TPM register. * @chip: A TPM chip. * @addr: Register address to write to. * @buffer: Data to write. * @len: Number of bytes to write. * * The provided address is prepended to the data in 'buffer', the * combined address+data is sent to the TPM, then we wait for the TPM to * indicate it is done writing. * * Return: * - 0: Success. * - -errno: A POSIX error code. 
*/ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t len) { struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev); struct i2c_client *client = to_i2c_client(chip->dev.parent); struct i2c_msg msg = { .addr = client->addr, .len = len + 1, .buf = priv->buf }; int rc; if (len > TPM_CR50_MAX_BUFSIZE - 1) return -EINVAL; /* Prepend the 'register address' to the buffer */ priv->buf[0] = addr; memcpy(priv->buf + 1, buffer, len); i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT); /* Prepare for completion interrupt */ tpm_cr50_i2c_enable_tpm_irq(chip); /* Send write request buffer with address */ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg); if (rc < 0) goto out; /* Wait for TPM to be ready, ignore timeout */ tpm_cr50_i2c_wait_tpm_ready(chip); out: tpm_cr50_i2c_disable_tpm_irq(chip); i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT); if (rc < 0) return rc; return 0; } /** * tpm_cr50_check_locality() - Verify TPM locality 0 is active. * @chip: A TPM chip. * * Return: * - 0: Success. * - -errno: A POSIX error code. */ static int tpm_cr50_check_locality(struct tpm_chip *chip) { u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY; u8 buf; int rc; rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf)); if (rc < 0) return rc; if ((buf & mask) == mask) return 0; return -EIO; } /** * tpm_cr50_release_locality() - Release TPM locality. * @chip: A TPM chip. * @force: Flag to force release if set. */ static void tpm_cr50_release_locality(struct tpm_chip *chip, bool force) { u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_PENDING; u8 addr = TPM_I2C_ACCESS(0); u8 buf; if (tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)) < 0) return; if (force || (buf & mask) == mask) { buf = TPM_ACCESS_ACTIVE_LOCALITY; tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf)); } } /** * tpm_cr50_request_locality() - Request TPM locality 0. * @chip: A TPM chip. * * Return: * - 0: Success. * - -errno: A POSIX error code. */ static int tpm_cr50_request_locality(struct tpm_chip *chip) { u8 buf = TPM_ACCESS_REQUEST_USE; unsigned long stop; int rc; if (!tpm_cr50_check_locality(chip)) return 0; rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf)); if (rc < 0) return rc; stop = jiffies + chip->timeout_a; do { if (!tpm_cr50_check_locality(chip)) return 0; msleep(TPM_CR50_TIMEOUT_SHORT_MS); } while (time_before(jiffies, stop)); return -ETIMEDOUT; } /** * tpm_cr50_i2c_tis_status() - Read cr50 tis status. * @chip: A TPM chip. * * cr50 requires all 4 bytes of status register to be read. * * Return: * TPM status byte. */ static u8 tpm_cr50_i2c_tis_status(struct tpm_chip *chip) { u8 buf[4]; if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) return 0; return buf[0]; } /** * tpm_cr50_i2c_tis_set_ready() - Set status register to ready. * @chip: A TPM chip. * * cr50 requires all 4 bytes of status register to be written. */ static void tpm_cr50_i2c_tis_set_ready(struct tpm_chip *chip) { u8 buf[4] = { TPM_STS_COMMAND_READY }; tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), buf, sizeof(buf)); msleep(TPM_CR50_TIMEOUT_SHORT_MS); } /** * tpm_cr50_i2c_get_burst_and_status() - Get burst count and status. * @chip: A TPM chip. * @mask: Status mask. * @burst: Return value for burst. * @status: Return value for status. * * cr50 uses bytes 3:2 of status register for burst count and * all 4 bytes must be read. * * Return: * - 0: Success. * - -errno: A POSIX error code. 
*/ static int tpm_cr50_i2c_get_burst_and_status(struct tpm_chip *chip, u8 mask, size_t *burst, u32 *status) { unsigned long stop; u8 buf[4]; *status = 0; /* wait for burstcount */ stop = jiffies + chip->timeout_b; do { if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) { msleep(TPM_CR50_TIMEOUT_SHORT_MS); continue; } *status = *buf; *burst = le16_to_cpup((__le16 *)(buf + 1)); if ((*status & mask) == mask && *burst > 0 && *burst <= TPM_CR50_MAX_BUFSIZE - 1) return 0; msleep(TPM_CR50_TIMEOUT_SHORT_MS); } while (time_before(jiffies, stop)); dev_err(&chip->dev, "Timeout reading burst and status\n"); return -ETIMEDOUT; } /** * tpm_cr50_i2c_tis_recv() - TPM reception callback. * @chip: A TPM chip. * @buf: Reception buffer. * @buf_len: Buffer length to read. * * Return: * - >= 0: Number of read bytes. * - -errno: A POSIX error code. */ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len) { u8 mask = TPM_STS_VALID | TPM_STS_DATA_AVAIL; size_t burstcnt, cur, len, expected; u8 addr = TPM_I2C_DATA_FIFO(0); u32 status; int rc; if (buf_len < TPM_HEADER_SIZE) return -EINVAL; rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status); if (rc < 0) goto out_err; if (burstcnt > buf_len || burstcnt < TPM_HEADER_SIZE) { dev_err(&chip->dev, "Unexpected burstcnt: %zu (max=%zu, min=%d)\n", burstcnt, buf_len, TPM_HEADER_SIZE); rc = -EIO; goto out_err; } /* Read first chunk of burstcnt bytes */ rc = tpm_cr50_i2c_read(chip, addr, buf, burstcnt); if (rc < 0) { dev_err(&chip->dev, "Read of first chunk failed\n"); goto out_err; } /* Determine expected data in the return buffer */ expected = be32_to_cpup((__be32 *)(buf + 2)); if (expected > buf_len) { dev_err(&chip->dev, "Buffer too small to receive i2c data\n"); rc = -E2BIG; goto out_err; } /* Now read the rest of the data */ cur = burstcnt; while (cur < expected) { /* Read updated burst count and check status */ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status); if (rc < 0) goto out_err; len = min_t(size_t, burstcnt, expected - cur); rc = tpm_cr50_i2c_read(chip, addr, buf + cur, len); if (rc < 0) { dev_err(&chip->dev, "Read failed\n"); goto out_err; } cur += len; } /* Ensure TPM is done reading data */ rc = tpm_cr50_i2c_get_burst_and_status(chip, TPM_STS_VALID, &burstcnt, &status); if (rc < 0) goto out_err; if (status & TPM_STS_DATA_AVAIL) { dev_err(&chip->dev, "Data still available\n"); rc = -EIO; goto out_err; } tpm_cr50_release_locality(chip, false); return cur; out_err: /* Abort current transaction if still pending */ if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY) tpm_cr50_i2c_tis_set_ready(chip); tpm_cr50_release_locality(chip, false); return rc; } /** * tpm_cr50_i2c_tis_send() - TPM transmission callback. * @chip: A TPM chip. * @buf: Buffer to send. * @len: Buffer length. * * Return: * - 0: Success. * - -errno: A POSIX error code. 
*/ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) { size_t burstcnt, limit, sent = 0; u8 tpm_go[4] = { TPM_STS_GO }; unsigned long stop; u32 status; int rc; rc = tpm_cr50_request_locality(chip); if (rc < 0) return rc; /* Wait until TPM is ready for a command */ stop = jiffies + chip->timeout_b; while (!(tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)) { if (time_after(jiffies, stop)) { rc = -ETIMEDOUT; goto out_err; } tpm_cr50_i2c_tis_set_ready(chip); } while (len > 0) { u8 mask = TPM_STS_VALID; /* Wait for data if this is not the first chunk */ if (sent > 0) mask |= TPM_STS_DATA_EXPECT; /* Read burst count and check status */ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status); if (rc < 0) goto out_err; /* * Use burstcnt - 1 to account for the address byte * that is inserted by tpm_cr50_i2c_write() */ limit = min_t(size_t, burstcnt - 1, len); rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(0), &buf[sent], limit); if (rc < 0) { dev_err(&chip->dev, "Write failed\n"); goto out_err; } sent += limit; len -= limit; } /* Ensure TPM is not expecting more data */ rc = tpm_cr50_i2c_get_burst_and_status(chip, TPM_STS_VALID, &burstcnt, &status); if (rc < 0) goto out_err; if (status & TPM_STS_DATA_EXPECT) { dev_err(&chip->dev, "Data still expected\n"); rc = -EIO; goto out_err; } /* Start the TPM command */ rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), tpm_go, sizeof(tpm_go)); if (rc < 0) { dev_err(&chip->dev, "Start command failed\n"); goto out_err; } return 0; out_err: /* Abort current transaction if still pending */ if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY) tpm_cr50_i2c_tis_set_ready(chip); tpm_cr50_release_locality(chip, false); return rc; } /** * tpm_cr50_i2c_req_canceled() - Callback to notify a request cancel. * @chip: A TPM chip. * @status: Status given by the cancel callback. * * Return: * True if command is ready, False otherwise. */ static bool tpm_cr50_i2c_req_canceled(struct tpm_chip *chip, u8 status) { return status == TPM_STS_COMMAND_READY; } static bool tpm_cr50_i2c_is_firmware_power_managed(struct device *dev) { u8 val; int ret; /* This flag should default true when the device property is not present */ ret = device_property_read_u8(dev, "firmware-power-managed", &val); if (ret) return true; return val; } static const struct tpm_class_ops cr50_i2c = { .flags = TPM_OPS_AUTO_STARTUP, .status = &tpm_cr50_i2c_tis_status, .recv = &tpm_cr50_i2c_tis_recv, .send = &tpm_cr50_i2c_tis_send, .cancel = &tpm_cr50_i2c_tis_set_ready, .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_canceled = &tpm_cr50_i2c_req_canceled, }; #ifdef CONFIG_ACPI static const struct acpi_device_id cr50_i2c_acpi_id[] = { { "GOOG0005", 0 }, {} }; MODULE_DEVICE_TABLE(acpi, cr50_i2c_acpi_id); #endif #ifdef CONFIG_OF static const struct of_device_id of_cr50_i2c_match[] = { { .compatible = "google,cr50", }, {} }; MODULE_DEVICE_TABLE(of, of_cr50_i2c_match); #endif /** * tpm_cr50_i2c_probe() - Driver probe function. * @client: I2C client information. * * Return: * - 0: Success. * - -errno: A POSIX error code. 
*/ static int tpm_cr50_i2c_probe(struct i2c_client *client) { struct tpm_i2c_cr50_priv_data *priv; struct device *dev = &client->dev; struct tpm_chip *chip; u32 vendor; u8 buf[4]; int rc; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; chip = tpmm_chip_alloc(dev, &cr50_i2c); if (IS_ERR(chip)) return PTR_ERR(chip); priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; /* cr50 is a TPM 2.0 chip */ chip->flags |= TPM_CHIP_FLAG_TPM2; if (tpm_cr50_i2c_is_firmware_power_managed(dev)) chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED; /* Default timeouts */ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); dev_set_drvdata(&chip->dev, priv); init_completion(&priv->tpm_ready); if (client->irq > 0) { rc = devm_request_irq(dev, client->irq, tpm_cr50_i2c_int_handler, IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_NO_AUTOEN, dev->driver->name, chip); if (rc < 0) { dev_err(dev, "Failed to probe IRQ %d\n", client->irq); return rc; } priv->irq = client->irq; } else { dev_warn(dev, "No IRQ, will use %ums delay for TPM ready\n", TPM_CR50_TIMEOUT_NOIRQ_MS); } rc = tpm_cr50_request_locality(chip); if (rc < 0) { dev_err(dev, "Could not request locality\n"); return rc; } /* Read four bytes from DID_VID register */ rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(0), buf, sizeof(buf)); if (rc < 0) { dev_err(dev, "Could not read vendor id\n"); tpm_cr50_release_locality(chip, true); return rc; } vendor = le32_to_cpup((__le32 *)buf); if (vendor != TPM_CR50_I2C_DID_VID && vendor != TPM_TI50_I2C_DID_VID) { dev_err(dev, "Vendor ID did not match! ID was %08x\n", vendor); tpm_cr50_release_locality(chip, true); return -ENODEV; } dev_info(dev, "%s TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n", vendor == TPM_TI50_I2C_DID_VID ? "ti50" : "cr50", client->addr, client->irq, vendor >> 16); return tpm_chip_register(chip); } /** * tpm_cr50_i2c_remove() - Driver remove function. * @client: I2C client information. */ static void tpm_cr50_i2c_remove(struct i2c_client *client) { struct tpm_chip *chip = i2c_get_clientdata(client); struct device *dev = &client->dev; if (!chip) { dev_crit(dev, "Could not get client data at remove, memory corruption ahead\n"); return; } tpm_chip_unregister(chip); tpm_cr50_release_locality(chip, true); } static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume); static struct i2c_driver cr50_i2c_driver = { .probe = tpm_cr50_i2c_probe, .remove = tpm_cr50_i2c_remove, .driver = { .name = "cr50_i2c", .pm = &cr50_i2c_pm, .acpi_match_table = ACPI_PTR(cr50_i2c_acpi_id), .of_match_table = of_match_ptr(of_cr50_i2c_match), }, }; module_i2c_driver(cr50_i2c_driver); MODULE_DESCRIPTION("cr50 TPM I2C Driver"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_tis_i2c_cr50.c
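tpm_cr50_i2c_get_burst_and_status() above hinges on the cr50 quirk that the status register must be read as all four bytes at once: byte 0 holds the TIS status flags and bytes 1-2 hold the little-endian burst count. A stand-alone decoding sketch, kernel-independent and for illustration only (the 0x90 sample byte assumes the usual TIS encoding TPM_STS_VALID=0x80 and TPM_STS_DATA_AVAIL=0x10; burst 63 is the cr50 maximum noted in the header comment):

#include <stdint.h>
#include <stdio.h>

struct cr50_sts {
	uint8_t  status;	/* TIS status flags, byte 0 */
	uint16_t burst;		/* burst count, bytes 1..2, little-endian */
};

static struct cr50_sts cr50_decode_sts(const uint8_t buf[4])
{
	struct cr50_sts s;

	s.status = buf[0];
	s.burst = (uint16_t)buf[1] | ((uint16_t)buf[2] << 8);
	return s;
}

int main(void)
{
	/* VALID | DATA_AVAIL with a burst count of 63 (0x3f). */
	const uint8_t raw[4] = { 0x90, 0x3f, 0x00, 0x00 };
	struct cr50_sts s = cr50_decode_sts(raw);

	printf("status=0x%02x burst=%u\n", s.status, s.burst);
	return 0;
}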
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2004 IBM Corporation * Copyright (C) 2014 Intel Corporation * * Authors: * Leendert van Doorn <[email protected]> * Dave Safford <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * * Maintained by: <[email protected]> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org * * Note, the TPM chip is not interrupt driven (only polling) * and can have very long timeouts (minutes!). Hence the unusual * calls to msleep. */ #include <linux/poll.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/suspend.h> #include <linux/freezer.h> #include <linux/tpm_eventlog.h> #include "tpm.h" /* * Bug workaround - some TPMs don't flush the most * recently changed PCR on suspend, so force the flush * with an extend to the selected _unused_ non-volatile PCR. */ static u32 tpm_suspend_pcr; module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644); MODULE_PARM_DESC(suspend_pcr, "PCR to use for dummy writes to facilitate flush on suspend."); /** * tpm_calc_ordinal_duration() - calculate the maximum command duration * @chip: TPM chip to use. * @ordinal: TPM command ordinal. * * The function returns the maximum amount of time the chip could take * to return the result for a particular ordinal in jiffies. * * Return: A maximal duration time for an ordinal in jiffies. */ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) { if (chip->flags & TPM_CHIP_FLAG_TPM2) return tpm2_calc_ordinal_duration(chip, ordinal); else return tpm1_calc_ordinal_duration(chip, ordinal); } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) { struct tpm_header *header = buf; int rc; ssize_t len = 0; u32 count, ordinal; unsigned long stop; if (bufsiz < TPM_HEADER_SIZE) return -EINVAL; if (bufsiz > TPM_BUFSIZE) bufsiz = TPM_BUFSIZE; count = be32_to_cpu(header->length); ordinal = be32_to_cpu(header->ordinal); if (count == 0) return -ENODATA; if (count > bufsiz) { dev_err(&chip->dev, "invalid count value %x %zx\n", count, bufsiz); return -E2BIG; } rc = chip->ops->send(chip, buf, count); if (rc < 0) { if (rc != -EPIPE) dev_err(&chip->dev, "%s: send(): error %d\n", __func__, rc); return rc; } /* A sanity check. send() should just return zero on success, i.e. * not the command length. */ if (rc > 0) { dev_warn(&chip->dev, "%s: send(): invalid value %d\n", __func__, rc); rc = 0; } if (chip->flags & TPM_CHIP_FLAG_IRQ) goto out_recv; stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal); do { u8 status = chip->ops->status(chip); if ((status & chip->ops->req_complete_mask) == chip->ops->req_complete_val) goto out_recv; if (chip->ops->req_canceled(chip, status)) { dev_err(&chip->dev, "Operation Canceled\n"); return -ECANCELED; } tpm_msleep(TPM_TIMEOUT_POLL); rmb(); } while (time_before(jiffies, stop)); chip->ops->cancel(chip); dev_err(&chip->dev, "Operation Timed out\n"); return -ETIME; out_recv: len = chip->ops->recv(chip, buf, bufsiz); if (len < 0) { rc = len; dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %d\n", rc); } else if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length)) rc = -EFAULT; return rc ? rc : len; } /** * tpm_transmit - Internal kernel interface to transmit TPM commands. 
* @chip: a TPM chip to use * @buf: a TPM command buffer * @bufsiz: length of the TPM command buffer * * A wrapper around tpm_try_transmit() that handles TPM2_RC_RETRY returns from * the TPM and retransmits the command after a delay up to a maximum wait of * TPM2_DURATION_LONG. * * Note that TPM 1.x never returns TPM2_RC_RETRY so the retry logic is TPM 2.0 * only. * * Return: * * The response length - OK * * -errno - A system error */ ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz) { struct tpm_header *header = (struct tpm_header *)buf; /* space for header and handles */ u8 save[TPM_HEADER_SIZE + 3*sizeof(u32)]; unsigned int delay_msec = TPM2_DURATION_SHORT; u32 rc = 0; ssize_t ret; const size_t save_size = min(sizeof(save), bufsiz); /* the command code is where the return code will be */ u32 cc = be32_to_cpu(header->return_code); /* * Subtlety here: if we have a space, the handles will be * transformed, so when we restore the header we also have to * restore the handles. */ memcpy(save, buf, save_size); for (;;) { ret = tpm_try_transmit(chip, buf, bufsiz); if (ret < 0) break; rc = be32_to_cpu(header->return_code); if (rc != TPM2_RC_RETRY && rc != TPM2_RC_TESTING) break; /* * return immediately if self test returns test * still running to shorten boot time. */ if (rc == TPM2_RC_TESTING && cc == TPM2_CC_SELF_TEST) break; if (delay_msec > TPM2_DURATION_LONG) { if (rc == TPM2_RC_RETRY) dev_err(&chip->dev, "in retry loop\n"); else dev_err(&chip->dev, "self test is still running\n"); break; } tpm_msleep(delay_msec); delay_msec *= 2; memcpy(buf, save, save_size); } return ret; } /** * tpm_transmit_cmd - send a tpm command to the device * @chip: a TPM chip to use * @buf: a TPM command buffer * @min_rsp_body_length: minimum expected length of response body * @desc: command description used in the error message * * Return: * * 0 - OK * * -errno - A system error * * TPM_RC - A TPM error */ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf, size_t min_rsp_body_length, const char *desc) { const struct tpm_header *header = (struct tpm_header *)buf->data; int err; ssize_t len; len = tpm_transmit(chip, buf->data, PAGE_SIZE); if (len < 0) return len; err = be32_to_cpu(header->return_code); if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED && err != TPM2_RC_TESTING && desc) dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err, desc); if (err) return err; if (len < min_rsp_body_length + TPM_HEADER_SIZE) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(tpm_transmit_cmd); int tpm_get_timeouts(struct tpm_chip *chip) { if (chip->flags & TPM_CHIP_FLAG_HAVE_TIMEOUTS) return 0; if (chip->flags & TPM_CHIP_FLAG_TPM2) return tpm2_get_timeouts(chip); else return tpm1_get_timeouts(chip); } EXPORT_SYMBOL_GPL(tpm_get_timeouts); /** * tpm_is_tpm2 - do we have a TPM2 chip? * @chip: a &struct tpm_chip instance, %NULL for the default chip * * Return: * 1 if we have a TPM2 chip. * 0 if we don't have a TPM2 chip. * A negative number for system errors (errno). 
*/ int tpm_is_tpm2(struct tpm_chip *chip) { int rc; chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0; tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_is_tpm2); /** * tpm_pcr_read - read a PCR value from SHA1 bank * @chip: a &struct tpm_chip instance, %NULL for the default chip * @pcr_idx: the PCR to be retrieved * @digest: the PCR bank and buffer current PCR value is written to * * Return: same as with tpm_transmit_cmd() */ int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digest) { int rc; chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; if (chip->flags & TPM_CHIP_FLAG_TPM2) rc = tpm2_pcr_read(chip, pcr_idx, digest, NULL); else rc = tpm1_pcr_read(chip, pcr_idx, digest->digest); tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_read); /** * tpm_pcr_extend - extend a PCR value in SHA1 bank. * @chip: a &struct tpm_chip instance, %NULL for the default chip * @pcr_idx: the PCR to be retrieved * @digests: array of tpm_digest structures used to extend PCRs * * Note: callers must pass a digest for every allocated PCR bank, in the same * order of the banks in chip->allocated_banks. * * Return: same as with tpm_transmit_cmd() */ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digests) { int rc; int i; chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; for (i = 0; i < chip->nr_allocated_banks; i++) { if (digests[i].alg_id != chip->allocated_banks[i].alg_id) { rc = -EINVAL; goto out; } } if (chip->flags & TPM_CHIP_FLAG_TPM2) { rc = tpm2_pcr_extend(chip, pcr_idx, digests); goto out; } rc = tpm1_pcr_extend(chip, pcr_idx, digests[0].digest, "attempting to extend a PCR value"); out: tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_extend); /** * tpm_send - send a TPM command * @chip: a &struct tpm_chip instance, %NULL for the default chip * @cmd: a TPM command buffer * @buflen: the length of the TPM command buffer * * Return: same as with tpm_transmit_cmd() */ int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) { struct tpm_buf buf; int rc; chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; buf.data = cmd; rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to send a command"); tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_send); int tpm_auto_startup(struct tpm_chip *chip) { int rc; if (!(chip->ops->flags & TPM_OPS_AUTO_STARTUP)) return 0; if (chip->flags & TPM_CHIP_FLAG_TPM2) rc = tpm2_auto_startup(chip); else rc = tpm1_auto_startup(chip); return rc; } /* * We are about to suspend. Save the TPM state * so that it can be restored. */ int tpm_pm_suspend(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); int rc = 0; if (!chip) return -ENODEV; if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) goto suspended; if ((chip->flags & TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED) && !pm_suspend_via_firmware()) goto suspended; rc = tpm_try_get_ops(chip); if (!rc) { if (chip->flags & TPM_CHIP_FLAG_TPM2) tpm2_shutdown(chip, TPM2_SU_STATE); else rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); tpm_put_ops(chip); } suspended: chip->flags |= TPM_CHIP_FLAG_SUSPENDED; if (rc) dev_err(dev, "Ignoring error %d while suspending\n", rc); return 0; } EXPORT_SYMBOL_GPL(tpm_pm_suspend); /* * Resume from a power-save state. The BIOS already restored * the TPM state. 
*/ int tpm_pm_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); if (chip == NULL) return -ENODEV; chip->flags &= ~TPM_CHIP_FLAG_SUSPENDED; /* * Guarantee that SUSPENDED is written last, so that hwrng does not * activate before the chip has been fully resumed. */ wmb(); return 0; } EXPORT_SYMBOL_GPL(tpm_pm_resume); /** * tpm_get_random() - get random bytes from the TPM's RNG * @chip: a &struct tpm_chip instance, %NULL for the default chip * @out: destination buffer for the random bytes * @max: the max number of bytes to write to @out * * Return: number of random bytes read or a negative error value. */ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max) { int rc; if (!out || max > TPM_MAX_RNG_DATA) return -EINVAL; chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; if (chip->flags & TPM_CHIP_FLAG_TPM2) rc = tpm2_get_random(chip, out, max); else rc = tpm1_get_random(chip, out, max); tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_get_random); static int __init tpm_init(void) { int rc; rc = class_register(&tpm_class); if (rc) { pr_err("couldn't create tpm class\n"); return rc; } rc = class_register(&tpmrm_class); if (rc) { pr_err("couldn't create tpmrm class\n"); goto out_destroy_tpm_class; } rc = alloc_chrdev_region(&tpm_devt, 0, 2*TPM_NUM_DEVICES, "tpm"); if (rc < 0) { pr_err("tpm: failed to allocate char dev region\n"); goto out_destroy_tpmrm_class; } rc = tpm_dev_common_init(); if (rc) { pr_err("tpm: failed to initialize tpm dev common\n"); goto out_unreg_chrdev; } return 0; out_unreg_chrdev: unregister_chrdev_region(tpm_devt, 2 * TPM_NUM_DEVICES); out_destroy_tpmrm_class: class_unregister(&tpmrm_class); out_destroy_tpm_class: class_unregister(&tpm_class); return rc; } static void __exit tpm_exit(void) { idr_destroy(&dev_nums_idr); class_unregister(&tpm_class); class_unregister(&tpmrm_class); unregister_chrdev_region(tpm_devt, 2*TPM_NUM_DEVICES); tpm_dev_common_exit(); } subsys_initcall(tpm_init); module_exit(tpm_exit); MODULE_AUTHOR("Leendert van Doorn ([email protected])"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm-interface.c
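The exported helpers in tpm-interface.c (tpm_is_tpm2(), tpm_pcr_read(), tpm_get_random()) all accept a NULL chip to mean the default chip and take/drop the ops reference internally. A minimal in-kernel consumer sketch follows; the tpm_demo name is hypothetical, error handling is trimmed, and the PCR read assumes the chip actually exposes a SHA1 bank:

#include <linux/module.h>
#include <linux/tpm.h>

static int __init tpm_demo_init(void)
{
	struct tpm_digest d = { .alg_id = TPM_ALG_SHA1 };
	u8 rnd[32];
	int rc;

	rc = tpm_is_tpm2(NULL);			/* NULL selects the default chip */
	if (rc < 0)
		return rc;			/* no chip registered */
	pr_info("tpm_demo: TPM %s\n", rc ? "2.0" : "1.x");

	rc = tpm_pcr_read(NULL, 0, &d);		/* PCR 0 from the SHA1 bank */
	if (rc)
		return rc;

	rc = tpm_get_random(NULL, rnd, sizeof(rnd));
	return rc < 0 ? rc : 0;			/* >= 0 is the byte count */
}

static void __exit tpm_demo_exit(void)
{
}

module_init(tpm_demo_init);
module_exit(tpm_demo_exit);
MODULE_LICENSE("GPL");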
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012-2020 IBM Corporation * * Author: Ashley Lai <[email protected]> * * Maintained by: <[email protected]> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org */ #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/slab.h> #include <asm/vio.h> #include <asm/irq.h> #include <linux/types.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <asm/prom.h> #include "tpm.h" #include "tpm_ibmvtpm.h" static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm"; static const struct vio_device_id tpm_ibmvtpm_device_table[] = { { "IBM,vtpm", "IBM,vtpm"}, { "IBM,vtpm", "IBM,vtpm20"}, { "", "" } }; MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table); /** * ibmvtpm_send_crq_word() - Send a CRQ request * @vdev: vio device struct * @w1: pre-constructed first word of tpm crq (second word is reserved) * * Return: * 0 - Success * Non-zero - Failure */ static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1) { return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0); } /** * ibmvtpm_send_crq() - Send a CRQ request * * @vdev: vio device struct * @valid: Valid field * @msg: Type field * @len: Length field * @data: Data field * * The ibmvtpm crq is defined as follows: * * Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 * ----------------------------------------------------------------------- * Word0 | Valid | Type | Length | Data * ----------------------------------------------------------------------- * Word1 | Reserved * ----------------------------------------------------------------------- * * Which matches the following structure (on bigendian host): * * struct ibmvtpm_crq { * u8 valid; * u8 msg; * __be16 len; * __be32 data; * __be64 reserved; * } __attribute__((packed, aligned(8))); * * However, the value is passed in a register so just compute the numeric value * to load into the register avoiding byteswap altogether. Endian only affects * memory loads and stores - registers are internally represented the same. * * Return: * 0 (H_SUCCESS) - Success * Non-zero - Failure */ static int ibmvtpm_send_crq(struct vio_dev *vdev, u8 valid, u8 msg, u16 len, u32 data) { u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) | (u64)data; return ibmvtpm_send_crq_word(vdev, w1); } /** * tpm_ibmvtpm_recv - Receive data after send * * @chip: tpm chip struct * @buf: buffer to read * @count: size of buffer * * Return: * Number of bytes read */ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); u16 len; if (!ibmvtpm->rtce_buf) { dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n"); return 0; } len = ibmvtpm->res_len; if (count < len) { dev_err(ibmvtpm->dev, "Invalid size in recv: count=%zd, crq_size=%d\n", count, len); return -EIO; } spin_lock(&ibmvtpm->rtce_lock); memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len); memset(ibmvtpm->rtce_buf, 0, len); ibmvtpm->res_len = 0; spin_unlock(&ibmvtpm->rtce_lock); return len; } /** * ibmvtpm_crq_send_init - Send a CRQ initialize message * @ibmvtpm: vtpm device struct * * Return: * 0 on success. * Non-zero on failure. 
*/ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) { int rc; rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "%s failed rc=%d\n", __func__, rc); return rc; } /** * tpm_ibmvtpm_resume - Resume from suspend * * @dev: device struct * * Return: * 0 on success. * Non-zero on failure. */ static int tpm_ibmvtpm_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); int rc = 0; do { if (rc) msleep(100); rc = plpar_hcall_norets(H_ENABLE_CRQ, ibmvtpm->vdev->unit_address); } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); if (rc) { dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc); return rc; } rc = vio_enable_interrupts(ibmvtpm->vdev); if (rc) { dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc); return rc; } rc = ibmvtpm_crq_send_init(ibmvtpm); if (rc) dev_err(dev, "Error send_init rc=%d\n", rc); return rc; } /** * tpm_ibmvtpm_send() - Send a TPM command * @chip: tpm chip struct * @buf: buffer contains data to send * @count: size of buffer * * Return: * 0 on success, * -errno on error */ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); bool retry = true; int rc, sig; if (!ibmvtpm->rtce_buf) { dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n"); return 0; } if (count > ibmvtpm->rtce_size) { dev_err(ibmvtpm->dev, "Invalid size in send: count=%zd, rtce_size=%d\n", count, ibmvtpm->rtce_size); return -EIO; } if (ibmvtpm->tpm_processing_cmd) { dev_info(ibmvtpm->dev, "Need to wait for TPM to finish\n"); /* wait for previous command to finish */ sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd); if (sig) return -EINTR; } spin_lock(&ibmvtpm->rtce_lock); ibmvtpm->res_len = 0; memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); /* * set the processing flag before the Hcall, since we may get the * result (interrupt) before even being able to check rc. */ ibmvtpm->tpm_processing_cmd = 1; again: rc = ibmvtpm_send_crq(ibmvtpm->vdev, IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND, count, ibmvtpm->rtce_dma_handle); if (rc != H_SUCCESS) { /* * H_CLOSED can be returned after LPM resume. Call * tpm_ibmvtpm_resume() to re-enable the CRQ then retry * ibmvtpm_send_crq() once before failing. */ if (rc == H_CLOSED && retry) { tpm_ibmvtpm_resume(ibmvtpm->dev); retry = false; goto again; } dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); ibmvtpm->tpm_processing_cmd = 0; } spin_unlock(&ibmvtpm->rtce_lock); return 0; } static void tpm_ibmvtpm_cancel(struct tpm_chip *chip) { return; } static u8 tpm_ibmvtpm_status(struct tpm_chip *chip) { struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); return ibmvtpm->tpm_processing_cmd; } /** * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size * * @ibmvtpm: vtpm device struct * * Return: * 0 on success. * Non-zero on failure. */ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) { int rc; rc = ibmvtpm_send_crq(ibmvtpm->vdev, IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc); return rc; } /** * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version * - Note that this is vtpm version and not tpm version * * @ibmvtpm: vtpm device struct * * Return: * 0 on success. * Non-zero on failure. 
*/ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm) { int rc; rc = ibmvtpm_send_crq(ibmvtpm->vdev, IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_get_version failed rc=%d\n", rc); return rc; } /** * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message * @ibmvtpm: vtpm device struct * * Return: * 0 on success. * Non-zero on failure. */ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm) { int rc; rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc); return rc; } /** * tpm_ibmvtpm_remove - ibm vtpm remove entry point * @vdev: vio device struct */ static void tpm_ibmvtpm_remove(struct vio_dev *vdev) { struct tpm_chip *chip = dev_get_drvdata(&vdev->dev); struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); int rc = 0; tpm_chip_unregister(chip); free_irq(vdev->irq, ibmvtpm); do { if (rc) msleep(100); rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)ibmvtpm->crq_queue.crq_addr); if (ibmvtpm->rtce_buf) { dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle, ibmvtpm->rtce_size, DMA_BIDIRECTIONAL); kfree(ibmvtpm->rtce_buf); } kfree(ibmvtpm); /* For tpm_ibmvtpm_get_desired_dma */ dev_set_drvdata(&vdev->dev, NULL); } /** * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver * @vdev: vio device struct * * Return: * Number of bytes the driver needs to DMA map. */ static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) { struct tpm_chip *chip = dev_get_drvdata(&vdev->dev); struct ibmvtpm_dev *ibmvtpm; /* * ibmvtpm initializes at probe time, so the data we are * asking for may not be set yet. Estimate that 4K required * for TCE-mapped buffer in addition to CRQ. */ if (chip) ibmvtpm = dev_get_drvdata(&chip->dev); else return CRQ_RES_BUF_SIZE + PAGE_SIZE; return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size; } /** * tpm_ibmvtpm_suspend - Suspend * @dev: device struct * * Return: * 0 on success. * Non-zero on failure. */ static int tpm_ibmvtpm_suspend(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); int rc = 0; rc = ibmvtpm_send_crq(ibmvtpm->vdev, IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "tpm_ibmvtpm_suspend failed rc=%d\n", rc); return rc; } /** * ibmvtpm_reset_crq - Reset CRQ * * @ibmvtpm: ibm vtpm struct * * Return: * 0 on success. * Non-zero on failure. 
*/ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm) { int rc = 0; do { if (rc) msleep(100); rc = plpar_hcall_norets(H_FREE_CRQ, ibmvtpm->vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE); ibmvtpm->crq_queue.index = 0; return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE); } static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status) { return (status == 0); } static const struct tpm_class_ops tpm_ibmvtpm = { .recv = tpm_ibmvtpm_recv, .send = tpm_ibmvtpm_send, .cancel = tpm_ibmvtpm_cancel, .status = tpm_ibmvtpm_status, .req_complete_mask = 1, .req_complete_val = 0, .req_canceled = tpm_ibmvtpm_req_canceled, }; static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = { .suspend = tpm_ibmvtpm_suspend, .resume = tpm_ibmvtpm_resume, }; /** * ibmvtpm_crq_get_next - Get next responded crq * * @ibmvtpm: vtpm device struct * * Return: vtpm crq pointer or NULL. */ static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm) { struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue; struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index]; if (crq->valid & VTPM_MSG_RES) { if (++crq_q->index == crq_q->num_entry) crq_q->index = 0; smp_rmb(); } else crq = NULL; return crq; } /** * ibmvtpm_crq_process - Process responded crq * * @crq: crq to be processed * @ibmvtpm: vtpm device struct * */ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq, struct ibmvtpm_dev *ibmvtpm) { int rc = 0; switch (crq->valid) { case VALID_INIT_CRQ: switch (crq->msg) { case INIT_CRQ_RES: dev_info(ibmvtpm->dev, "CRQ initialized\n"); rc = ibmvtpm_crq_send_init_complete(ibmvtpm); if (rc) dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc); return; case INIT_CRQ_COMP_RES: dev_info(ibmvtpm->dev, "CRQ initialization completed\n"); return; default: dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg); return; } case IBMVTPM_VALID_CMD: switch (crq->msg) { case VTPM_GET_RTCE_BUFFER_SIZE_RES: if (be16_to_cpu(crq->len) <= 0) { dev_err(ibmvtpm->dev, "Invalid rtce size\n"); return; } ibmvtpm->rtce_size = be16_to_cpu(crq->len); ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size, GFP_ATOMIC); if (!ibmvtpm->rtce_buf) { dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n"); return; } ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev, ibmvtpm->rtce_buf, ibmvtpm->rtce_size, DMA_BIDIRECTIONAL); if (dma_mapping_error(ibmvtpm->dev, ibmvtpm->rtce_dma_handle)) { kfree(ibmvtpm->rtce_buf); ibmvtpm->rtce_buf = NULL; dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n"); } return; case VTPM_GET_VERSION_RES: ibmvtpm->vtpm_version = be32_to_cpu(crq->data); return; case VTPM_TPM_COMMAND_RES: /* len of the data in rtce buffer */ ibmvtpm->res_len = be16_to_cpu(crq->len); ibmvtpm->tpm_processing_cmd = 0; wake_up_interruptible(&ibmvtpm->wq); return; default: return; } } return; } /** * ibmvtpm_interrupt - Interrupt handler * * @irq: irq number to handle * @vtpm_instance: vtpm that received interrupt * * Returns: * IRQ_HANDLED **/ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance) { struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance; struct ibmvtpm_crq *crq; /* while loop is needed for initial setup (get version and * get rtce_size). There should be only one tpm request at any * given time. 
*/ while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) { ibmvtpm_crq_process(crq, ibmvtpm); wake_up_interruptible(&ibmvtpm->crq_queue.wq); crq->valid = 0; smp_wmb(); } return IRQ_HANDLED; } /** * tpm_ibmvtpm_probe - ibm vtpm initialize entry point * * @vio_dev: vio device struct * @id: vio device id struct * * Return: * 0 on success. * Non-zero on failure. */ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, const struct vio_device_id *id) { struct ibmvtpm_dev *ibmvtpm; struct device *dev = &vio_dev->dev; struct ibmvtpm_crq_queue *crq_q; struct tpm_chip *chip; int rc = -ENOMEM, rc1; chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm); if (IS_ERR(chip)) return PTR_ERR(chip); ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL); if (!ibmvtpm) { dev_err(dev, "kzalloc for ibmvtpm failed\n"); goto cleanup; } ibmvtpm->dev = dev; ibmvtpm->vdev = vio_dev; crq_q = &ibmvtpm->crq_queue; crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL); if (!crq_q->crq_addr) { dev_err(dev, "Unable to allocate memory for crq_addr\n"); goto cleanup; } crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr); init_waitqueue_head(&crq_q->wq); ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr, CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) { dev_err(dev, "dma mapping failed\n"); goto cleanup; } rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE); if (rc == H_RESOURCE) rc = ibmvtpm_reset_crq(ibmvtpm); if (rc) { dev_err(dev, "Unable to register CRQ rc=%d\n", rc); goto reg_crq_cleanup; } rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0, tpm_ibmvtpm_driver_name, ibmvtpm); if (rc) { dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq); goto init_irq_cleanup; } rc = vio_enable_interrupts(vio_dev); if (rc) { dev_err(dev, "Error %d enabling interrupts\n", rc); goto init_irq_cleanup; } init_waitqueue_head(&ibmvtpm->wq); crq_q->index = 0; dev_set_drvdata(&chip->dev, ibmvtpm); spin_lock_init(&ibmvtpm->rtce_lock); rc = ibmvtpm_crq_send_init(ibmvtpm); if (rc) goto init_irq_cleanup; rc = ibmvtpm_crq_get_version(ibmvtpm); if (rc) goto init_irq_cleanup; rc = ibmvtpm_crq_get_rtce_size(ibmvtpm); if (rc) goto init_irq_cleanup; if (!wait_event_timeout(ibmvtpm->crq_queue.wq, ibmvtpm->rtce_buf != NULL, HZ)) { rc = -ENODEV; dev_err(dev, "CRQ response timed out\n"); goto init_irq_cleanup; } if (!strcmp(id->compat, "IBM,vtpm20")) chip->flags |= TPM_CHIP_FLAG_TPM2; rc = tpm_get_timeouts(chip); if (rc) goto init_irq_cleanup; if (chip->flags & TPM_CHIP_FLAG_TPM2) { rc = tpm2_get_cc_attrs_tbl(chip); if (rc) goto init_irq_cleanup; } return tpm_chip_register(chip); init_irq_cleanup: do { rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address); } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1)); reg_crq_cleanup: dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL); cleanup: if (ibmvtpm) { if (crq_q->crq_addr) free_page((unsigned long)crq_q->crq_addr); kfree(ibmvtpm); } return rc; } static struct vio_driver ibmvtpm_driver = { .id_table = tpm_ibmvtpm_device_table, .probe = tpm_ibmvtpm_probe, .remove = tpm_ibmvtpm_remove, .get_desired_dma = tpm_ibmvtpm_get_desired_dma, .name = tpm_ibmvtpm_driver_name, .pm = &tpm_ibmvtpm_pm_ops, }; /** * ibmvtpm_module_init - Initialize ibm vtpm module. * * * Return: * 0 on success. * Non-zero on failure. */ static int __init ibmvtpm_module_init(void) { return vio_register_driver(&ibmvtpm_driver); } /** * ibmvtpm_module_exit - Tear down ibm vtpm module. 
*/ static void __exit ibmvtpm_module_exit(void) { vio_unregister_driver(&ibmvtpm_driver); } module_init(ibmvtpm_module_init); module_exit(ibmvtpm_module_exit); MODULE_AUTHOR("[email protected]"); MODULE_DESCRIPTION("IBM vTPM Driver"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_ibmvtpm.c
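The CRQ layout documented in ibmvtpm_send_crq() folds valid/type/length/data into a single 64-bit register argument rather than a memory image, precisely so endianness never enters the picture. A stand-alone check of that packing, mirroring the driver's shifts; the 0x80/0x01 constants stand in for IBMVTPM_VALID_CMD/VTPM_TPM_COMMAND and the handle value is made up for illustration:

#include <stdint.h>
#include <assert.h>

static uint64_t crq_pack(uint8_t valid, uint8_t msg, uint16_t len, uint32_t data)
{
	/* Same shifts as ibmvtpm_send_crq(): valid:8 | type:8 | len:16 | data:32 */
	return ((uint64_t)valid << 56) | ((uint64_t)msg << 48) |
	       ((uint64_t)len << 32) | data;
}

int main(void)
{
	/* A 10-byte TPM command at DMA handle 0x1000 (illustrative values). */
	uint64_t w1 = crq_pack(0x80, 0x01, 10, 0x1000);

	assert(w1 >> 56 == 0x80);		/* valid */
	assert(((w1 >> 48) & 0xff) == 0x01);	/* type */
	assert(((w1 >> 32) & 0xffff) == 10);	/* length */
	assert((uint32_t)w1 == 0x1000);		/* data */
	return 0;
}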
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 Intel Corporation * * Authors: * Jarkko Sakkinen <[email protected]> * * Maintained by: <[email protected]> * * This file contains TPM2 protocol implementations of the commands * used by the kernel internally. */ #include <linux/gfp.h> #include <asm/unaligned.h> #include "tpm.h" enum tpm2_handle_types { TPM2_HT_HMAC_SESSION = 0x02000000, TPM2_HT_POLICY_SESSION = 0x03000000, TPM2_HT_TRANSIENT = 0x80000000, }; struct tpm2_context { __be64 sequence; __be32 saved_handle; __be32 hierarchy; __be16 blob_size; } __packed; static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space) { int i; for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) { if (space->session_tbl[i]) tpm2_flush_context(chip, space->session_tbl[i]); } } int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) { space->context_buf = kzalloc(buf_size, GFP_KERNEL); if (!space->context_buf) return -ENOMEM; space->session_buf = kzalloc(buf_size, GFP_KERNEL); if (space->session_buf == NULL) { kfree(space->context_buf); /* Prevent caller getting a dangling pointer. */ space->context_buf = NULL; return -ENOMEM; } space->buf_size = buf_size; return 0; } void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space) { if (tpm_try_get_ops(chip) == 0) { tpm2_flush_sessions(chip, space); tpm_put_ops(chip); } kfree(space->context_buf); kfree(space->session_buf); } static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, unsigned int *offset, u32 *handle) { struct tpm_buf tbuf; struct tpm2_context *ctx; unsigned int body_size; int rc; rc = tpm_buf_init(&tbuf, TPM2_ST_NO_SESSIONS, TPM2_CC_CONTEXT_LOAD); if (rc) return rc; ctx = (struct tpm2_context *)&buf[*offset]; body_size = sizeof(*ctx) + be16_to_cpu(ctx->blob_size); tpm_buf_append(&tbuf, &buf[*offset], body_size); rc = tpm_transmit_cmd(chip, &tbuf, 4, NULL); if (rc < 0) { dev_warn(&chip->dev, "%s: failed with a system error %d\n", __func__, rc); tpm_buf_destroy(&tbuf); return -EFAULT; } else if (tpm2_rc_value(rc) == TPM2_RC_HANDLE || rc == TPM2_RC_REFERENCE_H0) { /* * TPM_RC_HANDLE means that the session context can't * be loaded because of an internal counter mismatch * that makes the TPM think there might have been a * replay. This might happen if the context was saved * and loaded outside the space. 
* * TPM_RC_REFERENCE_H0 means the session has been * flushed outside the space */ *handle = 0; tpm_buf_destroy(&tbuf); return -ENOENT; } else if (rc > 0) { dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n", __func__, rc); tpm_buf_destroy(&tbuf); return -EFAULT; } *handle = be32_to_cpup((__be32 *)&tbuf.data[TPM_HEADER_SIZE]); *offset += body_size; tpm_buf_destroy(&tbuf); return 0; } static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, unsigned int buf_size, unsigned int *offset) { struct tpm_buf tbuf; unsigned int body_size; int rc; rc = tpm_buf_init(&tbuf, TPM2_ST_NO_SESSIONS, TPM2_CC_CONTEXT_SAVE); if (rc) return rc; tpm_buf_append_u32(&tbuf, handle); rc = tpm_transmit_cmd(chip, &tbuf, 0, NULL); if (rc < 0) { dev_warn(&chip->dev, "%s: failed with a system error %d\n", __func__, rc); tpm_buf_destroy(&tbuf); return -EFAULT; } else if (tpm2_rc_value(rc) == TPM2_RC_REFERENCE_H0) { tpm_buf_destroy(&tbuf); return -ENOENT; } else if (rc) { dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n", __func__, rc); tpm_buf_destroy(&tbuf); return -EFAULT; } body_size = tpm_buf_length(&tbuf) - TPM_HEADER_SIZE; if ((*offset + body_size) > buf_size) { dev_warn(&chip->dev, "%s: out of backing storage\n", __func__); tpm_buf_destroy(&tbuf); return -ENOMEM; } memcpy(&buf[*offset], &tbuf.data[TPM_HEADER_SIZE], body_size); *offset += body_size; tpm_buf_destroy(&tbuf); return 0; } void tpm2_flush_space(struct tpm_chip *chip) { struct tpm_space *space = &chip->work_space; int i; for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) if (space->context_tbl[i] && ~space->context_tbl[i]) tpm2_flush_context(chip, space->context_tbl[i]); tpm2_flush_sessions(chip, space); } static int tpm2_load_space(struct tpm_chip *chip) { struct tpm_space *space = &chip->work_space; unsigned int offset; int i; int rc; for (i = 0, offset = 0; i < ARRAY_SIZE(space->context_tbl); i++) { if (!space->context_tbl[i]) continue; /* sanity check, should never happen */ if (~space->context_tbl[i]) { dev_err(&chip->dev, "context table is inconsistent"); return -EFAULT; } rc = tpm2_load_context(chip, space->context_buf, &offset, &space->context_tbl[i]); if (rc) return rc; } for (i = 0, offset = 0; i < ARRAY_SIZE(space->session_tbl); i++) { u32 handle; if (!space->session_tbl[i]) continue; rc = tpm2_load_context(chip, space->session_buf, &offset, &handle); if (rc == -ENOENT) { /* load failed, just forget session */ space->session_tbl[i] = 0; } else if (rc) { tpm2_flush_space(chip); return rc; } if (handle != space->session_tbl[i]) { dev_warn(&chip->dev, "session restored to wrong handle\n"); tpm2_flush_space(chip); return -EFAULT; } } return 0; } static bool tpm2_map_to_phandle(struct tpm_space *space, void *handle) { u32 vhandle = be32_to_cpup((__be32 *)handle); u32 phandle; int i; i = 0xFFFFFF - (vhandle & 0xFFFFFF); if (i >= ARRAY_SIZE(space->context_tbl) || !space->context_tbl[i]) return false; phandle = space->context_tbl[i]; *((__be32 *)handle) = cpu_to_be32(phandle); return true; } static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd) { struct tpm_space *space = &chip->work_space; unsigned int nr_handles; u32 attrs; __be32 *handle; int i; i = tpm2_find_cc(chip, cc); if (i < 0) return -EINVAL; attrs = chip->cc_attrs_tbl[i]; nr_handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0); handle = (__be32 *)&cmd[TPM_HEADER_SIZE]; for (i = 0; i < nr_handles; i++, handle++) { if ((be32_to_cpu(*handle) & 0xFF000000) == TPM2_HT_TRANSIENT) { if (!tpm2_map_to_phandle(space, handle)) return -EINVAL; } } 
return 0; } static int tpm_find_and_validate_cc(struct tpm_chip *chip, struct tpm_space *space, const void *cmd, size_t len) { const struct tpm_header *header = (const void *)cmd; int i; u32 cc; u32 attrs; unsigned int nr_handles; if (len < TPM_HEADER_SIZE || !chip->nr_commands) return -EINVAL; cc = be32_to_cpu(header->ordinal); i = tpm2_find_cc(chip, cc); if (i < 0) { dev_dbg(&chip->dev, "0x%04X is an invalid command\n", cc); return -EOPNOTSUPP; } attrs = chip->cc_attrs_tbl[i]; nr_handles = 4 * ((attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0)); if (len < TPM_HEADER_SIZE + 4 * nr_handles) goto err_len; return cc; err_len: dev_dbg(&chip->dev, "%s: insufficient command length %zu", __func__, len); return -EINVAL; } int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, size_t cmdsiz) { int rc; int cc; if (!space) return 0; cc = tpm_find_and_validate_cc(chip, space, cmd, cmdsiz); if (cc < 0) return cc; memcpy(&chip->work_space.context_tbl, &space->context_tbl, sizeof(space->context_tbl)); memcpy(&chip->work_space.session_tbl, &space->session_tbl, sizeof(space->session_tbl)); memcpy(chip->work_space.context_buf, space->context_buf, space->buf_size); memcpy(chip->work_space.session_buf, space->session_buf, space->buf_size); rc = tpm2_load_space(chip); if (rc) { tpm2_flush_space(chip); return rc; } rc = tpm2_map_command(chip, cc, cmd); if (rc) { tpm2_flush_space(chip); return rc; } chip->last_cc = cc; return 0; } static bool tpm2_add_session(struct tpm_chip *chip, u32 handle) { struct tpm_space *space = &chip->work_space; int i; for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) if (space->session_tbl[i] == 0) break; if (i == ARRAY_SIZE(space->session_tbl)) return false; space->session_tbl[i] = handle; return true; } static u32 tpm2_map_to_vhandle(struct tpm_space *space, u32 phandle, bool alloc) { int i; for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) { if (alloc) { if (!space->context_tbl[i]) { space->context_tbl[i] = phandle; break; } } else if (space->context_tbl[i] == phandle) break; } if (i == ARRAY_SIZE(space->context_tbl)) return 0; return TPM2_HT_TRANSIENT | (0xFFFFFF - i); } static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp, size_t len) { struct tpm_space *space = &chip->work_space; struct tpm_header *header = (struct tpm_header *)rsp; u32 phandle; u32 phandle_type; u32 vhandle; u32 attrs; int i; if (be32_to_cpu(header->return_code) != TPM2_RC_SUCCESS) return 0; i = tpm2_find_cc(chip, cc); /* sanity check, should never happen */ if (i < 0) return -EFAULT; attrs = chip->cc_attrs_tbl[i]; if (!((attrs >> TPM2_CC_ATTR_RHANDLE) & 1)) return 0; phandle = be32_to_cpup((__be32 *)&rsp[TPM_HEADER_SIZE]); phandle_type = phandle & 0xFF000000; switch (phandle_type) { case TPM2_HT_TRANSIENT: vhandle = tpm2_map_to_vhandle(space, phandle, true); if (!vhandle) goto out_no_slots; *(__be32 *)&rsp[TPM_HEADER_SIZE] = cpu_to_be32(vhandle); break; case TPM2_HT_HMAC_SESSION: case TPM2_HT_POLICY_SESSION: if (!tpm2_add_session(chip, phandle)) goto out_no_slots; break; default: dev_err(&chip->dev, "%s: unknown handle 0x%08X\n", __func__, phandle); break; } return 0; out_no_slots: tpm2_flush_context(chip, phandle); dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__, phandle); return -ENOMEM; } struct tpm2_cap_handles { u8 more_data; __be32 capability; __be32 count; __be32 handles[]; } __packed; static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp, size_t len) { struct tpm_space *space = &chip->work_space; struct tpm_header 
*header = (struct tpm_header *)rsp; struct tpm2_cap_handles *data; u32 phandle; u32 phandle_type; u32 vhandle; int i; int j; if (cc != TPM2_CC_GET_CAPABILITY || be32_to_cpu(header->return_code) != TPM2_RC_SUCCESS) { return 0; } if (len < TPM_HEADER_SIZE + 9) return -EFAULT; data = (void *)&rsp[TPM_HEADER_SIZE]; if (be32_to_cpu(data->capability) != TPM2_CAP_HANDLES) return 0; if (be32_to_cpu(data->count) > (UINT_MAX - TPM_HEADER_SIZE - 9) / 4) return -EFAULT; if (len != TPM_HEADER_SIZE + 9 + 4 * be32_to_cpu(data->count)) return -EFAULT; for (i = 0, j = 0; i < be32_to_cpu(data->count); i++) { phandle = be32_to_cpup((__be32 *)&data->handles[i]); phandle_type = phandle & 0xFF000000; switch (phandle_type) { case TPM2_HT_TRANSIENT: vhandle = tpm2_map_to_vhandle(space, phandle, false); if (!vhandle) break; data->handles[j] = cpu_to_be32(vhandle); j++; break; default: data->handles[j] = cpu_to_be32(phandle); j++; break; } } header->length = cpu_to_be32(TPM_HEADER_SIZE + 9 + 4 * j); data->count = cpu_to_be32(j); return 0; } static int tpm2_save_space(struct tpm_chip *chip) { struct tpm_space *space = &chip->work_space; unsigned int offset; int i; int rc; for (i = 0, offset = 0; i < ARRAY_SIZE(space->context_tbl); i++) { if (!(space->context_tbl[i] && ~space->context_tbl[i])) continue; rc = tpm2_save_context(chip, space->context_tbl[i], space->context_buf, space->buf_size, &offset); if (rc == -ENOENT) { space->context_tbl[i] = 0; continue; } else if (rc) return rc; tpm2_flush_context(chip, space->context_tbl[i]); space->context_tbl[i] = ~0; } for (i = 0, offset = 0; i < ARRAY_SIZE(space->session_tbl); i++) { if (!space->session_tbl[i]) continue; rc = tpm2_save_context(chip, space->session_tbl[i], space->session_buf, space->buf_size, &offset); if (rc == -ENOENT) { /* handle error saving session, just forget it */ space->session_tbl[i] = 0; } else if (rc < 0) { tpm2_flush_space(chip); return rc; } } return 0; } int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, size_t *bufsiz) { struct tpm_header *header = buf; int rc; if (!space) return 0; rc = tpm2_map_response_header(chip, chip->last_cc, buf, *bufsiz); if (rc) { tpm2_flush_space(chip); goto out; } rc = tpm2_map_response_body(chip, chip->last_cc, buf, *bufsiz); if (rc) { tpm2_flush_space(chip); goto out; } rc = tpm2_save_space(chip); if (rc) { tpm2_flush_space(chip); goto out; } *bufsiz = be32_to_cpu(header->length); memcpy(&space->context_tbl, &chip->work_space.context_tbl, sizeof(space->context_tbl)); memcpy(&space->session_tbl, &chip->work_space.session_tbl, sizeof(space->session_tbl)); memcpy(space->context_buf, chip->work_space.context_buf, space->buf_size); memcpy(space->session_buf, chip->work_space.session_buf, space->buf_size); return 0; out: dev_err(&chip->dev, "%s: error %d\n", __func__, rc); return rc; } /* * Put the reference to the main device. */ static void tpm_devs_release(struct device *dev) { struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); /* release the master device reference */ put_device(&chip->dev); } /* * Remove the device file for exposed TPM spaces and release the device * reference. This may also release the reference to the master device. */ void tpm_devs_remove(struct tpm_chip *chip) { cdev_device_del(&chip->cdevs, &chip->devs); put_device(&chip->devs); } /* * Add a device file to expose TPM spaces. Also take a reference to the * main device. 
*/ int tpm_devs_add(struct tpm_chip *chip) { int rc; device_initialize(&chip->devs); chip->devs.parent = chip->dev.parent; chip->devs.class = &tpmrm_class; /* * Get extra reference on main device to hold on behalf of devs. * This holds the chip structure while cdevs is in use. The * corresponding put is in the tpm_devs_release. */ get_device(&chip->dev); chip->devs.release = tpm_devs_release; chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); cdev_init(&chip->cdevs, &tpmrm_fops); chip->cdevs.owner = THIS_MODULE; rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); if (rc) goto err_put_devs; rc = cdev_device_add(&chip->cdevs, &chip->devs); if (rc) { dev_err(&chip->devs, "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", dev_name(&chip->devs), MAJOR(chip->devs.devt), MINOR(chip->devs.devt), rc); goto err_put_devs; } return 0; err_put_devs: put_device(&chip->devs); return rc; }
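Both tpm2_map_command() and tpm_find_and_validate_cc() above decode the per-command handle count from a 3-bit field in chip->cc_attrs_tbl. A minimal standalone sketch of just that bit-field extraction; the shift value is an assumption mirroring TPM2_CC_ATTR_CHANDLES in tpm.h, which is not part of this dump.

#include <stdint.h>
#include <stdio.h>

#define TPM2_CC_ATTR_CHANDLES 25   /* assumption: shift taken from tpm.h */
#define GENMASK3 0x7U              /* stands in for the kernel's GENMASK(2, 0) */

/* Number of handles carried in the command's header area. */
static unsigned int cc_nr_handles(uint32_t attrs)
{
	return (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK3;
}

int main(void)
{
	uint32_t attrs = 2U << TPM2_CC_ATTR_CHANDLES; /* e.g. two input handles */

	printf("handles in command body: %u\n", cc_nr_handles(attrs));
	return 0;
}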
linux-master
drivers/char/tpm/tpm2-space.c
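tpm2_map_to_vhandle() and tpm2_map_to_phandle() in the file above virtualize transient object handles by table index: slot i maps to TPM2_HT_TRANSIENT | (0xFFFFFF - i), and the reverse lookup recovers i from the low 24 bits. A self-contained sketch of just that arithmetic, not kernel code; the handle-type constant and table size are assumptions mirroring the driver headers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TPM2_HT_TRANSIENT 0x80000000U /* assumption: handle type from tpm.h */
#define TBL_SIZE 3                    /* assumption: ARRAY_SIZE(context_tbl) */

static uint32_t slot_to_vhandle(unsigned int i)
{
	return TPM2_HT_TRANSIENT | (0xFFFFFFU - i);
}

static int vhandle_to_slot(uint32_t vhandle)
{
	unsigned int i = 0xFFFFFFU - (vhandle & 0xFFFFFFU);

	return i < TBL_SIZE ? (int)i : -1; /* -1: not a handle we issued */
}

int main(void)
{
	for (unsigned int i = 0; i < TBL_SIZE; i++) {
		uint32_t v = slot_to_vhandle(i);

		printf("slot %u -> vhandle 0x%08X\n", i, (unsigned int)v);
		assert(vhandle_to_slot(v) == (int)i); /* round-trips exactly */
	}
	return 0;
}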
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2004 IBM Corporation * * Authors: * Leendert van Doorn <[email protected]> * Dave Safford <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * * Maintained by: <[email protected]> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org */ #include "tpm.h" #include "tpm_atmel.h" /* write status bits */ enum tpm_atmel_write_status { ATML_STATUS_ABORT = 0x01, ATML_STATUS_LASTBYTE = 0x04 }; /* read status bits */ enum tpm_atmel_read_status { ATML_STATUS_BUSY = 0x01, ATML_STATUS_DATA_AVAIL = 0x02, ATML_STATUS_REWRITE = 0x04, ATML_STATUS_READY = 0x08 }; static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); u8 status, *hdr = buf; u32 size; int i; __be32 *native_size; /* start reading header */ if (count < 6) return -EIO; for (i = 0; i < 6; i++) { status = ioread8(priv->iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { dev_err(&chip->dev, "error reading header\n"); return -EIO; } *buf++ = ioread8(priv->iobase); } /* size of the data received */ native_size = (__force __be32 *) (hdr + 2); size = be32_to_cpu(*native_size); if (count < size) { dev_err(&chip->dev, "Recv size(%d) less than available space\n", size); for (; i < size; i++) { /* clear the waiting data anyway */ status = ioread8(priv->iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { dev_err(&chip->dev, "error reading data\n"); return -EIO; } } return -EIO; } /* read all the data available */ for (; i < size; i++) { status = ioread8(priv->iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { dev_err(&chip->dev, "error reading data\n"); return -EIO; } *buf++ = ioread8(priv->iobase); } /* make sure data available is gone */ status = ioread8(priv->iobase + 1); if (status & ATML_STATUS_DATA_AVAIL) { dev_err(&chip->dev, "data available is stuck\n"); return -EIO; } return size; } static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); int i; dev_dbg(&chip->dev, "tpm_atml_send:\n"); for (i = 0; i < count; i++) { dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); iowrite8(buf[i], priv->iobase); } return 0; } static void tpm_atml_cancel(struct tpm_chip *chip) { struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); iowrite8(ATML_STATUS_ABORT, priv->iobase + 1); } static u8 tpm_atml_status(struct tpm_chip *chip) { struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); return ioread8(priv->iobase + 1); } static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status) { return (status == ATML_STATUS_READY); } static const struct tpm_class_ops tpm_atmel = { .recv = tpm_atml_recv, .send = tpm_atml_send, .cancel = tpm_atml_cancel, .status = tpm_atml_status, .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL, .req_complete_val = ATML_STATUS_DATA_AVAIL, .req_canceled = tpm_atml_req_canceled, }; static struct platform_device *pdev; static void atml_plat_remove(void) { struct tpm_chip *chip = dev_get_drvdata(&pdev->dev); struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); tpm_chip_unregister(chip); if (priv->have_region) atmel_release_region(priv->base, priv->region_size); atmel_put_base_addr(priv->iobase); platform_device_unregister(pdev); } static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume); static struct platform_driver atml_drv = { .driver = { .name = "tpm_atmel", .pm = 
&tpm_atml_pm, }, }; static int __init init_atmel(void) { int rc = 0; void __iomem *iobase = NULL; int have_region, region_size; unsigned long base; struct tpm_chip *chip; struct tpm_atmel_priv *priv; rc = platform_driver_register(&atml_drv); if (rc) return rc; if ((iobase = atmel_get_base_addr(&base, &region_size)) == NULL) { rc = -ENODEV; goto err_unreg_drv; } have_region = (atmel_request_region (base, region_size, "tpm_atmel0") == NULL) ? 0 : 1; pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0); if (IS_ERR(pdev)) { rc = PTR_ERR(pdev); goto err_rel_reg; } priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { rc = -ENOMEM; goto err_unreg_dev; } priv->iobase = iobase; priv->base = base; priv->have_region = have_region; priv->region_size = region_size; chip = tpmm_chip_alloc(&pdev->dev, &tpm_atmel); if (IS_ERR(chip)) { rc = PTR_ERR(chip); goto err_unreg_dev; } dev_set_drvdata(&chip->dev, priv); rc = tpm_chip_register(chip); if (rc) goto err_unreg_dev; return 0; err_unreg_dev: platform_device_unregister(pdev); err_rel_reg: atmel_put_base_addr(iobase); if (have_region) atmel_release_region(base, region_size); err_unreg_drv: platform_driver_unregister(&atml_drv); return rc; } static void __exit cleanup_atmel(void) { platform_driver_unregister(&atml_drv); atml_plat_remove(); } module_init(init_atmel); module_exit(cleanup_atmel); MODULE_AUTHOR("Leendert van Doorn ([email protected])"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_atmel.c
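tpm_atml_recv() above reads a 6-byte header and then trusts the big-endian 32-bit length at offset 2 to know how many bytes follow. A host-side sketch of that parse using endian-independent shifts; the sample bytes form a minimal TPM 1.2 success response (tag 0x00C4, length 10, return code 0).

#include <stdint.h>
#include <stdio.h>

/* Big-endian total length at offset 2 of a TPM header, as in tpm_atml_recv(). */
static uint32_t tpm_hdr_length(const uint8_t *hdr)
{
	return ((uint32_t)hdr[2] << 24) | ((uint32_t)hdr[3] << 16) |
	       ((uint32_t)hdr[4] << 8)  |  (uint32_t)hdr[5];
}

int main(void)
{
	const uint8_t hdr[10] = { 0x00, 0xC4, 0x00, 0x00, 0x00, 0x0A,
				  0x00, 0x00, 0x00, 0x00 };

	printf("length = %u\n", tpm_hdr_length(hdr)); /* prints 10 */
	return 0;
}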
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015, 2016 IBM Corporation * Copyright (C) 2016 Intel Corporation * * Author: Stefan Berger <[email protected]> * * Maintained by: <[email protected]> * * Device driver for vTPM (vTPM proxy driver) */ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/wait.h> #include <linux/miscdevice.h> #include <linux/vtpm_proxy.h> #include <linux/file.h> #include <linux/anon_inodes.h> #include <linux/poll.h> #include <linux/compat.h> #include "tpm.h" #define VTPM_PROXY_REQ_COMPLETE_FLAG BIT(0) struct proxy_dev { struct tpm_chip *chip; u32 flags; /* public API flags */ wait_queue_head_t wq; struct mutex buf_lock; /* protect buffer and flags */ long state; /* internal state */ #define STATE_OPENED_FLAG BIT(0) #define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */ #define STATE_REGISTERED_FLAG BIT(2) #define STATE_DRIVER_COMMAND BIT(3) /* sending a driver specific command */ size_t req_len; /* length of queued TPM request */ size_t resp_len; /* length of queued TPM response */ u8 buffer[TPM_BUFSIZE]; /* request/response buffer */ struct work_struct work; /* task that retrieves TPM timeouts */ }; /* all supported flags */ #define VTPM_PROXY_FLAGS_ALL (VTPM_PROXY_FLAG_TPM2) static struct workqueue_struct *workqueue; static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev); /* * Functions related to 'server side' */ /** * vtpm_proxy_fops_read - Read TPM commands on 'server side' * * @filp: file pointer * @buf: read buffer * @count: number of bytes to read * @off: offset * * Return: * Number of bytes read or negative error code */ static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off) { struct proxy_dev *proxy_dev = filp->private_data; size_t len; int sig, rc; sig = wait_event_interruptible(proxy_dev->wq, proxy_dev->req_len != 0 || !(proxy_dev->state & STATE_OPENED_FLAG)); if (sig) return -EINTR; mutex_lock(&proxy_dev->buf_lock); if (!(proxy_dev->state & STATE_OPENED_FLAG)) { mutex_unlock(&proxy_dev->buf_lock); return -EPIPE; } len = proxy_dev->req_len; if (count < len || len > sizeof(proxy_dev->buffer)) { mutex_unlock(&proxy_dev->buf_lock); pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n", count, len); return -EIO; } rc = copy_to_user(buf, proxy_dev->buffer, len); memset(proxy_dev->buffer, 0, len); proxy_dev->req_len = 0; if (!rc) proxy_dev->state |= STATE_WAIT_RESPONSE_FLAG; mutex_unlock(&proxy_dev->buf_lock); if (rc) return -EFAULT; return len; } /** * vtpm_proxy_fops_write - Write TPM responses on 'server side' * * @filp: file pointer * @buf: write buffer * @count: number of bytes to write * @off: offset * * Return: * Number of bytes read or negative error value */ static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf, size_t count, loff_t *off) { struct proxy_dev *proxy_dev = filp->private_data; mutex_lock(&proxy_dev->buf_lock); if (!(proxy_dev->state & STATE_OPENED_FLAG)) { mutex_unlock(&proxy_dev->buf_lock); return -EPIPE; } if (count > sizeof(proxy_dev->buffer) || !(proxy_dev->state & STATE_WAIT_RESPONSE_FLAG)) { mutex_unlock(&proxy_dev->buf_lock); return -EIO; } proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG; proxy_dev->req_len = 0; if (copy_from_user(proxy_dev->buffer, buf, count)) { mutex_unlock(&proxy_dev->buf_lock); return -EFAULT; } proxy_dev->resp_len = count; mutex_unlock(&proxy_dev->buf_lock); wake_up_interruptible(&proxy_dev->wq); return count; } /* * vtpm_proxy_fops_poll - Poll 
status on 'server side' * * @filp: file pointer * @wait: poll table * * Return: Poll flags */ static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait) { struct proxy_dev *proxy_dev = filp->private_data; __poll_t ret; poll_wait(filp, &proxy_dev->wq, wait); ret = EPOLLOUT; mutex_lock(&proxy_dev->buf_lock); if (proxy_dev->req_len) ret |= EPOLLIN | EPOLLRDNORM; if (!(proxy_dev->state & STATE_OPENED_FLAG)) ret |= EPOLLHUP; mutex_unlock(&proxy_dev->buf_lock); return ret; } /* * vtpm_proxy_fops_open - Open vTPM device on 'server side' * * @filp: file pointer * * Called when setting up the anonymous file descriptor */ static void vtpm_proxy_fops_open(struct file *filp) { struct proxy_dev *proxy_dev = filp->private_data; proxy_dev->state |= STATE_OPENED_FLAG; } /** * vtpm_proxy_fops_undo_open - counter-part to vtpm_fops_open * Call to undo vtpm_proxy_fops_open * *@proxy_dev: tpm proxy device */ static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev) { mutex_lock(&proxy_dev->buf_lock); proxy_dev->state &= ~STATE_OPENED_FLAG; mutex_unlock(&proxy_dev->buf_lock); /* no more TPM responses -- wake up anyone waiting for them */ wake_up_interruptible(&proxy_dev->wq); } /* * vtpm_proxy_fops_release - Close 'server side' * * @inode: inode * @filp: file pointer * Return: * Always returns 0. */ static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp) { struct proxy_dev *proxy_dev = filp->private_data; filp->private_data = NULL; vtpm_proxy_delete_device(proxy_dev); return 0; } static const struct file_operations vtpm_proxy_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = vtpm_proxy_fops_read, .write = vtpm_proxy_fops_write, .poll = vtpm_proxy_fops_poll, .release = vtpm_proxy_fops_release, }; /* * Functions invoked by the core TPM driver to send TPM commands to * 'server side' and receive responses from there. */ /* * Called when core TPM driver reads TPM responses from 'server side' * * @chip: tpm chip to use * @buf: receive buffer * @count: bytes to read * Return: * Number of TPM response bytes read, negative error value otherwise */ static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); size_t len; /* process gone ? */ mutex_lock(&proxy_dev->buf_lock); if (!(proxy_dev->state & STATE_OPENED_FLAG)) { mutex_unlock(&proxy_dev->buf_lock); return -EPIPE; } len = proxy_dev->resp_len; if (count < len) { dev_err(&chip->dev, "Invalid size in recv: count=%zd, resp_len=%zd\n", count, len); len = -EIO; goto out; } memcpy(buf, proxy_dev->buffer, len); proxy_dev->resp_len = 0; out: mutex_unlock(&proxy_dev->buf_lock); return len; } static int vtpm_proxy_is_driver_command(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_header *hdr = (struct tpm_header *)buf; if (count < sizeof(struct tpm_header)) return 0; if (chip->flags & TPM_CHIP_FLAG_TPM2) { switch (be32_to_cpu(hdr->ordinal)) { case TPM2_CC_SET_LOCALITY: return 1; } } else { switch (be32_to_cpu(hdr->ordinal)) { case TPM_ORD_SET_LOCALITY: return 1; } } return 0; } /* * Called when core TPM driver forwards TPM requests to 'server side'. * * @chip: tpm chip to use * @buf: send buffer * @count: bytes to send * * Return: * 0 in case of success, negative error value otherwise. 
*/ static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); if (count > sizeof(proxy_dev->buffer)) { dev_err(&chip->dev, "Invalid size in send: count=%zd, buffer size=%zd\n", count, sizeof(proxy_dev->buffer)); return -EIO; } if (!(proxy_dev->state & STATE_DRIVER_COMMAND) && vtpm_proxy_is_driver_command(chip, buf, count)) return -EFAULT; mutex_lock(&proxy_dev->buf_lock); if (!(proxy_dev->state & STATE_OPENED_FLAG)) { mutex_unlock(&proxy_dev->buf_lock); return -EPIPE; } proxy_dev->resp_len = 0; proxy_dev->req_len = count; memcpy(proxy_dev->buffer, buf, count); proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG; mutex_unlock(&proxy_dev->buf_lock); wake_up_interruptible(&proxy_dev->wq); return 0; } static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip) { /* not supported */ } static u8 vtpm_proxy_tpm_op_status(struct tpm_chip *chip) { struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); if (proxy_dev->resp_len) return VTPM_PROXY_REQ_COMPLETE_FLAG; return 0; } static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip *chip, u8 status) { struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); bool ret; mutex_lock(&proxy_dev->buf_lock); ret = !(proxy_dev->state & STATE_OPENED_FLAG); mutex_unlock(&proxy_dev->buf_lock); return ret; } static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality) { struct tpm_buf buf; int rc; const struct tpm_header *header; struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); if (chip->flags & TPM_CHIP_FLAG_TPM2) rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_SET_LOCALITY); else rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_SET_LOCALITY); if (rc) return rc; tpm_buf_append_u8(&buf, locality); proxy_dev->state |= STATE_DRIVER_COMMAND; rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to set locality"); proxy_dev->state &= ~STATE_DRIVER_COMMAND; if (rc < 0) { locality = rc; goto out; } header = (const struct tpm_header *)buf.data; rc = be32_to_cpu(header->return_code); if (rc) locality = -1; out: tpm_buf_destroy(&buf); return locality; } static const struct tpm_class_ops vtpm_proxy_tpm_ops = { .flags = TPM_OPS_AUTO_STARTUP, .recv = vtpm_proxy_tpm_op_recv, .send = vtpm_proxy_tpm_op_send, .cancel = vtpm_proxy_tpm_op_cancel, .status = vtpm_proxy_tpm_op_status, .req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG, .req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG, .req_canceled = vtpm_proxy_tpm_req_canceled, .request_locality = vtpm_proxy_request_locality, }; /* * Code related to the startup of the TPM 2 and startup of TPM 1.2 + * retrieval of timeouts and durations. */ static void vtpm_proxy_work(struct work_struct *work) { struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev, work); int rc; rc = tpm_chip_register(proxy_dev->chip); if (rc) vtpm_proxy_fops_undo_open(proxy_dev); else proxy_dev->state |= STATE_REGISTERED_FLAG; } /* * vtpm_proxy_work_stop: make sure the work has finished * * This function is useful when user space closed the fd * while the driver still determines timeouts. 
*/ static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev) { vtpm_proxy_fops_undo_open(proxy_dev); flush_work(&proxy_dev->work); } /* * vtpm_proxy_work_start: Schedule the work for TPM 1.2 & 2 initialization */ static inline void vtpm_proxy_work_start(struct proxy_dev *proxy_dev) { queue_work(workqueue, &proxy_dev->work); } /* * Code related to creation and deletion of device pairs */ static struct proxy_dev *vtpm_proxy_create_proxy_dev(void) { struct proxy_dev *proxy_dev; struct tpm_chip *chip; int err; proxy_dev = kzalloc(sizeof(*proxy_dev), GFP_KERNEL); if (proxy_dev == NULL) return ERR_PTR(-ENOMEM); init_waitqueue_head(&proxy_dev->wq); mutex_init(&proxy_dev->buf_lock); INIT_WORK(&proxy_dev->work, vtpm_proxy_work); chip = tpm_chip_alloc(NULL, &vtpm_proxy_tpm_ops); if (IS_ERR(chip)) { err = PTR_ERR(chip); goto err_proxy_dev_free; } dev_set_drvdata(&chip->dev, proxy_dev); proxy_dev->chip = chip; return proxy_dev; err_proxy_dev_free: kfree(proxy_dev); return ERR_PTR(err); } /* * Undo what has been done in vtpm_create_proxy_dev */ static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev) { put_device(&proxy_dev->chip->dev); /* frees chip */ kfree(proxy_dev); } /* * Create a /dev/tpm%d and 'server side' file descriptor pair * * Return: * Returns file pointer on success, an error value otherwise */ static struct file *vtpm_proxy_create_device( struct vtpm_proxy_new_dev *vtpm_new_dev) { struct proxy_dev *proxy_dev; int rc, fd; struct file *file; if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL) return ERR_PTR(-EOPNOTSUPP); proxy_dev = vtpm_proxy_create_proxy_dev(); if (IS_ERR(proxy_dev)) return ERR_CAST(proxy_dev); proxy_dev->flags = vtpm_new_dev->flags; /* setup an anonymous file for the server-side */ fd = get_unused_fd_flags(O_RDWR); if (fd < 0) { rc = fd; goto err_delete_proxy_dev; } file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev, O_RDWR); if (IS_ERR(file)) { rc = PTR_ERR(file); goto err_put_unused_fd; } /* from now on we can unwind with put_unused_fd() + fput() */ /* simulate an open() on the server side */ vtpm_proxy_fops_open(file); if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2) proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2; vtpm_proxy_work_start(proxy_dev); vtpm_new_dev->fd = fd; vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt); vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt); vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num; return file; err_put_unused_fd: put_unused_fd(fd); err_delete_proxy_dev: vtpm_proxy_delete_proxy_dev(proxy_dev); return ERR_PTR(rc); } /* * Counter part to vtpm_create_device. */ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev) { vtpm_proxy_work_stop(proxy_dev); /* * A client may hold the 'ops' lock, so let it know that the server * side shuts down before we try to grab the 'ops' lock when * unregistering the chip. */ vtpm_proxy_fops_undo_open(proxy_dev); if (proxy_dev->state & STATE_REGISTERED_FLAG) tpm_chip_unregister(proxy_dev->chip); vtpm_proxy_delete_proxy_dev(proxy_dev); } /* * Code related to the control device /dev/vtpmx */ /** * vtpmx_ioc_new_dev - handler for the %VTPM_PROXY_IOC_NEW_DEV ioctl * @file: /dev/vtpmx * @ioctl: the ioctl number * @arg: pointer to the struct vtpmx_proxy_new_dev * * Creates an anonymous file that is used by the process acting as a TPM to * communicate with the client processes. The function will also add a new TPM * device through which data is proxied to this TPM acting process. 
The caller * will be provided with a file descriptor to communicate with the clients and * major and minor numbers for the TPM device. */ static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; struct vtpm_proxy_new_dev __user *vtpm_new_dev_p; struct vtpm_proxy_new_dev vtpm_new_dev; struct file *vtpm_file; if (!capable(CAP_SYS_ADMIN)) return -EPERM; vtpm_new_dev_p = argp; if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p, sizeof(vtpm_new_dev))) return -EFAULT; vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev); if (IS_ERR(vtpm_file)) return PTR_ERR(vtpm_file); if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev, sizeof(vtpm_new_dev))) { put_unused_fd(vtpm_new_dev.fd); fput(vtpm_file); return -EFAULT; } fd_install(vtpm_new_dev.fd, vtpm_file); return 0; } /* * vtpmx_fops_ioctl: ioctl on /dev/vtpmx * * Return: * Returns 0 on success, a negative error code otherwise. */ static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl, unsigned long arg) { switch (ioctl) { case VTPM_PROXY_IOC_NEW_DEV: return vtpmx_ioc_new_dev(f, ioctl, arg); default: return -ENOIOCTLCMD; } } static const struct file_operations vtpmx_fops = { .owner = THIS_MODULE, .unlocked_ioctl = vtpmx_fops_ioctl, .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; static struct miscdevice vtpmx_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "vtpmx", .fops = &vtpmx_fops, }; static int __init vtpm_module_init(void) { int rc; workqueue = create_workqueue("tpm-vtpm"); if (!workqueue) { pr_err("couldn't create workqueue\n"); return -ENOMEM; } rc = misc_register(&vtpmx_miscdev); if (rc) { pr_err("couldn't create vtpmx device\n"); destroy_workqueue(workqueue); } return rc; } static void __exit vtpm_module_exit(void) { destroy_workqueue(workqueue); misc_deregister(&vtpmx_miscdev); } module_init(vtpm_module_init); module_exit(vtpm_module_exit); MODULE_AUTHOR("Stefan Berger ([email protected])"); MODULE_DESCRIPTION("vTPM Driver"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_vtpm_proxy.c
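The file above is the kernel half of the proxy; the 'server side' is a userspace TPM emulator driving the fd returned by VTPM_PROXY_IOC_NEW_DEV. A minimal sketch under the assumption that the UAPI header <linux/vtpm_proxy.h> is installed. It answers a single command with a canned failure; a real emulator must loop here, because the kernel immediately sends startup and self-test commands from the registration work item.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vtpm_proxy.h>

int main(void)
{
	struct vtpm_proxy_new_dev new_dev = { .flags = VTPM_PROXY_FLAG_TPM2 };
	unsigned char buf[4096];
	ssize_t n;
	int vtpmx;

	vtpmx = open("/dev/vtpmx", O_RDWR);
	if (vtpmx < 0) {
		perror("open /dev/vtpmx");
		return 1;
	}
	if (ioctl(vtpmx, VTPM_PROXY_IOC_NEW_DEV, &new_dev) < 0) {
		perror("VTPM_PROXY_IOC_NEW_DEV");
		return 1;
	}
	printf("created /dev/tpm%u (major %u, minor %u)\n",
	       new_dev.tpm_num, new_dev.major, new_dev.minor);

	/* Serve one request: a full emulator would loop and parse each command. */
	n = read(new_dev.fd, buf, sizeof(buf));
	if (n >= 10) {
		/* TPM2 response: tag 0x8001, length 10, rc 0x0101 (TPM_RC_FAILURE) */
		unsigned char rsp[10] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0A,
					  0x00, 0x00, 0x01, 0x01 };
		(void)write(new_dev.fd, rsp, sizeof(rsp));
	}
	close(new_dev.fd); /* releasing the fd tears the /dev/tpm%d device down */
	close(vtpmx);
	return 0;
}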
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) Microsoft Corporation * * Implements a firmware TPM as described here: * https://www.microsoft.com/en-us/research/publication/ftpm-software-implementation-tpm-chip/ * * A reference implementation is available here: * https://github.com/microsoft/ms-tpm-20-ref/tree/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM */ #include <linux/acpi.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/tee_drv.h> #include <linux/tpm.h> #include <linux/uuid.h> #include "tpm.h" #include "tpm_ftpm_tee.h" /* * TA_FTPM_UUID: BC50D971-D4C9-42C4-82CB-343FB7F37896 * * Randomly generated, and must correspond to the GUID on the TA side. * Defined here in the reference implementation: * https://github.com/microsoft/ms-tpm-20-ref/blob/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM/include/fTPM.h#L42 */ static const uuid_t ftpm_ta_uuid = UUID_INIT(0xBC50D971, 0xD4C9, 0x42C4, 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96); /** * ftpm_tee_tpm_op_recv() - retrieve fTPM response. * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h. * @buf: the buffer to store data. * @count: the number of bytes to read. * * Return: * In case of success the number of bytes received. * On failure, -errno. */ static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent); size_t len; len = pvt_data->resp_len; if (count < len) { dev_err(&chip->dev, "%s: Invalid size in recv: count=%zd, resp_len=%zd\n", __func__, count, len); return -EIO; } memcpy(buf, pvt_data->resp_buf, len); pvt_data->resp_len = 0; return len; } /** * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory. * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h * @buf: the buffer to send. * @len: the number of bytes to send. * * Return: * In case of success, returns 0. 
* On failure, -errno */ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len) { struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent); size_t resp_len; int rc; u8 *temp_buf; struct tpm_header *resp_header; struct tee_ioctl_invoke_arg transceive_args; struct tee_param command_params[4]; struct tee_shm *shm = pvt_data->shm; if (len > MAX_COMMAND_SIZE) { dev_err(&chip->dev, "%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n", __func__, len); return -EIO; } memset(&transceive_args, 0, sizeof(transceive_args)); memset(command_params, 0, sizeof(command_params)); pvt_data->resp_len = 0; /* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */ transceive_args = (struct tee_ioctl_invoke_arg) { .func = FTPM_OPTEE_TA_SUBMIT_COMMAND, .session = pvt_data->session, .num_params = 4, }; /* Fill FTPM_OPTEE_TA_SUBMIT_COMMAND parameters */ command_params[0] = (struct tee_param) { .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT, .u.memref = { .shm = shm, .size = len, .shm_offs = 0, }, }; temp_buf = tee_shm_get_va(shm, 0); if (IS_ERR(temp_buf)) { dev_err(&chip->dev, "%s: tee_shm_get_va failed for transmit\n", __func__); return PTR_ERR(temp_buf); } memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE)); memcpy(temp_buf, buf, len); command_params[1] = (struct tee_param) { .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT, .u.memref = { .shm = shm, .size = MAX_RESPONSE_SIZE, .shm_offs = MAX_COMMAND_SIZE, }, }; rc = tee_client_invoke_func(pvt_data->ctx, &transceive_args, command_params); if ((rc < 0) || (transceive_args.ret != 0)) { dev_err(&chip->dev, "%s: SUBMIT_COMMAND invoke error: 0x%x\n", __func__, transceive_args.ret); return (rc < 0) ? rc : transceive_args.ret; } temp_buf = tee_shm_get_va(shm, command_params[1].u.memref.shm_offs); if (IS_ERR(temp_buf)) { dev_err(&chip->dev, "%s: tee_shm_get_va failed for receive\n", __func__); return PTR_ERR(temp_buf); } resp_header = (struct tpm_header *)temp_buf; resp_len = be32_to_cpu(resp_header->length); /* sanity check resp_len */ if (resp_len < TPM_HEADER_SIZE) { dev_err(&chip->dev, "%s: tpm response header too small\n", __func__); return -EIO; } if (resp_len > MAX_RESPONSE_SIZE) { dev_err(&chip->dev, "%s: resp_len=%zd exceeds MAX_RESPONSE_SIZE\n", __func__, resp_len); return -EIO; } /* sanity checks look good, cache the response */ memcpy(pvt_data->resp_buf, temp_buf, resp_len); pvt_data->resp_len = resp_len; return 0; } static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip) { /* not supported */ } static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip) { return 0; } static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status) { return false; } static const struct tpm_class_ops ftpm_tee_tpm_ops = { .flags = TPM_OPS_AUTO_STARTUP, .recv = ftpm_tee_tpm_op_recv, .send = ftpm_tee_tpm_op_send, .cancel = ftpm_tee_tpm_op_cancel, .status = ftpm_tee_tpm_op_status, .req_complete_mask = 0, .req_complete_val = 0, .req_canceled = ftpm_tee_tpm_req_canceled, }; /* * Check whether this driver supports the fTPM TA in the TEE instance * represented by the params (ver/data) to this function. */ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data) { /* * Currently this driver only support GP Complaint OPTEE based fTPM TA */ if ((ver->impl_id == TEE_IMPL_ID_OPTEE) && (ver->gen_caps & TEE_GEN_CAP_GP)) return 1; else return 0; } /** * ftpm_tee_probe() - initialize the fTPM * @pdev: the platform_device description. * * Return: * On success, 0. On failure, -errno. 
*/ static int ftpm_tee_probe(struct device *dev) { int rc; struct tpm_chip *chip; struct ftpm_tee_private *pvt_data = NULL; struct tee_ioctl_open_session_arg sess_arg; pvt_data = devm_kzalloc(dev, sizeof(struct ftpm_tee_private), GFP_KERNEL); if (!pvt_data) return -ENOMEM; dev_set_drvdata(dev, pvt_data); /* Open context with TEE driver */ pvt_data->ctx = tee_client_open_context(NULL, ftpm_tee_match, NULL, NULL); if (IS_ERR(pvt_data->ctx)) { if (PTR_ERR(pvt_data->ctx) == -ENOENT) return -EPROBE_DEFER; dev_err(dev, "%s: tee_client_open_context failed\n", __func__); return PTR_ERR(pvt_data->ctx); } /* Open a session with fTPM TA */ memset(&sess_arg, 0, sizeof(sess_arg)); export_uuid(sess_arg.uuid, &ftpm_ta_uuid); sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC; sess_arg.num_params = 0; rc = tee_client_open_session(pvt_data->ctx, &sess_arg, NULL); if ((rc < 0) || (sess_arg.ret != 0)) { dev_err(dev, "%s: tee_client_open_session failed, err=%x\n", __func__, sess_arg.ret); rc = -EINVAL; goto out_tee_session; } pvt_data->session = sess_arg.session; /* Allocate dynamic shared memory with fTPM TA */ pvt_data->shm = tee_shm_alloc_kernel_buf(pvt_data->ctx, MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE); if (IS_ERR(pvt_data->shm)) { dev_err(dev, "%s: tee_shm_alloc_kernel_buf failed\n", __func__); rc = -ENOMEM; goto out_shm_alloc; } /* Allocate new struct tpm_chip instance */ chip = tpm_chip_alloc(dev, &ftpm_tee_tpm_ops); if (IS_ERR(chip)) { dev_err(dev, "%s: tpm_chip_alloc failed\n", __func__); rc = PTR_ERR(chip); goto out_chip_alloc; } pvt_data->chip = chip; pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2; /* Create a character device for the fTPM */ rc = tpm_chip_register(pvt_data->chip); if (rc) { dev_err(dev, "%s: tpm_chip_register failed with rc=%d\n", __func__, rc); goto out_chip; } return 0; out_chip: put_device(&pvt_data->chip->dev); out_chip_alloc: tee_shm_free(pvt_data->shm); out_shm_alloc: tee_client_close_session(pvt_data->ctx, pvt_data->session); out_tee_session: tee_client_close_context(pvt_data->ctx); return rc; } static int ftpm_plat_tee_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; return ftpm_tee_probe(dev); } /** * ftpm_tee_remove() - remove the TPM device * @pdev: the platform_device description. * * Return: * 0 always. */ static int ftpm_tee_remove(struct device *dev) { struct ftpm_tee_private *pvt_data = dev_get_drvdata(dev); /* Release the chip */ tpm_chip_unregister(pvt_data->chip); /* frees chip */ put_device(&pvt_data->chip->dev); /* Free the shared memory pool */ tee_shm_free(pvt_data->shm); /* close the existing session with fTPM TA*/ tee_client_close_session(pvt_data->ctx, pvt_data->session); /* close the context with TEE driver */ tee_client_close_context(pvt_data->ctx); /* memory allocated with devm_kzalloc() is freed automatically */ return 0; } static void ftpm_plat_tee_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; ftpm_tee_remove(dev); } /** * ftpm_tee_shutdown() - shutdown the TPM device * @pdev: the platform_device description. 
*/ static void ftpm_plat_tee_shutdown(struct platform_device *pdev) { struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev); tee_shm_free(pvt_data->shm); tee_client_close_session(pvt_data->ctx, pvt_data->session); tee_client_close_context(pvt_data->ctx); } static const struct of_device_id of_ftpm_tee_ids[] = { { .compatible = "microsoft,ftpm" }, { } }; MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids); static struct platform_driver ftpm_tee_plat_driver = { .driver = { .name = "ftpm-tee", .of_match_table = of_match_ptr(of_ftpm_tee_ids), }, .shutdown = ftpm_plat_tee_shutdown, .probe = ftpm_plat_tee_probe, .remove_new = ftpm_plat_tee_remove, }; /* UUID of the fTPM TA */ static const struct tee_client_device_id optee_ftpm_id_table[] = { {UUID_INIT(0xbc50d971, 0xd4c9, 0x42c4, 0x82, 0xcb, 0x34, 0x3f, 0xb7, 0xf3, 0x78, 0x96)}, {} }; MODULE_DEVICE_TABLE(tee, optee_ftpm_id_table); static struct tee_client_driver ftpm_tee_driver = { .id_table = optee_ftpm_id_table, .driver = { .name = "optee-ftpm", .bus = &tee_bus_type, .probe = ftpm_tee_probe, .remove = ftpm_tee_remove, }, }; static int __init ftpm_mod_init(void) { int rc; rc = platform_driver_register(&ftpm_tee_plat_driver); if (rc) return rc; rc = driver_register(&ftpm_tee_driver.driver); if (rc) { platform_driver_unregister(&ftpm_tee_plat_driver); return rc; } return 0; } static void __exit ftpm_mod_exit(void) { platform_driver_unregister(&ftpm_tee_plat_driver); driver_unregister(&ftpm_tee_driver.driver); } module_init(ftpm_mod_init); module_exit(ftpm_mod_exit); MODULE_AUTHOR("Thirupathaiah Annapureddy <[email protected]>"); MODULE_DESCRIPTION("TPM Driver for fTPM TA in TEE"); MODULE_LICENSE("GPL v2");
linux-master
drivers/char/tpm/tpm_ftpm_tee.c
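ftpm_tee_tpm_op_send() above splits the TEE shared buffer into a command region at offset 0 and a response region at offset MAX_COMMAND_SIZE, then sanity-checks the length advertised in the response header before caching it. A standalone sketch of that check; the two size constants live in tpm_ftpm_tee.h, which is not in this dump, so the values here are assumptions.

#include <stdint.h>
#include <stdio.h>

#define MAX_COMMAND_SIZE  4096 /* assumption: value from tpm_ftpm_tee.h */
#define MAX_RESPONSE_SIZE 4096 /* assumption: value from tpm_ftpm_tee.h */
#define TPM_HEADER_SIZE   10

/* Validate the big-endian length field of the response region, rejecting
 * anything the driver would refuse with -EIO. Returns the length or -1. */
static int check_resp_len(const uint8_t *shm)
{
	const uint8_t *rsp = shm + MAX_COMMAND_SIZE; /* response region */
	uint32_t len = ((uint32_t)rsp[2] << 24) | ((uint32_t)rsp[3] << 16) |
		       ((uint32_t)rsp[4] << 8)  |  (uint32_t)rsp[5];

	if (len < TPM_HEADER_SIZE || len > MAX_RESPONSE_SIZE)
		return -1;
	return (int)len;
}

int main(void)
{
	static uint8_t shm[MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE];
	uint8_t *rsp = shm + MAX_COMMAND_SIZE;

	rsp[5] = TPM_HEADER_SIZE; /* header-only response, length = 10 */
	printf("resp_len = %d\n", check_resp_len(shm)); /* prints 10 */
	return 0;
}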
// SPDX-License-Identifier: GPL-2.0-only /* * Implementation of the Xen vTPM device frontend * * Author: Daniel De Graaf <[email protected]> */ #include <linux/errno.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/freezer.h> #include <xen/xen.h> #include <xen/events.h> #include <xen/interface/io/tpmif.h> #include <xen/grant_table.h> #include <xen/xenbus.h> #include <xen/page.h> #include "tpm.h" #include <xen/platform_pci.h> struct tpm_private { struct tpm_chip *chip; struct xenbus_device *dev; struct vtpm_shared_page *shr; unsigned int evtchn; int ring_ref; domid_t backend_id; int irq; wait_queue_head_t read_queue; }; enum status_bits { VTPM_STATUS_RUNNING = 0x1, VTPM_STATUS_IDLE = 0x2, VTPM_STATUS_RESULT = 0x4, VTPM_STATUS_CANCELED = 0x8, }; static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel, bool *canceled) { u8 status = chip->ops->status(chip); *canceled = false; if ((status & mask) == mask) return true; if (check_cancel && chip->ops->req_canceled(chip, status)) { *canceled = true; return true; } return false; } static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, wait_queue_head_t *queue, bool check_cancel) { unsigned long stop; long rc; u8 status; bool canceled = false; /* check current status */ status = chip->ops->status(chip); if ((status & mask) == mask) return 0; stop = jiffies + timeout; if (chip->flags & TPM_CHIP_FLAG_IRQ) { again: timeout = stop - jiffies; if ((long)timeout <= 0) return -ETIME; rc = wait_event_interruptible_timeout(*queue, wait_for_tpm_stat_cond(chip, mask, check_cancel, &canceled), timeout); if (rc > 0) { if (canceled) return -ECANCELED; return 0; } if (rc == -ERESTARTSYS && freezing(current)) { clear_thread_flag(TIF_SIGPENDING); goto again; } } else { do { tpm_msleep(TPM_TIMEOUT); status = chip->ops->status(chip); if ((status & mask) == mask) return 0; } while (time_before(jiffies, stop)); } return -ETIME; } static u8 vtpm_status(struct tpm_chip *chip) { struct tpm_private *priv = dev_get_drvdata(&chip->dev); switch (priv->shr->state) { case VTPM_STATE_IDLE: return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED; case VTPM_STATE_FINISH: return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT; case VTPM_STATE_SUBMIT: case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */ return VTPM_STATUS_RUNNING; default: return 0; } } static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status) { return status & VTPM_STATUS_CANCELED; } static void vtpm_cancel(struct tpm_chip *chip) { struct tpm_private *priv = dev_get_drvdata(&chip->dev); priv->shr->state = VTPM_STATE_CANCEL; wmb(); notify_remote_via_evtchn(priv->evtchn); } static size_t shr_data_offset(struct vtpm_shared_page *shr) { return struct_size(shr, extra_pages, shr->nr_extra_pages); } static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_private *priv = dev_get_drvdata(&chip->dev); struct vtpm_shared_page *shr = priv->shr; size_t offset = shr_data_offset(shr); u32 ordinal; unsigned long duration; if (offset > PAGE_SIZE) return -EINVAL; if (offset + count > PAGE_SIZE) return -EINVAL; /* Wait for completion of any existing command or cancellation */ if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c, &priv->read_queue, true) < 0) { vtpm_cancel(chip); return -ETIME; } memcpy(offset + (u8 *)shr, buf, count); shr->length = count; barrier(); shr->state = VTPM_STATE_SUBMIT; wmb(); notify_remote_via_evtchn(priv->evtchn); ordinal = be32_to_cpu(((struct tpm_header *)buf)->ordinal); duration = 
tpm_calc_ordinal_duration(chip, ordinal); if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration, &priv->read_queue, true) < 0) { /* got a signal or timeout, try to cancel */ vtpm_cancel(chip); return -ETIME; } return 0; } static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_private *priv = dev_get_drvdata(&chip->dev); struct vtpm_shared_page *shr = priv->shr; size_t offset = shr_data_offset(shr); size_t length = shr->length; if (shr->state == VTPM_STATE_IDLE) return -ECANCELED; /* In theory the wait at the end of _send makes this one unnecessary */ if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c, &priv->read_queue, true) < 0) { vtpm_cancel(chip); return -ETIME; } if (offset > PAGE_SIZE) return -EIO; if (offset + length > PAGE_SIZE) length = PAGE_SIZE - offset; if (length > count) length = count; memcpy(buf, offset + (u8 *)shr, length); return length; } static const struct tpm_class_ops tpm_vtpm = { .status = vtpm_status, .recv = vtpm_recv, .send = vtpm_send, .cancel = vtpm_cancel, .req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT, .req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT, .req_canceled = vtpm_req_canceled, }; static irqreturn_t tpmif_interrupt(int dummy, void *dev_id) { struct tpm_private *priv = dev_id; switch (priv->shr->state) { case VTPM_STATE_IDLE: case VTPM_STATE_FINISH: wake_up_interruptible(&priv->read_queue); break; case VTPM_STATE_SUBMIT: case VTPM_STATE_CANCEL: default: break; } return IRQ_HANDLED; } static int setup_chip(struct device *dev, struct tpm_private *priv) { struct tpm_chip *chip; chip = tpmm_chip_alloc(dev, &tpm_vtpm); if (IS_ERR(chip)) return PTR_ERR(chip); init_waitqueue_head(&priv->read_queue); priv->chip = chip; dev_set_drvdata(&chip->dev, priv); return 0; } /* caller must clean up in case of errors */ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv) { struct xenbus_transaction xbt; const char *message = NULL; int rv; rv = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&priv->shr, 1, &priv->ring_ref); if (rv < 0) return rv; rv = xenbus_alloc_evtchn(dev, &priv->evtchn); if (rv) return rv; rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0, "tpmif", priv); if (rv <= 0) { xenbus_dev_fatal(dev, rv, "allocating TPM irq"); return rv; } priv->irq = rv; again: rv = xenbus_transaction_start(&xbt); if (rv) { xenbus_dev_fatal(dev, rv, "starting transaction"); return rv; } rv = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", priv->ring_ref); if (rv) { message = "writing ring-ref"; goto abort_transaction; } rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", priv->evtchn); if (rv) { message = "writing event-channel"; goto abort_transaction; } rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1"); if (rv) { message = "writing feature-protocol-v2"; goto abort_transaction; } rv = xenbus_transaction_end(xbt, 0); if (rv == -EAGAIN) goto again; if (rv) { xenbus_dev_fatal(dev, rv, "completing transaction"); return rv; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (message) xenbus_dev_error(dev, rv, "%s", message); return rv; } static void ring_free(struct tpm_private *priv) { if (!priv) return; xenbus_teardown_ring((void **)&priv->shr, 1, &priv->ring_ref); if (priv->irq) unbind_from_irqhandler(priv->irq, priv); kfree(priv); } static int tpmfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct tpm_private *priv; int rv; priv = kzalloc(sizeof(*priv), 
GFP_KERNEL); if (!priv) { xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure"); return -ENOMEM; } rv = setup_chip(&dev->dev, priv); if (rv) { kfree(priv); return rv; } rv = setup_ring(dev, priv); if (rv) { ring_free(priv); return rv; } tpm_get_timeouts(priv->chip); return tpm_chip_register(priv->chip); } static void tpmfront_remove(struct xenbus_device *dev) { struct tpm_chip *chip = dev_get_drvdata(&dev->dev); struct tpm_private *priv = dev_get_drvdata(&chip->dev); tpm_chip_unregister(chip); ring_free(priv); dev_set_drvdata(&chip->dev, NULL); } static int tpmfront_resume(struct xenbus_device *dev) { /* A suspend/resume/migrate will interrupt a vTPM anyway */ tpmfront_remove(dev); return tpmfront_probe(dev, NULL); } static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { switch (backend_state) { case XenbusStateInitialised: case XenbusStateConnected: if (dev->state == XenbusStateConnected) break; if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2", 0)) { xenbus_dev_fatal(dev, -EINVAL, "vTPM protocol 2 required"); return; } xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosing: case XenbusStateClosed: device_unregister(&dev->dev); xenbus_frontend_closed(dev); break; default: break; } } static const struct xenbus_device_id tpmfront_ids[] = { { "vtpm" }, { "" } }; MODULE_ALIAS("xen:vtpm"); static struct xenbus_driver tpmfront_driver = { .ids = tpmfront_ids, .probe = tpmfront_probe, .remove = tpmfront_remove, .resume = tpmfront_resume, .otherend_changed = backend_changed, }; static int __init xen_tpmfront_init(void) { if (!xen_domain()) return -ENODEV; if (!xen_has_pv_devices()) return -ENODEV; return xenbus_register_frontend(&tpmfront_driver); } module_init(xen_tpmfront_init); static void __exit xen_tpmfront_exit(void) { xenbus_unregister_driver(&tpmfront_driver); } module_exit(xen_tpmfront_exit); MODULE_AUTHOR("Daniel De Graaf <[email protected]>"); MODULE_DESCRIPTION("Xen vTPM Driver"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/xen-tpmfront.c
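shr_data_offset() above uses struct_size() to find where command bytes begin: after the shared page header and its flexible array of extra grant references. A userspace equivalent of that computation; the struct layout here is an assumption modeled on xen/interface/io/tpmif.h, which is not part of this dump.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vtpm_shared_page {
	uint32_t length;         /* request/response length in bytes */
	uint8_t  state;          /* VTPM_STATE_* */
	uint8_t  locality;
	uint8_t  pad;
	uint8_t  nr_extra_pages;
	uint32_t extra_pages[];  /* grant refs; flexible array member */
};

/* Equivalent to the kernel's struct_size(shr, extra_pages, nr_extra_pages). */
static size_t shr_data_offset(const struct vtpm_shared_page *shr)
{
	return sizeof(*shr) +
	       shr->nr_extra_pages * sizeof(shr->extra_pages[0]);
}

int main(void)
{
	struct vtpm_shared_page shr = { .nr_extra_pages = 2 };

	printf("data offset = %zu\n", shr_data_offset(&shr)); /* 8 + 8 = 16 */
	return 0;
}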
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012-2014 Intel Corporation * * Authors: * Xiaoyan Zhang <[email protected]> * Jiang Liu <[email protected]> * Jarkko Sakkinen <[email protected]> * * Maintained by: <[email protected]> * * This file contains implementation of the sysfs interface for PPI. */ #include <linux/acpi.h> #include "tpm.h" #define TPM_PPI_REVISION_ID_1 1 #define TPM_PPI_REVISION_ID_2 2 #define TPM_PPI_FN_VERSION 1 #define TPM_PPI_FN_SUBREQ 2 #define TPM_PPI_FN_GETREQ 3 #define TPM_PPI_FN_GETACT 4 #define TPM_PPI_FN_GETRSP 5 #define TPM_PPI_FN_SUBREQ2 7 #define TPM_PPI_FN_GETOPR 8 #define PPI_TPM_REQ_MAX 101 /* PPI 1.3 for TPM 2 */ #define PPI_VS_REQ_START 128 #define PPI_VS_REQ_END 255 static const guid_t tpm_ppi_guid = GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4, 0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53); static bool tpm_ppi_req_has_parameter(u64 req) { return req == 23; } static inline union acpi_object * tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type, union acpi_object *argv4, u64 rev) { BUG_ON(!ppi_handle); return acpi_evaluate_dsm_typed(ppi_handle, &tpm_ppi_guid, rev, func, argv4, type); } static ssize_t tpm_show_ppi_version(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version); } static ssize_t tpm_show_ppi_request(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t size = -EINVAL; union acpi_object *obj; struct tpm_chip *chip = to_tpm_chip(dev); u64 rev = TPM_PPI_REVISION_ID_2; u64 req; if (strcmp(chip->ppi_version, "1.2") < 0) rev = TPM_PPI_REVISION_ID_1; obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETREQ, ACPI_TYPE_PACKAGE, NULL, rev); if (!obj) return -ENXIO; /* * output.pointer should be of package type, including two integers. * The first is function return code, 0 means success and 1 means * error. The second is pending TPM operation requested by the OS, 0 * means none and >0 means operation value. */ if (obj->package.count == 3 && obj->package.elements[0].type == ACPI_TYPE_INTEGER && obj->package.elements[1].type == ACPI_TYPE_INTEGER && obj->package.elements[2].type == ACPI_TYPE_INTEGER) { if (obj->package.elements[0].integer.value) size = -EFAULT; else { req = obj->package.elements[1].integer.value; if (tpm_ppi_req_has_parameter(req)) size = scnprintf(buf, PAGE_SIZE, "%llu %llu\n", req, obj->package.elements[2].integer.value); else size = scnprintf(buf, PAGE_SIZE, "%llu\n", req); } } else if (obj->package.count == 2 && obj->package.elements[0].type == ACPI_TYPE_INTEGER && obj->package.elements[1].type == ACPI_TYPE_INTEGER) { if (obj->package.elements[0].integer.value) size = -EFAULT; else size = scnprintf(buf, PAGE_SIZE, "%llu\n", obj->package.elements[1].integer.value); } ACPI_FREE(obj); return size; } static ssize_t tpm_store_ppi_request(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u32 req; u64 ret; int func = TPM_PPI_FN_SUBREQ; union acpi_object *obj, tmp[2]; union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(2, tmp); struct tpm_chip *chip = to_tpm_chip(dev); u64 rev = TPM_PPI_REVISION_ID_1; /* * the function to submit TPM operation request to pre-os environment * is updated with function index from SUBREQ to SUBREQ2 since PPI * version 1.1 */ if (acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_SUBREQ2)) func = TPM_PPI_FN_SUBREQ2; /* * PPI spec defines params[3].type as ACPI_TYPE_PACKAGE. 
Some BIOS * accept buffer/string/integer type, but some BIOS accept buffer/ * string/package type. For PPI version 1.0 and 1.1, use buffer type * for compatibility, and use package type since 1.2 according to spec. */ if (strcmp(chip->ppi_version, "1.3") == 0) { if (sscanf(buf, "%llu %llu", &tmp[0].integer.value, &tmp[1].integer.value) != 2) goto ppi12; rev = TPM_PPI_REVISION_ID_2; tmp[0].type = ACPI_TYPE_INTEGER; tmp[1].type = ACPI_TYPE_INTEGER; } else if (strcmp(chip->ppi_version, "1.2") < 0) { if (sscanf(buf, "%d", &req) != 1) return -EINVAL; argv4.type = ACPI_TYPE_BUFFER; argv4.buffer.length = sizeof(req); argv4.buffer.pointer = (u8 *)&req; } else { ppi12: argv4.package.count = 1; tmp[0].type = ACPI_TYPE_INTEGER; if (sscanf(buf, "%llu", &tmp[0].integer.value) != 1) return -EINVAL; } obj = tpm_eval_dsm(chip->acpi_dev_handle, func, ACPI_TYPE_INTEGER, &argv4, rev); if (!obj) { return -ENXIO; } else { ret = obj->integer.value; ACPI_FREE(obj); } if (ret == 0) return (acpi_status)count; return (ret == 1) ? -EPERM : -EFAULT; } static ssize_t tpm_show_ppi_transition_action(struct device *dev, struct device_attribute *attr, char *buf) { u32 ret; acpi_status status; union acpi_object *obj = NULL; union acpi_object tmp = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = 0, .buffer.pointer = NULL }; struct tpm_chip *chip = to_tpm_chip(dev); static char *info[] = { "None", "Shutdown", "Reboot", "OS Vendor-specific", "Error", }; /* * PPI spec defines params[3].type as empty package, but some platforms * (e.g. Capella with PPI 1.0) need integer/string/buffer type, so for * compatibility, define params[3].type as buffer, if PPI version < 1.2 */ if (strcmp(chip->ppi_version, "1.2") < 0) obj = &tmp; obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETACT, ACPI_TYPE_INTEGER, obj, TPM_PPI_REVISION_ID_1); if (!obj) { return -ENXIO; } else { ret = obj->integer.value; ACPI_FREE(obj); } if (ret < ARRAY_SIZE(info) - 1) status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]); else status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ARRAY_SIZE(info)-1]); return status; } static ssize_t tpm_show_ppi_response(struct device *dev, struct device_attribute *attr, char *buf) { acpi_status status = -EINVAL; union acpi_object *obj, *ret_obj; u64 req, res; struct tpm_chip *chip = to_tpm_chip(dev); obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETRSP, ACPI_TYPE_PACKAGE, NULL, TPM_PPI_REVISION_ID_1); if (!obj) return -ENXIO; /* * parameter output.pointer should be of package type, including * 3 integers. The first means function return code, the second means * most recent TPM operation request, and the last means response to * the most recent TPM operation request. Only if the first is 0, and * the second integer is not 0, the response makes sense. 
*/ ret_obj = obj->package.elements; if (obj->package.count < 3 || ret_obj[0].type != ACPI_TYPE_INTEGER || ret_obj[1].type != ACPI_TYPE_INTEGER || ret_obj[2].type != ACPI_TYPE_INTEGER) goto cleanup; if (ret_obj[0].integer.value) { status = -EFAULT; goto cleanup; } req = ret_obj[1].integer.value; res = ret_obj[2].integer.value; if (req) { if (res == 0) status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, "0: Success"); else if (res == 0xFFFFFFF0) status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, "0xFFFFFFF0: User Abort"); else if (res == 0xFFFFFFF1) status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req, "0xFFFFFFF1: BIOS Failure"); else if (res >= 1 && res <= 0x00000FFF) status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n", req, res, "Corresponding TPM error"); else status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n", req, res, "Error"); } else { status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n", req, "No Recent Request"); } cleanup: ACPI_FREE(obj); return status; } static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start, u32 end) { int i; u32 ret; char *str = buf; union acpi_object *obj, tmp; union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp); static char *info[] = { "Not implemented", "BIOS only", "Blocked for OS by BIOS", "User required", "User not required", }; if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_GETOPR)) return -EPERM; tmp.integer.type = ACPI_TYPE_INTEGER; for (i = start; i <= end; i++) { tmp.integer.value = i; obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR, ACPI_TYPE_INTEGER, &argv, TPM_PPI_REVISION_ID_1); if (!obj) { return -ENOMEM; } else { ret = obj->integer.value; ACPI_FREE(obj); } if (ret > 0 && ret < ARRAY_SIZE(info)) str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n", i, ret, info[ret]); } return str - buf; } static ssize_t tpm_show_ppi_tcg_operations(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); return show_ppi_operations(chip->acpi_dev_handle, buf, 0, PPI_TPM_REQ_MAX); } static ssize_t tpm_show_ppi_vs_operations(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START, PPI_VS_REQ_END); } static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL); static DEVICE_ATTR(request, S_IRUGO | S_IWUSR | S_IWGRP, tpm_show_ppi_request, tpm_store_ppi_request); static DEVICE_ATTR(transition_action, S_IRUGO, tpm_show_ppi_transition_action, NULL); static DEVICE_ATTR(response, S_IRUGO, tpm_show_ppi_response, NULL); static DEVICE_ATTR(tcg_operations, S_IRUGO, tpm_show_ppi_tcg_operations, NULL); static DEVICE_ATTR(vs_operations, S_IRUGO, tpm_show_ppi_vs_operations, NULL); static struct attribute *ppi_attrs[] = { &dev_attr_version.attr, &dev_attr_request.attr, &dev_attr_transition_action.attr, &dev_attr_response.attr, &dev_attr_tcg_operations.attr, &dev_attr_vs_operations.attr, NULL, }; static const struct attribute_group ppi_attr_grp = { .name = "ppi", .attrs = ppi_attrs }; void tpm_add_ppi(struct tpm_chip *chip) { union acpi_object *obj; if (!chip->acpi_dev_handle) return; if (!acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_VERSION)) return; /* Cache PPI version string. 
*/ obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1, TPM_PPI_FN_VERSION, NULL, ACPI_TYPE_STRING); if (obj) { strscpy(chip->ppi_version, obj->string.pointer, sizeof(chip->ppi_version)); ACPI_FREE(obj); } chip->groups[chip->groups_cnt++] = &ppi_attr_grp; }
linux-master
drivers/char/tpm/tpm_ppi.c
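Editor's note: an illustrative, self-contained sketch (not part of the kernel tree) of how the three-integer PPI response decoded by tpm_show_ppi_response() above maps onto the strings it prints; ppi_result_str() and ppi_print_response() are invented names for the example.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper mirroring the res-code ranges handled in
 * tpm_show_ppi_response(); the names here are illustrative only. */
static const char *ppi_result_str(uint64_t res)
{
	if (res == 0)
		return "Success";
	if (res == 0xFFFFFFF0)
		return "User Abort";
	if (res == 0xFFFFFFF1)
		return "BIOS Failure";
	if (res >= 1 && res <= 0x00000FFF)
		return "Corresponding TPM error";
	return "Error";
}

static void ppi_print_response(uint64_t func_ret, uint64_t req, uint64_t res)
{
	if (func_ret) {		/* first integer non-zero: response invalid */
		puts("invalid PPI response");
		return;
	}
	if (!req) {		/* second integer zero: nothing was requested */
		puts("0: No Recent Request");
		return;
	}
	printf("%llu: %s\n", (unsigned long long)req, ppi_result_str(res));
}

int main(void)
{
	ppi_print_response(0, 5, 0);		/* request 5 succeeded */
	ppi_print_response(0, 5, 0xFFFFFFF0);	/* request 5 aborted by the user */
	return 0;
}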
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Linaro Ltd. * * This device driver implements MMIO TPM on SynQuacer Platform. */ #include <linux/acpi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/kernel.h> #include "tpm.h" #include "tpm_tis_core.h" /* * irq > 0 means: use irq $irq; * irq = 0 means: autoprobe for an irq; * irq = -1 means: no irq support */ struct tpm_tis_synquacer_info { struct resource res; int irq; }; struct tpm_tis_synquacer_phy { struct tpm_tis_data priv; void __iomem *iobase; }; static inline struct tpm_tis_synquacer_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *data) { return container_of(data, struct tpm_tis_synquacer_phy, priv); } static int tpm_tis_synquacer_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result, enum tpm_tis_io_mode io_mode) { struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data); switch (io_mode) { case TPM_TIS_PHYS_8: while (len--) *result++ = ioread8(phy->iobase + addr); break; case TPM_TIS_PHYS_16: result[1] = ioread8(phy->iobase + addr + 1); result[0] = ioread8(phy->iobase + addr); break; case TPM_TIS_PHYS_32: result[3] = ioread8(phy->iobase + addr + 3); result[2] = ioread8(phy->iobase + addr + 2); result[1] = ioread8(phy->iobase + addr + 1); result[0] = ioread8(phy->iobase + addr); break; } return 0; } static int tpm_tis_synquacer_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, const u8 *value, enum tpm_tis_io_mode io_mode) { struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data); switch (io_mode) { case TPM_TIS_PHYS_8: while (len--) iowrite8(*value++, phy->iobase + addr); break; case TPM_TIS_PHYS_16: return -EINVAL; case TPM_TIS_PHYS_32: /* * Due to a limitation of the SPI controller on SynQuacer, * 16/32-bit accesses must be done byte-wise, in descending order.
*/ iowrite8(value[3], phy->iobase + addr + 3); iowrite8(value[2], phy->iobase + addr + 2); iowrite8(value[1], phy->iobase + addr + 1); iowrite8(value[0], phy->iobase + addr); break; } return 0; } static const struct tpm_tis_phy_ops tpm_tcg_bw = { .read_bytes = tpm_tis_synquacer_read_bytes, .write_bytes = tpm_tis_synquacer_write_bytes, }; static int tpm_tis_synquacer_init(struct device *dev, struct tpm_tis_synquacer_info *tpm_info) { struct tpm_tis_synquacer_phy *phy; phy = devm_kzalloc(dev, sizeof(struct tpm_tis_synquacer_phy), GFP_KERNEL); if (phy == NULL) return -ENOMEM; phy->iobase = devm_ioremap_resource(dev, &tpm_info->res); if (IS_ERR(phy->iobase)) return PTR_ERR(phy->iobase); return tpm_tis_core_init(dev, &phy->priv, tpm_info->irq, &tpm_tcg_bw, ACPI_HANDLE(dev)); } static SIMPLE_DEV_PM_OPS(tpm_tis_synquacer_pm, tpm_pm_suspend, tpm_tis_resume); static int tpm_tis_synquacer_probe(struct platform_device *pdev) { struct tpm_tis_synquacer_info tpm_info = {}; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no memory resource defined\n"); return -ENODEV; } tpm_info.res = *res; tpm_info.irq = -1; return tpm_tis_synquacer_init(&pdev->dev, &tpm_info); } static void tpm_tis_synquacer_remove(struct platform_device *pdev) { struct tpm_chip *chip = dev_get_drvdata(&pdev->dev); tpm_chip_unregister(chip); tpm_tis_remove(chip); } #ifdef CONFIG_OF static const struct of_device_id tis_synquacer_of_platform_match[] = { {.compatible = "socionext,synquacer-tpm-mmio"}, {}, }; MODULE_DEVICE_TABLE(of, tis_synquacer_of_platform_match); #endif #ifdef CONFIG_ACPI static const struct acpi_device_id tpm_synquacer_acpi_tbl[] = { { "SCX0009" }, {}, }; MODULE_DEVICE_TABLE(acpi, tpm_synquacer_acpi_tbl); #endif static struct platform_driver tis_synquacer_drv = { .probe = tpm_tis_synquacer_probe, .remove_new = tpm_tis_synquacer_remove, .driver = { .name = "tpm_tis_synquacer", .pm = &tpm_tis_synquacer_pm, .of_match_table = of_match_ptr(tis_synquacer_of_platform_match), .acpi_match_table = ACPI_PTR(tpm_synquacer_acpi_tbl), }, }; module_platform_driver(tis_synquacer_drv); MODULE_DESCRIPTION("TPM MMIO Driver for Socionext SynQuacer platform"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_tis_synquacer.c
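Editor's note: an illustrative sketch (not kernel code) of the descending byte-wise 32-bit store used above to work around the SynQuacer SPI limitation, applied to a plain byte buffer to show it yields the same layout as a single 32-bit store; write32_bytewise_desc() is a hypothetical stand-in for the iowrite8() sequence.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Emulate the descending byte-wise store performed by
 * tpm_tis_synquacer_write_bytes() for TPM_TIS_PHYS_32. */
static void write32_bytewise_desc(uint8_t *base, uint32_t addr, const uint8_t *value)
{
	base[addr + 3] = value[3];
	base[addr + 2] = value[2];
	base[addr + 1] = value[1];
	base[addr + 0] = value[0];
}

int main(void)
{
	uint8_t mmio[8] = { 0 };
	uint32_t v = 0x11223344;
	uint8_t bytes[4];

	memcpy(bytes, &v, sizeof(v));	/* the value as the CPU lays it out */
	write32_bytewise_desc(mmio, 4, bytes);
	/* Byte-wise descending order leaves the same layout a single
	 * 32-bit store would have produced. */
	assert(memcmp(mmio + 4, &v, 4) == 0);
	return 0;
}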
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 Intel Corporation * * Authors: * Jarkko Sakkinen <[email protected]> * * Maintained by: <[email protected]> * * This device driver implements the TPM interface as defined in * the TCG CRB 2.0 TPM specification. */ #include <linux/acpi.h> #include <linux/highmem.h> #include <linux/rculist.h> #include <linux/module.h> #include <linux/pm_runtime.h> #ifdef CONFIG_ARM64 #include <linux/arm-smccc.h> #endif #include "tpm.h" #define ACPI_SIG_TPM2 "TPM2" #define TPM_CRB_MAX_RESOURCES 3 static const guid_t crb_acpi_start_guid = GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714, 0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4); enum crb_defaults { CRB_ACPI_START_REVISION_ID = 1, CRB_ACPI_START_INDEX = 1, }; enum crb_loc_ctrl { CRB_LOC_CTRL_REQUEST_ACCESS = BIT(0), CRB_LOC_CTRL_RELINQUISH = BIT(1), }; enum crb_loc_state { CRB_LOC_STATE_LOC_ASSIGNED = BIT(1), CRB_LOC_STATE_TPM_REG_VALID_STS = BIT(7), }; enum crb_ctrl_req { CRB_CTRL_REQ_CMD_READY = BIT(0), CRB_CTRL_REQ_GO_IDLE = BIT(1), }; enum crb_ctrl_sts { CRB_CTRL_STS_ERROR = BIT(0), CRB_CTRL_STS_TPM_IDLE = BIT(1), }; enum crb_start { CRB_START_INVOKE = BIT(0), }; enum crb_cancel { CRB_CANCEL_INVOKE = BIT(0), }; struct crb_regs_head { u32 loc_state; u32 reserved1; u32 loc_ctrl; u32 loc_sts; u8 reserved2[32]; u64 intf_id; u64 ctrl_ext; } __packed; struct crb_regs_tail { u32 ctrl_req; u32 ctrl_sts; u32 ctrl_cancel; u32 ctrl_start; u32 ctrl_int_enable; u32 ctrl_int_sts; u32 ctrl_cmd_size; u32 ctrl_cmd_pa_low; u32 ctrl_cmd_pa_high; u32 ctrl_rsp_size; u64 ctrl_rsp_pa; } __packed; enum crb_status { CRB_DRV_STS_COMPLETE = BIT(0), }; struct crb_priv { u32 sm; const char *hid; struct crb_regs_head __iomem *regs_h; struct crb_regs_tail __iomem *regs_t; u8 __iomem *cmd; u8 __iomem *rsp; u32 cmd_size; u32 smc_func_id; u32 __iomem *pluton_start_addr; u32 __iomem *pluton_reply_addr; }; struct tpm2_crb_smc { u32 interrupt; u8 interrupt_flags; u8 op_flags; u16 reserved2; u32 smc_func_id; }; struct tpm2_crb_pluton { u64 start_addr; u64 reply_addr; }; static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, unsigned long timeout) { ktime_t start; ktime_t stop; start = ktime_get(); stop = ktime_add(start, ms_to_ktime(timeout)); do { if ((ioread32(reg) & mask) == value) return true; usleep_range(50, 100); } while (ktime_before(ktime_get(), stop)); return ((ioread32(reg) & mask) == value); } static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete) { if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) return 0; if (!crb_wait_for_reg_32(priv->pluton_reply_addr, ~0, 1, TPM2_TIMEOUT_C)) return -ETIME; iowrite32(1, priv->pluton_start_addr); if (wait_for_complete == false) return 0; if (!crb_wait_for_reg_32(priv->pluton_start_addr, 0xffffffff, 0, 200)) return -ETIME; return 0; } /** * __crb_go_idle - request tpm crb device to go the idle state * * @dev: crb device * @priv: crb private data * * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ * The device should respond within TIMEOUT_C by clearing the bit. * Anyhow, we do not wait here as a consequent CMD_READY request * will be handled correctly even if idle was not completed. * * The function does nothing for devices with ACPI-start method * or SMC-start method. 
* * Return: 0 always */ static int __crb_go_idle(struct device *dev, struct crb_priv *priv) { int rc; if ((priv->sm == ACPI_TPM2_START_METHOD) || (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) || (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC)) return 0; iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req); rc = crb_try_pluton_doorbell(priv, true); if (rc) return rc; if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req, CRB_CTRL_REQ_GO_IDLE/* mask */, 0, /* value */ TPM2_TIMEOUT_C)) { dev_warn(dev, "goIdle timed out\n"); return -ETIME; } return 0; } static int crb_go_idle(struct tpm_chip *chip) { struct device *dev = &chip->dev; struct crb_priv *priv = dev_get_drvdata(dev); return __crb_go_idle(dev, priv); } /** * __crb_cmd_ready - request tpm crb device to enter ready state * * @dev: crb device * @priv: crb private data * * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ * and poll until the device acknowledges it by clearing the bit. * The device should respond within TIMEOUT_C. * * The function does nothing for devices with ACPI-start method * or SMC-start method. * * Return: 0 on success, -ETIME on timeout. */ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv) { int rc; if ((priv->sm == ACPI_TPM2_START_METHOD) || (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) || (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC)) return 0; iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req); rc = crb_try_pluton_doorbell(priv, true); if (rc) return rc; if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req, CRB_CTRL_REQ_CMD_READY /* mask */, 0, /* value */ TPM2_TIMEOUT_C)) { dev_warn(dev, "cmdReady timed out\n"); return -ETIME; } return 0; } static int crb_cmd_ready(struct tpm_chip *chip) { struct device *dev = &chip->dev; struct crb_priv *priv = dev_get_drvdata(dev); return __crb_cmd_ready(dev, priv); } static int __crb_request_locality(struct device *dev, struct crb_priv *priv, int loc) { u32 value = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS; if (!priv->regs_h) return 0; iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl); if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value, TPM2_TIMEOUT_C)) { dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); return -ETIME; } return 0; } static int crb_request_locality(struct tpm_chip *chip, int loc) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); return __crb_request_locality(&chip->dev, priv, loc); } static int __crb_relinquish_locality(struct device *dev, struct crb_priv *priv, int loc) { u32 mask = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS; u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS; if (!priv->regs_h) return 0; iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl); if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value, TPM2_TIMEOUT_C)) { dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n"); return -ETIME; } return 0; } static int crb_relinquish_locality(struct tpm_chip *chip, int loc) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); return __crb_relinquish_locality(&chip->dev, priv, loc); } static u8 crb_status(struct tpm_chip *chip) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); u8 sts = 0; if ((ioread32(&priv->regs_t->ctrl_start) & CRB_START_INVOKE) != CRB_START_INVOKE) sts |= CRB_DRV_STS_COMPLETE; return sts; } static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); unsigned int expected; /* A sanity check that the upper layer wants to get at
least the header * as that is the minimum size for any TPM response. */ if (count < TPM_HEADER_SIZE) return -EIO; /* If this bit is set, according to the spec, the TPM is in * unrecoverable condition. */ if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR) return -EIO; /* Read the first 8 bytes in order to get the length of the response. * We read exactly a quad word in order to make sure that the remaining * reads will be aligned. */ memcpy_fromio(buf, priv->rsp, 8); expected = be32_to_cpup((__be32 *)&buf[2]); if (expected > count || expected < TPM_HEADER_SIZE) return -EIO; memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8); return expected; } static int crb_do_acpi_start(struct tpm_chip *chip) { union acpi_object *obj; int rc; obj = acpi_evaluate_dsm(chip->acpi_dev_handle, &crb_acpi_start_guid, CRB_ACPI_START_REVISION_ID, CRB_ACPI_START_INDEX, NULL); if (!obj) return -ENXIO; rc = obj->integer.value == 0 ? 0 : -ENXIO; ACPI_FREE(obj); return rc; } #ifdef CONFIG_ARM64 /* * This is a TPM Command Response Buffer start method that invokes a * Secure Monitor Call to request the firmware to execute or cancel * a TPM 2.0 command. */ static int tpm_crb_smc_start(struct device *dev, unsigned long func_id) { struct arm_smccc_res res; arm_smccc_smc(func_id, 0, 0, 0, 0, 0, 0, 0, &res); if (res.a0 != 0) { dev_err(dev, FW_BUG "tpm_crb_smc_start() returns res.a0 = 0x%lx\n", res.a0); return -EIO; } return 0; } #else static int tpm_crb_smc_start(struct device *dev, unsigned long func_id) { dev_err(dev, FW_BUG "tpm_crb: incorrect start method\n"); return -EINVAL; } #endif static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); int rc = 0; /* Zero the cancel register so that the next command will not get * canceled. */ iowrite32(0, &priv->regs_t->ctrl_cancel); if (len > priv->cmd_size) { dev_err(&chip->dev, "invalid command count value %zd %d\n", len, priv->cmd_size); return -E2BIG; } /* Seems to be necessary for every command */ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) __crb_cmd_ready(&chip->dev, priv); memcpy_toio(priv->cmd, buf, len); /* Make sure that cmd is populated before issuing start. */ wmb(); /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs * reports only ACPI start but in practice seems to also require * CRB start, hence invoking the CRB start method if hid == MSFT0101.
*/ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || (priv->sm == ACPI_TPM2_MEMORY_MAPPED) || (!strcmp(priv->hid, "MSFT0101"))) iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start); if ((priv->sm == ACPI_TPM2_START_METHOD) || (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) rc = crb_do_acpi_start(chip); if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) { iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start); rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id); } if (rc) return rc; return crb_try_pluton_doorbell(priv, false); } static void crb_cancel(struct tpm_chip *chip) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel); if (((priv->sm == ACPI_TPM2_START_METHOD) || (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) && crb_do_acpi_start(chip)) dev_err(&chip->dev, "ACPI Start failed\n"); } static bool crb_req_canceled(struct tpm_chip *chip, u8 status) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); u32 cancel = ioread32(&priv->regs_t->ctrl_cancel); return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE; } static const struct tpm_class_ops tpm_crb = { .flags = TPM_OPS_AUTO_STARTUP, .status = crb_status, .recv = crb_recv, .send = crb_send, .cancel = crb_cancel, .req_canceled = crb_req_canceled, .go_idle = crb_go_idle, .cmd_ready = crb_cmd_ready, .request_locality = crb_request_locality, .relinquish_locality = crb_relinquish_locality, .req_complete_mask = CRB_DRV_STS_COMPLETE, .req_complete_val = CRB_DRV_STS_COMPLETE, }; static int crb_check_resource(struct acpi_resource *ares, void *data) { struct resource *iores_array = data; struct resource_win win; struct resource *res = &(win.res); int i; if (acpi_dev_resource_memory(ares, res) || acpi_dev_resource_address_space(ares, &win)) { for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) { if (resource_type(iores_array + i) != IORESOURCE_MEM) { iores_array[i] = *res; iores_array[i].name = NULL; break; } } } return 1; } static void __iomem *crb_map_res(struct device *dev, struct resource *iores, void __iomem **iobase_ptr, u64 start, u32 size) { struct resource new_res = { .start = start, .end = start + size - 1, .flags = IORESOURCE_MEM, }; /* Detect a 64 bit address on a 32 bit system */ if (start != new_res.start) return IOMEM_ERR_PTR(-EINVAL); if (!iores) return devm_ioremap_resource(dev, &new_res); if (!*iobase_ptr) { *iobase_ptr = devm_ioremap_resource(dev, iores); if (IS_ERR(*iobase_ptr)) return *iobase_ptr; } return *iobase_ptr + (new_res.start - iores->start); } /* * Work around broken BIOSs that return inconsistent values from the ACPI * region vs the registers. Trust the ACPI region. Such broken systems * probably cannot send large TPM commands since the buffer will be truncated. */ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res, u64 start, u64 size) { if (io_res->start > start || io_res->end < start) return size; if (start + size - 1 <= io_res->end) return size; dev_err(dev, FW_BUG "ACPI region does not cover the entire command/response buffer. 
%pr vs %llx %llx\n", io_res, start, size); return io_res->end - start + 1; } static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, struct acpi_table_tpm2 *buf) { struct list_head acpi_resource_list; struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} }; void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL}; struct device *dev = &device->dev; struct resource *iores; void __iomem **iobase_ptr; int i; u32 pa_high, pa_low; u64 cmd_pa; u32 cmd_size; __le64 __rsp_pa; u64 rsp_pa; u32 rsp_size; int ret; /* * Pluton sometimes does not define ACPI memory regions. * Mapping is then done in crb_map_pluton */ if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) { INIT_LIST_HEAD(&acpi_resource_list); ret = acpi_dev_get_resources(device, &acpi_resource_list, crb_check_resource, iores_array); if (ret < 0) return ret; acpi_dev_free_resource_list(&acpi_resource_list); if (resource_type(iores_array) != IORESOURCE_MEM) { dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n"); return -EINVAL; } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) == IORESOURCE_MEM) { dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n"); memset(iores_array + TPM_CRB_MAX_RESOURCES, 0, sizeof(*iores_array)); iores_array[TPM_CRB_MAX_RESOURCES].flags = 0; } } iores = NULL; iobase_ptr = NULL; for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) { if (buf->control_address >= iores_array[i].start && buf->control_address + sizeof(struct crb_regs_tail) - 1 <= iores_array[i].end) { iores = iores_array + i; iobase_ptr = iobase_array + i; break; } } priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address, sizeof(struct crb_regs_tail)); if (IS_ERR(priv->regs_t)) return PTR_ERR(priv->regs_t); /* The ACPI IO region starts at the head area and continues to include * the control area, as one nice sane region except for some older * stuff that puts the control area outside the ACPI IO region. */ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) { if (iores && buf->control_address == iores->start + sizeof(*priv->regs_h)) priv->regs_h = *iobase_ptr; else dev_warn(dev, FW_BUG "Bad ACPI memory layout"); } ret = __crb_request_locality(dev, priv, 0); if (ret) return ret; /* * PTT HW bug w/a: wake up the device to access * possibly not retained registers. 
*/ ret = __crb_cmd_ready(dev, priv); if (ret) goto out_relinquish_locality; pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high); pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low); cmd_pa = ((u64)pa_high << 32) | pa_low; cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size); iores = NULL; iobase_ptr = NULL; for (i = 0; iores_array[i].end; ++i) { if (cmd_pa >= iores_array[i].start && cmd_pa <= iores_array[i].end) { iores = iores_array + i; iobase_ptr = iobase_array + i; break; } } if (iores) cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size); dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n", pa_high, pa_low, cmd_size); priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size); if (IS_ERR(priv->cmd)) { ret = PTR_ERR(priv->cmd); goto out; } memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8); rsp_pa = le64_to_cpu(__rsp_pa); rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size); iores = NULL; iobase_ptr = NULL; for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) { if (rsp_pa >= iores_array[i].start && rsp_pa <= iores_array[i].end) { iores = iores_array + i; iobase_ptr = iobase_array + i; break; } } if (iores) rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size); if (cmd_pa != rsp_pa) { priv->rsp = crb_map_res(dev, iores, iobase_ptr, rsp_pa, rsp_size); ret = PTR_ERR_OR_ZERO(priv->rsp); goto out; } /* According to the PTP specification, overlapping command and response * buffer sizes must be identical. */ if (cmd_size != rsp_size) { dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical"); ret = -EINVAL; goto out; } priv->rsp = priv->cmd; out: if (!ret) priv->cmd_size = cmd_size; __crb_go_idle(dev, priv); out_relinquish_locality: __crb_relinquish_locality(dev, priv, 0); return ret; } static int crb_map_pluton(struct device *dev, struct crb_priv *priv, struct acpi_table_tpm2 *buf, struct tpm2_crb_pluton *crb_pluton) { priv->pluton_start_addr = crb_map_res(dev, NULL, NULL, crb_pluton->start_addr, 4); if (IS_ERR(priv->pluton_start_addr)) return PTR_ERR(priv->pluton_start_addr); priv->pluton_reply_addr = crb_map_res(dev, NULL, NULL, crb_pluton->reply_addr, 4); if (IS_ERR(priv->pluton_reply_addr)) return PTR_ERR(priv->pluton_reply_addr); return 0; } static int crb_acpi_add(struct acpi_device *device) { struct acpi_table_tpm2 *buf; struct crb_priv *priv; struct tpm_chip *chip; struct device *dev = &device->dev; struct tpm2_crb_smc *crb_smc; struct tpm2_crb_pluton *crb_pluton; acpi_status status; u32 sm; int rc; status = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **) &buf); if (ACPI_FAILURE(status) || buf->header.length < sizeof(*buf)) { dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n"); return -EINVAL; } /* Should the FIFO driver handle this? 
*/ sm = buf->start_method; if (sm == ACPI_TPM2_MEMORY_MAPPED) { rc = -ENODEV; goto out; } priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL); if (!priv) { rc = -ENOMEM; goto out; } if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) { if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) { dev_err(dev, FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n", buf->header.length, ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC); rc = -EINVAL; goto out; } crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf)); priv->smc_func_id = crb_smc->smc_func_id; } if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) { if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) { dev_err(dev, FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n", buf->header.length, ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON); rc = -EINVAL; goto out; } crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf)); rc = crb_map_pluton(dev, priv, buf, crb_pluton); if (rc) goto out; } priv->sm = sm; priv->hid = acpi_device_hid(device); rc = crb_map_io(device, priv, buf); if (rc) goto out; chip = tpmm_chip_alloc(dev, &tpm_crb); if (IS_ERR(chip)) { rc = PTR_ERR(chip); goto out; } dev_set_drvdata(&chip->dev, priv); chip->acpi_dev_handle = device->handle; chip->flags = TPM_CHIP_FLAG_TPM2; rc = tpm_chip_bootstrap(chip); if (rc) goto out; #ifdef CONFIG_X86 /* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) { dev_info(dev, "Disabling hwrng\n"); chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED; } #endif /* CONFIG_X86 */ rc = tpm_chip_register(chip); out: acpi_put_table((struct acpi_table_header *)buf); return rc; } static void crb_acpi_remove(struct acpi_device *device) { struct device *dev = &device->dev; struct tpm_chip *chip = dev_get_drvdata(dev); tpm_chip_unregister(chip); } static const struct dev_pm_ops crb_pm = { SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume) }; static const struct acpi_device_id crb_device_ids[] = { {"MSFT0101", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, crb_device_ids); static struct acpi_driver crb_acpi_driver = { .name = "tpm_crb", .ids = crb_device_ids, .ops = { .add = crb_acpi_add, .remove = crb_acpi_remove, }, .drv = { .pm = &crb_pm, }, }; module_acpi_driver(crb_acpi_driver); MODULE_AUTHOR("Jarkko Sakkinen <[email protected]>"); MODULE_DESCRIPTION("TPM2 Driver"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_crb.c
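Editor's note: an illustrative sketch (not kernel code) of the crb_wait_for_reg_32() pattern used throughout tpm_crb.c above: poll a masked register until it matches the expected value or a deadline passes, then check once more after the deadline; here a volatile variable stands in for the MMIO register, and wait_for_reg_32()/now_ms() are invented names for the example.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Same shape as crb_wait_for_reg_32(): poll, sleep briefly between
 * reads (the driver uses usleep_range(50, 100)), and give the register
 * one final read after the timeout expires. */
static bool wait_for_reg_32(volatile uint32_t *reg, uint32_t mask,
			    uint32_t value, unsigned long timeout_ms)
{
	uint64_t stop = now_ms() + timeout_ms;
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 75 * 1000 };

	do {
		if ((*reg & mask) == value)
			return true;
		nanosleep(&delay, NULL);
	} while (now_ms() < stop);

	return (*reg & mask) == value;	/* one last chance, as in the driver */
}

int main(void)
{
	volatile uint32_t reg = 0x02;	/* pretend the bit is already clear/set */

	return wait_for_reg_32(&reg, 0x02, 0x02, 10) ? 0 : 1;
}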
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2004 IBM Corporation * * Authors: * Leendert van Doorn <[email protected]> * Dave Safford <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * * Maintained by: <[email protected]> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org */ #include <linux/platform_device.h> #include <linux/slab.h> #include "tpm.h" /* National definitions */ enum tpm_nsc_addr{ TPM_NSC_IRQ = 0x07, TPM_NSC_BASE0_HI = 0x60, TPM_NSC_BASE0_LO = 0x61, TPM_NSC_BASE1_HI = 0x62, TPM_NSC_BASE1_LO = 0x63 }; enum tpm_nsc_index { NSC_LDN_INDEX = 0x07, NSC_SID_INDEX = 0x20, NSC_LDC_INDEX = 0x30, NSC_DIO_INDEX = 0x60, NSC_CIO_INDEX = 0x62, NSC_IRQ_INDEX = 0x70, NSC_ITS_INDEX = 0x71 }; enum tpm_nsc_status_loc { NSC_STATUS = 0x01, NSC_COMMAND = 0x01, NSC_DATA = 0x00 }; /* status bits */ enum tpm_nsc_status { NSC_STATUS_OBF = 0x01, /* output buffer full */ NSC_STATUS_IBF = 0x02, /* input buffer full */ NSC_STATUS_F0 = 0x04, /* F0 */ NSC_STATUS_A2 = 0x08, /* A2 */ NSC_STATUS_RDY = 0x10, /* ready to receive command */ NSC_STATUS_IBR = 0x20 /* ready to receive data */ }; /* command bits */ enum tpm_nsc_cmd_mode { NSC_COMMAND_NORMAL = 0x01, /* normal mode */ NSC_COMMAND_EOC = 0x03, NSC_COMMAND_CANCEL = 0x22 }; struct tpm_nsc_priv { unsigned long base; }; /* * Wait for a certain status to appear */ static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data) { struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); unsigned long stop; /* status immediately available check */ *data = inb(priv->base + NSC_STATUS); if ((*data & mask) == val) return 0; /* wait for status */ stop = jiffies + 10 * HZ; do { msleep(TPM_TIMEOUT); *data = inb(priv->base + 1); if ((*data & mask) == val) return 0; } while (time_before(jiffies, stop)); return -EBUSY; } static int nsc_wait_for_ready(struct tpm_chip *chip) { struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); int status; unsigned long stop; /* status immediately available check */ status = inb(priv->base + NSC_STATUS); if (status & NSC_STATUS_OBF) status = inb(priv->base + NSC_DATA); if (status & NSC_STATUS_RDY) return 0; /* wait for status */ stop = jiffies + 100; do { msleep(TPM_TIMEOUT); status = inb(priv->base + NSC_STATUS); if (status & NSC_STATUS_OBF) status = inb(priv->base + NSC_DATA); if (status & NSC_STATUS_RDY) return 0; } while (time_before(jiffies, stop)); dev_info(&chip->dev, "wait for ready failed\n"); return -EBUSY; } static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) { struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); u8 *buffer = buf; u8 data, *p; u32 size; __be32 *native_size; if (count < 6) return -EIO; if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) { dev_err(&chip->dev, "F0 timeout\n"); return -EIO; } data = inb(priv->base + NSC_DATA); if (data != NSC_COMMAND_NORMAL) { dev_err(&chip->dev, "not in normal mode (0x%x)\n", data); return -EIO; } /* read the whole packet */ for (p = buffer; p < &buffer[count]; p++) { if (wait_for_stat (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) { dev_err(&chip->dev, "OBF timeout (while reading data)\n"); return -EIO; } if (data & NSC_STATUS_F0) break; *p = inb(priv->base + NSC_DATA); } if ((data & NSC_STATUS_F0) == 0 && (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) { dev_err(&chip->dev, "F0 not set\n"); return -EIO; } data = inb(priv->base + NSC_DATA); if (data != NSC_COMMAND_EOC) { dev_err(&chip->dev, 
"expected end of command(0x%x)\n", data); return -EIO; } native_size = (__force __be32 *) (buf + 2); size = be32_to_cpu(*native_size); if (count < size) return -EIO; return size; } static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) { struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); u8 data; int i; /* * If we hit the chip with back to back commands it locks up * and never set IBF. Hitting it with this "hammer" seems to * fix it. Not sure why this is needed, we followed the flow * chart in the manual to the letter. */ outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND); if (nsc_wait_for_ready(chip) != 0) return -EIO; if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { dev_err(&chip->dev, "IBF timeout\n"); return -EIO; } outb(NSC_COMMAND_NORMAL, priv->base + NSC_COMMAND); if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) { dev_err(&chip->dev, "IBR timeout\n"); return -EIO; } for (i = 0; i < count; i++) { if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { dev_err(&chip->dev, "IBF timeout (while writing data)\n"); return -EIO; } outb(buf[i], priv->base + NSC_DATA); } if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { dev_err(&chip->dev, "IBF timeout\n"); return -EIO; } outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND); return 0; } static void tpm_nsc_cancel(struct tpm_chip *chip) { struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND); } static u8 tpm_nsc_status(struct tpm_chip *chip) { struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); return inb(priv->base + NSC_STATUS); } static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status) { return (status == NSC_STATUS_RDY); } static const struct tpm_class_ops tpm_nsc = { .recv = tpm_nsc_recv, .send = tpm_nsc_send, .cancel = tpm_nsc_cancel, .status = tpm_nsc_status, .req_complete_mask = NSC_STATUS_OBF, .req_complete_val = NSC_STATUS_OBF, .req_canceled = tpm_nsc_req_canceled, }; static struct platform_device *pdev = NULL; static void tpm_nsc_remove(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev); tpm_chip_unregister(chip); release_region(priv->base, 2); } static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume); static struct platform_driver nsc_drv = { .driver = { .name = "tpm_nsc", .pm = &tpm_nsc_pm, }, }; static inline int tpm_read_index(int base, int index) { outb(index, base); return inb(base+1) & 0xFF; } static inline void tpm_write_index(int base, int index, int value) { outb(index, base); outb(value & 0xFF, base+1); } static int __init init_nsc(void) { int rc = 0; int lo, hi, err; int nscAddrBase = TPM_ADDR; struct tpm_chip *chip; unsigned long base; struct tpm_nsc_priv *priv; /* verify that it is a National part (SID) */ if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) { nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)| (tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE); if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6) return -ENODEV; } err = platform_driver_register(&nsc_drv); if (err) return err; hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI); lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO); base = (hi<<8) | lo; /* enable the DPM module */ tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01); pdev = platform_device_alloc("tpm_nscl0", -1); if (!pdev) { rc = -ENOMEM; goto err_unreg_drv; } pdev->num_resources = 0; pdev->dev.driver = &nsc_drv.driver; pdev->dev.release = tpm_nsc_remove; if ((rc = 
platform_device_add(pdev)) < 0) goto err_put_dev; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { rc = -ENOMEM; goto err_del_dev; } priv->base = base; if (request_region(base, 2, "tpm_nsc0") == NULL ) { rc = -EBUSY; goto err_del_dev; } chip = tpmm_chip_alloc(&pdev->dev, &tpm_nsc); if (IS_ERR(chip)) { rc = -ENODEV; goto err_rel_reg; } dev_set_drvdata(&chip->dev, priv); rc = tpm_chip_register(chip); if (rc) goto err_rel_reg; dev_dbg(&pdev->dev, "NSC TPM detected\n"); dev_dbg(&pdev->dev, "NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n", tpm_read_index(nscAddrBase,0x07), tpm_read_index(nscAddrBase,0x20), tpm_read_index(nscAddrBase,0x27)); dev_dbg(&pdev->dev, "NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n", tpm_read_index(nscAddrBase,0x21), tpm_read_index(nscAddrBase,0x25), tpm_read_index(nscAddrBase,0x26), tpm_read_index(nscAddrBase,0x28)); dev_dbg(&pdev->dev, "NSC IO Base0 0x%x\n", (tpm_read_index(nscAddrBase,0x60) << 8) | tpm_read_index(nscAddrBase,0x61)); dev_dbg(&pdev->dev, "NSC IO Base1 0x%x\n", (tpm_read_index(nscAddrBase,0x62) << 8) | tpm_read_index(nscAddrBase,0x63)); dev_dbg(&pdev->dev, "NSC Interrupt number and wakeup 0x%x\n", tpm_read_index(nscAddrBase,0x70)); dev_dbg(&pdev->dev, "NSC IRQ type select 0x%x\n", tpm_read_index(nscAddrBase,0x71)); dev_dbg(&pdev->dev, "NSC DMA channel select0 0x%x, select1 0x%x\n", tpm_read_index(nscAddrBase,0x74), tpm_read_index(nscAddrBase,0x75)); dev_dbg(&pdev->dev, "NSC Config " "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", tpm_read_index(nscAddrBase,0xF0), tpm_read_index(nscAddrBase,0xF1), tpm_read_index(nscAddrBase,0xF2), tpm_read_index(nscAddrBase,0xF3), tpm_read_index(nscAddrBase,0xF4), tpm_read_index(nscAddrBase,0xF5), tpm_read_index(nscAddrBase,0xF6), tpm_read_index(nscAddrBase,0xF7), tpm_read_index(nscAddrBase,0xF8), tpm_read_index(nscAddrBase,0xF9)); dev_info(&pdev->dev, "NSC TPM revision %d\n", tpm_read_index(nscAddrBase, 0x27) & 0x1F); return 0; err_rel_reg: release_region(base, 2); err_del_dev: platform_device_del(pdev); err_put_dev: platform_device_put(pdev); err_unreg_drv: platform_driver_unregister(&nsc_drv); return rc; } static void __exit cleanup_nsc(void) { if (pdev) { tpm_nsc_remove(&pdev->dev); platform_device_unregister(pdev); } platform_driver_unregister(&nsc_drv); } module_init(init_nsc); module_exit(cleanup_nsc); MODULE_AUTHOR("Leendert van Doorn ([email protected])"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_nsc.c
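Editor's note: tpm_nsc_recv() above, like the other drivers in this section, recovers the total response length from the big-endian 32-bit field at offset 2 of the TPM header (its be32_to_cpu() on buf + 2). An illustrative standalone version of that extraction, not kernel code, using plain byte arithmetic so it is endianness- and alignment-safe:

#include <assert.h>
#include <stdint.h>

#define TPM_HEADER_SIZE 10

/* Equivalent of be32_to_cpu(*(__be32 *)(buf + 2)) without kernel
 * helpers or unaligned loads. */
static uint32_t tpm_response_size(const uint8_t *buf)
{
	return ((uint32_t)buf[2] << 24) | ((uint32_t)buf[3] << 16) |
	       ((uint32_t)buf[4] << 8) | (uint32_t)buf[5];
}

int main(void)
{
	/* tag = 0x00C4 (TPM 1.2 response), size = 0x0000000a, rc = 0 */
	uint8_t hdr[TPM_HEADER_SIZE] = { 0x00, 0xc4, 0x00, 0x00, 0x00, 0x0a,
					 0x00, 0x00, 0x00, 0x00 };

	assert(tpm_response_size(hdr) == TPM_HEADER_SIZE);
	return 0;
}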
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Infineon Technologies AG * Copyright (C) 2016 STMicroelectronics SAS * * Authors: * Peter Huewe <[email protected]> * Christophe Ricard <[email protected]> * * Maintained by: <[email protected]> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org * * This device driver implements the TPM interface as defined in * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native * SPI access_. * * It is based on the original tpm_tis device driver from Leendert van * Doorn, Kylene Hall, and Jarkko Sakkinen. */ #include <linux/acpi.h> #include <linux/completion.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/spi/spi.h> #include <linux/tpm.h> #include "tpm.h" #include "tpm_tis_core.h" #include "tpm_tis_spi.h" #define MAX_SPI_FRAMESIZE 64 /* * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short, * keep trying to read from the device until MISO goes high indicating the * wait state has ended. * * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/ */ static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy, struct spi_transfer *spi_xfer) { struct spi_message m; int ret, i; if ((phy->iobuf[3] & 0x01) == 0) { // handle SPI wait states for (i = 0; i < TPM_RETRY; i++) { spi_xfer->len = 1; spi_message_init(&m); spi_message_add_tail(spi_xfer, &m); ret = spi_sync_locked(phy->spi_device, &m); if (ret < 0) return ret; if (phy->iobuf[0] & 0x01) break; } if (i == TPM_RETRY) return -ETIMEDOUT; } return 0; } /* * Half-duplex controllers with support for TPM wait-state detection, like * the Tegra QSPI, need CMD, ADDR & DATA sent in a single message to manage HW flow * control. Each phase is sent in a different transfer so the controller can identify each * phase. */ static int tpm_tis_spi_transfer_half(struct tpm_tis_data *data, u32 addr, u16 len, u8 *in, const u8 *out) { struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); struct spi_transfer spi_xfer[3]; struct spi_message m; u8 transfer_len; int ret; while (len) { transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE); spi_message_init(&m); phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1); phy->iobuf[1] = 0xd4; phy->iobuf[2] = addr >> 8; phy->iobuf[3] = addr; memset(&spi_xfer, 0, sizeof(spi_xfer)); spi_xfer[0].tx_buf = phy->iobuf; spi_xfer[0].len = 1; spi_message_add_tail(&spi_xfer[0], &m); spi_xfer[1].tx_buf = phy->iobuf + 1; spi_xfer[1].len = 3; spi_message_add_tail(&spi_xfer[1], &m); if (out) { spi_xfer[2].tx_buf = &phy->iobuf[4]; spi_xfer[2].rx_buf = NULL; memcpy(&phy->iobuf[4], out, transfer_len); out += transfer_len; } if (in) { spi_xfer[2].tx_buf = NULL; spi_xfer[2].rx_buf = &phy->iobuf[4]; } spi_xfer[2].len = transfer_len; spi_message_add_tail(&spi_xfer[2], &m); reinit_completion(&phy->ready); ret = spi_sync(phy->spi_device, &m); if (ret < 0) return ret; if (in) { memcpy(in, &phy->iobuf[4], transfer_len); in += transfer_len; } len -= transfer_len; } return ret; } static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr, u16 len, u8 *in, const u8 *out) { struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); int ret = 0; struct spi_message m; struct spi_transfer spi_xfer; u8 transfer_len; spi_bus_lock(phy->spi_device->master); while (len) { transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE); phy->iobuf[0] = (in ?
0x80 : 0) | (transfer_len - 1); phy->iobuf[1] = 0xd4; phy->iobuf[2] = addr >> 8; phy->iobuf[3] = addr; memset(&spi_xfer, 0, sizeof(spi_xfer)); spi_xfer.tx_buf = phy->iobuf; spi_xfer.rx_buf = phy->iobuf; spi_xfer.len = 4; spi_xfer.cs_change = 1; spi_message_init(&m); spi_message_add_tail(&spi_xfer, &m); ret = spi_sync_locked(phy->spi_device, &m); if (ret < 0) goto exit; /* Flow control transfers are receive only */ spi_xfer.tx_buf = NULL; ret = phy->flow_control(phy, &spi_xfer); if (ret < 0) goto exit; spi_xfer.cs_change = 0; spi_xfer.len = transfer_len; spi_xfer.delay.value = 5; spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS; if (out) { spi_xfer.tx_buf = phy->iobuf; spi_xfer.rx_buf = NULL; memcpy(phy->iobuf, out, transfer_len); out += transfer_len; } spi_message_init(&m); spi_message_add_tail(&spi_xfer, &m); reinit_completion(&phy->ready); ret = spi_sync_locked(phy->spi_device, &m); if (ret < 0) goto exit; if (in) { memcpy(in, phy->iobuf, transfer_len); in += transfer_len; } len -= transfer_len; } exit: if (ret < 0) { /* Deactivate chip select */ memset(&spi_xfer, 0, sizeof(spi_xfer)); spi_message_init(&m); spi_message_add_tail(&spi_xfer, &m); spi_sync_locked(phy->spi_device, &m); } spi_bus_unlock(phy->spi_device->master); return ret; } int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, u8 *in, const u8 *out) { struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); struct spi_controller *ctlr = phy->spi_device->controller; /* * TPM flow control over SPI requires full duplex support. * Send entire message to a half duplex controller to handle * wait polling in controller. * Set TPM HW flow control flag.. */ if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) return tpm_tis_spi_transfer_half(data, addr, len, in, out); else return tpm_tis_spi_transfer_full(data, addr, len, in, out); } static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result, enum tpm_tis_io_mode io_mode) { return tpm_tis_spi_transfer(data, addr, len, result, NULL); } static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, const u8 *value, enum tpm_tis_io_mode io_mode) { return tpm_tis_spi_transfer(data, addr, len, NULL, value); } int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy, int irq, const struct tpm_tis_phy_ops *phy_ops) { phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL); if (!phy->iobuf) return -ENOMEM; phy->spi_device = spi; return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL); } static const struct tpm_tis_phy_ops tpm_spi_phy_ops = { .read_bytes = tpm_tis_spi_read_bytes, .write_bytes = tpm_tis_spi_write_bytes, }; static int tpm_tis_spi_probe(struct spi_device *dev) { struct tpm_tis_spi_phy *phy; int irq; phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy), GFP_KERNEL); if (!phy) return -ENOMEM; phy->flow_control = tpm_tis_spi_flow_control; if (dev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) dev->mode |= SPI_TPM_HW_FLOW; /* If the SPI device has an IRQ then use that */ if (dev->irq > 0) irq = dev->irq; else irq = -1; init_completion(&phy->ready); return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops); } typedef int (*tpm_tis_spi_probe_func)(struct spi_device *); static int tpm_tis_spi_driver_probe(struct spi_device *spi) { const struct spi_device_id *spi_dev_id = spi_get_device_id(spi); tpm_tis_spi_probe_func probe_func; probe_func = of_device_get_match_data(&spi->dev); if (!probe_func) { if (spi_dev_id) { probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data; if 
(!probe_func) return -ENODEV; } else probe_func = tpm_tis_spi_probe; } return probe_func(spi); } static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume); static void tpm_tis_spi_remove(struct spi_device *dev) { struct tpm_chip *chip = spi_get_drvdata(dev); tpm_chip_unregister(chip); tpm_tis_remove(chip); } static const struct spi_device_id tpm_tis_spi_id[] = { { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe }, { "slb9670", (unsigned long)tpm_tis_spi_probe }, { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe }, { "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe }, { "cr50", (unsigned long)cr50_spi_probe }, {} }; MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id); static const struct of_device_id of_tis_spi_match[] __maybe_unused = { { .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe }, { .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe }, { .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe }, { .compatible = "google,cr50", .data = cr50_spi_probe }, {} }; MODULE_DEVICE_TABLE(of, of_tis_spi_match); static const struct acpi_device_id acpi_tis_spi_match[] __maybe_unused = { {"SMO0768", 0}, {} }; MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match); static struct spi_driver tpm_tis_spi_driver = { .driver = { .name = "tpm_tis_spi", .pm = &tpm_tis_pm, .of_match_table = of_match_ptr(of_tis_spi_match), .acpi_match_table = ACPI_PTR(acpi_tis_spi_match), .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, .probe = tpm_tis_spi_driver_probe, .remove = tpm_tis_spi_remove, .id_table = tpm_tis_spi_id, }; module_spi_driver(tpm_tis_spi_driver); MODULE_DESCRIPTION("TPM Driver for native SPI access"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/tpm_tis_spi_main.c
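Editor's note: both transfer paths above frame every TIS-over-SPI transaction with the same 4-byte header: a read/write bit plus (length - 1) in the first byte, then the 24-bit register address in the 0xD4xxxx space. An illustrative standalone builder for that header, not kernel code; tis_spi_header() is an invented name:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_SPI_FRAMESIZE 64

/* Build the 4-byte TIS SPI header the way tpm_tis_spi_transfer_*() fill
 * iobuf[0..3]: bit 7 of byte 0 selects read, bits 5:0 hold (len - 1),
 * and the remaining bytes carry the 0xD4xxxx register address. */
static void tis_spi_header(uint8_t hdr[4], bool read, uint16_t len, uint32_t addr)
{
	assert(len >= 1 && len <= MAX_SPI_FRAMESIZE);
	hdr[0] = (read ? 0x80 : 0x00) | (uint8_t)(len - 1);
	hdr[1] = 0xd4;
	hdr[2] = (uint8_t)(addr >> 8);
	hdr[3] = (uint8_t)addr;
}

int main(void)
{
	uint8_t hdr[4];

	/* e.g. a 4-byte read of the register at TIS offset 0x0f00 */
	tis_spi_header(hdr, true, 4, 0x0f00);
	assert(hdr[0] == 0x83 && hdr[1] == 0xd4 && hdr[2] == 0x0f && hdr[3] == 0x00);
	return 0;
}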
// SPDX-License-Identifier: GPL-2.0-or-later /* * STMicroelectronics TPM Linux driver for TPM ST33ZP24 * Copyright (C) 2009 - 2016 STMicroelectronics */ #include <linux/acpi.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/freezer.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/gpio/consumer.h> #include <linux/sched.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/slab.h> #include "../tpm.h" #include "st33zp24.h" #define TPM_ACCESS 0x0 #define TPM_STS 0x18 #define TPM_DATA_FIFO 0x24 #define TPM_INTF_CAPABILITY 0x14 #define TPM_INT_STATUS 0x10 #define TPM_INT_ENABLE 0x08 #define LOCALITY0 0 enum st33zp24_access { TPM_ACCESS_VALID = 0x80, TPM_ACCESS_ACTIVE_LOCALITY = 0x20, TPM_ACCESS_REQUEST_PENDING = 0x04, TPM_ACCESS_REQUEST_USE = 0x02, }; enum st33zp24_status { TPM_STS_VALID = 0x80, TPM_STS_COMMAND_READY = 0x40, TPM_STS_GO = 0x20, TPM_STS_DATA_AVAIL = 0x10, TPM_STS_DATA_EXPECT = 0x08, }; enum st33zp24_int_flags { TPM_GLOBAL_INT_ENABLE = 0x80, TPM_INTF_CMD_READY_INT = 0x080, TPM_INTF_FIFO_AVALAIBLE_INT = 0x040, TPM_INTF_WAKE_UP_READY_INT = 0x020, TPM_INTF_LOCALITY_CHANGE_INT = 0x004, TPM_INTF_STS_VALID_INT = 0x002, TPM_INTF_DATA_AVAIL_INT = 0x001, }; enum tis_defaults { TIS_SHORT_TIMEOUT = 750, TIS_LONG_TIMEOUT = 2000, }; /* * clear the pending interrupt. */ static u8 clear_interruption(struct st33zp24_dev *tpm_dev) { u8 interrupt; tpm_dev->ops->recv(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1); tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1); return interrupt; } /* * cancel the current command execution or set STS to COMMAND READY. */ static void st33zp24_cancel(struct tpm_chip *chip) { struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u8 data; data = TPM_STS_COMMAND_READY; tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1); } /* * return the TPM_STS register */ static u8 st33zp24_status(struct tpm_chip *chip) { struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u8 data; tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1); return data; } /* * if the locality is active */ static bool check_locality(struct tpm_chip *chip) { struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u8 data; u8 status; status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_ACCESS, &data, 1); if (status && (data & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) return true; return false; } static int request_locality(struct tpm_chip *chip) { struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); unsigned long stop; long ret; u8 data; if (check_locality(chip)) return tpm_dev->locality; data = TPM_ACCESS_REQUEST_USE; ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1); if (ret < 0) return ret; stop = jiffies + chip->timeout_a; /* Request locality is usually effective after the request */ do { if (check_locality(chip)) return tpm_dev->locality; msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); /* could not get locality */ return -EACCES; } static void release_locality(struct tpm_chip *chip) { struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u8 data; data = TPM_ACCESS_ACTIVE_LOCALITY; tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1); } /* * get_burstcount return the burstcount value */ static int get_burstcount(struct tpm_chip *chip) { struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); unsigned long stop; int burstcnt, status; u8 
temp; stop = jiffies + chip->timeout_d; do { status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 1, &temp, 1); if (status < 0) return -EBUSY; burstcnt = temp; status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 2, &temp, 1); if (status < 0) return -EBUSY; burstcnt |= temp << 8; if (burstcnt) return burstcnt; msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); return -EBUSY; } static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel, bool *canceled) { u8 status = chip->ops->status(chip); *canceled = false; if ((status & mask) == mask) return true; if (check_cancel && chip->ops->req_canceled(chip, status)) { *canceled = true; return true; } return false; } /* * wait for a TPM_STS value */ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, wait_queue_head_t *queue, bool check_cancel) { struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); unsigned long stop; int ret = 0; bool canceled = false; bool condition; u32 cur_intrs; u8 status; /* check current status */ status = st33zp24_status(chip); if ((status & mask) == mask) return 0; stop = jiffies + timeout; if (chip->flags & TPM_CHIP_FLAG_IRQ) { cur_intrs = tpm_dev->intrs; clear_interruption(tpm_dev); enable_irq(tpm_dev->irq); do { if (ret == -ERESTARTSYS && freezing(current)) clear_thread_flag(TIF_SIGPENDING); timeout = stop - jiffies; if ((long) timeout <= 0) return -1; ret = wait_event_interruptible_timeout(*queue, cur_intrs != tpm_dev->intrs, timeout); clear_interruption(tpm_dev); condition = wait_for_tpm_stat_cond(chip, mask, check_cancel, &canceled); if (ret >= 0 && condition) { if (canceled) return -ECANCELED; return 0; } } while (ret == -ERESTARTSYS && freezing(current)); disable_irq_nosync(tpm_dev->irq); } else { do { msleep(TPM_TIMEOUT); status = chip->ops->status(chip); if ((status & mask) == mask) return 0; } while (time_before(jiffies, stop)); } return -ETIME; } static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) { struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); int size = 0, burstcnt, len, ret; while (size < count && wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, chip->timeout_c, &tpm_dev->read_queue, true) == 0) { burstcnt = get_burstcount(chip); if (burstcnt < 0) return burstcnt; len = min_t(int, burstcnt, count - size); ret = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_DATA_FIFO, buf + size, len); if (ret < 0) return ret; size += len; } return size; } static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id) { struct tpm_chip *chip = dev_id; struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); tpm_dev->intrs++; wake_up_interruptible(&tpm_dev->read_queue); disable_irq_nosync(tpm_dev->irq); return IRQ_HANDLED; } /* * send TPM commands through the underlying I2C or SPI bus.
*/ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, size_t len) { struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); u32 status, i, size, ordinal; int burstcnt = 0; int ret; u8 data; if (len < TPM_HEADER_SIZE) return -EBUSY; ret = request_locality(chip); if (ret < 0) return ret; status = st33zp24_status(chip); if ((status & TPM_STS_COMMAND_READY) == 0) { st33zp24_cancel(chip); if (wait_for_stat (chip, TPM_STS_COMMAND_READY, chip->timeout_b, &tpm_dev->read_queue, false) < 0) { ret = -ETIME; goto out_err; } } for (i = 0; i < len - 1;) { burstcnt = get_burstcount(chip); if (burstcnt < 0) return burstcnt; size = min_t(int, len - i - 1, burstcnt); ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO, buf + i, size); if (ret < 0) goto out_err; i += size; } status = st33zp24_status(chip); if ((status & TPM_STS_DATA_EXPECT) == 0) { ret = -EIO; goto out_err; } ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO, buf + len - 1, 1); if (ret < 0) goto out_err; status = st33zp24_status(chip); if ((status & TPM_STS_DATA_EXPECT) != 0) { ret = -EIO; goto out_err; } data = TPM_STS_GO; ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1); if (ret < 0) goto out_err; if (chip->flags & TPM_CHIP_FLAG_IRQ) { ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); ret = wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, tpm_calc_ordinal_duration(chip, ordinal), &tpm_dev->read_queue, false); if (ret < 0) goto out_err; } return 0; out_err: st33zp24_cancel(chip); release_locality(chip); return ret; } static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, size_t count) { int size = 0; u32 expected; if (!chip) return -EBUSY; if (count < TPM_HEADER_SIZE) { size = -EIO; goto out; } size = recv_data(chip, buf, TPM_HEADER_SIZE); if (size < TPM_HEADER_SIZE) { dev_err(&chip->dev, "Unable to read header\n"); goto out; } expected = be32_to_cpu(*(__be32 *)(buf + 2)); if (expected > count || expected < TPM_HEADER_SIZE) { size = -EIO; goto out; } size += recv_data(chip, &buf[TPM_HEADER_SIZE], expected - TPM_HEADER_SIZE); if (size < expected) { dev_err(&chip->dev, "Unable to read remainder of result\n"); size = -ETIME; } out: st33zp24_cancel(chip); release_locality(chip); return size; } static bool st33zp24_req_canceled(struct tpm_chip *chip, u8 status) { return (status == TPM_STS_COMMAND_READY); } static const struct tpm_class_ops st33zp24_tpm = { .flags = TPM_OPS_AUTO_STARTUP, .send = st33zp24_send, .recv = st33zp24_recv, .cancel = st33zp24_cancel, .status = st33zp24_status, .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_canceled = st33zp24_req_canceled, }; static const struct acpi_gpio_params lpcpd_gpios = { 1, 0, false }; static const struct acpi_gpio_mapping acpi_st33zp24_gpios[] = { { "lpcpd-gpios", &lpcpd_gpios, 1 }, { }, }; /* * initialize the TPM device */ int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops, struct device *dev, int irq) { int ret; u8 intmask = 0; struct tpm_chip *chip; struct st33zp24_dev *tpm_dev; chip = tpmm_chip_alloc(dev, &st33zp24_tpm); if (IS_ERR(chip)) return PTR_ERR(chip); tpm_dev = devm_kzalloc(dev, sizeof(struct st33zp24_dev), GFP_KERNEL); if (!tpm_dev) return -ENOMEM; tpm_dev->phy_id = phy_id; tpm_dev->ops = ops; dev_set_drvdata(&chip->dev, tpm_dev); chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->timeout_d = 
msecs_to_jiffies(TIS_SHORT_TIMEOUT); tpm_dev->locality = LOCALITY0; if (ACPI_COMPANION(dev)) { ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios); if (ret) return ret; } /* * Get the LPCPD GPIO. If the lpcpd pin is not specified, this is not an * issue, as power management can also be managed by TPM-specific * commands. */ tpm_dev->io_lpcpd = devm_gpiod_get_optional(dev, "lpcpd", GPIOD_OUT_HIGH); ret = PTR_ERR_OR_ZERO(tpm_dev->io_lpcpd); if (ret) { dev_err(dev, "failed to request lpcpd gpio: %d\n", ret); return ret; } if (irq) { /* INTERRUPT Setup */ init_waitqueue_head(&tpm_dev->read_queue); tpm_dev->intrs = 0; if (request_locality(chip) != LOCALITY0) { ret = -ENODEV; goto _tpm_clean_answer; } clear_interruption(tpm_dev); ret = devm_request_irq(dev, irq, tpm_ioserirq_handler, IRQF_TRIGGER_HIGH, "TPM SERIRQ management", chip); if (ret < 0) { dev_err(&chip->dev, "TPM SERIRQ signals %d not available\n", irq); goto _tpm_clean_answer; } intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_STS_VALID_INT | TPM_INTF_DATA_AVAIL_INT; ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_ENABLE, &intmask, 1); if (ret < 0) goto _tpm_clean_answer; intmask = TPM_GLOBAL_INT_ENABLE; ret = tpm_dev->ops->send(tpm_dev->phy_id, (TPM_INT_ENABLE + 3), &intmask, 1); if (ret < 0) goto _tpm_clean_answer; tpm_dev->irq = irq; chip->flags |= TPM_CHIP_FLAG_IRQ; disable_irq_nosync(tpm_dev->irq); } return tpm_chip_register(chip); _tpm_clean_answer: dev_info(&chip->dev, "TPM initialization failed\n"); return ret; } EXPORT_SYMBOL(st33zp24_probe); void st33zp24_remove(struct tpm_chip *chip) { tpm_chip_unregister(chip); } EXPORT_SYMBOL(st33zp24_remove); #ifdef CONFIG_PM_SLEEP int st33zp24_pm_suspend(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); int ret = 0; if (tpm_dev->io_lpcpd) gpiod_set_value_cansleep(tpm_dev->io_lpcpd, 0); else ret = tpm_pm_suspend(dev); return ret; } EXPORT_SYMBOL(st33zp24_pm_suspend); int st33zp24_pm_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev); int ret = 0; if (tpm_dev->io_lpcpd) { gpiod_set_value_cansleep(tpm_dev->io_lpcpd, 1); ret = wait_for_stat(chip, TPM_STS_VALID, chip->timeout_b, &tpm_dev->read_queue, false); } else { ret = tpm_pm_resume(dev); if (!ret) tpm1_do_selftest(chip); } return ret; } EXPORT_SYMBOL(st33zp24_pm_resume); #endif MODULE_AUTHOR("TPM support ([email protected])"); MODULE_DESCRIPTION("ST33ZP24 TPM 1.2 driver"); MODULE_VERSION("1.3.0"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/st33zp24/st33zp24.c
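Editor's note: get_burstcount() above assembles a 16-bit burst count from two single-byte reads at TPM_STS + 1 (low byte) and TPM_STS + 2 (high byte). An illustrative standalone version of that assembly, not kernel code; reg_read_fn and fake_rd are invented stand-ins for tpm_dev->ops->recv():

#include <assert.h>
#include <stdint.h>

#define TPM_STS 0x18

/* Hypothetical register-read callback; returns < 0 on bus error. */
typedef int (*reg_read_fn)(uint8_t reg, uint8_t *out);

static int read_burstcount(reg_read_fn rd)
{
	uint8_t lo, hi;

	if (rd(TPM_STS + 1, &lo) < 0 || rd(TPM_STS + 2, &hi) < 0)
		return -1;	/* the driver returns -EBUSY here */
	return (int)lo | ((int)hi << 8);	/* low byte first, as above */
}

/* Fake bus for the example: reports a burst count of 0x0120 bytes. */
static int fake_rd(uint8_t reg, uint8_t *out)
{
	*out = (reg == TPM_STS + 1) ? 0x20 : 0x01;
	return 0;
}

int main(void)
{
	assert(read_burstcount(fake_rd) == 0x0120);
	return 0;
}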
// SPDX-License-Identifier: GPL-2.0-or-later /* * STMicroelectronics TPM SPI Linux driver for TPM ST33ZP24 * Copyright (C) 2009 - 2016 STMicroelectronics */ #include <linux/module.h> #include <linux/spi/spi.h> #include <linux/of.h> #include <linux/acpi.h> #include <linux/tpm.h> #include "../tpm.h" #include "st33zp24.h" #define TPM_DATA_FIFO 0x24 #define TPM_INTF_CAPABILITY 0x14 #define TPM_DUMMY_BYTE 0x00 #define MAX_SPI_LATENCY 15 #define LOCALITY0 0 #define ST33ZP24_OK 0x5A #define ST33ZP24_UNDEFINED_ERR 0x80 #define ST33ZP24_BADLOCALITY 0x81 #define ST33ZP24_TISREGISTER_UNKNOWN 0x82 #define ST33ZP24_LOCALITY_NOT_ACTIVATED 0x83 #define ST33ZP24_HASH_END_BEFORE_HASH_START 0x84 #define ST33ZP24_BAD_COMMAND_ORDER 0x85 #define ST33ZP24_INCORECT_RECEIVED_LENGTH 0x86 #define ST33ZP24_TPM_FIFO_OVERFLOW 0x89 #define ST33ZP24_UNEXPECTED_READ_FIFO 0x8A #define ST33ZP24_UNEXPECTED_WRITE_FIFO 0x8B #define ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END 0x90 #define ST33ZP24_DUMMY_BYTES 0x00 /* * A TPM command can be up to 2048 bytes, a TPM response can be up to * 1024 bytes. * Between command and response there are latency bytes (up to 15; * usually on st33zp24, 2 are enough). * * Overall, when sending a command and expecting an answer, we need in the * worst case: * 2048 (for the TPM command) + 1024 (for the TPM answer). We need * some latency bytes before the answer is available (max 15). * We have 2048 + 1024 + 15. */ #define ST33ZP24_SPI_BUFFER_SIZE (ST33ZP24_BUFSIZE + (ST33ZP24_BUFSIZE / 2) +\ MAX_SPI_LATENCY) struct st33zp24_spi_phy { struct spi_device *spi_device; u8 tx_buf[ST33ZP24_SPI_BUFFER_SIZE]; u8 rx_buf[ST33ZP24_SPI_BUFFER_SIZE]; int latency; }; static int st33zp24_status_to_errno(u8 code) { switch (code) { case ST33ZP24_OK: return 0; case ST33ZP24_UNDEFINED_ERR: case ST33ZP24_BADLOCALITY: case ST33ZP24_TISREGISTER_UNKNOWN: case ST33ZP24_LOCALITY_NOT_ACTIVATED: case ST33ZP24_HASH_END_BEFORE_HASH_START: case ST33ZP24_BAD_COMMAND_ORDER: case ST33ZP24_UNEXPECTED_READ_FIFO: case ST33ZP24_UNEXPECTED_WRITE_FIFO: case ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END: return -EPROTO; case ST33ZP24_INCORECT_RECEIVED_LENGTH: case ST33ZP24_TPM_FIFO_OVERFLOW: return -EMSGSIZE; case ST33ZP24_DUMMY_BYTES: return -ENOSYS; } return code; } /* * st33zp24_spi_send * Send bytes to the TIS register according to the ST33ZP24 SPI protocol. * @param: phy_id, the phy description * @param: tpm_register, the tpm tis register where the data should be written * @param: tpm_data, the tpm_data to write inside the tpm_register * @param: tpm_size, the length of the data * @return: should be zero on success, else a negative error code.
*/ static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) { int total_length = 0, ret = 0; struct st33zp24_spi_phy *phy = phy_id; struct spi_device *dev = phy->spi_device; struct spi_transfer spi_xfer = { .tx_buf = phy->tx_buf, .rx_buf = phy->rx_buf, }; /* Pre-Header */ phy->tx_buf[total_length++] = TPM_WRITE_DIRECTION | LOCALITY0; phy->tx_buf[total_length++] = tpm_register; if (tpm_size > 0 && tpm_register == TPM_DATA_FIFO) { phy->tx_buf[total_length++] = tpm_size >> 8; phy->tx_buf[total_length++] = tpm_size; } memcpy(&phy->tx_buf[total_length], tpm_data, tpm_size); total_length += tpm_size; memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE, phy->latency); spi_xfer.len = total_length + phy->latency; ret = spi_sync_transfer(dev, &spi_xfer, 1); if (ret == 0) ret = phy->rx_buf[total_length + phy->latency - 1]; return st33zp24_status_to_errno(ret); } /* st33zp24_spi_send() */ /* * st33zp24_spi_read8_reg * Read bytes from the TIS register according to the ST33ZP24 SPI protocol. * @param: phy_id, the phy description * @param: tpm_register, the tpm tis register where the data should be read * @param: tpm_data, the TPM response * @param: tpm_size, the TPM response size to read. * @return: the ST33ZP24 status byte on success, else a negative error code * from the SPI layer. */ static int st33zp24_spi_read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) { int total_length = 0, ret; struct st33zp24_spi_phy *phy = phy_id; struct spi_device *dev = phy->spi_device; struct spi_transfer spi_xfer = { .tx_buf = phy->tx_buf, .rx_buf = phy->rx_buf, }; /* Pre-Header */ phy->tx_buf[total_length++] = LOCALITY0; phy->tx_buf[total_length++] = tpm_register; memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE, phy->latency + tpm_size); spi_xfer.len = total_length + phy->latency + tpm_size; /* header + status byte + size of the data + status byte */ ret = spi_sync_transfer(dev, &spi_xfer, 1); if (tpm_size > 0 && ret == 0) { ret = phy->rx_buf[total_length + phy->latency - 1]; memcpy(tpm_data, phy->rx_buf + total_length + phy->latency, tpm_size); } return ret; } /* st33zp24_spi_read8_reg() */ /* * st33zp24_spi_recv * Read bytes from the TIS register according to the ST33ZP24 SPI protocol. * @param: phy_id, the phy description * @param: tpm_register, the tpm tis register where the data should be read * @param: tpm_data, the TPM response * @param: tpm_size, the TPM response size to read. * @return: the number of bytes read (tpm_size) on success, else a negative * error code. */ static int st33zp24_spi_recv(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) { int ret; ret = st33zp24_spi_read8_reg(phy_id, tpm_register, tpm_data, tpm_size); if (!st33zp24_status_to_errno(ret)) return tpm_size; return ret; } /* st33zp24_spi_recv() */ static int st33zp24_spi_evaluate_latency(void *phy_id) { struct st33zp24_spi_phy *phy = phy_id; int latency = 1, status = 0; u8 data = 0; while (!status && latency < MAX_SPI_LATENCY) { phy->latency = latency; status = st33zp24_spi_read8_reg(phy_id, TPM_INTF_CAPABILITY, &data, 1); latency++; } if (status < 0) return status; if (latency == MAX_SPI_LATENCY) return -ENODEV; return latency - 1; } /* st33zp24_spi_evaluate_latency() */ static const struct st33zp24_phy_ops spi_phy_ops = { .send = st33zp24_spi_send, .recv = st33zp24_spi_recv, }; /* * st33zp24_spi_probe initializes the TPM device * @param: dev, the spi_device description (TPM SPI description). * @return: 0 on success, or a negative value describing the error.
*/ static int st33zp24_spi_probe(struct spi_device *dev) { struct st33zp24_spi_phy *phy; phy = devm_kzalloc(&dev->dev, sizeof(struct st33zp24_spi_phy), GFP_KERNEL); if (!phy) return -ENOMEM; phy->spi_device = dev; phy->latency = st33zp24_spi_evaluate_latency(phy); if (phy->latency <= 0) return -ENODEV; return st33zp24_probe(phy, &spi_phy_ops, &dev->dev, dev->irq); } /* * st33zp24_spi_remove removes the TPM device * @param: dev, the spi_device description (TPM SPI description). */ static void st33zp24_spi_remove(struct spi_device *dev) { struct tpm_chip *chip = spi_get_drvdata(dev); st33zp24_remove(chip); } static const struct spi_device_id st33zp24_spi_id[] = { {TPM_ST33_SPI, 0}, {} }; MODULE_DEVICE_TABLE(spi, st33zp24_spi_id); static const struct of_device_id of_st33zp24_spi_match[] __maybe_unused = { { .compatible = "st,st33zp24-spi", }, {} }; MODULE_DEVICE_TABLE(of, of_st33zp24_spi_match); static const struct acpi_device_id st33zp24_spi_acpi_match[] __maybe_unused = { {"SMO3324"}, {} }; MODULE_DEVICE_TABLE(acpi, st33zp24_spi_acpi_match); static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend, st33zp24_pm_resume); static struct spi_driver st33zp24_spi_driver = { .driver = { .name = "st33zp24-spi", .pm = &st33zp24_spi_ops, .of_match_table = of_match_ptr(of_st33zp24_spi_match), .acpi_match_table = ACPI_PTR(st33zp24_spi_acpi_match), }, .probe = st33zp24_spi_probe, .remove = st33zp24_spi_remove, .id_table = st33zp24_spi_id, }; module_spi_driver(st33zp24_spi_driver); MODULE_AUTHOR("TPM support ([email protected])"); MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver"); MODULE_VERSION("1.3.0"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/st33zp24/spi.c
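The send path above serializes every write into a single full-duplex SPI transfer: a direction/locality byte, the target register, an optional 16-bit big-endian size (only for TPM_DATA_FIFO writes), the payload, and trailing dummy bytes during which the chip clocks out its status. Below is a standalone sketch of that framing; build_write_frame() is hypothetical, and the 0x80 value for the direction bit is an assumption about TPM_WRITE_DIRECTION in st33zp24.h rather than something shown in this file.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define WRITE_DIRECTION	0x80	/* assumed value of TPM_WRITE_DIRECTION */
#define LOCALITY	0x00	/* LOCALITY0 */
#define DATA_FIFO_REG	0x24	/* TPM_DATA_FIFO */
#define DUMMY		0x00	/* TPM_DUMMY_BYTE */

static size_t build_write_frame(uint8_t *buf, uint8_t reg,
				const uint8_t *data, size_t len,
				size_t latency)
{
	size_t n = 0;

	buf[n++] = WRITE_DIRECTION | LOCALITY;	/* pre-header byte 0 */
	buf[n++] = reg;				/* pre-header byte 1 */
	if (len > 0 && reg == DATA_FIFO_REG) {	/* FIFO writes carry a size */
		buf[n++] = len >> 8;
		buf[n++] = len & 0xff;
	}
	memcpy(&buf[n], data, len);		/* payload */
	n += len;
	memset(&buf[n], DUMMY, latency);	/* dummy clocks; status byte
						 * arrives on MISO here */
	return n + latency;			/* total SPI transfer length */
}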
// SPDX-License-Identifier: GPL-2.0-or-later /* * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24 * Copyright (C) 2009 - 2016 STMicroelectronics */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/of.h> #include <linux/acpi.h> #include <linux/tpm.h> #include "../tpm.h" #include "st33zp24.h" #define TPM_DUMMY_BYTE 0xAA struct st33zp24_i2c_phy { struct i2c_client *client; u8 buf[ST33ZP24_BUFSIZE + 1]; }; /* * write8_reg * Send bytes to the TIS register according to the ST33ZP24 I2C protocol. * @param: tpm_register, the tpm tis register where the data should be written * @param: tpm_data, the tpm_data to write inside the tpm_register * @param: tpm_size, the length of the data * @return: the number of bytes written on success, else a negative errno. */ static int write8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) { struct st33zp24_i2c_phy *phy = phy_id; phy->buf[0] = tpm_register; memcpy(phy->buf + 1, tpm_data, tpm_size); return i2c_master_send(phy->client, phy->buf, tpm_size + 1); } /* write8_reg() */ /* * read8_reg * Read bytes from the TIS register according to the ST33ZP24 I2C protocol. * @param: tpm_register, the tpm tis register where the data should be read * @param: tpm_data, the TPM response * @param: tpm_size, the TPM response size to read. * @return: the number of bytes read on success, else a negative errno. */ static int read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) { struct st33zp24_i2c_phy *phy = phy_id; int status; /* int, not u8, so negative errnos from the I2C core survive */ u8 data; data = TPM_DUMMY_BYTE; status = write8_reg(phy, tpm_register, &data, 1); if (status == 2) status = i2c_master_recv(phy->client, tpm_data, tpm_size); return status; } /* read8_reg() */ /* * st33zp24_i2c_send * Send bytes to the TIS register according to the ST33ZP24 I2C protocol. * @param: phy_id, the phy description * @param: tpm_register, the tpm tis register where the data should be written * @param: tpm_data, the tpm_data to write inside the tpm_register * @param: tpm_size, the length of the data * @return: the number of bytes written (including the register byte) on * success, else a negative errno. */ static int st33zp24_i2c_send(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) { return write8_reg(phy_id, tpm_register | TPM_WRITE_DIRECTION, tpm_data, tpm_size); } /* * st33zp24_i2c_recv * Read bytes from the TIS register according to the ST33ZP24 I2C protocol. * @param: phy_id, the phy description * @param: tpm_register, the tpm tis register where the data should be read * @param: tpm_data, the TPM response * @param: tpm_size, the TPM response size to read. * @return: the number of bytes read on success, else a negative errno. */ static int st33zp24_i2c_recv(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size) { return read8_reg(phy_id, tpm_register, tpm_data, tpm_size); } static const struct st33zp24_phy_ops i2c_phy_ops = { .send = st33zp24_i2c_send, .recv = st33zp24_i2c_recv, }; /* * st33zp24_i2c_probe initializes the TPM device * @param: client, the i2c_client description (TPM I2C description). * @return: 0 on success, or a negative errno otherwise.
*/ static int st33zp24_i2c_probe(struct i2c_client *client) { struct st33zp24_i2c_phy *phy; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_info(&client->dev, "client not i2c capable\n"); return -ENODEV; } phy = devm_kzalloc(&client->dev, sizeof(struct st33zp24_i2c_phy), GFP_KERNEL); if (!phy) return -ENOMEM; phy->client = client; return st33zp24_probe(phy, &i2c_phy_ops, &client->dev, client->irq); } /* * st33zp24_i2c_remove removes the TPM device * @param: client, the i2c_client description (TPM I2C description). */ static void st33zp24_i2c_remove(struct i2c_client *client) { struct tpm_chip *chip = i2c_get_clientdata(client); st33zp24_remove(chip); } static const struct i2c_device_id st33zp24_i2c_id[] = { {TPM_ST33_I2C, 0}, {} }; MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id); static const struct of_device_id of_st33zp24_i2c_match[] __maybe_unused = { { .compatible = "st,st33zp24-i2c", }, {} }; MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match); static const struct acpi_device_id st33zp24_i2c_acpi_match[] __maybe_unused = { {"SMO3324"}, {} }; MODULE_DEVICE_TABLE(acpi, st33zp24_i2c_acpi_match); static SIMPLE_DEV_PM_OPS(st33zp24_i2c_ops, st33zp24_pm_suspend, st33zp24_pm_resume); static struct i2c_driver st33zp24_i2c_driver = { .driver = { .name = TPM_ST33_I2C, .pm = &st33zp24_i2c_ops, .of_match_table = of_match_ptr(of_st33zp24_i2c_match), .acpi_match_table = ACPI_PTR(st33zp24_i2c_acpi_match), }, .probe = st33zp24_i2c_probe, .remove = st33zp24_i2c_remove, .id_table = st33zp24_i2c_id }; module_i2c_driver(st33zp24_i2c_driver); MODULE_AUTHOR("TPM support ([email protected])"); MODULE_DESCRIPTION("STM TPM 1.2 I2C ST33 Driver"); MODULE_VERSION("1.3.0"); MODULE_LICENSE("GPL");
linux-master
drivers/char/tpm/st33zp24/i2c.c
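On I2C the read path is a two-step exchange: write8_reg() first writes the register address followed by TPM_DUMMY_BYTE, then i2c_master_recv() fetches the response. The same exchange can be reproduced from userspace through the standard i2c-dev interface; in this sketch the bus number, the 0x2e slave address, and the 0x18 register are placeholders for illustration, not values taken from the driver.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

static int st33_read_reg(int fd, unsigned char reg,
			 unsigned char *out, int len)
{
	unsigned char cmd[2] = { reg, 0xAA };	/* register + TPM_DUMMY_BYTE */

	if (write(fd, cmd, 2) != 2)		/* step 1: select register */
		return -1;
	if (read(fd, out, len) != len)		/* step 2: fetch response */
		return -1;
	return len;
}

int main(void)
{
	unsigned char sts;
	int fd = open("/dev/i2c-1", O_RDWR);	/* placeholder bus */

	if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x2e) < 0)	/* placeholder addr */
		return 1;
	return st33_read_reg(fd, 0x18, &sts, 1) == 1 ? 0 : 1;
}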
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2005, 2012 IBM Corporation * * Authors: * Kent Yoder <[email protected]> * Seiji Munetoh <[email protected]> * Stefan Berger <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * Nayna Jain <[email protected]> * * Access to the event log created by a system's firmware / BIOS */ #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/security.h> #include <linux/module.h> #include <linux/tpm_eventlog.h> #include "../tpm.h" #include "common.h" static int tpm_bios_measurements_open(struct inode *inode, struct file *file) { int err; struct seq_file *seq; struct tpm_chip_seqops *chip_seqops; const struct seq_operations *seqops; struct tpm_chip *chip; inode_lock(inode); if (!inode->i_private) { inode_unlock(inode); return -ENODEV; } chip_seqops = inode->i_private; seqops = chip_seqops->seqops; chip = chip_seqops->chip; get_device(&chip->dev); inode_unlock(inode); /* now register the seq file */ err = seq_open(file, seqops); if (!err) { seq = file->private_data; seq->private = chip; } return err; } static int tpm_bios_measurements_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; struct tpm_chip *chip = seq->private; put_device(&chip->dev); return seq_release(inode, file); } static const struct file_operations tpm_bios_measurements_ops = { .owner = THIS_MODULE, .open = tpm_bios_measurements_open, .read = seq_read, .llseek = seq_lseek, .release = tpm_bios_measurements_release, }; static int tpm_read_log(struct tpm_chip *chip) { int rc; if (chip->log.bios_event_log != NULL) { dev_dbg(&chip->dev, "%s: ERROR - event log already initialized\n", __func__); return -EFAULT; } rc = tpm_read_log_acpi(chip); if (rc != -ENODEV) return rc; rc = tpm_read_log_efi(chip); if (rc != -ENODEV) return rc; return tpm_read_log_of(chip); } /* * tpm_bios_log_setup() - Read the event log from the firmware * @chip: TPM chip to use. * * If an event log is found then the securityfs files are set up to * export it to userspace, otherwise nothing is done. */ void tpm_bios_log_setup(struct tpm_chip *chip) { const char *name = dev_name(&chip->dev); unsigned int cnt; int log_version; int rc = 0; if (chip->flags & TPM_CHIP_FLAG_VIRTUAL) return; rc = tpm_read_log(chip); if (rc < 0) return; log_version = rc; cnt = 0; chip->bios_dir[cnt] = securityfs_create_dir(name, NULL); /* NOTE: securityfs_create_dir can return ENODEV if securityfs is * compiled out. The caller should ignore the ENODEV return code.
*/ if (IS_ERR(chip->bios_dir[cnt])) goto err; cnt++; chip->bin_log_seqops.chip = chip; if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) chip->bin_log_seqops.seqops = &tpm2_binary_b_measurements_seqops; else chip->bin_log_seqops.seqops = &tpm1_binary_b_measurements_seqops; chip->bios_dir[cnt] = securityfs_create_file("binary_bios_measurements", 0440, chip->bios_dir[0], (void *)&chip->bin_log_seqops, &tpm_bios_measurements_ops); if (IS_ERR(chip->bios_dir[cnt])) goto err; cnt++; if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { chip->ascii_log_seqops.chip = chip; chip->ascii_log_seqops.seqops = &tpm1_ascii_b_measurements_seqops; chip->bios_dir[cnt] = securityfs_create_file("ascii_bios_measurements", 0440, chip->bios_dir[0], (void *)&chip->ascii_log_seqops, &tpm_bios_measurements_ops); if (IS_ERR(chip->bios_dir[cnt])) goto err; cnt++; } return; err: chip->bios_dir[cnt] = NULL; tpm_bios_log_teardown(chip); return; } void tpm_bios_log_teardown(struct tpm_chip *chip) { int i; struct inode *inode; /* securityfs_remove currently doesn't synchronize removal with the * opening of pseudo files. As a workaround, set i_private = NULL here * during removal and check it during open(), both under * inode_lock()/unlock(). This ensures that open() either safely takes * a reference on the chip device or fails. */ for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) { if (chip->bios_dir[i]) { inode = d_inode(chip->bios_dir[i]); inode_lock(inode); inode->i_private = NULL; inode_unlock(inode); securityfs_remove(chip->bios_dir[i]); } } }
linux-master
drivers/char/tpm/eventlog/common.c
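Once tpm_bios_log_setup() has created the securityfs nodes, the log can be consumed from userspace as an ordinary file. A small sketch, assuming securityfs is mounted at /sys/kernel/security and the chip is registered as tpm0:

#include <stdio.h>

int main(void)
{
	unsigned char buf[4096];
	size_t n;
	FILE *f = fopen("/sys/kernel/security/tpm0/binary_bios_measurements",
			"rb");

	if (!f)
		return 1;
	/* stream the raw TCG event records to stdout */
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}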
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 IBM Corporation * * Authors: * Nayna Jain <[email protected]> * * Access to the TPM 2.0 event log as written by firmware. * It assumes that the writer of the event log has followed the TCG * Specification for Family "2.0" and written the event data in little * endian. With that assumption, no endian conversion of the structure * content is needed. */ #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/security.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/tpm_eventlog.h> #include "../tpm.h" #include "common.h" /* * calc_tpm2_event_size() - calculate the event size, where event * is an entry in the TPM 2.0 event log. The event is of type Crypto * Agile Log Entry Format as defined in the TCG EFI Protocol Specification * Family "2.0". * @event: event whose size is to be calculated. * @event_header: the first event in the event log. * * Returns the size of the event. If the event is invalid, returns 0. */ static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event, struct tcg_pcr_event *event_header) { return __calc_tpm2_event_size(event, event_header, false); } static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos) { struct tpm_chip *chip = m->private; struct tpm_bios_log *log = &chip->log; void *addr = log->bios_event_log; void *limit = log->bios_event_log_end; struct tcg_pcr_event *event_header; struct tcg_pcr_event2_head *event; size_t size; int i; event_header = addr; size = struct_size(event_header, event, event_header->event_size); if (*pos == 0) { if (addr + size < limit) { if ((event_header->event_type == 0) && (event_header->event_size == 0)) return NULL; return SEQ_START_TOKEN; } } if (*pos > 0) { addr += size; event = addr; size = calc_tpm2_event_size(event, event_header); if ((addr + size >= limit) || (size == 0)) return NULL; } for (i = 0; i < (*pos - 1); i++) { event = addr; size = calc_tpm2_event_size(event, event_header); if ((addr + size >= limit) || (size == 0)) return NULL; addr += size; } return addr; } static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, loff_t *pos) { struct tcg_pcr_event *event_header; struct tcg_pcr_event2_head *event; struct tpm_chip *chip = m->private; struct tpm_bios_log *log = &chip->log; void *limit = log->bios_event_log_end; size_t event_size; void *marker; (*pos)++; event_header = log->bios_event_log; if (v == SEQ_START_TOKEN) { event_size = struct_size(event_header, event, event_header->event_size); marker = event_header; } else { event = v; event_size = calc_tpm2_event_size(event, event_header); if (event_size == 0) return NULL; marker = event; } marker = marker + event_size; if (marker >= limit) return NULL; v = marker; event = v; event_size = calc_tpm2_event_size(event, event_header); if (((v + event_size) >= limit) || (event_size == 0)) return NULL; return v; } static void tpm2_bios_measurements_stop(struct seq_file *m, void *v) { } static int tpm2_binary_bios_measurements_show(struct seq_file *m, void *v) { struct tpm_chip *chip = m->private; struct tpm_bios_log *log = &chip->log; struct tcg_pcr_event *event_header = log->bios_event_log; struct tcg_pcr_event2_head *event = v; void *temp_ptr; size_t size; if (v == SEQ_START_TOKEN) { size = struct_size(event_header, event, event_header->event_size); temp_ptr = event_header; if (size > 0) seq_write(m, temp_ptr, size); } else { size = calc_tpm2_event_size(event, event_header); temp_ptr = event; if (size > 0) seq_write(m, temp_ptr, size); } return 0; } const struct 
seq_operations tpm2_binary_b_measurements_seqops = { .start = tpm2_bios_measurements_start, .next = tpm2_bios_measurements_next, .stop = tpm2_bios_measurements_stop, .show = tpm2_binary_bios_measurements_show, };
linux-master
drivers/char/tpm/eventlog/tpm2.c
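The iterator above sizes the first, TCG 1.2-style header event as struct_size(event_header, event, event_header->event_size), i.e. the fixed fields plus event_size trailing bytes. A standalone sketch of that sizing rule follows; the struct is written to mirror the kernel's struct tcg_pcr_event, while later crypto-agile entries need __calc_tpm2_event_size(), which walks the per-algorithm digest list and is not reproduced here.

#include <stddef.h>
#include <stdint.h>

/* mirrors struct tcg_pcr_event from linux/tpm_eventlog.h */
struct tcg_pcr_event_hdr {
	uint32_t pcr_idx;
	uint32_t event_type;
	uint8_t digest[20];		/* fixed SHA-1 digest */
	uint32_t event_size;
	uint8_t event[];		/* event_size bytes follow */
} __attribute__((packed));

static size_t header_event_size(const struct tcg_pcr_event_hdr *h)
{
	/* fixed header + variable event data, as struct_size() computes */
	return sizeof(*h) + h->event_size;
}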