// SPDX-License-Identifier: GPL-2.0-only
/*
* Kernel-based Virtual Machine driver for Linux
*
* This module enables machines with Intel VT-x extensions to run virtual
* machines without emulation or binary translation.
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Avi Kivity <[email protected]>
* Yaniv Kamay <[email protected]>
*/
#include <kvm/iodev.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>
#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>
#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"
#include <trace/events/ipi.h>
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
#include <linux/kvm_dirty_ring.h>
/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);
/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
/* The default (0) resets per-vcpu halt_poll_ns when shrinking. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
/*
* Ordering of locks:
*
* kvm->lock --> kvm->slots_lock --> kvm->irq_lock
*/
DEFINE_MUTEX(kvm_lock);
LIST_HEAD(vm_list);
static struct kmem_cache *kvm_vcpu_cache;
static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
static const struct file_operations stat_fops_per_vm;
static struct file_operations kvm_chardev_ops;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#define KVM_COMPAT(c) .compat_ioctl = (c)
#else
/*
* For architectures that don't implement a compat infrastructure,
* adopt a double line of defense:
* - Prevent a compat task from opening /dev/kvm
* - If the open has been done by a 64bit task, and the KVM fd
* passed to a compat task, let the ioctls fail.
*/
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg) { return -EINVAL; }
static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
.open = kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;
static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
}
bool kvm_is_zone_device_page(struct page *page)
{
/*
* The metadata used by is_zone_device_page() to determine whether or
* not a page is ZONE_DEVICE is guaranteed to be valid if and only if
* the device has been pinned, e.g. by get_user_pages(). WARN if the
* page_count() is zero to help detect bad usage of this helper.
*/
if (WARN_ON_ONCE(!page_count(page)))
return false;
return is_zone_device_page(page);
}
/*
* Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
* page, NULL otherwise. Note, the list of refcounted PG_reserved page types
* is likely incomplete; it has been compiled purely through people wanting to
* back guests with a certain type of memory and encountering issues.
*/
struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
{
struct page *page;
if (!pfn_valid(pfn))
return NULL;
page = pfn_to_page(pfn);
if (!PageReserved(page))
return page;
/* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
if (is_zero_pfn(pfn))
return page;
/*
* ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
* perspective they are "normal" pages, albeit with slightly different
* usage rules.
*/
if (kvm_is_zone_device_page(page))
return page;
return NULL;
}
/*
* Switches to specified vcpu, until a matching vcpu_put()
*/
void vcpu_load(struct kvm_vcpu *vcpu)
{
int cpu = get_cpu();
__this_cpu_write(kvm_running_vcpu, vcpu);
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);
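/*
 * Switches away from the loaded vcpu; pairs with a preceding vcpu_load().
 */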
void vcpu_put(struct kvm_vcpu *vcpu)
{
preempt_disable();
kvm_arch_vcpu_put(vcpu);
preempt_notifier_unregister(&vcpu->preempt_notifier);
__this_cpu_write(kvm_running_vcpu, NULL);
preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);
/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
int mode = kvm_vcpu_exiting_guest_mode(vcpu);
/*
* We need to wait for the VCPU to reenable interrupts and get out of
* READING_SHADOW_PAGE_TABLES mode.
*/
if (req & KVM_REQUEST_WAIT)
return mode != OUTSIDE_GUEST_MODE;
/*
* Need to kick a running VCPU, but otherwise there is nothing to do.
*/
return mode == IN_GUEST_MODE;
}
static void ack_kick(void *_completed)
{
}
static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{
if (cpumask_empty(cpus))
return false;
smp_call_function_many(cpus, ack_kick, NULL, wait);
return true;
}
static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
struct cpumask *tmp, int current_cpu)
{
int cpu;
if (likely(!(req & KVM_REQUEST_NO_ACTION)))
__kvm_make_request(req, vcpu);
if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
return;
/*
* Note, the vCPU could get migrated to a different pCPU at any point
* after kvm_request_needs_ipi(), which could result in sending an IPI
* to the previous pCPU. But, that's OK because the purpose of the IPI
* is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
* satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
* after this point is also OK, as the requirement is only that KVM wait
* for vCPUs that were reading SPTEs _before_ any changes were
* finalized. See kvm_vcpu_kick() for more details on handling requests.
*/
if (kvm_request_needs_ipi(vcpu, req)) {
cpu = READ_ONCE(vcpu->cpu);
if (cpu != -1 && cpu != current_cpu)
__cpumask_set_cpu(cpu, tmp);
}
}
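/*
 * Make a request on all vCPUs whose index is set in @vcpu_bitmap, kicking
 * (via IPI) any vCPU currently running in guest mode. Returns true if at
 * least one pCPU was kicked.
 */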
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
unsigned long *vcpu_bitmap)
{
struct kvm_vcpu *vcpu;
struct cpumask *cpus;
int i, me;
bool called;
me = get_cpu();
cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
cpumask_clear(cpus);
for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
vcpu = kvm_get_vcpu(kvm, i);
if (!vcpu)
continue;
kvm_make_vcpu_request(vcpu, req, cpus, me);
}
called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
put_cpu();
return called;
}
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
struct kvm_vcpu *except)
{
struct kvm_vcpu *vcpu;
struct cpumask *cpus;
unsigned long i;
bool called;
int me;
me = get_cpu();
cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
cpumask_clear(cpus);
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu == except)
continue;
kvm_make_vcpu_request(vcpu, req, cpus, me);
}
called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
put_cpu();
return called;
}
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
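/*
 * Flush the remote TLBs for @kvm, preferring the architecture-provided flush
 * and falling back to broadcasting KVM_REQ_TLB_FLUSH to all vCPUs.
 */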
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
++kvm->stat.generic.remote_tlb_flush_requests;
/*
* We want to publish modifications to the page tables before reading
* mode. Pairs with a memory barrier in arch-specific code.
* - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
* and smp_mb in walk_shadow_page_lockless_begin/end.
* - powerpc: smp_mb in kvmppc_prepare_to_enter.
*
* There is already an smp_mb__after_atomic() before
* kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
* barrier here.
*/
if (!kvm_arch_flush_remote_tlbs(kvm)
|| kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
++kvm->stat.generic.remote_tlb_flush;
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
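/*
 * Flush remote TLBs for the range of @nr_pages starting at @gfn, using the
 * architecture's range-based invalidation when it is available.
 */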
void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
return;
/*
* Fall back to flushing the entire TLB if the architecture's range-based
* TLB invalidation is unsupported or can't be performed for whatever
* reason.
*/
kvm_flush_remote_tlbs(kvm);
}
void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
const struct kvm_memory_slot *memslot)
{
/*
* All current use cases for flushing the TLBs for a specific memslot
* are related to dirty logging, and many do the TLB flush out of
* mmu_lock. The interaction between the various operations on a memslot
* must be serialized by slots_lock to ensure the TLB flush from one
* operation is observed by any other operation on the same memslot.
*/
lockdep_assert_held(&kvm->slots_lock);
kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
}
static void kvm_flush_shadow_all(struct kvm *kvm)
{
kvm_arch_flush_shadow_all(kvm);
kvm_arch_guest_memory_reclaimed(kvm);
}
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
gfp_t gfp_flags)
{
gfp_flags |= mc->gfp_zero;
if (mc->kmem_cache)
return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
else
return (void *)__get_free_page(gfp_flags);
}
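/*
 * Fill @mc with at least @min objects, sizing the backing array to @capacity
 * on first use. Caches are topped up in advance because they are consumed in
 * contexts where blocking allocations are disallowed, e.g. under mmu_lock.
 */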
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
{
gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
void *obj;
if (mc->nobjs >= min)
return 0;
if (unlikely(!mc->objects)) {
if (WARN_ON_ONCE(!capacity))
return -EIO;
mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp);
if (!mc->objects)
return -ENOMEM;
mc->capacity = capacity;
}
/* It is illegal to request a different capacity across topups. */
if (WARN_ON_ONCE(mc->capacity != capacity))
return -EIO;
while (mc->nobjs < mc->capacity) {
obj = mmu_memory_cache_alloc_obj(mc, gfp);
if (!obj)
return mc->nobjs >= min ? 0 : -ENOMEM;
mc->objects[mc->nobjs++] = obj;
}
return 0;
}
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
}
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
return mc->nobjs;
}
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
while (mc->nobjs) {
if (mc->kmem_cache)
kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
else
free_page((unsigned long)mc->objects[--mc->nobjs]);
}
kvfree(mc->objects);
mc->objects = NULL;
mc->capacity = 0;
}
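/*
 * Pop an object from a topped-up cache. The GFP_ATOMIC fallback exists only
 * to survive a missed or insufficient topup, hence the WARN_ON.
 */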
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
void *p;
if (WARN_ON(!mc->nobjs))
p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
else
p = mc->objects[--mc->nobjs];
BUG_ON(!p);
return p;
}
#endif
static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
mutex_init(&vcpu->mutex);
vcpu->cpu = -1;
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
vcpu->pid = NULL;
#ifndef __KVM_HAVE_ARCH_WQP
rcuwait_init(&vcpu->wait);
#endif
kvm_async_pf_vcpu_init(vcpu);
kvm_vcpu_set_in_spin_loop(vcpu, false);
kvm_vcpu_set_dy_eligible(vcpu, false);
vcpu->preempted = false;
vcpu->ready = false;
preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
vcpu->last_used_slot = NULL;
/* Fill the stats id string for the vcpu */
snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
task_pid_nr(current), id);
}
static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
kvm_arch_vcpu_destroy(vcpu);
kvm_dirty_ring_free(&vcpu->dirty_ring);
/*
* No need for rcu_read_lock as VCPU_RUN is the only place that changes
* the vcpu->pid pointer, and at destruction time all file descriptors
* are already gone.
*/
put_pid(rcu_dereference_protected(vcpu->pid, 1));
free_page((unsigned long)vcpu->run);
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
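/* Destroy all of a VM's vCPUs and remove them from vcpu_array. */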
void kvm_destroy_vcpus(struct kvm *kvm)
{
unsigned long i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_vcpu_destroy(vcpu);
xa_erase(&kvm->vcpu_array, i);
}
atomic_set(&kvm->online_vcpus, 0);
}
EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
return container_of(mn, struct kvm, mmu_notifier);
}
typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
unsigned long end);
typedef void (*on_unlock_fn_t)(struct kvm *kvm);
struct kvm_hva_range {
unsigned long start;
unsigned long end;
union kvm_mmu_notifier_arg arg;
hva_handler_t handler;
on_lock_fn_t on_lock;
on_unlock_fn_t on_unlock;
bool flush_on_ret;
bool may_block;
};
/*
* Use a dedicated stub instead of NULL to indicate that there is no callback
* function/handler. The compiler technically can't guarantee that a real
* function will have a non-zero address, and so it will generate code to
* check for !NULL, whereas comparing against a stub will be elided at compile
* time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
*/
static void kvm_null_fn(void)
{
}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
node; \
node = interval_tree_iter_next(node, start, last)) \
static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
const struct kvm_hva_range *range)
{
bool ret = false, locked = false;
struct kvm_gfn_range gfn_range;
struct kvm_memory_slot *slot;
struct kvm_memslots *slots;
int i, idx;
if (WARN_ON_ONCE(range->end <= range->start))
return 0;
/* A null handler is allowed if and only if on_lock() is provided. */
if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
IS_KVM_NULL_FN(range->handler)))
return 0;
idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
struct interval_tree_node *node;
slots = __kvm_memslots(kvm, i);
kvm_for_each_memslot_in_hva_range(node, slots,
range->start, range->end - 1) {
unsigned long hva_start, hva_end;
slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
hva_start = max(range->start, slot->userspace_addr);
hva_end = min(range->end, slot->userspace_addr +
(slot->npages << PAGE_SHIFT));
/*
* To optimize for the likely case where the address
* range is covered by zero or one memslots, don't
* bother making these conditional (to avoid writes on
* the second or later invocation of the handler).
*/
gfn_range.arg = range->arg;
gfn_range.may_block = range->may_block;
/*
* {gfn(page) | page intersects with [hva_start, hva_end)} =
* {gfn_start, gfn_start+1, ..., gfn_end-1}.
*/
gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
gfn_range.slot = slot;
if (!locked) {
locked = true;
KVM_MMU_LOCK(kvm);
if (!IS_KVM_NULL_FN(range->on_lock))
range->on_lock(kvm, range->start, range->end);
if (IS_KVM_NULL_FN(range->handler))
break;
}
ret |= range->handler(kvm, &gfn_range);
}
}
if (range->flush_on_ret && ret)
kvm_flush_remote_tlbs(kvm);
if (locked) {
KVM_MMU_UNLOCK(kvm);
if (!IS_KVM_NULL_FN(range->on_unlock))
range->on_unlock(kvm);
}
srcu_read_unlock(&kvm->srcu, idx);
/* The notifiers are averse to booleans. :-( */
return (int)ret;
}
static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
union kvm_mmu_notifier_arg arg,
hva_handler_t handler)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_hva_range range = {
.start = start,
.end = end,
.arg = arg,
.handler = handler,
.on_lock = (void *)kvm_null_fn,
.on_unlock = (void *)kvm_null_fn,
.flush_on_ret = true,
.may_block = false,
};
return __kvm_handle_hva_range(kvm, &range);
}
static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
hva_handler_t handler)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_hva_range range = {
.start = start,
.end = end,
.handler = handler,
.on_lock = (void *)kvm_null_fn,
.on_unlock = (void *)kvm_null_fn,
.flush_on_ret = false,
.may_block = false,
};
return __kvm_handle_hva_range(kvm, &range);
}
static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
/*
* Skipping invalid memslots is correct if and only if change_pte() is
* surrounded by invalidate_range_{start,end}(), which is currently
* guaranteed by the primary MMU. If that ever changes, KVM needs to
* unmap the memslot instead of skipping the memslot to ensure that KVM
* doesn't hold references to the old PFN.
*/
WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
if (range->slot->flags & KVM_MEMSLOT_INVALID)
return false;
return kvm_set_spte_gfn(kvm, range);
}
static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address,
pte_t pte)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const union kvm_mmu_notifier_arg arg = { .pte = pte };
trace_kvm_set_spte_hva(address);
/*
* .change_pte() must be surrounded by .invalidate_range_{start,end}().
* If mmu_invalidate_in_progress is zero, then no in-progress
* invalidations, including this one, found a relevant memslot at
* start(); rechecking memslots here is unnecessary. Note, a false
* positive (count elevated by a different invalidation) is sub-optimal
* but functionally ok.
*/
WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
return;
kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
}
void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
unsigned long end)
{
/*
* The count increase must become visible at unlock time as no
* spte can be established without taking the mmu_lock and
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_invalidate_in_progress++;
if (likely(kvm->mmu_invalidate_in_progress == 1)) {
kvm->mmu_invalidate_range_start = start;
kvm->mmu_invalidate_range_end = end;
} else {
/*
* Fully tracking multiple concurrent ranges has diminishing
* returns. Keep things simple and just find the minimal range
* which includes the current and new ranges. As there won't be
* enough information to subtract a range after its invalidate
* completes, any ranges invalidated concurrently will
* accumulate and persist until all outstanding invalidates
* complete.
*/
kvm->mmu_invalidate_range_start =
min(kvm->mmu_invalidate_range_start, start);
kvm->mmu_invalidate_range_end =
max(kvm->mmu_invalidate_range_end, end);
}
}
static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_hva_range hva_range = {
.start = range->start,
.end = range->end,
.handler = kvm_unmap_gfn_range,
.on_lock = kvm_mmu_invalidate_begin,
.on_unlock = kvm_arch_guest_memory_reclaimed,
.flush_on_ret = true,
.may_block = mmu_notifier_range_blockable(range),
};
trace_kvm_unmap_hva_range(range->start, range->end);
/*
* Prevent memslot modification between range_start() and range_end()
* so that conditionally locking provides the same result in both
* functions. Without that guarantee, the mmu_invalidate_in_progress
* adjustments will be imbalanced.
*
* Pairs with the decrement in range_end().
*/
spin_lock(&kvm->mn_invalidate_lock);
kvm->mn_active_invalidate_count++;
spin_unlock(&kvm->mn_invalidate_lock);
/*
* Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
* before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
* each cache's lock. There are relatively few caches in existence at
* any given time, and the caches themselves can check for hva overlap,
* i.e. don't need to rely on memslot overlap checks for performance.
* Because this runs without holding mmu_lock, the pfn caches must use
* mn_active_invalidate_count (see above) instead of
* mmu_invalidate_in_progress.
*/
gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
hva_range.may_block);
__kvm_handle_hva_range(kvm, &hva_range);
return 0;
}
void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
unsigned long end)
{
/*
* This sequence increase will notify the kvm page fault handler that
* the page that is going to be mapped in the spte could have
* been freed.
*/
kvm->mmu_invalidate_seq++;
smp_wmb();
/*
* The above sequence increase must be visible before the
* below count decrease, which is ensured by the smp_wmb above
* in conjunction with the smp_rmb in mmu_invalidate_retry().
*/
kvm->mmu_invalidate_in_progress--;
}
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_hva_range hva_range = {
.start = range->start,
.end = range->end,
.handler = (void *)kvm_null_fn,
.on_lock = kvm_mmu_invalidate_end,
.on_unlock = (void *)kvm_null_fn,
.flush_on_ret = false,
.may_block = mmu_notifier_range_blockable(range),
};
bool wake;
__kvm_handle_hva_range(kvm, &hva_range);
/* Pairs with the increment in range_start(). */
spin_lock(&kvm->mn_invalidate_lock);
wake = (--kvm->mn_active_invalidate_count == 0);
spin_unlock(&kvm->mn_invalidate_lock);
/*
* There can only be one waiter, since the wait happens under
* slots_lock.
*/
if (wake)
rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
BUG_ON(kvm->mmu_invalidate_in_progress < 0);
}
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
trace_kvm_age_hva(start, end);
return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
kvm_age_gfn);
}
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
trace_kvm_age_hva(start, end);
/*
* Even though we do not flush TLB, this will still adversely
* affect performance on pre-Haswell Intel EPT, where there is
* no EPT Access Bit to clear, so we have to tear down EPT
* tables instead. If we find this unacceptable, we can always
* add a parameter to kvm_age_hva so that it effectively doesn't
* do anything on clear_young.
*
* Also note that currently we never issue secondary TLB flushes
* from clear_young, leaving this job up to the regular system
* cadence. If we find this inaccurate, we might come up with a
* more sophisticated heuristic later.
*/
return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
trace_kvm_test_age_hva(address);
return kvm_handle_hva_range_no_flush(mn, address, address + 1,
kvm_test_age_gfn);
}
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int idx;
idx = srcu_read_lock(&kvm->srcu);
kvm_flush_shadow_all(kvm);
srcu_read_unlock(&kvm->srcu, idx);
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
.clear_young = kvm_mmu_notifier_clear_young,
.test_young = kvm_mmu_notifier_test_young,
.change_pte = kvm_mmu_notifier_change_pte,
.release = kvm_mmu_notifier_release,
};
static int kvm_init_mmu_notifier(struct kvm *kvm)
{
kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}
#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
static int kvm_init_mmu_notifier(struct kvm *kvm)
{
return 0;
}
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
unsigned long state,
void *unused)
{
struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
return kvm_arch_pm_notifier(kvm, state);
}
static void kvm_init_pm_notifier(struct kvm *kvm)
{
kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
/* Suspend KVM before we suspend ftrace, RCU, etc. */
kvm->pm_notifier.priority = INT_MAX;
register_pm_notifier(&kvm->pm_notifier);
}
static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
unregister_pm_notifier(&kvm->pm_notifier);
}
#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}
static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
if (!memslot->dirty_bitmap)
return;
kvfree(memslot->dirty_bitmap);
memslot->dirty_bitmap = NULL;
}
/* This does not remove the slot from struct kvm_memslots data structures */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
kvm_destroy_dirty_bitmap(slot);
kvm_arch_free_memslot(kvm, slot);
kfree(slot);
}
static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
struct hlist_node *idnode;
struct kvm_memory_slot *memslot;
int bkt;
/*
* The same memslot objects live in both active and inactive sets;
* arbitrarily free using index '1' so the second invocation of this
* function isn't operating over a structure with dangling pointers
* (even though this function isn't actually touching them).
*/
if (!slots->node_idx)
return;
hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
kvm_free_memslot(kvm, memslot);
}
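/*
 * Stats that userspace can clear by writing to the debugfs file (cumulative
 * and peak) are writable; instantaneous stats are read-only.
 */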
static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{
switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
case KVM_STATS_TYPE_INSTANT:
return 0444;
case KVM_STATS_TYPE_CUMULATIVE:
case KVM_STATS_TYPE_PEAK:
default:
return 0644;
}
}
static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
int i;
int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
kvm_vcpu_stats_header.num_desc;
if (IS_ERR(kvm->debugfs_dentry))
return;
debugfs_remove_recursive(kvm->debugfs_dentry);
if (kvm->debugfs_stat_data) {
for (i = 0; i < kvm_debugfs_num_entries; i++)
kfree(kvm->debugfs_stat_data[i]);
kfree(kvm->debugfs_stat_data);
}
}
static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
{
static DEFINE_MUTEX(kvm_debugfs_lock);
struct dentry *dent;
char dir_name[ITOA_MAX_LEN * 2];
struct kvm_stat_data *stat_data;
const struct _kvm_stats_desc *pdesc;
int i, ret = -ENOMEM;
int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
kvm_vcpu_stats_header.num_desc;
if (!debugfs_initialized())
return 0;
snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
mutex_lock(&kvm_debugfs_lock);
dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
if (dent) {
pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
dput(dent);
mutex_unlock(&kvm_debugfs_lock);
return 0;
}
dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
mutex_unlock(&kvm_debugfs_lock);
if (IS_ERR(dent))
return 0;
kvm->debugfs_dentry = dent;
kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
sizeof(*kvm->debugfs_stat_data),
GFP_KERNEL_ACCOUNT);
if (!kvm->debugfs_stat_data)
goto out_err;
for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
pdesc = &kvm_vm_stats_desc[i];
stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
if (!stat_data)
goto out_err;
stat_data->kvm = kvm;
stat_data->desc = pdesc;
stat_data->kind = KVM_STAT_VM;
kvm->debugfs_stat_data[i] = stat_data;
debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
kvm->debugfs_dentry, stat_data,
&stat_fops_per_vm);
}
for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
pdesc = &kvm_vcpu_stats_desc[i];
stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
if (!stat_data)
goto out_err;
stat_data->kvm = kvm;
stat_data->desc = pdesc;
stat_data->kind = KVM_STAT_VCPU;
kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
kvm->debugfs_dentry, stat_data,
&stat_fops_per_vm);
}
ret = kvm_arch_create_vm_debugfs(kvm);
if (ret)
goto out_err;
return 0;
out_err:
kvm_destroy_vm_debugfs(kvm);
return ret;
}
/*
* Called after the VM is otherwise initialized, but just before adding it to
* the vm_list.
*/
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
return 0;
}
/*
* Called just after removing the VM from the vm_list, but before doing any
* other destruction.
*/
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}
/*
* Called after the per-VM debugfs directory is created. By that point
* kvm->debugfs_dentry is set up, so arch-specific debugfs entries can be
* created under it. Cleanup is done automatically and recursively in
* kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed.
*/
int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
return 0;
}
static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{
struct kvm *kvm = kvm_arch_alloc_vm();
struct kvm_memslots *slots;
int r = -ENOMEM;
int i, j;
if (!kvm)
return ERR_PTR(-ENOMEM);
/* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */
__module_get(kvm_chardev_ops.owner);
KVM_MMU_LOCK_INIT(kvm);
mmgrab(current->mm);
kvm->mm = current->mm;
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
mutex_init(&kvm->slots_arch_lock);
spin_lock_init(&kvm->mn_invalidate_lock);
rcuwait_init(&kvm->mn_memslots_update_rcuwait);
xa_init(&kvm->vcpu_array);
INIT_LIST_HEAD(&kvm->gpc_list);
spin_lock_init(&kvm->gpc_lock);
INIT_LIST_HEAD(&kvm->devices);
kvm->max_vcpus = KVM_MAX_VCPUS;
BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
/*
* Force subsequent debugfs file creations to fail if the VM directory
* is not created (by kvm_create_vm_debugfs()).
*/
kvm->debugfs_dentry = ERR_PTR(-ENOENT);
snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
task_pid_nr(current));
if (init_srcu_struct(&kvm->srcu))
goto out_err_no_srcu;
if (init_srcu_struct(&kvm->irq_srcu))
goto out_err_no_irq_srcu;
refcount_set(&kvm->users_count, 1);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
for (j = 0; j < 2; j++) {
slots = &kvm->__memslots[i][j];
atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
slots->hva_tree = RB_ROOT_CACHED;
slots->gfn_tree = RB_ROOT;
hash_init(slots->id_hash);
slots->node_idx = j;
/* Generations must be different for each address space. */
slots->generation = i;
}
rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
}
for (i = 0; i < KVM_NR_BUSES; i++) {
rcu_assign_pointer(kvm->buses[i],
kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
if (!kvm->buses[i])
goto out_err_no_arch_destroy_vm;
}
r = kvm_arch_init_vm(kvm, type);
if (r)
goto out_err_no_arch_destroy_vm;
r = hardware_enable_all();
if (r)
goto out_err_no_disable;
#ifdef CONFIG_HAVE_KVM_IRQFD
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
r = kvm_init_mmu_notifier(kvm);
if (r)
goto out_err_no_mmu_notifier;
r = kvm_coalesced_mmio_init(kvm);
if (r < 0)
goto out_no_coalesced_mmio;
r = kvm_create_vm_debugfs(kvm, fdname);
if (r)
goto out_err_no_debugfs;
r = kvm_arch_post_init_vm(kvm);
if (r)
goto out_err;
mutex_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
mutex_unlock(&kvm_lock);
preempt_notifier_inc();
kvm_init_pm_notifier(kvm);
return kvm;
out_err:
kvm_destroy_vm_debugfs(kvm);
out_err_no_debugfs:
kvm_coalesced_mmio_free(kvm);
out_no_coalesced_mmio:
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
if (kvm->mmu_notifier.ops)
mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
hardware_disable_all();
out_err_no_disable:
kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
for (i = 0; i < KVM_NR_BUSES; i++)
kfree(kvm_get_bus(kvm, i));
cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
kvm_arch_free_vm(kvm);
mmdrop(current->mm);
module_put(kvm_chardev_ops.owner);
return ERR_PTR(r);
}
static void kvm_destroy_devices(struct kvm *kvm)
{
struct kvm_device *dev, *tmp;
/*
* We do not need to take the kvm->lock here, because nobody else
* has a reference to the struct kvm at this point and therefore
* cannot access the devices list anyhow.
*/
list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
list_del(&dev->vm_node);
dev->ops->destroy(dev);
}
}
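/* Tear down the VM; called when the last reference is put, see kvm_put_kvm(). */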
static void kvm_destroy_vm(struct kvm *kvm)
{
int i;
struct mm_struct *mm = kvm->mm;
kvm_destroy_pm_notifier(kvm);
kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
kvm_destroy_vm_debugfs(kvm);
kvm_arch_sync_events(kvm);
mutex_lock(&kvm_lock);
list_del(&kvm->vm_list);
mutex_unlock(&kvm_lock);
kvm_arch_pre_destroy_vm(kvm);
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++) {
struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
if (bus)
kvm_io_bus_destroy(bus);
kvm->buses[i] = NULL;
}
kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
/*
* At this point, pending calls to invalidate_range_start()
* have completed but no more MMU notifiers will run, so
* mn_active_invalidate_count may remain unbalanced.
* No threads can be waiting in kvm_swap_active_memslots() as the
* last reference on KVM has been dropped, but freeing
* memslots would deadlock without this manual intervention.
*/
WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
kvm->mn_active_invalidate_count = 0;
#else
kvm_flush_shadow_all(kvm);
#endif
kvm_arch_destroy_vm(kvm);
kvm_destroy_devices(kvm);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
}
cleanup_srcu_struct(&kvm->irq_srcu);
cleanup_srcu_struct(&kvm->srcu);
kvm_arch_free_vm(kvm);
preempt_notifier_dec();
hardware_disable_all();
mmdrop(mm);
module_put(kvm_chardev_ops.owner);
}
void kvm_get_kvm(struct kvm *kvm)
{
refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);
/*
* A safe version of kvm_get_kvm() that makes sure the VM is not in the
* middle of destruction. Returns true if kvm was referenced successfully,
* false otherwise.
*/
bool kvm_get_kvm_safe(struct kvm *kvm)
{
return refcount_inc_not_zero(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
void kvm_put_kvm(struct kvm *kvm)
{
if (refcount_dec_and_test(&kvm->users_count))
kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);
/*
* Used to put a reference that was taken on behalf of an object associated
* with a user-visible file descriptor, e.g. a vcpu or device, if installation
* of the new file descriptor fails and the reference cannot be transferred to
* its final owner. In such cases, the caller is still actively using @kvm and
* will fail miserably if the refcount unexpectedly hits zero.
*/
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
struct kvm *kvm = filp->private_data;
kvm_irqfd_release(kvm);
kvm_put_kvm(kvm);
return 0;
}
/*
* Allocation size is twice as large as the actual dirty bitmap size.
* See kvm_vm_ioctl_get_dirty_log() for why this is needed.
*/
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
if (!memslot->dirty_bitmap)
return -ENOMEM;
return 0;
}
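/*
 * Each address space has two memslot sets; the set not currently published
 * via kvm->memslots is "inactive" and can be modified safely.
 */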
static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
{
struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
int node_idx_inactive = active->node_idx ^ 1;
return &kvm->__memslots[as_id][node_idx_inactive];
}
/*
* Helper to get the address space ID when one of memslot pointers may be NULL.
* This also serves as a sanity check that at least one of the pointers is non-NULL,
* and that their address space IDs don't diverge.
*/
static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
struct kvm_memory_slot *b)
{
if (WARN_ON_ONCE(!a && !b))
return 0;
if (!a)
return b->as_id;
if (!b)
return a->as_id;
WARN_ON_ONCE(a->as_id != b->as_id);
return a->as_id;
}
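/* Insert @slot into the gfn tree, which is keyed and sorted by base_gfn. */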
static void kvm_insert_gfn_node(struct kvm_memslots *slots,
struct kvm_memory_slot *slot)
{
struct rb_root *gfn_tree = &slots->gfn_tree;
struct rb_node **node, *parent;
int idx = slots->node_idx;
parent = NULL;
for (node = &gfn_tree->rb_node; *node; ) {
struct kvm_memory_slot *tmp;
tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
parent = *node;
if (slot->base_gfn < tmp->base_gfn)
node = &(*node)->rb_left;
else if (slot->base_gfn > tmp->base_gfn)
node = &(*node)->rb_right;
else
BUG();
}
rb_link_node(&slot->gfn_node[idx], parent, node);
rb_insert_color(&slot->gfn_node[idx], gfn_tree);
}
static void kvm_erase_gfn_node(struct kvm_memslots *slots,
struct kvm_memory_slot *slot)
{
rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
}
static void kvm_replace_gfn_node(struct kvm_memslots *slots,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new)
{
int idx = slots->node_idx;
WARN_ON_ONCE(old->base_gfn != new->base_gfn);
rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
&slots->gfn_tree);
}
/*
* Replace @old with @new in the inactive memslots.
*
* With NULL @old this simply adds @new.
* With NULL @new this simply removes @old.
*
* If @new is non-NULL its hva_node[slots_idx] range has to be set
* appropriately.
*/
static void kvm_replace_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new)
{
int as_id = kvm_memslots_get_as_id(old, new);
struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
int idx = slots->node_idx;
if (old) {
hash_del(&old->id_node[idx]);
interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
if ((long)old == atomic_long_read(&slots->last_used_slot))
atomic_long_set(&slots->last_used_slot, (long)new);
if (!new) {
kvm_erase_gfn_node(slots, old);
return;
}
}
/*
* Initialize @new's hva range. Do this even when replacing an @old
* slot; kvm_copy_memslot() deliberately does not touch node data.
*/
new->hva_node[idx].start = new->userspace_addr;
new->hva_node[idx].last = new->userspace_addr +
(new->npages << PAGE_SHIFT) - 1;
/*
* (Re)Add the new memslot. There is no O(1) interval_tree_replace(), so
* hva_node needs to be swapped with remove+insert even though the hva can't
* change when replacing an existing slot.
*/
hash_add(slots->id_hash, &new->id_node[idx], new->id);
interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
/*
* If the memslot gfn is unchanged, rb_replace_node() can be used to
* switch the node in the gfn tree instead of removing the old and
* inserting the new as two separate operations. Replacement is a
* single O(1) operation versus two O(log(n)) operations for
* remove+insert.
*/
if (old && old->base_gfn == new->base_gfn) {
kvm_replace_gfn_node(slots, old, new);
} else {
if (old)
kvm_erase_gfn_node(slots, old);
kvm_insert_gfn_node(slots, new);
}
}
static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
#ifdef __KVM_HAVE_READONLY_MEM
valid_flags |= KVM_MEM_READONLY;
#endif
if (mem->flags & ~valid_flags)
return -EINVAL;
return 0;
}
static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
{
struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
/* Grab the generation from the active memslots. */
u64 gen = __kvm_memslots(kvm, as_id)->generation;
WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
/*
* Do not store the new memslots while there are invalidations in
* progress, otherwise the locking in invalidate_range_start and
* invalidate_range_end will be unbalanced.
*/
spin_lock(&kvm->mn_invalidate_lock);
prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
while (kvm->mn_active_invalidate_count) {
set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(&kvm->mn_invalidate_lock);
schedule();
spin_lock(&kvm->mn_invalidate_lock);
}
finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
rcu_assign_pointer(kvm->memslots[as_id], slots);
spin_unlock(&kvm->mn_invalidate_lock);
/*
* Acquired in kvm_set_memslot. Must be released before synchronizing
* SRCU below in order to avoid deadlock with another thread
* acquiring the slots_arch_lock in an srcu critical section.
*/
mutex_unlock(&kvm->slots_arch_lock);
synchronize_srcu_expedited(&kvm->srcu);
/*
* Increment the new memslot generation a second time, dropping the
* update in-progress flag and incrementing the generation based on
* the number of address spaces. This provides a unique and easily
* identifiable generation number while the memslots are in flux.
*/
gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
/*
* Generations must be unique even across address spaces. We do not need
* a global counter for that; instead the generation space is evenly split
* across address spaces. For example, with two address spaces, address
* space 0 will use generations 0, 2, 4, ... while address space 1 will
* use generations 1, 3, 5, ...
*/
gen += KVM_ADDRESS_SPACE_NUM;
kvm_arch_memslots_updated(kvm, gen);
slots->generation = gen;
}
static int kvm_prepare_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
int r;
/*
* If dirty logging is disabled, nullify the bitmap; the old bitmap
* will be freed on "commit". If logging is enabled in both old and
* new, reuse the existing bitmap. If logging is enabled only in the
* new and KVM isn't using a ring buffer, allocate and initialize a
* new bitmap.
*/
if (change != KVM_MR_DELETE) {
if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
new->dirty_bitmap = NULL;
else if (old && old->dirty_bitmap)
new->dirty_bitmap = old->dirty_bitmap;
else if (kvm_use_dirty_bitmap(kvm)) {
r = kvm_alloc_dirty_bitmap(new);
if (r)
return r;
if (kvm_dirty_log_manual_protect_and_init_set(kvm))
bitmap_set(new->dirty_bitmap, 0, new->npages);
}
}
r = kvm_arch_prepare_memory_region(kvm, old, new, change);
/* Free the bitmap on failure if it was allocated above. */
if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
kvm_destroy_dirty_bitmap(new);
return r;
}
static void kvm_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
int old_flags = old ? old->flags : 0;
int new_flags = new ? new->flags : 0;
/*
* Update the total number of memslot pages before calling the arch
* hook so that architectures can consume the result directly.
*/
if (change == KVM_MR_DELETE)
kvm->nr_memslot_pages -= old->npages;
else if (change == KVM_MR_CREATE)
kvm->nr_memslot_pages += new->npages;
if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
atomic_set(&kvm->nr_memslots_dirty_logging,
atomic_read(&kvm->nr_memslots_dirty_logging) + change);
}
kvm_arch_commit_memory_region(kvm, old, new, change);
switch (change) {
case KVM_MR_CREATE:
/* Nothing more to do. */
break;
case KVM_MR_DELETE:
/* Free the old memslot and all its metadata. */
kvm_free_memslot(kvm, old);
break;
case KVM_MR_MOVE:
case KVM_MR_FLAGS_ONLY:
/*
* Free the dirty bitmap as needed; the below check encompasses
* both the flags and whether a ring buffer is being used.
*/
if (old->dirty_bitmap && !new->dirty_bitmap)
kvm_destroy_dirty_bitmap(old);
/*
* The final quirk. Free the detached, old slot, but only its
* memory, not any metadata. Metadata, including arch specific
* data, may be reused by @new.
*/
kfree(old);
break;
default:
BUG();
}
}
/*
* Activate @new, which must be installed in the inactive slots by the caller,
* by swapping the active slots and then propagating @new to @old once @old is
* unreachable and can be safely modified.
*
* With NULL @old this simply adds @new to @active (while swapping the sets).
* With NULL @new this simply removes @old from @active and frees it
* (while also swapping the sets).
*/
static void kvm_activate_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new)
{
int as_id = kvm_memslots_get_as_id(old, new);
kvm_swap_active_memslots(kvm, as_id);
/* Propagate the new memslot to the now inactive memslots. */
kvm_replace_memslot(kvm, old, new);
}
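/*
 * Copy @src's payload to @dest. The hva/gfn/id node linkage is deliberately
 * left untouched; see kvm_replace_memslot().
 */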
static void kvm_copy_memslot(struct kvm_memory_slot *dest,
const struct kvm_memory_slot *src)
{
dest->base_gfn = src->base_gfn;
dest->npages = src->npages;
dest->dirty_bitmap = src->dirty_bitmap;
dest->arch = src->arch;
dest->userspace_addr = src->userspace_addr;
dest->flags = src->flags;
dest->id = src->id;
dest->as_id = src->as_id;
}
static void kvm_invalidate_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *invalid_slot)
{
/*
* Mark the current slot INVALID. As with all memslot modifications,
* this must be done on an unreachable slot to avoid modifying the
* current slot in the active tree.
*/
kvm_copy_memslot(invalid_slot, old);
invalid_slot->flags |= KVM_MEMSLOT_INVALID;
kvm_replace_memslot(kvm, old, invalid_slot);
/*
* Activate the slot that is now marked INVALID, but don't propagate
* the slot to the now inactive slots. The slot is either going to be
* deleted or recreated as a new slot.
*/
kvm_swap_active_memslots(kvm, old->as_id);
/*
* From this point no new shadow pages pointing to a deleted, or moved,
* memslot will be created. Validation of sp->gfn happens in:
* - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
* - kvm_is_visible_gfn (mmu_check_root)
*/
kvm_arch_flush_shadow_memslot(kvm, old);
kvm_arch_guest_memory_reclaimed(kvm);
/* Was released by kvm_swap_active_memslots(), reacquire. */
mutex_lock(&kvm->slots_arch_lock);
/*
* Copy the arch-specific field of the newly-installed slot back to the
* old slot as the arch data could have changed between releasing
* slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
* above. Writers are required to retrieve memslots *after* acquiring
* slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
*/
old->arch = invalid_slot->arch;
}
static void kvm_create_memslot(struct kvm *kvm,
struct kvm_memory_slot *new)
{
/* Add the new memslot to the inactive set and activate. */
kvm_replace_memslot(kvm, NULL, new);
kvm_activate_memslot(kvm, NULL, new);
}
static void kvm_delete_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *invalid_slot)
{
/*
* Remove the old memslot from the inactive memslots by passing NULL as
* the "new" slot, then do the same for the invalid version in the active
* slots.
*/
kvm_replace_memslot(kvm, old, NULL);
kvm_activate_memslot(kvm, invalid_slot, NULL);
}
static void kvm_move_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
struct kvm_memory_slot *invalid_slot)
{
/*
* Replace the old memslot in the inactive slots, and then swap slots
* and replace the current INVALID with the new as well.
*/
kvm_replace_memslot(kvm, old, new);
kvm_activate_memslot(kvm, invalid_slot, new);
}
static void kvm_update_flags_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new)
{
/*
* Similar to the MOVE case, but the slot doesn't need to be zapped as
* an intermediate step. Instead, the old memslot is simply replaced
* with a new, updated copy in both memslot sets.
*/
kvm_replace_memslot(kvm, old, new);
kvm_activate_memslot(kvm, old, new);
}
static int kvm_set_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
struct kvm_memory_slot *invalid_slot;
int r;
/*
* Released in kvm_swap_active_memslots().
*
* Must be held from before the current memslots are copied until after
* the new memslots are installed with rcu_assign_pointer, then
* released before the synchronize srcu in kvm_swap_active_memslots().
*
* When modifying memslots outside of the slots_lock, must be held
* before reading the pointer to the current memslots until after all
* changes to those memslots are complete.
*
* These rules ensure that installing new memslots does not lose
* changes made to the previous memslots.
*/
mutex_lock(&kvm->slots_arch_lock);
/*
* Invalidate the old slot if it's being deleted or moved. This is
* done prior to actually deleting/moving the memslot to allow vCPUs to
* continue running by ensuring there are no mappings or shadow pages
* for the memslot when it is deleted/moved. Without pre-invalidation
* (and without a lock), a window would exist between effecting the
* delete/move and committing the changes in arch code where KVM or a
* guest could access a non-existent memslot.
*
* Modifications are done on a temporary, unreachable slot. The old
* slot needs to be preserved in case a later step fails and the
* invalidation needs to be reverted.
*/
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
if (!invalid_slot) {
mutex_unlock(&kvm->slots_arch_lock);
return -ENOMEM;
}
kvm_invalidate_memslot(kvm, old, invalid_slot);
}
r = kvm_prepare_memory_region(kvm, old, new, change);
if (r) {
/*
* For DELETE/MOVE, revert the above INVALID change. No
* modifications required since the original slot was preserved
* in the inactive slots. Changing the active memslots also
releases slots_arch_lock.
*/
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
kvm_activate_memslot(kvm, invalid_slot, old);
kfree(invalid_slot);
} else {
mutex_unlock(&kvm->slots_arch_lock);
}
return r;
}
/*
* For DELETE and MOVE, the temporary slot (invalid_slot) is now active as
* the INVALID version of the old slot. MOVE is particularly special as it
* reuses the old slot and returns a copy of the old slot (in invalid_slot).
* For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the
* old slot is detached but otherwise preserved.
*/
if (change == KVM_MR_CREATE)
kvm_create_memslot(kvm, new);
else if (change == KVM_MR_DELETE)
kvm_delete_memslot(kvm, old, invalid_slot);
else if (change == KVM_MR_MOVE)
kvm_move_memslot(kvm, old, new, invalid_slot);
else if (change == KVM_MR_FLAGS_ONLY)
kvm_update_flags_memslot(kvm, old, new);
else
BUG();
/* Free the temporary INVALID slot used for DELETE and MOVE. */
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
kfree(invalid_slot);
/*
* No need to refresh new->arch, changes after dropping slots_arch_lock
* will directly hit the final, active memslot. Architectures are
* responsible for knowing that new->arch may be stale.
*/
kvm_commit_memory_region(kvm, old, new, change);
return 0;
}
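/*
 * Returns true if any gfn in [@start, @end) is claimed by a memslot other
 * than the one identified by @id.
 */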
static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
gfn_t start, gfn_t end)
{
struct kvm_memslot_iter iter;
kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
if (iter.slot->id != id)
return true;
}
return false;
}
/*
* Allocate some memory and give it an address in the guest physical address
* space.
*
* Discontiguous memory is allowed, mostly for framebuffers.
*
* Must be called holding kvm->slots_lock for write.
*/
int __kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem)
{
struct kvm_memory_slot *old, *new;
struct kvm_memslots *slots;
enum kvm_mr_change change;
unsigned long npages;
gfn_t base_gfn;
int as_id, id;
int r;
r = check_memory_region_flags(mem);
if (r)
return r;
as_id = mem->slot >> 16;
id = (u16)mem->slot;
/* General sanity checks */
if ((mem->memory_size & (PAGE_SIZE - 1)) ||
(mem->memory_size != (unsigned long)mem->memory_size))
return -EINVAL;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
return -EINVAL;
/* We can read the guest memory with __xxx_user() later on. */
if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
(mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
!access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))
return -EINVAL;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
return -EINVAL;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
return -EINVAL;
if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
return -EINVAL;
slots = __kvm_memslots(kvm, as_id);
/*
* Note, the old memslot (and the pointer itself!) may be invalidated
* and/or destroyed by kvm_set_memslot().
*/
old = id_to_memslot(slots, id);
if (!mem->memory_size) {
if (!old || !old->npages)
return -EINVAL;
if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
return -EIO;
return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
}
base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
npages = (mem->memory_size >> PAGE_SHIFT);
if (!old || !old->npages) {
change = KVM_MR_CREATE;
/*
* To simplify KVM internals, the total number of pages across
* all memslots must fit in an unsigned long.
*/
if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
return -EINVAL;
} else { /* Modify an existing slot. */
if ((mem->userspace_addr != old->userspace_addr) ||
(npages != old->npages) ||
((mem->flags ^ old->flags) & KVM_MEM_READONLY))
return -EINVAL;
if (base_gfn != old->base_gfn)
change = KVM_MR_MOVE;
else if (mem->flags != old->flags)
change = KVM_MR_FLAGS_ONLY;
else /* Nothing to change. */
return 0;
}
if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
return -EEXIST;
/* Allocate a slot that will persist in the memslot. */
new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
if (!new)
return -ENOMEM;
new->as_id = as_id;
new->id = id;
new->base_gfn = base_gfn;
new->npages = npages;
new->flags = mem->flags;
new->userspace_addr = mem->userspace_addr;
r = kvm_set_memslot(kvm, old, new, change);
if (r)
kfree(new);
return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
int kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem)
{
int r;
mutex_lock(&kvm->slots_lock);
r = __kvm_set_memory_region(kvm, mem);
mutex_unlock(&kvm->slots_lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem)
{
if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
return -EINVAL;
return kvm_set_memory_region(kvm, mem);
}
#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
* kvm_get_dirty_log - get a snapshot of dirty pages
* @kvm: pointer to kvm instance
* @log: slot id and address to which we copy the log
* @is_dirty: set to '1' if any dirty pages were found
* @memslot: set to the associated memslot, always valid on success
*/
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
int *is_dirty, struct kvm_memory_slot **memslot)
{
struct kvm_memslots *slots;
int i, as_id, id;
unsigned long n;
unsigned long any = 0;
/* Dirty ring tracking may be exclusive to dirty log tracking */
if (!kvm_use_dirty_bitmap(kvm))
return -ENXIO;
*memslot = NULL;
*is_dirty = 0;
as_id = log->slot >> 16;
id = (u16)log->slot;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
return -EINVAL;
slots = __kvm_memslots(kvm, as_id);
*memslot = id_to_memslot(slots, id);
if (!(*memslot) || !(*memslot)->dirty_bitmap)
return -ENOENT;
kvm_arch_sync_dirty_log(kvm, *memslot);
n = kvm_dirty_bitmap_bytes(*memslot);
for (i = 0; !any && i < n/sizeof(long); ++i)
any = (*memslot)->dirty_bitmap[i];
if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
return -EFAULT;
if (any)
*is_dirty = 1;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
/**
* kvm_get_dirty_log_protect - get a snapshot of dirty pages
* and reenable dirty page tracking for the corresponding pages.
* @kvm: pointer to kvm instance
* @log: slot id and address to which we copy the log
*
* We need to keep in mind that VCPU threads can write to the bitmap
* concurrently. So, to avoid losing track of dirty pages we keep the
* following order:
*
* 1. Take a snapshot of the bit and clear it if needed.
* 2. Write protect the corresponding page.
* 3. Copy the snapshot to userspace.
* 4. Upon return, the caller flushes TLBs if needed.
*
* Between 2 and 4, the guest may write to the page using the remaining TLB
* entry. This is not a problem because the page is reported dirty using
* the snapshot taken before and step 4 ensures that writes done after
* exiting to userspace will be logged for the next call.
*
*/
static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int i, as_id, id;
unsigned long n;
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_buffer;
bool flush;
/* Dirty ring tracking may be exclusive to dirty log tracking */
if (!kvm_use_dirty_bitmap(kvm))
return -ENXIO;
as_id = log->slot >> 16;
id = (u16)log->slot;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
return -EINVAL;
slots = __kvm_memslots(kvm, as_id);
memslot = id_to_memslot(slots, id);
if (!memslot || !memslot->dirty_bitmap)
return -ENOENT;
dirty_bitmap = memslot->dirty_bitmap;
kvm_arch_sync_dirty_log(kvm, memslot);
n = kvm_dirty_bitmap_bytes(memslot);
flush = false;
if (kvm->manual_dirty_log_protect) {
/*
* Unlike kvm_get_dirty_log, we never need a flush here, because no
* flush is needed until KVM_CLEAR_DIRTY_LOG. There is some code
* duplication between this function and kvm_get_dirty_log, but
* hopefully all architectures will transition to
* kvm_get_dirty_log_protect so that kvm_get_dirty_log can be
* eliminated.
*/
dirty_bitmap_buffer = dirty_bitmap;
} else {
dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
memset(dirty_bitmap_buffer, 0, n);
KVM_MMU_LOCK(kvm);
for (i = 0; i < n / sizeof(long); i++) {
unsigned long mask;
gfn_t offset;
if (!dirty_bitmap[i])
continue;
flush = true;
mask = xchg(&dirty_bitmap[i], 0);
dirty_bitmap_buffer[i] = mask;
offset = i * BITS_PER_LONG;
kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
offset, mask);
}
KVM_MMU_UNLOCK(kvm);
}
if (flush)
kvm_flush_remote_tlbs_memslot(kvm, memslot);
if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
return -EFAULT;
return 0;
}
/**
* kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
* @kvm: kvm instance
* @log: slot id and address to which we copy the log
*
* Steps 1-4 below provide a general overview of dirty page logging. See
* kvm_get_dirty_log_protect() function description for additional details.
*
* We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
* always flush the TLB (step 4) even if a previous step failed and the dirty
* bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
* API does not preclude subsequent dirty log reads by userspace. Flushing the
* TLB ensures writes will be marked dirty for the next log read.
*
* 1. Take a snapshot of the bit and clear it if needed.
* 2. Write protect the corresponding page.
* 3. Copy the snapshot to the userspace.
* 4. Flush TLB's if needed.
*/
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
int r;
mutex_lock(&kvm->slots_lock);
r = kvm_get_dirty_log_protect(kvm, log);
mutex_unlock(&kvm->slots_lock);
return r;
}
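/*
 * Illustrative userspace sketch of driving this ioctl ("vm_fd" and "npages"
 * are assumptions: an open VM fd, and the page count of a slot created with
 * KVM_MEM_LOG_DIRTY_PAGES). The bitmap carries one bit per page:
 *
 *    struct kvm_dirty_log log = { .slot = 0 };
 *
 *    log.dirty_bitmap = calloc((npages + 63) / 64, sizeof(unsigned long));
 *    if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log))
 *        err(1, "KVM_GET_DIRTY_LOG");
 *    // Bit i set => page (base_gfn + i) was written since the previous
 *    // call (or since KVM_CLEAR_DIRTY_LOG in manual-protect mode).
 */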
/**
* kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
* and reenable dirty page tracking for the corresponding pages.
* @kvm: pointer to kvm instance
* @log: slot id and address from which to fetch the bitmap of dirty pages
*/
static int kvm_clear_dirty_log_protect(struct kvm *kvm,
struct kvm_clear_dirty_log *log)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int as_id, id;
gfn_t offset;
unsigned long i, n;
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_buffer;
bool flush;
/* Dirty ring tracking may be exclusive to dirty log tracking */
if (!kvm_use_dirty_bitmap(kvm))
return -ENXIO;
as_id = log->slot >> 16;
id = (u16)log->slot;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
return -EINVAL;
if (log->first_page & 63)
return -EINVAL;
slots = __kvm_memslots(kvm, as_id);
memslot = id_to_memslot(slots, id);
if (!memslot || !memslot->dirty_bitmap)
return -ENOENT;
dirty_bitmap = memslot->dirty_bitmap;
n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
if (log->first_page > memslot->npages ||
log->num_pages > memslot->npages - log->first_page ||
(log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
return -EINVAL;
kvm_arch_sync_dirty_log(kvm, memslot);
flush = false;
dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
return -EFAULT;
KVM_MMU_LOCK(kvm);
for (offset = log->first_page, i = offset / BITS_PER_LONG,
n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
i++, offset += BITS_PER_LONG) {
unsigned long mask = *dirty_bitmap_buffer++;
atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
if (!mask)
continue;
mask &= atomic_long_fetch_andnot(mask, p);
/*
* mask contains the bits that really have been cleared. This
* never includes any bits beyond the length of the memslot (if
* the length is not aligned to 64 pages), therefore it is not
* a problem if userspace sets them in log->dirty_bitmap.
*/
if (mask) {
flush = true;
kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
offset, mask);
}
}
KVM_MMU_UNLOCK(kvm);
if (flush)
kvm_flush_remote_tlbs_memslot(kvm, memslot);
return 0;
}
static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
struct kvm_clear_dirty_log *log)
{
int r;
mutex_lock(&kvm->slots_lock);
r = kvm_clear_dirty_log_protect(kvm, log);
mutex_unlock(&kvm->slots_lock);
return r;
}
#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
u64 gen = slots->generation;
struct kvm_memory_slot *slot;
/*
* This also protects against using a memslot from a different address space,
* since different address spaces have different generation numbers.
*/
if (unlikely(gen != vcpu->last_used_slot_gen)) {
vcpu->last_used_slot = NULL;
vcpu->last_used_slot_gen = gen;
}
slot = try_get_memslot(vcpu->last_used_slot, gfn);
if (slot)
return slot;
/*
* Fall back to searching all memslots. We purposely use
* search_memslots() instead of __gfn_to_memslot() to avoid
* thrashing the VM-wide last_used_slot in kvm_memslots.
*/
slot = search_memslots(slots, gfn, false);
if (slot) {
vcpu->last_used_slot = slot;
return slot;
}
return NULL;
}
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
return kvm_is_visible_memslot(memslot);
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
return kvm_is_visible_memslot(memslot);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct vm_area_struct *vma;
unsigned long addr, size;
size = PAGE_SIZE;
addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
if (kvm_is_error_hva(addr))
return PAGE_SIZE;
mmap_read_lock(current->mm);
vma = find_vma(current->mm, addr);
if (!vma)
goto out;
size = vma_kernel_pagesize(vma);
out:
mmap_read_unlock(current->mm);
return size;
}
static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
{
return slot->flags & KVM_MEM_READONLY;
}
static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
gfn_t *nr_pages, bool write)
{
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return KVM_HVA_ERR_BAD;
if (memslot_is_readonly(slot) && write)
return KVM_HVA_ERR_RO_BAD;
if (nr_pages)
*nr_pages = slot->npages - (gfn - slot->base_gfn);
return __gfn_to_hva_memslot(slot, gfn);
}
static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
gfn_t *nr_pages)
{
return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
gfn_t gfn)
{
return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
/*
* Return the hva of a @gfn and the R/W attribute if possible.
*
* @slot: the kvm_memory_slot which contains @gfn
* @gfn: the gfn to be translated
* @writable: used to return the read/write attribute of the @slot if the hva
* is valid and @writable is not NULL
*/
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
gfn_t gfn, bool *writable)
{
unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
if (!kvm_is_error_hva(hva) && writable)
*writable = !memslot_is_readonly(slot);
return hva;
}
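/*
 * Illustrative sketch: honoring the slot's R/W attribute before writing to
 * guest memory through the returned hva ("slot", "gfn", "data" and "len"
 * are assumed valid):
 *
 *    bool writable;
 *    unsigned long hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
 *
 *    if (kvm_is_error_hva(hva) || !writable)
 *        return -EFAULT;    // no mapping, or a read-only memslot
 *    if (copy_to_user((void __user *)hva, data, len))
 *        return -EFAULT;
 */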
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
return gfn_to_hva_memslot_prot(slot, gfn, writable);
}
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
return gfn_to_hva_memslot_prot(slot, gfn, writable);
}
static inline int check_user_page_hwpoison(unsigned long addr)
{
int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
rc = get_user_pages(addr, 1, flags, NULL);
return rc == -EHWPOISON;
}
/*
* The fast path to get the writable pfn which will be stored in @pfn,
* true indicates success, otherwise false is returned. It's also the
* only part that can run when the caller is in atomic context.
*/
static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
bool *writable, kvm_pfn_t *pfn)
{
struct page *page[1];
/*
* Fast pin a writable pfn only if it is a write fault request
* or the caller allows to map a writable pfn for a read fault
* request.
*/
if (!(write_fault || writable))
return false;
if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
*pfn = page_to_pfn(page[0]);
if (writable)
*writable = true;
return true;
}
return false;
}
/*
* The slow path to get the pfn of the specified host virtual address;
* 1 indicates success, -errno is returned if an error is detected.
*/
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
bool interruptible, bool *writable, kvm_pfn_t *pfn)
{
/*
* When a VCPU accesses a page that is not mapped into the secondary
* MMU, we lookup the page using GUP to map it, so the guest VCPU can
* make progress. We always want to honor NUMA hinting faults in that
* case, because GUP usage corresponds to memory accesses from the VCPU.
* Otherwise, we'd not trigger NUMA hinting faults once a page is
* mapped into the secondary MMU and gets accessed by a VCPU.
*
* Note that get_user_page_fast_only() and FOLL_WRITE for now
* implicitly honor NUMA hinting faults and don't need this flag.
*/
unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
struct page *page;
int npages;
might_sleep();
if (writable)
*writable = write_fault;
if (write_fault)
flags |= FOLL_WRITE;
if (async)
flags |= FOLL_NOWAIT;
if (interruptible)
flags |= FOLL_INTERRUPTIBLE;
npages = get_user_pages_unlocked(addr, 1, &page, flags);
if (npages != 1)
return npages;
/* map read fault as writable if possible */
if (unlikely(!write_fault) && writable) {
struct page *wpage;
if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
*writable = true;
put_page(page);
page = wpage;
}
}
*pfn = page_to_pfn(page);
return npages;
}
static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
if (unlikely(!(vma->vm_flags & VM_READ)))
return false;
if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
return false;
return true;
}
static int kvm_try_get_pfn(kvm_pfn_t pfn)
{
struct page *page = kvm_pfn_to_refcounted_page(pfn);
if (!page)
return 1;
return get_page_unless_zero(page);
}
static int hva_to_pfn_remapped(struct vm_area_struct *vma,
unsigned long addr, bool write_fault,
bool *writable, kvm_pfn_t *p_pfn)
{
kvm_pfn_t pfn;
pte_t *ptep;
pte_t pte;
spinlock_t *ptl;
int r;
r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
if (r) {
/*
* get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
* not call the fault handler, so do it here.
*/
bool unlocked = false;
r = fixup_user_fault(current->mm, addr,
(write_fault ? FAULT_FLAG_WRITE : 0),
&unlocked);
if (unlocked)
return -EAGAIN;
if (r)
return r;
r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
if (r)
return r;
}
pte = ptep_get(ptep);
if (write_fault && !pte_write(pte)) {
pfn = KVM_PFN_ERR_RO_FAULT;
goto out;
}
if (writable)
*writable = pte_write(pte);
pfn = pte_pfn(pte);
/*
* Get a reference here because callers of *hva_to_pfn* and
* *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
* returned pfn. This is only needed if the VMA has VM_MIXEDMAP
* set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
* simply do nothing for reserved pfns.
*
* Whoever called remap_pfn_range is also going to call e.g.
* unmap_mapping_range before the underlying pages are freed,
* causing a call to our MMU notifier.
*
* Certain IO or PFNMAP mappings can be backed with valid
* struct pages, but be allocated without refcounting e.g.,
* tail pages of non-compound higher order allocations, which
* would then underflow the refcount when the caller does the
* required put_page. Don't allow those pages here.
*/
if (!kvm_try_get_pfn(pfn))
r = -EFAULT;
out:
pte_unmap_unlock(ptep, ptl);
*p_pfn = pfn;
return r;
}
/*
* Pin guest page in memory and return its pfn.
* @addr: host virtual address which maps memory to the guest
* @atomic: whether the call is from atomic context and therefore must not sleep
* @interruptible: whether the process can be interrupted by non-fatal signals
* @async: whether this function needs to wait for IO to complete if the
* host page is not in memory
* @write_fault: whether we should get a writable host page
* @writable: whether it is allowed to map a writable host page for !@write_fault
*
* The function will map a writable host page for these two cases:
* 1): @write_fault = true
* 2): @write_fault = false && @writable, @writable will tell the caller
* whether the mapping is writable.
*/
kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
bool *async, bool write_fault, bool *writable)
{
struct vm_area_struct *vma;
kvm_pfn_t pfn;
int npages, r;
/* we can do it either atomically or asynchronously, not both */
BUG_ON(atomic && async);
if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
return pfn;
if (atomic)
return KVM_PFN_ERR_FAULT;
npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
writable, &pfn);
if (npages == 1)
return pfn;
if (npages == -EINTR)
return KVM_PFN_ERR_SIGPENDING;
mmap_read_lock(current->mm);
if (npages == -EHWPOISON ||
(!async && check_user_page_hwpoison(addr))) {
pfn = KVM_PFN_ERR_HWPOISON;
goto exit;
}
retry:
vma = vma_lookup(current->mm, addr);
if (vma == NULL)
pfn = KVM_PFN_ERR_FAULT;
else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
if (r == -EAGAIN)
goto retry;
if (r < 0)
pfn = KVM_PFN_ERR_FAULT;
} else {
if (async && vma_is_valid(vma, write_fault))
*async = true;
pfn = KVM_PFN_ERR_FAULT;
}
exit:
mmap_read_unlock(current->mm);
return pfn;
}
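/*
 * Illustrative sketch of the two cases documented above ("addr" is an
 * assumed-valid hva, neither atomic nor async):
 *
 *    kvm_pfn_t pfn;
 *    bool writable;
 *
 *    // Case 1: write fault; the result is writable or an error pfn.
 *    pfn = hva_to_pfn(addr, false, false, NULL, true, NULL);
 *
 *    // Case 2: read fault that may opportunistically map writable;
 *    // @writable reports what was actually mapped.
 *    pfn = hva_to_pfn(addr, false, false, NULL, false, &writable);
 */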
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
bool atomic, bool interruptible, bool *async,
bool write_fault, bool *writable, hva_t *hva)
{
unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
if (hva)
*hva = addr;
if (addr == KVM_HVA_ERR_RO_BAD) {
if (writable)
*writable = false;
return KVM_PFN_ERR_RO_FAULT;
}
if (kvm_is_error_hva(addr)) {
if (writable)
*writable = false;
return KVM_PFN_NOSLOT;
}
/* Do not map writable pfn in the readonly memslot. */
if (writable && memslot_is_readonly(slot)) {
*writable = false;
writable = NULL;
}
return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
writable);
}
EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable)
{
return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
NULL, write_fault, writable, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
NULL, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
{
return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
NULL, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
struct page **pages, int nr_pages)
{
unsigned long addr;
gfn_t entry = 0;
addr = gfn_to_hva_many(slot, gfn, &entry);
if (kvm_is_error_hva(addr))
return -1;
if (entry < nr_pages)
return 0;
return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
/*
* Do not use this helper unless you are absolutely certain the gfn _must_ be
* backed by 'struct page'. A valid example is if the backing memslot is
* controlled by KVM. Note, if the returned page is valid, its refcount has
* been elevated by gfn_to_pfn().
*/
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
struct page *page;
kvm_pfn_t pfn;
pfn = gfn_to_pfn(kvm, gfn);
if (is_error_noslot_pfn(pfn))
return KVM_ERR_PTR_BAD_PAGE;
page = kvm_pfn_to_refcounted_page(pfn);
if (!page)
return KVM_ERR_PTR_BAD_PAGE;
return page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);
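/*
 * Illustrative sketch: pairing gfn_to_page() with the matching release
 * helper ("gfn" is assumed to be backed by a KVM-controlled memslot):
 *
 *    struct page *page = gfn_to_page(kvm, gfn);
 *
 *    if (is_error_page(page))
 *        return -EFAULT;
 *    // ... access page_address(page) ...
 *    kvm_release_page_dirty(page);    // or _clean if nothing was written
 */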
void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
{
if (dirty)
kvm_release_pfn_dirty(pfn);
else
kvm_release_pfn_clean(pfn);
}
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
{
kvm_pfn_t pfn;
void *hva = NULL;
struct page *page = KVM_UNMAPPED_PAGE;
if (!map)
return -EINVAL;
pfn = gfn_to_pfn(vcpu->kvm, gfn);
if (is_error_noslot_pfn(pfn))
return -EINVAL;
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
hva = kmap(page);
#ifdef CONFIG_HAS_IOMEM
} else {
hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
}
if (!hva)
return -EFAULT;
map->page = page;
map->hva = hva;
map->pfn = pfn;
map->gfn = gfn;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_map);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
{
if (!map)
return;
if (!map->hva)
return;
if (map->page != KVM_UNMAPPED_PAGE)
kunmap(map->page);
#ifdef CONFIG_HAS_IOMEM
else
memunmap(map->hva);
#endif
if (dirty)
kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
kvm_release_pfn(map->pfn, dirty);
map->hva = NULL;
map->page = NULL;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
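/*
 * Illustrative sketch: temporarily map a guest page, modify it, then unmap
 * with @dirty set so the write is reflected in dirty tracking ("gfn" is
 * hypothetical):
 *
 *    struct kvm_host_map map;
 *
 *    if (kvm_vcpu_map(vcpu, gfn, &map))
 *        return -EFAULT;
 *    memset(map.hva, 0, PAGE_SIZE);
 *    kvm_vcpu_unmap(vcpu, &map, true);    // true => mark the page dirty
 */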
static bool kvm_is_ad_tracked_page(struct page *page)
{
/*
* Per page-flags.h, pages tagged PG_reserved "should in general not be
* touched (e.g. set dirty) except by its owner".
*/
return !PageReserved(page);
}
static void kvm_set_page_dirty(struct page *page)
{
if (kvm_is_ad_tracked_page(page))
SetPageDirty(page);
}
static void kvm_set_page_accessed(struct page *page)
{
if (kvm_is_ad_tracked_page(page))
mark_page_accessed(page);
}
void kvm_release_page_clean(struct page *page)
{
WARN_ON(is_error_page(page));
kvm_set_page_accessed(page);
put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);
void kvm_release_pfn_clean(kvm_pfn_t pfn)
{
struct page *page;
if (is_error_noslot_pfn(pfn))
return;
page = kvm_pfn_to_refcounted_page(pfn);
if (!page)
return;
kvm_release_page_clean(page);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
void kvm_release_page_dirty(struct page *page)
{
WARN_ON(is_error_page(page));
kvm_set_page_dirty(page);
kvm_release_page_clean(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
void kvm_release_pfn_dirty(kvm_pfn_t pfn)
{
struct page *page;
if (is_error_noslot_pfn(pfn))
return;
page = kvm_pfn_to_refcounted_page(pfn);
if (!page)
return;
kvm_release_page_dirty(page);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
/*
* Note, checking for an error/noslot pfn is the caller's responsibility when
* directly marking a page dirty/accessed. Unlike the "release" helpers, the
* "set" helpers are not to be used when the pfn might point at garbage.
*/
void kvm_set_pfn_dirty(kvm_pfn_t pfn)
{
if (WARN_ON(is_error_noslot_pfn(pfn)))
return;
if (pfn_valid(pfn))
kvm_set_page_dirty(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
void kvm_set_pfn_accessed(kvm_pfn_t pfn)
{
if (WARN_ON(is_error_noslot_pfn(pfn)))
return;
if (pfn_valid(pfn))
kvm_set_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
static int next_segment(unsigned long len, int offset)
{
if (len > PAGE_SIZE - offset)
return PAGE_SIZE - offset;
else
return len;
}
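/*
 * Worked example (assuming PAGE_SIZE == 4096): a copy of len = 5000 bytes
 * starting at offset = 3000 within the first page yields a first segment of
 * 1096 bytes (4096 - 3000); the callers below then reset offset to 0, so the
 * remaining 3904 bytes form the second and final segment. In other words,
 * guest accesses are split at page boundaries.
 */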
static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
void *data, int offset, int len)
{
int r;
unsigned long addr;
addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
r = __copy_from_user(data, (void __user *)addr + offset, len);
if (r)
return -EFAULT;
return 0;
}
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len)
{
struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
int offset, int len)
{
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
int seg;
int offset = offset_in_page(gpa);
int ret;
while ((seg = next_segment(len, offset)) != 0) {
ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
if (ret < 0)
return ret;
offset = 0;
len -= seg;
data += seg;
++gfn;
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
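/*
 * Illustrative sketch: reading a structure that may straddle a page
 * boundary; the per-page segmentation above is handled internally
 * ("gpa" and "hypothetical_desc" are assumptions):
 *
 *    struct hypothetical_desc desc;
 *
 *    if (kvm_read_guest(kvm, gpa, &desc, sizeof(desc)))
 *        return -EFAULT;
 */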
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
int seg;
int offset = offset_in_page(gpa);
int ret;
while ((seg = next_segment(len, offset)) != 0) {
ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
if (ret < 0)
return ret;
offset = 0;
len -= seg;
data += seg;
++gfn;
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
void *data, int offset, unsigned long len)
{
int r;
unsigned long addr;
addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
pagefault_disable();
r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
pagefault_enable();
if (r)
return -EFAULT;
return 0;
}
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
void *data, unsigned long len)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
int offset = offset_in_page(gpa);
return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
static int __kvm_write_guest_page(struct kvm *kvm,
struct kvm_memory_slot *memslot, gfn_t gfn,
const void *data, int offset, int len)
{
int r;
unsigned long addr;
addr = gfn_to_hva_memslot(memslot, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
r = __copy_to_user((void __user *)addr + offset, data, len);
if (r)
return -EFAULT;
mark_page_dirty_in_slot(kvm, memslot, gfn);
return 0;
}
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
const void *data, int offset, int len)
{
struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
const void *data, int offset, int len)
{
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
int seg;
int offset = offset_in_page(gpa);
int ret;
while ((seg = next_segment(len, offset)) != 0) {
ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
if (ret < 0)
return ret;
offset = 0;
len -= seg;
data += seg;
++gfn;
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
int seg;
int offset = offset_in_page(gpa);
int ret;
while ((seg = next_segment(len, offset)) != 0) {
ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
if (ret < 0)
return ret;
offset = 0;
len -= seg;
data += seg;
++gfn;
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len)
{
int offset = offset_in_page(gpa);
gfn_t start_gfn = gpa >> PAGE_SHIFT;
gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
gfn_t nr_pages_avail;
/* Update ghc->generation before performing any error checks. */
ghc->generation = slots->generation;
if (start_gfn > end_gfn) {
ghc->hva = KVM_HVA_ERR_BAD;
return -EINVAL;
}
/*
* If the requested region crosses two memslots, we still
* verify that the entire region is valid here.
*/
for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
ghc->memslot = __gfn_to_memslot(slots, start_gfn);
ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
&nr_pages_avail);
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
}
/* Use the slow path for cross page reads and writes. */
if (nr_pages_needed == 1)
ghc->hva += offset;
else
ghc->memslot = NULL;
ghc->gpa = gpa;
ghc->len = len;
return 0;
}
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
int r;
gpa_t gpa = ghc->gpa + offset;
if (WARN_ON_ONCE(len + offset > ghc->len))
return -EINVAL;
if (slots->generation != ghc->generation) {
if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
return -EFAULT;
}
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
if (unlikely(!ghc->memslot))
return kvm_write_guest(kvm, gpa, data, len);
r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
if (r)
return -EFAULT;
mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{
return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
int r;
gpa_t gpa = ghc->gpa + offset;
if (WARN_ON_ONCE(len + offset > ghc->len))
return -EINVAL;
if (slots->generation != ghc->generation) {
if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
return -EFAULT;
}
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
if (unlikely(!ghc->memslot))
return kvm_read_guest(kvm, gpa, data, len);
r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
if (r)
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{
return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
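/*
 * Illustrative sketch: the cached helpers amortize the gfn->hva lookup for
 * a guest address written repeatedly, e.g. a shared-info style page ("ghc"
 * would normally live in a per-VM or per-vCPU structure; "gpa" is
 * hypothetical):
 *
 *    struct gfn_to_hva_cache ghc;
 *    u64 val = 0;
 *
 *    if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *        return -EFAULT;
 *    ...
 *    // Fast path on every update; re-validates only when the memslot
 *    // generation has changed.
 *    if (kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val)))
 *        return -EFAULT;
 */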
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
gfn_t gfn = gpa >> PAGE_SHIFT;
int seg;
int offset = offset_in_page(gpa);
int ret;
while ((seg = next_segment(len, offset)) != 0) {
ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
if (ret < 0)
return ret;
offset = 0;
len -= seg;
++gfn;
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
void mark_page_dirty_in_slot(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
gfn_t gfn)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
#ifdef CONFIG_HAVE_KVM_DIRTY_RING
if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
return;
WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
#endif
if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
u32 slot = (memslot->as_id << 16) | memslot->id;
if (kvm->dirty_ring_size && vcpu)
kvm_dirty_ring_push(vcpu, slot, rel_gfn);
else if (memslot->dirty_bitmap)
set_bit_le(rel_gfn, memslot->dirty_bitmap);
}
}
EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *memslot;
memslot = gfn_to_memslot(kvm, gfn);
mark_page_dirty_in_slot(kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct kvm_memory_slot *memslot;
memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
void kvm_sigset_activate(struct kvm_vcpu *vcpu)
{
if (!vcpu->sigset_active)
return;
/*
* This does a lockless modification of ->real_blocked, which is fine
* because only current can change ->real_blocked and all readers of
* ->real_blocked don't care as long as ->real_blocked is always a subset
* of ->blocked.
*/
sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
}
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
{
if (!vcpu->sigset_active)
return;
sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
sigemptyset(&current->real_blocked);
}
static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
unsigned int old, val, grow, grow_start;
old = val = vcpu->halt_poll_ns;
grow_start = READ_ONCE(halt_poll_ns_grow_start);
grow = READ_ONCE(halt_poll_ns_grow);
if (!grow)
goto out;
val *= grow;
if (val < grow_start)
val = grow_start;
vcpu->halt_poll_ns = val;
out:
trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
}
static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{
unsigned int old, val, shrink, grow_start;
old = val = vcpu->halt_poll_ns;
shrink = READ_ONCE(halt_poll_ns_shrink);
grow_start = READ_ONCE(halt_poll_ns_grow_start);
if (shrink == 0)
val = 0;
else
val /= shrink;
if (val < grow_start)
val = 0;
vcpu->halt_poll_ns = val;
trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
}
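/*
 * Worked example with the default module parameters (halt_poll_ns_grow = 2,
 * halt_poll_ns_grow_start = 10000, halt_poll_ns_shrink = 0): growing from 0
 * jumps straight to 10000 ns and then doubles (20000, 40000, ...) until the
 * cap is applied in kvm_vcpu_halt(), while shrinking with shrink == 0 resets
 * the poll window to 0. With halt_poll_ns_shrink = 2, the window would
 * instead be halved, dropping to 0 once it falls below grow_start.
 */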
static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
int ret = -EINTR;
int idx = srcu_read_lock(&vcpu->kvm->srcu);
if (kvm_arch_vcpu_runnable(vcpu))
goto out;
if (kvm_cpu_has_pending_timer(vcpu))
goto out;
if (signal_pending(current))
goto out;
if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
goto out;
ret = 0;
out:
srcu_read_unlock(&vcpu->kvm->srcu, idx);
return ret;
}
/*
* Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
* pending. This is mostly used when halting a vCPU, but may also be used
* directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
*/
bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
bool waited = false;
vcpu->stat.generic.blocking = 1;
preempt_disable();
kvm_arch_vcpu_blocking(vcpu);
prepare_to_rcuwait(wait);
preempt_enable();
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (kvm_vcpu_check_block(vcpu) < 0)
break;
waited = true;
schedule();
}
preempt_disable();
finish_rcuwait(wait);
kvm_arch_vcpu_unblocking(vcpu);
preempt_enable();
vcpu->stat.generic.blocking = 0;
return waited;
}
static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
ktime_t end, bool success)
{
struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
++vcpu->stat.generic.halt_attempted_poll;
if (success) {
++vcpu->stat.generic.halt_successful_poll;
if (!vcpu_valid_wakeup(vcpu))
++vcpu->stat.generic.halt_poll_invalid;
stats->halt_poll_success_ns += poll_ns;
KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
} else {
stats->halt_poll_fail_ns += poll_ns;
KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
}
}
static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
if (kvm->override_halt_poll_ns) {
/*
* Ensure kvm->max_halt_poll_ns is not read before
* kvm->override_halt_poll_ns.
*
* Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
*/
smp_rmb();
return READ_ONCE(kvm->max_halt_poll_ns);
}
return READ_ONCE(halt_poll_ns);
}
/*
* Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt
* polling is enabled, busy wait for a short time before blocking to avoid the
* expensive block+unblock sequence if a wake event arrives soon after the vCPU
* is halted.
*/
void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{
unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
ktime_t start, cur, poll_end;
bool waited = false;
bool do_halt_poll;
u64 halt_ns;
if (vcpu->halt_poll_ns > max_halt_poll_ns)
vcpu->halt_poll_ns = max_halt_poll_ns;
do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
start = cur = poll_end = ktime_get();
if (do_halt_poll) {
ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
do {
if (kvm_vcpu_check_block(vcpu) < 0)
goto out;
cpu_relax();
poll_end = cur = ktime_get();
} while (kvm_vcpu_can_poll(cur, stop));
}
waited = kvm_vcpu_block(vcpu);
cur = ktime_get();
if (waited) {
vcpu->stat.generic.halt_wait_ns +=
ktime_to_ns(cur) - ktime_to_ns(poll_end);
KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
ktime_to_ns(cur) - ktime_to_ns(poll_end));
}
out:
/* The total time the vCPU was "halted", including polling time. */
halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
/*
* Note, halt-polling is considered successful so long as the vCPU was
* never actually scheduled out, i.e. even if the wake event arrived
* after the halt-polling loop itself finished, but before the full wait.
*/
if (do_halt_poll)
update_halt_poll_stats(vcpu, start, poll_end, !waited);
if (halt_poll_allowed) {
/* Recompute the max halt poll time in case it changed. */
max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
if (!vcpu_valid_wakeup(vcpu)) {
shrink_halt_poll_ns(vcpu);
} else if (max_halt_poll_ns) {
if (halt_ns <= vcpu->halt_poll_ns)
;
/* we had a long block, shrink polling */
else if (vcpu->halt_poll_ns &&
halt_ns > max_halt_poll_ns)
shrink_halt_poll_ns(vcpu);
/* we had a short halt and our poll time is too small */
else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
halt_ns < max_halt_poll_ns)
grow_halt_poll_ns(vcpu);
} else {
vcpu->halt_poll_ns = 0;
}
}
trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
if (__kvm_vcpu_wake_up(vcpu)) {
WRITE_ONCE(vcpu->ready, true);
++vcpu->stat.generic.halt_wakeup;
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
#ifndef CONFIG_S390
/*
* Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
*/
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int me, cpu;
if (kvm_vcpu_wake_up(vcpu))
return;
me = get_cpu();
/*
* The only state change done outside the vcpu mutex is IN_GUEST_MODE
* to EXITING_GUEST_MODE. Therefore the moderately expensive "should
* kick" check does not need atomic operations if kvm_vcpu_kick is used
* within the vCPU thread itself.
*/
if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
if (vcpu->mode == IN_GUEST_MODE)
WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
goto out;
}
/*
* Note, the vCPU could get migrated to a different pCPU at any point
* after kvm_arch_vcpu_should_kick(), which could result in sending an
* IPI to the previous pCPU. But, that's ok because the purpose of the
* IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
* vCPU also requires it to leave IN_GUEST_MODE.
*/
if (kvm_arch_vcpu_should_kick(vcpu)) {
cpu = READ_ONCE(vcpu->cpu);
if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
smp_send_reschedule(cpu);
}
out:
put_cpu();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
#endif /* !CONFIG_S390 */
int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
struct pid *pid;
struct task_struct *task = NULL;
int ret = 0;
rcu_read_lock();
pid = rcu_dereference(target->pid);
if (pid)
task = get_pid_task(pid, PIDTYPE_PID);
rcu_read_unlock();
if (!task)
return ret;
ret = yield_to(task, 1);
put_task_struct(task);
return ret;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
/*
* Helper that checks whether a VCPU is eligible for directed yield.
* The most eligible candidate to yield to is decided by the following
* heuristics:
*
* (a) A VCPU which has not done a PLE exit or had cpu relax intercepted
* recently (a likely preempted lock holder), indicated by @in_spin_loop.
* Set at the beginning and cleared at the end of the interception/PLE handler.
*
* (b) A VCPU which has done a PLE exit or had cpu relax intercepted but did
* not get a chance last time (it has mostly become eligible now since we have
* probably yielded to the lock holder in the last iteration). This is tracked
* by toggling @dy_eligible each time a VCPU is checked for eligibility.
*
* Yielding to a recently PLE-exited/cpu relax intercepted VCPU before yielding
* to a preempted lock holder could result in wrong VCPU selection and CPU
* burning. Giving priority to a potential lock holder increases lock
* progress.
*
* Since the algorithm is based on heuristics, accessing another VCPU's data
* without locking does no harm. It may result in trying to yield to the same
* VCPU, failing, and continuing with the next VCPU, and so on.
*/
static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
bool eligible;
eligible = !vcpu->spin_loop.in_spin_loop ||
vcpu->spin_loop.dy_eligible;
if (vcpu->spin_loop.in_spin_loop)
kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
return eligible;
#else
return true;
#endif
}
/*
* Unlike kvm_arch_vcpu_runnable, this function is called outside
* a vcpu_load/vcpu_put pair. However, for most architectures
* kvm_arch_vcpu_runnable does not require vcpu_load.
*/
bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
return kvm_arch_vcpu_runnable(vcpu);
}
static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
{
if (kvm_arch_dy_runnable(vcpu))
return true;
#ifdef CONFIG_KVM_ASYNC_PF
if (!list_empty_careful(&vcpu->async_pf.done))
return true;
#endif
return false;
}
bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
return false;
}
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
struct kvm *kvm = me->kvm;
struct kvm_vcpu *vcpu;
int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
unsigned long i;
int yielded = 0;
int try = 3;
int pass;
kvm_vcpu_set_in_spin_loop(me, true);
/*
* We boost the priority of a VCPU that is runnable but not
* currently running, because it got preempted by something
* else and called schedule in __vcpu_run. Hopefully that
* VCPU is holding the lock that we need and will release it.
* We approximate round-robin by starting at the last boosted VCPU.
*/
for (pass = 0; pass < 2 && !yielded && try; pass++) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!pass && i <= last_boosted_vcpu) {
i = last_boosted_vcpu;
continue;
} else if (pass && i > last_boosted_vcpu)
break;
if (!READ_ONCE(vcpu->ready))
continue;
if (vcpu == me)
continue;
if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
continue;
if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
!kvm_arch_dy_has_pending_interrupt(vcpu) &&
!kvm_arch_vcpu_in_kernel(vcpu))
continue;
if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
continue;
yielded = kvm_vcpu_yield_to(vcpu);
if (yielded > 0) {
kvm->last_boosted_vcpu = i;
break;
} else if (yielded < 0) {
try--;
if (!try)
break;
}
}
}
kvm_vcpu_set_in_spin_loop(me, false);
/* Ensure vcpu is not eligible during next spinloop */
kvm_vcpu_set_dy_eligible(me, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
{
#ifdef CONFIG_HAVE_KVM_DIRTY_RING
return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
(pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
kvm->dirty_ring_size / PAGE_SIZE);
#else
return false;
#endif
}
static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
{
struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
struct page *page;
if (vmf->pgoff == 0)
page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef CONFIG_KVM_MMIO
else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
page = kvm_dirty_ring_get_page(
&vcpu->dirty_ring,
vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
else
return kvm_arch_vcpu_fault(vcpu, vmf);
get_page(page);
vmf->page = page;
return 0;
}
static const struct vm_operations_struct kvm_vcpu_vm_ops = {
.fault = kvm_vcpu_fault,
};
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
struct kvm_vcpu *vcpu = file->private_data;
unsigned long pages = vma_pages(vma);
if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
return -EINVAL;
vma->vm_ops = &kvm_vcpu_vm_ops;
return 0;
}
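/*
 * Illustrative userspace sketch of the layout enforced above: page offset 0
 * of a vCPU fd is the shared kvm_run structure ("vcpu_fd" and "mmap_size"
 * are assumptions; the size is reported by KVM_GET_VCPU_MMAP_SIZE on the
 * /dev/kvm fd):
 *
 *    struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *                               MAP_SHARED, vcpu_fd, 0);
 *
 *    if (run == MAP_FAILED)
 *        err(1, "mmap vcpu");
 *    // run->exit_reason is valid each time the KVM_RUN ioctl returns.
 */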
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
struct kvm_vcpu *vcpu = filp->private_data;
kvm_put_kvm(vcpu->kvm);
return 0;
}
static const struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
.mmap = kvm_vcpu_mmap,
.llseek = noop_llseek,
KVM_COMPAT(kvm_vcpu_compat_ioctl),
};
/*
* Allocates an inode for the vcpu.
*/
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
char name[8 + 1 + ITOA_MAX_LEN + 1];
snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
static int vcpu_get_pid(void *data, u64 *val)
{
struct kvm_vcpu *vcpu = data;
rcu_read_lock();
*val = pid_nr(rcu_dereference(vcpu->pid));
rcu_read_unlock();
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
struct dentry *debugfs_dentry;
char dir_name[ITOA_MAX_LEN * 2];
if (!debugfs_initialized())
return;
snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
debugfs_dentry = debugfs_create_dir(dir_name,
vcpu->kvm->debugfs_dentry);
debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
&vcpu_get_pid_fops);
kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
}
#endif
/*
* Creates some virtual cpus. Good luck creating more than one.
*/
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
int r;
struct kvm_vcpu *vcpu;
struct page *page;
if (id >= KVM_MAX_VCPU_IDS)
return -EINVAL;
mutex_lock(&kvm->lock);
if (kvm->created_vcpus >= kvm->max_vcpus) {
mutex_unlock(&kvm->lock);
return -EINVAL;
}
r = kvm_arch_vcpu_precreate(kvm, id);
if (r) {
mutex_unlock(&kvm->lock);
return r;
}
kvm->created_vcpus++;
mutex_unlock(&kvm->lock);
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
if (!vcpu) {
r = -ENOMEM;
goto vcpu_decrement;
}
BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!page) {
r = -ENOMEM;
goto vcpu_free;
}
vcpu->run = page_address(page);
kvm_vcpu_init(vcpu, kvm, id);
r = kvm_arch_vcpu_create(vcpu);
if (r)
goto vcpu_free_run_page;
if (kvm->dirty_ring_size) {
r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
id, kvm->dirty_ring_size);
if (r)
goto arch_vcpu_destroy;
}
mutex_lock(&kvm->lock);
#ifdef CONFIG_LOCKDEP
/* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
mutex_lock(&vcpu->mutex);
mutex_unlock(&vcpu->mutex);
#endif
if (kvm_get_vcpu_by_id(kvm, id)) {
r = -EEXIST;
goto unlock_vcpu_destroy;
}
vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
if (r)
goto unlock_vcpu_destroy;
/* Now it's all set up, let userspace reach it */
kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
if (r < 0)
goto kvm_put_xa_release;
if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
r = -EINVAL;
goto kvm_put_xa_release;
}
/*
* Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu
* pointer before incrementing kvm->online_vcpus.
*/
smp_wmb();
atomic_inc(&kvm->online_vcpus);
mutex_unlock(&kvm->lock);
kvm_arch_vcpu_postcreate(vcpu);
kvm_create_vcpu_debugfs(vcpu);
return r;
kvm_put_xa_release:
kvm_put_kvm_no_destroy(kvm);
xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
unlock_vcpu_destroy:
mutex_unlock(&kvm->lock);
kvm_dirty_ring_free(&vcpu->dirty_ring);
arch_vcpu_destroy:
kvm_arch_vcpu_destroy(vcpu);
vcpu_free_run_page:
free_page((unsigned long)vcpu->run);
vcpu_free:
kmem_cache_free(kvm_vcpu_cache, vcpu);
vcpu_decrement:
mutex_lock(&kvm->lock);
kvm->created_vcpus--;
mutex_unlock(&kvm->lock);
return r;
}
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
if (sigset) {
sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
vcpu->sigset_active = 1;
vcpu->sigset = *sigset;
} else
vcpu->sigset_active = 0;
return 0;
}
static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
size_t size, loff_t *offset)
{
struct kvm_vcpu *vcpu = file->private_data;
return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
&kvm_vcpu_stats_desc[0], &vcpu->stat,
sizeof(vcpu->stat), user_buffer, size, offset);
}
static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
{
struct kvm_vcpu *vcpu = file->private_data;
kvm_put_kvm(vcpu->kvm);
return 0;
}
static const struct file_operations kvm_vcpu_stats_fops = {
.read = kvm_vcpu_stats_read,
.release = kvm_vcpu_stats_release,
.llseek = noop_llseek,
};
static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
{
int fd;
struct file *file;
char name[15 + ITOA_MAX_LEN + 1];
snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
return fd;
file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
if (IS_ERR(file)) {
put_unused_fd(fd);
return PTR_ERR(file);
}
kvm_get_kvm(vcpu->kvm);
file->f_mode |= FMODE_PREAD;
fd_install(fd, file);
return fd;
}
static long kvm_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
int r;
struct kvm_fpu *fpu = NULL;
struct kvm_sregs *kvm_sregs = NULL;
if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
return -EIO;
if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
return -EINVAL;
/*
* Some architectures have vcpu ioctls that are asynchronous to vcpu
* execution; mutex_lock() would break them.
*/
r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
if (r != -ENOIOCTLCMD)
return r;
if (mutex_lock_killable(&vcpu->mutex))
return -EINTR;
switch (ioctl) {
case KVM_RUN: {
struct pid *oldpid;
r = -EINVAL;
if (arg)
goto out;
oldpid = rcu_access_pointer(vcpu->pid);
if (unlikely(oldpid != task_pid(current))) {
/* The thread running this VCPU changed. */
struct pid *newpid;
r = kvm_arch_vcpu_run_pid_change(vcpu);
if (r)
break;
newpid = get_task_pid(current, PIDTYPE_PID);
rcu_assign_pointer(vcpu->pid, newpid);
if (oldpid)
synchronize_rcu();
put_pid(oldpid);
}
r = kvm_arch_vcpu_ioctl_run(vcpu);
trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;
}
case KVM_GET_REGS: {
struct kvm_regs *kvm_regs;
r = -ENOMEM;
kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
if (!kvm_regs)
goto out;
r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
if (r)
goto out_free1;
r = -EFAULT;
if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
goto out_free1;
r = 0;
out_free1:
kfree(kvm_regs);
break;
}
case KVM_SET_REGS: {
struct kvm_regs *kvm_regs;
kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
if (IS_ERR(kvm_regs)) {
r = PTR_ERR(kvm_regs);
goto out;
}
r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
kfree(kvm_regs);
break;
}
case KVM_GET_SREGS: {
kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
GFP_KERNEL_ACCOUNT);
r = -ENOMEM;
if (!kvm_sregs)
goto out;
r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
goto out;
r = 0;
break;
}
case KVM_SET_SREGS: {
kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
if (IS_ERR(kvm_sregs)) {
r = PTR_ERR(kvm_sregs);
kvm_sregs = NULL;
goto out;
}
r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
break;
}
case KVM_GET_MP_STATE: {
struct kvm_mp_state mp_state;
r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
goto out;
r = 0;
break;
}
case KVM_SET_MP_STATE: {
struct kvm_mp_state mp_state;
r = -EFAULT;
if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
goto out;
r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
break;
}
case KVM_TRANSLATE: {
struct kvm_translation tr;
r = -EFAULT;
if (copy_from_user(&tr, argp, sizeof(tr)))
goto out;
r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &tr, sizeof(tr)))
goto out;
r = 0;
break;
}
case KVM_SET_GUEST_DEBUG: {
struct kvm_guest_debug dbg;
r = -EFAULT;
if (copy_from_user(&dbg, argp, sizeof(dbg)))
goto out;
r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
break;
}
case KVM_SET_SIGNAL_MASK: {
struct kvm_signal_mask __user *sigmask_arg = argp;
struct kvm_signal_mask kvm_sigmask;
sigset_t sigset, *p;
p = NULL;
if (argp) {
r = -EFAULT;
if (copy_from_user(&kvm_sigmask, argp,
sizeof(kvm_sigmask)))
goto out;
r = -EINVAL;
if (kvm_sigmask.len != sizeof(sigset))
goto out;
r = -EFAULT;
if (copy_from_user(&sigset, sigmask_arg->sigset,
sizeof(sigset)))
goto out;
p = &sigset;
}
r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
break;
}
case KVM_GET_FPU: {
fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
r = -ENOMEM;
if (!fpu)
goto out;
r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
goto out;
r = 0;
break;
}
case KVM_SET_FPU: {
fpu = memdup_user(argp, sizeof(*fpu));
if (IS_ERR(fpu)) {
r = PTR_ERR(fpu);
fpu = NULL;
goto out;
}
r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
break;
}
case KVM_GET_STATS_FD: {
r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
break;
}
default:
r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
}
out:
mutex_unlock(&vcpu->mutex);
kfree(fpu);
kfree(kvm_sregs);
return r;
}
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = compat_ptr(arg);
int r;
if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
return -EIO;
switch (ioctl) {
case KVM_SET_SIGNAL_MASK: {
struct kvm_signal_mask __user *sigmask_arg = argp;
struct kvm_signal_mask kvm_sigmask;
sigset_t sigset;
if (argp) {
r = -EFAULT;
if (copy_from_user(&kvm_sigmask, argp,
sizeof(kvm_sigmask)))
goto out;
r = -EINVAL;
if (kvm_sigmask.len != sizeof(compat_sigset_t))
goto out;
r = -EFAULT;
if (get_compat_sigset(&sigset,
(compat_sigset_t __user *)sigmask_arg->sigset))
goto out;
r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
} else
r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
break;
}
default:
r = kvm_vcpu_ioctl(filp, ioctl, arg);
}
out:
return r;
}
#endif
static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct kvm_device *dev = filp->private_data;
if (dev->ops->mmap)
return dev->ops->mmap(dev, vma);
return -ENODEV;
}
static int kvm_device_ioctl_attr(struct kvm_device *dev,
int (*accessor)(struct kvm_device *dev,
struct kvm_device_attr *attr),
unsigned long arg)
{
struct kvm_device_attr attr;
if (!accessor)
return -EPERM;
if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
return -EFAULT;
return accessor(dev, &attr);
}
static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct kvm_device *dev = filp->private_data;
if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
return -EIO;
switch (ioctl) {
case KVM_SET_DEVICE_ATTR:
return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
case KVM_GET_DEVICE_ATTR:
return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
case KVM_HAS_DEVICE_ATTR:
return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
default:
if (dev->ops->ioctl)
return dev->ops->ioctl(dev, ioctl, arg);
return -ENOTTY;
}
}
static int kvm_device_release(struct inode *inode, struct file *filp)
{
struct kvm_device *dev = filp->private_data;
struct kvm *kvm = dev->kvm;
if (dev->ops->release) {
mutex_lock(&kvm->lock);
list_del(&dev->vm_node);
dev->ops->release(dev);
mutex_unlock(&kvm->lock);
}
kvm_put_kvm(kvm);
return 0;
}
static const struct file_operations kvm_device_fops = {
.unlocked_ioctl = kvm_device_ioctl,
.release = kvm_device_release,
KVM_COMPAT(kvm_device_ioctl),
.mmap = kvm_device_mmap,
};
struct kvm_device *kvm_device_from_filp(struct file *filp)
{
if (filp->f_op != &kvm_device_fops)
return NULL;
return filp->private_data;
}
static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
#ifdef CONFIG_KVM_MPIC
[KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
[KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
#endif
};
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
{
if (type >= ARRAY_SIZE(kvm_device_ops_table))
return -ENOSPC;
if (kvm_device_ops_table[type] != NULL)
return -EEXIST;
kvm_device_ops_table[type] = ops;
return 0;
}
void kvm_unregister_device_ops(u32 type)
{
if (kvm_device_ops_table[type] != NULL)
kvm_device_ops_table[type] = NULL;
}
static int kvm_ioctl_create_device(struct kvm *kvm,
struct kvm_create_device *cd)
{
const struct kvm_device_ops *ops;
struct kvm_device *dev;
bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
int type;
int ret;
if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
return -ENODEV;
type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
ops = kvm_device_ops_table[type];
if (ops == NULL)
return -ENODEV;
if (test)
return 0;
dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
if (!dev)
return -ENOMEM;
dev->ops = ops;
dev->kvm = kvm;
mutex_lock(&kvm->lock);
ret = ops->create(dev, type);
if (ret < 0) {
mutex_unlock(&kvm->lock);
kfree(dev);
return ret;
}
list_add(&dev->vm_node, &kvm->devices);
mutex_unlock(&kvm->lock);
if (ops->init)
ops->init(dev);
kvm_get_kvm(kvm);
ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
if (ret < 0) {
kvm_put_kvm_no_destroy(kvm);
mutex_lock(&kvm->lock);
list_del(&dev->vm_node);
if (ops->release)
ops->release(dev);
mutex_unlock(&kvm->lock);
if (ops->destroy)
ops->destroy(dev);
return ret;
}
cd->fd = ret;
return 0;
}
static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
{
switch (arg) {
case KVM_CAP_USER_MEMORY:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_CAP_SIGNAL_MSI:
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
case KVM_CAP_IRQFD:
#endif
case KVM_CAP_IOEVENTFD_ANY_LENGTH:
case KVM_CAP_CHECK_EXTENSION_VM:
case KVM_CAP_ENABLE_CAP_VM:
case KVM_CAP_HALT_POLL:
return 1;
#ifdef CONFIG_KVM_MMIO
case KVM_CAP_COALESCED_MMIO:
return KVM_COALESCED_MMIO_PAGE_OFFSET;
case KVM_CAP_COALESCED_PIO:
return 1;
#endif
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
return KVM_DIRTY_LOG_MANUAL_CAPS;
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
case KVM_CAP_IRQ_ROUTING:
return KVM_MAX_IRQ_ROUTES;
#endif
#if KVM_ADDRESS_SPACE_NUM > 1
case KVM_CAP_MULTI_ADDRESS_SPACE:
return KVM_ADDRESS_SPACE_NUM;
#endif
case KVM_CAP_NR_MEMSLOTS:
return KVM_USER_MEM_SLOTS;
case KVM_CAP_DIRTY_LOG_RING:
#ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
#else
return 0;
#endif
case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
#ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
#else
return 0;
#endif
#ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
#endif
case KVM_CAP_BINARY_STATS_FD:
case KVM_CAP_SYSTEM_EVENT_DATA:
return 1;
default:
break;
}
return kvm_vm_ioctl_check_extension(kvm, arg);
}
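/*
 * Enable the per-vCPU dirty ring. @size is in bytes, must be a power
 * of two, must hold at least the reserved entries and one full page,
 * and may not exceed the ring maximum. It can be set only once, and
 * only before any vCPU has been created.
 */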
static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
{
int r;
if (!KVM_DIRTY_LOG_PAGE_OFFSET)
return -EINVAL;
	/* the size must be a power of 2 */
if (!size || (size & (size - 1)))
return -EINVAL;
	/* Must be large enough to hold the reserved entries, and at least one page */
if (size < kvm_dirty_ring_get_rsvd_entries() *
sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
return -EINVAL;
if (size > KVM_DIRTY_RING_MAX_ENTRIES *
sizeof(struct kvm_dirty_gfn))
return -E2BIG;
	/* The ring size can only be set once */
if (kvm->dirty_ring_size)
return -EINVAL;
mutex_lock(&kvm->lock);
if (kvm->created_vcpus) {
		/* Don't allow changing the ring size after vCPUs have been created */
r = -EINVAL;
} else {
kvm->dirty_ring_size = size;
r = 0;
}
mutex_unlock(&kvm->lock);
return r;
}
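/*
 * Reset the dirty rings of all vCPUs under slots_lock, flushing remote
 * TLBs if anything was cleared. Returns the total number of ring
 * entries reset.
 */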
static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
{
unsigned long i;
struct kvm_vcpu *vcpu;
int cleared = 0;
if (!kvm->dirty_ring_size)
return -EINVAL;
mutex_lock(&kvm->slots_lock);
kvm_for_each_vcpu(i, vcpu, kvm)
cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
mutex_unlock(&kvm->slots_lock);
if (cleared)
kvm_flush_remote_tlbs(kvm);
return cleared;
}
int __weak kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
return -EINVAL;
}
bool kvm_are_all_memslots_empty(struct kvm *kvm)
{
int i;
lockdep_assert_held(&kvm->slots_lock);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
struct kvm_enable_cap *cap)
{
switch (cap->cap) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
if (cap->flags || (cap->args[0] & ~allowed_options))
return -EINVAL;
kvm->manual_dirty_log_protect = cap->args[0];
return 0;
}
#endif
case KVM_CAP_HALT_POLL: {
if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
return -EINVAL;
kvm->max_halt_poll_ns = cap->args[0];
/*
* Ensure kvm->override_halt_poll_ns does not become visible
* before kvm->max_halt_poll_ns.
*
* Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
*/
smp_wmb();
kvm->override_halt_poll_ns = true;
return 0;
}
case KVM_CAP_DIRTY_LOG_RING:
case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
return -EINVAL;
return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
int r = -EINVAL;
if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
!kvm->dirty_ring_size || cap->flags)
return r;
mutex_lock(&kvm->slots_lock);
/*
* For simplicity, allow enabling ring+bitmap if and only if
* there are no memslots, e.g. to ensure all memslots allocate
* a bitmap after the capability is enabled.
*/
if (kvm_are_all_memslots_empty(kvm)) {
kvm->dirty_ring_with_bitmap = true;
r = 0;
}
mutex_unlock(&kvm->slots_lock);
return r;
}
default:
return kvm_vm_ioctl_enable_cap(kvm, cap);
}
}
static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
size_t size, loff_t *offset)
{
struct kvm *kvm = file->private_data;
return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
&kvm_vm_stats_desc[0], &kvm->stat,
sizeof(kvm->stat), user_buffer, size, offset);
}
static int kvm_vm_stats_release(struct inode *inode, struct file *file)
{
struct kvm *kvm = file->private_data;
kvm_put_kvm(kvm);
return 0;
}
static const struct file_operations kvm_vm_stats_fops = {
.read = kvm_vm_stats_read,
.release = kvm_vm_stats_release,
.llseek = noop_llseek,
};
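/*
 * Allocate a read-only fd for the VM's binary stats. The fd holds a
 * reference on the VM that is dropped by kvm_vm_stats_release();
 * FMODE_PREAD lets userspace pread() the stats at any offset.
 */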
static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
{
int fd;
struct file *file;
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
return fd;
file = anon_inode_getfile("kvm-vm-stats",
&kvm_vm_stats_fops, kvm, O_RDONLY);
if (IS_ERR(file)) {
put_unused_fd(fd);
return PTR_ERR(file);
}
kvm_get_kvm(kvm);
file->f_mode |= FMODE_PREAD;
fd_install(fd, file);
return fd;
}
static long kvm_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
int r;
if (kvm->mm != current->mm || kvm->vm_dead)
return -EIO;
switch (ioctl) {
case KVM_CREATE_VCPU:
r = kvm_vm_ioctl_create_vcpu(kvm, arg);
break;
case KVM_ENABLE_CAP: {
struct kvm_enable_cap cap;
r = -EFAULT;
if (copy_from_user(&cap, argp, sizeof(cap)))
goto out;
r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
break;
}
case KVM_SET_USER_MEMORY_REGION: {
struct kvm_userspace_memory_region kvm_userspace_mem;
r = -EFAULT;
if (copy_from_user(&kvm_userspace_mem, argp,
sizeof(kvm_userspace_mem)))
goto out;
r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
break;
}
case KVM_GET_DIRTY_LOG: {
struct kvm_dirty_log log;
r = -EFAULT;
if (copy_from_user(&log, argp, sizeof(log)))
goto out;
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
break;
}
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
case KVM_CLEAR_DIRTY_LOG: {
struct kvm_clear_dirty_log log;
r = -EFAULT;
if (copy_from_user(&log, argp, sizeof(log)))
goto out;
r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
break;
}
#endif
#ifdef CONFIG_KVM_MMIO
case KVM_REGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
r = -EFAULT;
if (copy_from_user(&zone, argp, sizeof(zone)))
goto out;
r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
break;
}
case KVM_UNREGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
r = -EFAULT;
if (copy_from_user(&zone, argp, sizeof(zone)))
goto out;
r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
break;
}
#endif
case KVM_IRQFD: {
struct kvm_irqfd data;
r = -EFAULT;
if (copy_from_user(&data, argp, sizeof(data)))
goto out;
r = kvm_irqfd(kvm, &data);
break;
}
case KVM_IOEVENTFD: {
struct kvm_ioeventfd data;
r = -EFAULT;
if (copy_from_user(&data, argp, sizeof(data)))
goto out;
r = kvm_ioeventfd(kvm, &data);
break;
}
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_SIGNAL_MSI: {
struct kvm_msi msi;
r = -EFAULT;
if (copy_from_user(&msi, argp, sizeof(msi)))
goto out;
r = kvm_send_userspace_msi(kvm, &msi);
break;
}
#endif
#ifdef __KVM_HAVE_IRQ_LINE
case KVM_IRQ_LINE_STATUS:
case KVM_IRQ_LINE: {
struct kvm_irq_level irq_event;
r = -EFAULT;
if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
goto out;
r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
ioctl == KVM_IRQ_LINE_STATUS);
if (r)
goto out;
r = -EFAULT;
if (ioctl == KVM_IRQ_LINE_STATUS) {
if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
goto out;
}
r = 0;
break;
}
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
case KVM_SET_GSI_ROUTING: {
struct kvm_irq_routing routing;
struct kvm_irq_routing __user *urouting;
struct kvm_irq_routing_entry *entries = NULL;
r = -EFAULT;
if (copy_from_user(&routing, argp, sizeof(routing)))
goto out;
r = -EINVAL;
if (!kvm_arch_can_set_irq_routing(kvm))
goto out;
if (routing.nr > KVM_MAX_IRQ_ROUTES)
goto out;
if (routing.flags)
goto out;
if (routing.nr) {
urouting = argp;
entries = vmemdup_user(urouting->entries,
array_size(sizeof(*entries),
routing.nr));
if (IS_ERR(entries)) {
r = PTR_ERR(entries);
goto out;
}
}
r = kvm_set_irq_routing(kvm, entries, routing.nr,
routing.flags);
kvfree(entries);
break;
}
#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
case KVM_CREATE_DEVICE: {
struct kvm_create_device cd;
r = -EFAULT;
if (copy_from_user(&cd, argp, sizeof(cd)))
goto out;
r = kvm_ioctl_create_device(kvm, &cd);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &cd, sizeof(cd)))
goto out;
r = 0;
break;
}
case KVM_CHECK_EXTENSION:
r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
break;
case KVM_RESET_DIRTY_RINGS:
r = kvm_vm_ioctl_reset_dirty_pages(kvm);
break;
case KVM_GET_STATS_FD:
r = kvm_vm_ioctl_get_stats_fd(kvm);
break;
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
}
out:
return r;
}
#ifdef CONFIG_KVM_COMPAT
struct compat_kvm_dirty_log {
__u32 slot;
__u32 padding1;
union {
compat_uptr_t dirty_bitmap; /* one bit per page */
__u64 padding2;
};
};
struct compat_kvm_clear_dirty_log {
__u32 slot;
__u32 num_pages;
__u64 first_page;
union {
compat_uptr_t dirty_bitmap; /* one bit per page */
__u64 padding2;
};
};
long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
return -ENOTTY;
}
static long kvm_vm_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
int r;
if (kvm->mm != current->mm || kvm->vm_dead)
return -EIO;
r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
if (r != -ENOTTY)
return r;
switch (ioctl) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
case KVM_CLEAR_DIRTY_LOG: {
struct compat_kvm_clear_dirty_log compat_log;
struct kvm_clear_dirty_log log;
if (copy_from_user(&compat_log, (void __user *)arg,
sizeof(compat_log)))
return -EFAULT;
log.slot = compat_log.slot;
log.num_pages = compat_log.num_pages;
log.first_page = compat_log.first_page;
log.padding2 = compat_log.padding2;
log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
break;
}
#endif
case KVM_GET_DIRTY_LOG: {
struct compat_kvm_dirty_log compat_log;
struct kvm_dirty_log log;
if (copy_from_user(&compat_log, (void __user *)arg,
sizeof(compat_log)))
return -EFAULT;
log.slot = compat_log.slot;
log.padding1 = compat_log.padding1;
log.padding2 = compat_log.padding2;
log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
break;
}
default:
r = kvm_vm_ioctl(filp, ioctl, arg);
}
return r;
}
#endif
static const struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
.llseek = noop_llseek,
KVM_COMPAT(kvm_vm_compat_ioctl),
};
bool file_is_kvm(struct file *file)
{
return file && file->f_op == &kvm_vm_fops;
}
EXPORT_SYMBOL_GPL(file_is_kvm);
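/*
 * Reserve the fd before creating the VM so its number can be handed to
 * kvm_create_vm() (it is used to name the VM's debugfs directory). Once
 * the anon inode is created, the kvm reference is owned by the file and
 * is released via kvm_vm_release() on the final fput().
 */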
static int kvm_dev_ioctl_create_vm(unsigned long type)
{
char fdname[ITOA_MAX_LEN + 1];
int r, fd;
struct kvm *kvm;
struct file *file;
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
return fd;
snprintf(fdname, sizeof(fdname), "%d", fd);
kvm = kvm_create_vm(type, fdname);
if (IS_ERR(kvm)) {
r = PTR_ERR(kvm);
goto put_fd;
}
file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
if (IS_ERR(file)) {
r = PTR_ERR(file);
goto put_kvm;
}
/*
* Don't call kvm_put_kvm anymore at this point; file->f_op is
* already set, with ->release() being kvm_vm_release(). In error
* cases it will be called by the final fput(file) and will take
* care of doing kvm_put_kvm(kvm).
*/
kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
fd_install(fd, file);
return fd;
put_kvm:
kvm_put_kvm(kvm);
put_fd:
put_unused_fd(fd);
return r;
}
static long kvm_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
int r = -EINVAL;
switch (ioctl) {
case KVM_GET_API_VERSION:
if (arg)
goto out;
r = KVM_API_VERSION;
break;
case KVM_CREATE_VM:
r = kvm_dev_ioctl_create_vm(arg);
break;
case KVM_CHECK_EXTENSION:
r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
break;
case KVM_GET_VCPU_MMAP_SIZE:
if (arg)
goto out;
r = PAGE_SIZE; /* struct kvm_run */
#ifdef CONFIG_X86
r += PAGE_SIZE; /* pio data page */
#endif
#ifdef CONFIG_KVM_MMIO
r += PAGE_SIZE; /* coalesced mmio ring page */
#endif
break;
case KVM_TRACE_ENABLE:
case KVM_TRACE_PAUSE:
case KVM_TRACE_DISABLE:
r = -EOPNOTSUPP;
break;
default:
return kvm_arch_dev_ioctl(filp, ioctl, arg);
}
out:
return r;
}
static struct file_operations kvm_chardev_ops = {
.unlocked_ioctl = kvm_dev_ioctl,
.llseek = noop_llseek,
KVM_COMPAT(kvm_dev_ioctl),
};
static struct miscdevice kvm_dev = {
KVM_MINOR,
"kvm",
&kvm_chardev_ops,
};
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
static DEFINE_PER_CPU(bool, hardware_enabled);
static int kvm_usage_count;
static int __hardware_enable_nolock(void)
{
if (__this_cpu_read(hardware_enabled))
return 0;
if (kvm_arch_hardware_enable()) {
pr_info("kvm: enabling virtualization on CPU%d failed\n",
raw_smp_processor_id());
return -EIO;
}
__this_cpu_write(hardware_enabled, true);
return 0;
}
static void hardware_enable_nolock(void *failed)
{
if (__hardware_enable_nolock())
atomic_inc(failed);
}
static int kvm_online_cpu(unsigned int cpu)
{
int ret = 0;
/*
* Abort the CPU online process if hardware virtualization cannot
* be enabled. Otherwise running VMs would encounter unrecoverable
* errors when scheduled to this CPU.
*/
mutex_lock(&kvm_lock);
if (kvm_usage_count)
ret = __hardware_enable_nolock();
mutex_unlock(&kvm_lock);
return ret;
}
static void hardware_disable_nolock(void *junk)
{
/*
* Note, hardware_disable_all_nolock() tells all online CPUs to disable
* hardware, not just CPUs that successfully enabled hardware!
*/
if (!__this_cpu_read(hardware_enabled))
return;
kvm_arch_hardware_disable();
__this_cpu_write(hardware_enabled, false);
}
static int kvm_offline_cpu(unsigned int cpu)
{
mutex_lock(&kvm_lock);
if (kvm_usage_count)
hardware_disable_nolock(NULL);
mutex_unlock(&kvm_lock);
return 0;
}
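/*
 * Drop one reference of the global usage count; when it reaches zero,
 * disable virtualization on every online CPU. The caller must hold
 * kvm_lock.
 */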
static void hardware_disable_all_nolock(void)
{
BUG_ON(!kvm_usage_count);
kvm_usage_count--;
if (!kvm_usage_count)
on_each_cpu(hardware_disable_nolock, NULL, 1);
}
static void hardware_disable_all(void)
{
cpus_read_lock();
mutex_lock(&kvm_lock);
hardware_disable_all_nolock();
mutex_unlock(&kvm_lock);
cpus_read_unlock();
}
static int hardware_enable_all(void)
{
atomic_t failed = ATOMIC_INIT(0);
int r;
/*
* Do not enable hardware virtualization if the system is going down.
* If userspace initiated a forced reboot, e.g. reboot -f, then it's
* possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
* after kvm_reboot() is called. Note, this relies on system_state
* being set _before_ kvm_reboot(), which is why KVM uses a syscore ops
* hook instead of registering a dedicated reboot notifier (the latter
* runs before system_state is updated).
*/
if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
system_state == SYSTEM_RESTART)
return -EBUSY;
/*
* When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
* is called, and so on_each_cpu() between them includes the CPU that
* is being onlined. As a result, hardware_enable_nolock() may get
* invoked before kvm_online_cpu(), which also enables hardware if the
* usage count is non-zero. Disable CPU hotplug to avoid attempting to
* enable hardware multiple times.
*/
cpus_read_lock();
mutex_lock(&kvm_lock);
r = 0;
kvm_usage_count++;
if (kvm_usage_count == 1) {
on_each_cpu(hardware_enable_nolock, &failed, 1);
if (atomic_read(&failed)) {
hardware_disable_all_nolock();
r = -EBUSY;
}
}
mutex_unlock(&kvm_lock);
cpus_read_unlock();
return r;
}
static void kvm_shutdown(void)
{
/*
* Disable hardware virtualization and set kvm_rebooting to indicate
* that KVM has asynchronously disabled hardware virtualization, i.e.
* that relevant errors and exceptions aren't entirely unexpected.
* Some flavors of hardware virtualization need to be disabled before
* transferring control to firmware (to perform shutdown/reboot), e.g.
* on x86, virtualization can block INIT interrupts, which are used by
* firmware to pull APs back under firmware control. Note, this path
* is used for both shutdown and reboot scenarios, i.e. neither name is
* 100% comprehensive.
*/
pr_info("kvm: exiting hardware virtualization\n");
kvm_rebooting = true;
on_each_cpu(hardware_disable_nolock, NULL, 1);
}
static int kvm_suspend(void)
{
/*
* Secondary CPUs and CPU hotplug are disabled across the suspend/resume
* callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
* is stable. Assert that kvm_lock is not held to ensure the system
* isn't suspended while KVM is enabling hardware. Hardware enabling
* can be preempted, but the task cannot be frozen until it has dropped
* all locks (userspace tasks are frozen via a fake signal).
*/
lockdep_assert_not_held(&kvm_lock);
lockdep_assert_irqs_disabled();
if (kvm_usage_count)
hardware_disable_nolock(NULL);
return 0;
}
static void kvm_resume(void)
{
lockdep_assert_not_held(&kvm_lock);
lockdep_assert_irqs_disabled();
if (kvm_usage_count)
WARN_ON_ONCE(__hardware_enable_nolock());
}
static struct syscore_ops kvm_syscore_ops = {
.suspend = kvm_suspend,
.resume = kvm_resume,
.shutdown = kvm_shutdown,
};
#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
static int hardware_enable_all(void)
{
return 0;
}
static void hardware_disable_all(void)
{
}
#endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
static void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
if (dev->ops->destructor)
dev->ops->destructor(dev);
}
static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
int i;
for (i = 0; i < bus->dev_count; i++) {
struct kvm_io_device *pos = bus->range[i].dev;
kvm_iodevice_destructor(pos);
}
kfree(bus);
}
static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
const struct kvm_io_range *r2)
{
gpa_t addr1 = r1->addr;
gpa_t addr2 = r2->addr;
if (addr1 < addr2)
return -1;
/* If r2->len == 0, match the exact address. If r2->len != 0,
* accept any overlapping write. Any order is acceptable for
* overlapping ranges, because kvm_io_bus_get_first_dev ensures
* we process all of them.
*/
if (r2->len) {
addr1 += r1->len;
addr2 += r2->len;
}
if (addr1 > addr2)
return 1;
return 0;
}
static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{
return kvm_io_bus_cmp(p1, p2);
}
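/*
 * Binary-search the sorted @bus->range array for a device matching
 * [@addr, @addr + @len), then walk backwards to the first overlapping
 * entry so that callers can iterate over every match. Returns -ENOENT
 * if nothing on the bus covers the range.
 */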
static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
gpa_t addr, int len)
{
struct kvm_io_range *range, key;
int off;
key = (struct kvm_io_range) {
.addr = addr,
.len = len,
};
range = bsearch(&key, bus->range, bus->dev_count,
sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
if (range == NULL)
return -ENOENT;
off = range - bus->range;
while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
off--;
return off;
}
static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
struct kvm_io_range *range, const void *val)
{
int idx;
idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
if (idx < 0)
return -EOPNOTSUPP;
while (idx < bus->dev_count &&
kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
range->len, val))
return idx;
idx++;
}
return -EOPNOTSUPP;
}
/* kvm_io_bus_write - called under kvm->slots_lock */
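/*
 * Illustrative usage (not taken from this file): an in-kernel device
 * model could forward a guest MMIO write with
 *
 *	r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, gpa, len, data);
 *
 * while holding kvm->slots_lock or the kvm->srcu read lock, so that
 * the bus pointer obtained via srcu_dereference() below stays valid.
 */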
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
struct kvm_io_bus *bus;
struct kvm_io_range range;
int r;
range = (struct kvm_io_range) {
.addr = addr,
.len = len,
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
if (!bus)
return -ENOMEM;
r = __kvm_io_bus_write(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_write);
/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
gpa_t addr, int len, const void *val, long cookie)
{
struct kvm_io_bus *bus;
struct kvm_io_range range;
range = (struct kvm_io_range) {
.addr = addr,
.len = len,
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
if (!bus)
return -ENOMEM;
/* First try the device referenced by cookie. */
if ((cookie >= 0) && (cookie < bus->dev_count) &&
(kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
val))
return cookie;
/*
* cookie contained garbage; fall back to search and return the
* correct cookie value.
*/
return __kvm_io_bus_write(vcpu, bus, &range, val);
}
static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
struct kvm_io_range *range, void *val)
{
int idx;
idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
if (idx < 0)
return -EOPNOTSUPP;
while (idx < bus->dev_count &&
kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
range->len, val))
return idx;
idx++;
}
return -EOPNOTSUPP;
}
/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val)
{
struct kvm_io_bus *bus;
struct kvm_io_range range;
int r;
range = (struct kvm_io_range) {
.addr = addr,
.len = len,
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
if (!bus)
return -ENOMEM;
r = __kvm_io_bus_read(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev)
{
int i;
struct kvm_io_bus *new_bus, *bus;
struct kvm_io_range range;
bus = kvm_get_bus(kvm, bus_idx);
if (!bus)
return -ENOMEM;
/* exclude ioeventfd which is limited by maximum fd */
if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
return -ENOSPC;
new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
GFP_KERNEL_ACCOUNT);
if (!new_bus)
return -ENOMEM;
range = (struct kvm_io_range) {
.addr = addr,
.len = len,
.dev = dev,
};
for (i = 0; i < bus->dev_count; i++)
if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
break;
memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
new_bus->dev_count++;
new_bus->range[i] = range;
memcpy(new_bus->range + i + 1, bus->range + i,
(bus->dev_count - i) * sizeof(struct kvm_io_range));
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);
return 0;
}
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{
int i;
struct kvm_io_bus *new_bus, *bus;
lockdep_assert_held(&kvm->slots_lock);
bus = kvm_get_bus(kvm, bus_idx);
if (!bus)
return 0;
for (i = 0; i < bus->dev_count; i++) {
if (bus->range[i].dev == dev) {
break;
}
}
if (i == bus->dev_count)
return 0;
new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
GFP_KERNEL_ACCOUNT);
if (new_bus) {
memcpy(new_bus, bus, struct_size(bus, range, i));
new_bus->dev_count--;
memcpy(new_bus->range + i, bus->range + i + 1,
flex_array_size(new_bus, range, new_bus->dev_count - i));
}
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
/*
* If NULL bus is installed, destroy the old bus, including all the
* attached devices. Otherwise, destroy the caller's device only.
*/
if (!new_bus) {
pr_err("kvm: failed to shrink bus, removing it completely\n");
kvm_io_bus_destroy(bus);
return -ENOMEM;
}
kvm_iodevice_destructor(dev);
kfree(bus);
return 0;
}
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr)
{
struct kvm_io_bus *bus;
int dev_idx, srcu_idx;
struct kvm_io_device *iodev = NULL;
srcu_idx = srcu_read_lock(&kvm->srcu);
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
if (!bus)
goto out_unlock;
dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
if (dev_idx < 0)
goto out_unlock;
iodev = bus->range[dev_idx].dev;
out_unlock:
srcu_read_unlock(&kvm->srcu, srcu_idx);
return iodev;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
static int kvm_debugfs_open(struct inode *inode, struct file *file,
int (*get)(void *, u64 *), int (*set)(void *, u64),
const char *fmt)
{
int ret;
struct kvm_stat_data *stat_data = inode->i_private;
/*
* The debugfs files are a reference to the kvm struct which
* is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe
* avoids the race between open and the removal of the debugfs directory.
*/
if (!kvm_get_kvm_safe(stat_data->kvm))
return -ENOENT;
ret = simple_attr_open(inode, file, get,
kvm_stats_debugfs_mode(stat_data->desc) & 0222
? set : NULL, fmt);
if (ret)
kvm_put_kvm(stat_data->kvm);
return ret;
}
static int kvm_debugfs_release(struct inode *inode, struct file *file)
{
struct kvm_stat_data *stat_data = inode->i_private;
simple_attr_release(inode, file);
kvm_put_kvm(stat_data->kvm);
return 0;
}
static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
{
*val = *(u64 *)((void *)(&kvm->stat) + offset);
return 0;
}
static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
{
*(u64 *)((void *)(&kvm->stat) + offset) = 0;
return 0;
}
static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
{
unsigned long i;
struct kvm_vcpu *vcpu;
*val = 0;
kvm_for_each_vcpu(i, vcpu, kvm)
*val += *(u64 *)((void *)(&vcpu->stat) + offset);
return 0;
}
static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
{
unsigned long i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
*(u64 *)((void *)(&vcpu->stat) + offset) = 0;
return 0;
}
static int kvm_stat_data_get(void *data, u64 *val)
{
int r = -EFAULT;
struct kvm_stat_data *stat_data = data;
switch (stat_data->kind) {
case KVM_STAT_VM:
r = kvm_get_stat_per_vm(stat_data->kvm,
stat_data->desc->desc.offset, val);
break;
case KVM_STAT_VCPU:
r = kvm_get_stat_per_vcpu(stat_data->kvm,
stat_data->desc->desc.offset, val);
break;
}
return r;
}
static int kvm_stat_data_clear(void *data, u64 val)
{
int r = -EFAULT;
struct kvm_stat_data *stat_data = data;
if (val)
return -EINVAL;
switch (stat_data->kind) {
case KVM_STAT_VM:
r = kvm_clear_stat_per_vm(stat_data->kvm,
stat_data->desc->desc.offset);
break;
case KVM_STAT_VCPU:
r = kvm_clear_stat_per_vcpu(stat_data->kvm,
stat_data->desc->desc.offset);
break;
}
return r;
}
static int kvm_stat_data_open(struct inode *inode, struct file *file)
{
__simple_attr_check_format("%llu\n", 0ull);
return kvm_debugfs_open(inode, file, kvm_stat_data_get,
kvm_stat_data_clear, "%llu\n");
}
static const struct file_operations stat_fops_per_vm = {
.owner = THIS_MODULE,
.open = kvm_stat_data_open,
.release = kvm_debugfs_release,
.read = simple_attr_read,
.write = simple_attr_write,
.llseek = no_llseek,
};
static int vm_stat_get(void *_offset, u64 *val)
{
unsigned offset = (long)_offset;
struct kvm *kvm;
u64 tmp_val;
*val = 0;
mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_get_stat_per_vm(kvm, offset, &tmp_val);
*val += tmp_val;
}
mutex_unlock(&kvm_lock);
return 0;
}
static int vm_stat_clear(void *_offset, u64 val)
{
unsigned offset = (long)_offset;
struct kvm *kvm;
if (val)
return -EINVAL;
mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_clear_stat_per_vm(kvm, offset);
}
mutex_unlock(&kvm_lock);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
static int vcpu_stat_get(void *_offset, u64 *val)
{
unsigned offset = (long)_offset;
struct kvm *kvm;
u64 tmp_val;
*val = 0;
mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
*val += tmp_val;
}
mutex_unlock(&kvm_lock);
return 0;
}
static int vcpu_stat_clear(void *_offset, u64 val)
{
unsigned offset = (long)_offset;
struct kvm *kvm;
if (val)
return -EINVAL;
mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_clear_stat_per_vcpu(kvm, offset);
}
mutex_unlock(&kvm_lock);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
"%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
{
struct kobj_uevent_env *env;
unsigned long long created, active;
if (!kvm_dev.this_device || !kvm)
return;
mutex_lock(&kvm_lock);
if (type == KVM_EVENT_CREATE_VM) {
kvm_createvm_count++;
kvm_active_vms++;
} else if (type == KVM_EVENT_DESTROY_VM) {
kvm_active_vms--;
}
created = kvm_createvm_count;
active = kvm_active_vms;
mutex_unlock(&kvm_lock);
env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
if (!env)
return;
add_uevent_var(env, "CREATED=%llu", created);
add_uevent_var(env, "COUNT=%llu", active);
if (type == KVM_EVENT_CREATE_VM) {
add_uevent_var(env, "EVENT=create");
kvm->userspace_pid = task_pid_nr(current);
} else if (type == KVM_EVENT_DESTROY_VM) {
add_uevent_var(env, "EVENT=destroy");
}
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
if (!IS_ERR(kvm->debugfs_dentry)) {
char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
if (p) {
tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
if (!IS_ERR(tmp))
add_uevent_var(env, "STATS_PATH=%s", tmp);
kfree(p);
}
}
	/* no need for checks, since we are adding at most 5 keys */
env->envp[env->envp_idx++] = NULL;
kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
kfree(env);
}
static void kvm_init_debug(void)
{
const struct file_operations *fops;
const struct _kvm_stats_desc *pdesc;
int i;
kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
pdesc = &kvm_vm_stats_desc[i];
if (kvm_stats_debugfs_mode(pdesc) & 0222)
fops = &vm_stat_fops;
else
fops = &vm_stat_readonly_fops;
debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
kvm_debugfs_dir,
(void *)(long)pdesc->desc.offset, fops);
}
for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
pdesc = &kvm_vcpu_stats_desc[i];
if (kvm_stats_debugfs_mode(pdesc) & 0222)
fops = &vcpu_stat_fops;
else
fops = &vcpu_stat_readonly_fops;
debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
kvm_debugfs_dir,
(void *)(long)pdesc->desc.offset, fops);
}
}
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
return container_of(pn, struct kvm_vcpu, preempt_notifier);
}
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
WRITE_ONCE(vcpu->preempted, false);
WRITE_ONCE(vcpu->ready, false);
__this_cpu_write(kvm_running_vcpu, vcpu);
kvm_arch_sched_in(vcpu, cpu);
kvm_arch_vcpu_load(vcpu, cpu);
}
static void kvm_sched_out(struct preempt_notifier *pn,
struct task_struct *next)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
if (current->on_rq) {
WRITE_ONCE(vcpu->preempted, true);
WRITE_ONCE(vcpu->ready, true);
}
kvm_arch_vcpu_put(vcpu);
__this_cpu_write(kvm_running_vcpu, NULL);
}
/**
* kvm_get_running_vcpu - get the vcpu running on the current CPU.
*
* We can disable preemption locally around accessing the per-CPU variable,
* and use the resolved vcpu pointer after enabling preemption again,
* because even if the current thread is migrated to another CPU, reading
* the per-CPU value later will give us the same value as we update the
* per-CPU variable in the preempt notifier handlers.
*/
struct kvm_vcpu *kvm_get_running_vcpu(void)
{
struct kvm_vcpu *vcpu;
preempt_disable();
vcpu = __this_cpu_read(kvm_running_vcpu);
preempt_enable();
return vcpu;
}
EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
/**
* kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
*/
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{
return &kvm_running_vcpu;
}
#ifdef CONFIG_GUEST_PERF_EVENTS
static unsigned int kvm_guest_state(void)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
unsigned int state;
if (!kvm_arch_pmi_in_guest(vcpu))
return 0;
state = PERF_GUEST_ACTIVE;
if (!kvm_arch_vcpu_in_kernel(vcpu))
state |= PERF_GUEST_USER;
return state;
}
static unsigned long kvm_guest_get_ip(void)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
/* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
return 0;
return kvm_arch_vcpu_get_ip(vcpu);
}
static struct perf_guest_info_callbacks kvm_guest_cbs = {
.state = kvm_guest_state,
.get_ip = kvm_guest_get_ip,
.handle_intel_pt_intr = NULL,
};
void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
{
kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
perf_register_guest_info_callbacks(&kvm_guest_cbs);
}
void kvm_unregister_perf_callbacks(void)
{
perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
}
#endif
int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
{
int r;
int cpu;
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
kvm_online_cpu, kvm_offline_cpu);
if (r)
return r;
register_syscore_ops(&kvm_syscore_ops);
#endif
/* A kmem cache lets us meet the alignment requirements of fx_save. */
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
kvm_vcpu_cache =
kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
SLAB_ACCOUNT,
offsetof(struct kvm_vcpu, arch),
offsetofend(struct kvm_vcpu, stats_id)
- offsetof(struct kvm_vcpu, arch),
NULL);
if (!kvm_vcpu_cache) {
r = -ENOMEM;
goto err_vcpu_cache;
}
for_each_possible_cpu(cpu) {
if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
GFP_KERNEL, cpu_to_node(cpu))) {
r = -ENOMEM;
goto err_cpu_kick_mask;
}
}
r = kvm_irqfd_init();
if (r)
goto err_irqfd;
r = kvm_async_pf_init();
if (r)
goto err_async_pf;
kvm_chardev_ops.owner = module;
kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out;
kvm_init_debug();
r = kvm_vfio_ops_init();
if (WARN_ON_ONCE(r))
goto err_vfio;
/*
* Registration _must_ be the very last thing done, as this exposes
* /dev/kvm to userspace, i.e. all infrastructure must be setup!
*/
r = misc_register(&kvm_dev);
if (r) {
pr_err("kvm: misc device register failed\n");
goto err_register;
}
return 0;
err_register:
kvm_vfio_ops_exit();
err_vfio:
kvm_async_pf_deinit();
err_async_pf:
kvm_irqfd_exit();
err_irqfd:
err_cpu_kick_mask:
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
err_vcpu_cache:
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
unregister_syscore_ops(&kvm_syscore_ops);
cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
#endif
return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
int cpu;
/*
	 * Note, unregistering /dev/kvm doesn't strictly need to come first;
* fops_get(), a.k.a. try_module_get(), prevents acquiring references
* to KVM while the module is being stopped.
*/
misc_deregister(&kvm_dev);
debugfs_remove_recursive(kvm_debugfs_dir);
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
kvm_vfio_ops_exit();
kvm_async_pf_deinit();
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
unregister_syscore_ops(&kvm_syscore_ops);
cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
#endif
kvm_irqfd_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);
struct kvm_vm_worker_thread_context {
struct kvm *kvm;
struct task_struct *parent;
struct completion init_done;
kvm_vm_thread_fn_t thread_fn;
uintptr_t data;
int err;
};
static int kvm_vm_worker_thread(void *context)
{
	/*
	 * The init_context is allocated on the stack of the parent thread, so
	 * we have to locally copy anything that is needed beyond initialization.
	 */
struct kvm_vm_worker_thread_context *init_context = context;
struct task_struct *parent;
struct kvm *kvm = init_context->kvm;
kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
uintptr_t data = init_context->data;
int err;
err = kthread_park(current);
/* kthread_park(current) is never supposed to return an error */
WARN_ON(err != 0);
if (err)
goto init_complete;
err = cgroup_attach_task_all(init_context->parent, current);
if (err) {
kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
__func__, err);
goto init_complete;
}
set_user_nice(current, task_nice(init_context->parent));
init_complete:
init_context->err = err;
complete(&init_context->init_done);
init_context = NULL;
if (err)
goto out;
/* Wait to be woken up by the spawner before proceeding. */
kthread_parkme();
if (!kthread_should_stop())
err = thread_fn(kvm, data);
out:
/*
* Move kthread back to its original cgroup to prevent it lingering in
* the cgroup of the VM process, after the latter finishes its
* execution.
*
* kthread_stop() waits on the 'exited' completion condition which is
* set in exit_mm(), via mm_release(), in do_exit(). However, the
* kthread is removed from the cgroup in the cgroup_exit() which is
* called after the exit_mm(). This causes the kthread_stop() to return
* before the kthread actually quits the cgroup.
*/
rcu_read_lock();
parent = rcu_dereference(current->real_parent);
get_task_struct(parent);
rcu_read_unlock();
cgroup_attach_task_all(parent, current);
put_task_struct(parent);
return err;
}
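/*
 * Spawn a kthread that attaches itself to the caller's cgroups and
 * niceness, parks until unparked by the caller, and then runs
 * @thread_fn(kvm, data). On success the task is returned through
 * @thread_ptr.
 */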
int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
uintptr_t data, const char *name,
struct task_struct **thread_ptr)
{
struct kvm_vm_worker_thread_context init_context = {};
struct task_struct *thread;
*thread_ptr = NULL;
init_context.kvm = kvm;
init_context.parent = current;
init_context.thread_fn = thread_fn;
init_context.data = data;
init_completion(&init_context.init_done);
thread = kthread_run(kvm_vm_worker_thread, &init_context,
"%s-%d", name, task_pid_nr(current));
if (IS_ERR(thread))
return PTR_ERR(thread);
/* kthread_run is never supposed to return NULL */
WARN_ON(thread == NULL);
wait_for_completion(&init_context.init_done);
if (!init_context.err)
*thread_ptr = thread;
return init_context.err;
}
| linux-master | virt/kvm/kvm_main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO-KVM bridge pseudo device
*
* Copyright (C) 2013 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*/
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"
#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif
struct kvm_vfio_file {
struct list_head node;
struct file *file;
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct iommu_group *iommu_group;
#endif
};
struct kvm_vfio {
struct list_head file_list;
struct mutex lock;
bool noncoherent;
};
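/*
 * The vfio symbols are resolved at runtime via symbol_get() so that kvm
 * does not take a hard module dependency on vfio; if vfio is not
 * loaded, these wrappers simply fail gracefully.
 */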
static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
void (*fn)(struct file *file, struct kvm *kvm);
fn = symbol_get(vfio_file_set_kvm);
if (!fn)
return;
fn(file, kvm);
symbol_put(vfio_file_set_kvm);
}
static bool kvm_vfio_file_enforced_coherent(struct file *file)
{
bool (*fn)(struct file *file);
bool ret;
fn = symbol_get(vfio_file_enforced_coherent);
if (!fn)
return false;
ret = fn(file);
symbol_put(vfio_file_enforced_coherent);
return ret;
}
static bool kvm_vfio_file_is_valid(struct file *file)
{
bool (*fn)(struct file *file);
bool ret;
fn = symbol_get(vfio_file_is_valid);
if (!fn)
return false;
ret = fn(file);
symbol_put(vfio_file_is_valid);
return ret;
}
#ifdef CONFIG_SPAPR_TCE_IOMMU
static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
{
struct iommu_group *(*fn)(struct file *file);
struct iommu_group *ret;
fn = symbol_get(vfio_file_iommu_group);
if (!fn)
return NULL;
ret = fn(file);
symbol_put(vfio_file_iommu_group);
return ret;
}
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
struct kvm_vfio_file *kvf)
{
if (WARN_ON_ONCE(!kvf->iommu_group))
return;
kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group);
iommu_group_put(kvf->iommu_group);
kvf->iommu_group = NULL;
}
#endif
/*
* Groups/devices can use the same or different IOMMU domains. If the same
* then adding a new group/device may change the coherency of groups/devices
* we've previously been told about. We don't want to care about any of
* that so we retest each group/device and bail as soon as we find one that's
* noncoherent. This means we only ever [un]register_noncoherent_dma once
* for the whole device.
*/
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
struct kvm_vfio *kv = dev->private;
bool noncoherent = false;
struct kvm_vfio_file *kvf;
list_for_each_entry(kvf, &kv->file_list, node) {
if (!kvm_vfio_file_enforced_coherent(kvf->file)) {
noncoherent = true;
break;
}
}
if (noncoherent != kv->noncoherent) {
kv->noncoherent = noncoherent;
if (kv->noncoherent)
kvm_arch_register_noncoherent_dma(dev->kvm);
else
kvm_arch_unregister_noncoherent_dma(dev->kvm);
}
}
static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
{
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_file *kvf;
struct file *filp;
int ret = 0;
filp = fget(fd);
if (!filp)
return -EBADF;
/* Ensure the FD is a vfio FD. */
if (!kvm_vfio_file_is_valid(filp)) {
ret = -EINVAL;
goto out_fput;
}
mutex_lock(&kv->lock);
list_for_each_entry(kvf, &kv->file_list, node) {
if (kvf->file == filp) {
ret = -EEXIST;
goto out_unlock;
}
}
kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT);
if (!kvf) {
ret = -ENOMEM;
goto out_unlock;
}
kvf->file = get_file(filp);
list_add_tail(&kvf->node, &kv->file_list);
kvm_arch_start_assignment(dev->kvm);
kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
kvm_vfio_update_coherency(dev);
out_unlock:
mutex_unlock(&kv->lock);
out_fput:
fput(filp);
return ret;
}
static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
{
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_file *kvf;
struct fd f;
int ret;
f = fdget(fd);
if (!f.file)
return -EBADF;
ret = -ENOENT;
mutex_lock(&kv->lock);
list_for_each_entry(kvf, &kv->file_list, node) {
if (kvf->file != f.file)
continue;
list_del(&kvf->node);
kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
kvm_vfio_file_set_kvm(kvf->file, NULL);
fput(kvf->file);
kfree(kvf);
ret = 0;
break;
}
kvm_vfio_update_coherency(dev);
mutex_unlock(&kv->lock);
fdput(f);
return ret;
}
#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev,
void __user *arg)
{
struct kvm_vfio_spapr_tce param;
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_file *kvf;
struct fd f;
int ret;
	if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
return -EFAULT;
f = fdget(param.groupfd);
if (!f.file)
return -EBADF;
ret = -ENOENT;
mutex_lock(&kv->lock);
list_for_each_entry(kvf, &kv->file_list, node) {
if (kvf->file != f.file)
continue;
if (!kvf->iommu_group) {
kvf->iommu_group = kvm_vfio_file_iommu_group(kvf->file);
if (WARN_ON_ONCE(!kvf->iommu_group)) {
ret = -EIO;
goto err_fdput;
}
}
ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
kvf->iommu_group);
break;
}
err_fdput:
mutex_unlock(&kv->lock);
fdput(f);
return ret;
}
#endif
static int kvm_vfio_set_file(struct kvm_device *dev, long attr,
void __user *arg)
{
int32_t __user *argp = arg;
int32_t fd;
switch (attr) {
case KVM_DEV_VFIO_FILE_ADD:
if (get_user(fd, argp))
return -EFAULT;
return kvm_vfio_file_add(dev, fd);
case KVM_DEV_VFIO_FILE_DEL:
if (get_user(fd, argp))
return -EFAULT;
return kvm_vfio_file_del(dev, fd);
#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
return kvm_vfio_file_set_spapr_tce(dev, arg);
#endif
}
return -ENXIO;
}
static int kvm_vfio_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->group) {
case KVM_DEV_VFIO_FILE:
return kvm_vfio_set_file(dev, attr->attr,
u64_to_user_ptr(attr->addr));
}
return -ENXIO;
}
static int kvm_vfio_has_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->group) {
case KVM_DEV_VFIO_FILE:
switch (attr->attr) {
case KVM_DEV_VFIO_FILE_ADD:
case KVM_DEV_VFIO_FILE_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
return 0;
}
break;
}
return -ENXIO;
}
static void kvm_vfio_release(struct kvm_device *dev)
{
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_file *kvf, *tmp;
list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
kvm_vfio_file_set_kvm(kvf->file, NULL);
fput(kvf->file);
list_del(&kvf->node);
kfree(kvf);
kvm_arch_end_assignment(dev->kvm);
}
kvm_vfio_update_coherency(dev);
kfree(kv);
kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
}
static int kvm_vfio_create(struct kvm_device *dev, u32 type);
static struct kvm_device_ops kvm_vfio_ops = {
.name = "kvm-vfio",
.create = kvm_vfio_create,
.release = kvm_vfio_release,
.set_attr = kvm_vfio_set_attr,
.has_attr = kvm_vfio_has_attr,
};
static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
struct kvm_device *tmp;
struct kvm_vfio *kv;
/* Only one VFIO "device" per VM */
list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
if (tmp->ops == &kvm_vfio_ops)
return -EBUSY;
kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
if (!kv)
return -ENOMEM;
INIT_LIST_HEAD(&kv->file_list);
mutex_init(&kv->lock);
dev->private = kv;
return 0;
}
int kvm_vfio_ops_init(void)
{
return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}
void kvm_vfio_ops_exit(void)
{
kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}
| linux-master | virt/kvm/vfio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kernel-based Virtual Machine driver for Linux
*
* This module enables kernel and guest-mode vCPU access to guest physical
* memory with suitable invalidation mechanisms.
*
* Copyright © 2021 Amazon.com, Inc. or its affiliates.
*
* Authors:
* David Woodhouse <[email protected]>
*/
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include "kvm_mm.h"
/*
* MMU notifier 'invalidate_range_start' hook.
*/
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
unsigned long end, bool may_block)
{
DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
struct gfn_to_pfn_cache *gpc;
bool evict_vcpus = false;
spin_lock(&kvm->gpc_lock);
list_for_each_entry(gpc, &kvm->gpc_list, list) {
write_lock_irq(&gpc->lock);
/* Only a single page so no need to care about length */
if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
gpc->uhva >= start && gpc->uhva < end) {
gpc->valid = false;
/*
* If a guest vCPU could be using the physical address,
* it needs to be forced out of guest mode.
*/
if (gpc->usage & KVM_GUEST_USES_PFN) {
if (!evict_vcpus) {
evict_vcpus = true;
bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
}
__set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
}
}
write_unlock_irq(&gpc->lock);
}
spin_unlock(&kvm->gpc_lock);
if (evict_vcpus) {
/*
* KVM needs to ensure the vCPU is fully out of guest context
* before allowing the invalidation to continue.
*/
unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
bool called;
/*
* If the OOM reaper is active, then all vCPUs should have
* been stopped already, so perform the request without
* KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
*/
if (!may_block)
req &= ~KVM_REQUEST_WAIT;
called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
WARN_ON_ONCE(called && !may_block);
}
}
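/*
 * Check whether the cache can be used for an access of @len bytes: it
 * must be active and valid, the access must not cross a page boundary,
 * and the cached translation must match the current memslot generation.
 */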
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
if (!gpc->active)
return false;
if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
return false;
if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
return false;
if (!gpc->valid)
return false;
return true;
}
EXPORT_SYMBOL_GPL(kvm_gpc_check);
static void gpc_unmap_khva(kvm_pfn_t pfn, void *khva)
{
/* Unmap the old pfn/page if it was mapped before. */
if (!is_error_noslot_pfn(pfn) && khva) {
if (pfn_valid(pfn))
kunmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
else
memunmap(khva);
#endif
}
}
static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
/*
* mn_active_invalidate_count acts for all intents and purposes
* like mmu_invalidate_in_progress here; but the latter cannot
* be used here because the invalidation of caches in the
* mmu_notifier event occurs _before_ mmu_invalidate_in_progress
* is elevated.
*
* Note, it does not matter that mn_active_invalidate_count
* is not protected by gpc->lock. It is guaranteed to
* be elevated before the mmu_notifier acquires gpc->lock, and
* isn't dropped until after mmu_invalidate_seq is updated.
*/
if (kvm->mn_active_invalidate_count)
return true;
/*
* Ensure mn_active_invalidate_count is read before
* mmu_invalidate_seq. This pairs with the smp_wmb() in
* mmu_notifier_invalidate_range_end() to guarantee either the
* old (non-zero) value of mn_active_invalidate_count or the
* new (incremented) value of mmu_invalidate_seq is observed.
*/
smp_rmb();
return kvm->mmu_invalidate_seq != mmu_seq;
}
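/*
 * Translate gpc->uhva to a pfn, retrying if an mmu_notifier
 * invalidation races with the lookup. gpc->lock is dropped around
 * operations that may sleep and re-acquired before returning. Returns
 * 0 on success or -EFAULT on failure.
 */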
static int hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
/* Note, the new page offset may be different than the old! */
void *old_khva = gpc->khva - offset_in_page(gpc->khva);
kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
void *new_khva = NULL;
unsigned long mmu_seq;
lockdep_assert_held(&gpc->refresh_lock);
lockdep_assert_held_write(&gpc->lock);
/*
* Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
* assets have already been updated and so a concurrent check() from a
* different task may not fail the gpa/uhva/generation checks.
*/
gpc->valid = false;
do {
mmu_seq = gpc->kvm->mmu_invalidate_seq;
smp_rmb();
write_unlock_irq(&gpc->lock);
/*
* If the previous iteration "failed" due to an mmu_notifier
* event, release the pfn and unmap the kernel virtual address
* from the previous attempt. Unmapping might sleep, so this
* needs to be done after dropping the lock. Opportunistically
* check for resched while the lock isn't held.
*/
if (new_pfn != KVM_PFN_ERR_FAULT) {
/*
* Keep the mapping if the previous iteration reused
* the existing mapping and didn't create a new one.
*/
if (new_khva != old_khva)
gpc_unmap_khva(new_pfn, new_khva);
kvm_release_pfn_clean(new_pfn);
cond_resched();
}
/* We always request a writeable mapping */
new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
if (is_error_noslot_pfn(new_pfn))
goto out_error;
/*
* Obtain a new kernel mapping if KVM itself will access the
* pfn. Note, kmap() and memremap() can both sleep, so this
* too must be done outside of gpc->lock!
*/
if (gpc->usage & KVM_HOST_USES_PFN) {
if (new_pfn == gpc->pfn) {
new_khva = old_khva;
} else if (pfn_valid(new_pfn)) {
new_khva = kmap(pfn_to_page(new_pfn));
#ifdef CONFIG_HAS_IOMEM
} else {
new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
}
if (!new_khva) {
kvm_release_pfn_clean(new_pfn);
goto out_error;
}
}
write_lock_irq(&gpc->lock);
/*
* Other tasks must wait for _this_ refresh to complete before
* attempting to refresh.
*/
WARN_ON_ONCE(gpc->valid);
} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));
gpc->valid = true;
gpc->pfn = new_pfn;
gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);
/*
* Put the reference to the _new_ pfn. The pfn is now tracked by the
* cache and can be safely migrated, swapped, etc... as the cache will
* invalidate any mappings in response to relevant mmu_notifier events.
*/
kvm_release_pfn_clean(new_pfn);
return 0;
out_error:
write_lock_irq(&gpc->lock);
return -EFAULT;
}
static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
unsigned long page_offset = gpa & ~PAGE_MASK;
bool unmap_old = false;
unsigned long old_uhva;
kvm_pfn_t old_pfn;
void *old_khva;
int ret;
	/*
	 * The cached access must fit within a single page; the 'len' argument
	 * exists only to enforce that.
	 */
if (page_offset + len > PAGE_SIZE)
return -EINVAL;
/*
* If another task is refreshing the cache, wait for it to complete.
* There is no guarantee that concurrent refreshes will see the same
* gpa, memslots generation, etc..., so they must be fully serialized.
*/
mutex_lock(&gpc->refresh_lock);
write_lock_irq(&gpc->lock);
if (!gpc->active) {
ret = -EINVAL;
goto out_unlock;
}
old_pfn = gpc->pfn;
old_khva = gpc->khva - offset_in_page(gpc->khva);
old_uhva = gpc->uhva;
/* If the userspace HVA is invalid, refresh that first */
if (gpc->gpa != gpa || gpc->generation != slots->generation ||
kvm_is_error_hva(gpc->uhva)) {
gfn_t gfn = gpa_to_gfn(gpa);
gpc->gpa = gpa;
gpc->generation = slots->generation;
gpc->memslot = __gfn_to_memslot(slots, gfn);
gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
if (kvm_is_error_hva(gpc->uhva)) {
ret = -EFAULT;
goto out;
}
}
/*
* If the userspace HVA changed or the PFN was already invalid,
* drop the lock and do the HVA to PFN lookup again.
*/
if (!gpc->valid || old_uhva != gpc->uhva) {
ret = hva_to_pfn_retry(gpc);
} else {
/*
* If the HVA→PFN mapping was already valid, don't unmap it.
* But do update gpc->khva because the offset within the page
* may have changed.
*/
gpc->khva = old_khva + page_offset;
ret = 0;
goto out_unlock;
}
out:
/*
* Invalidate the cache and purge the pfn/khva if the refresh failed.
	 * Some/all of the uhva, gpa, and memslot generation info may still be
	 * valid; leave it as is.
*/
if (ret) {
gpc->valid = false;
gpc->pfn = KVM_PFN_ERR_FAULT;
gpc->khva = NULL;
}
/* Detect a pfn change before dropping the lock! */
unmap_old = (old_pfn != gpc->pfn);
out_unlock:
write_unlock_irq(&gpc->lock);
mutex_unlock(&gpc->refresh_lock);
if (unmap_old)
gpc_unmap_khva(old_pfn, old_khva);
return ret;
}
int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
return __kvm_gpc_refresh(gpc, gpc->gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
{
WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);
rwlock_init(&gpc->lock);
mutex_init(&gpc->refresh_lock);
gpc->kvm = kvm;
gpc->vcpu = vcpu;
gpc->usage = usage;
gpc->pfn = KVM_PFN_ERR_FAULT;
gpc->uhva = KVM_HVA_ERR_BAD;
}
EXPORT_SYMBOL_GPL(kvm_gpc_init);
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
struct kvm *kvm = gpc->kvm;
if (!gpc->active) {
if (KVM_BUG_ON(gpc->valid, kvm))
return -EIO;
spin_lock(&kvm->gpc_lock);
list_add(&gpc->list, &kvm->gpc_list);
spin_unlock(&kvm->gpc_lock);
/*
* Activate the cache after adding it to the list, a concurrent
* refresh must not establish a mapping until the cache is
* reachable by mmu_notifier events.
*/
write_lock_irq(&gpc->lock);
gpc->active = true;
write_unlock_irq(&gpc->lock);
}
return __kvm_gpc_refresh(gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
struct kvm *kvm = gpc->kvm;
kvm_pfn_t old_pfn;
void *old_khva;
if (gpc->active) {
/*
* Deactivate the cache before removing it from the list, KVM
* must stall mmu_notifier events until all users go away, i.e.
* until gpc->lock is dropped and refresh is guaranteed to fail.
*/
write_lock_irq(&gpc->lock);
gpc->active = false;
gpc->valid = false;
/*
* Leave the GPA => uHVA cache intact, it's protected by the
* memslot generation. The PFN lookup needs to be redone every
* time as mmu_notifier protection is lost when the cache is
* removed from the VM's gpc_list.
*/
old_khva = gpc->khva - offset_in_page(gpc->khva);
gpc->khva = NULL;
old_pfn = gpc->pfn;
gpc->pfn = KVM_PFN_ERR_FAULT;
write_unlock_irq(&gpc->lock);
spin_lock(&kvm->gpc_lock);
list_del(&gpc->list);
spin_unlock(&kvm->gpc_lock);
gpc_unmap_khva(old_pfn, old_khva);
}
}
EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
| linux-master | virt/kvm/pfncache.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* irqchip.c: Common API for in kernel interrupt controllers
* Copyright (c) 2007, Intel Corporation.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
* Copyright (c) 2013, Alexander Graf <[email protected]>
*
* This file is derived from virt/kvm/irq_comm.c.
*
* Authors:
* Yaozu (Eddie) Dong <[email protected]>
* Alexander Graf <[email protected]>
*/
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/export.h>
#include <trace/events/kvm.h>
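/*
 * Copy the routing entries registered for @gsi into @entries and return
 * how many were copied. The caller must hold either kvm->irq_lock or
 * the kvm->irq_srcu read lock, per the srcu_dereference_check() below.
 */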
int kvm_irq_map_gsi(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
struct kvm_irq_routing_table *irq_rt;
struct kvm_kernel_irq_routing_entry *e;
int n = 0;
irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
lockdep_is_held(&kvm->irq_lock));
if (irq_rt && gsi < irq_rt->nr_rt_entries) {
hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
entries[n] = *e;
++n;
}
}
return n;
}
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
struct kvm_irq_routing_table *irq_rt;
irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
return irq_rt->chip[irqchip][pin];
}
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
{
struct kvm_kernel_irq_routing_entry route;
if (!kvm_arch_irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID))
return -EINVAL;
route.msi.address_lo = msi->address_lo;
route.msi.address_hi = msi->address_hi;
route.msi.data = msi->data;
route.msi.flags = msi->flags;
route.msi.devid = msi->devid;
return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
}
/*
* Return value:
* < 0 Interrupt was ignored (masked or not delivered for other reasons)
* = 0 Interrupt was coalesced (previous irq is still pending)
* > 0 Number of CPUs interrupt was delivered to
*/
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
bool line_status)
{
struct kvm_kernel_irq_routing_entry irq_set[KVM_NR_IRQCHIPS];
int ret = -1, i, idx;
trace_kvm_set_irq(irq, level, irq_source_id);
/* Not possible to detect if the guest uses the PIC or the
* IOAPIC. So set the bit in both. The guest will ignore
* writes to the unused one.
*/
idx = srcu_read_lock(&kvm->irq_srcu);
i = kvm_irq_map_gsi(kvm, irq_set, irq);
srcu_read_unlock(&kvm->irq_srcu, idx);
while (i--) {
int r;
r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
line_status);
if (r < 0)
continue;
ret = r + ((ret < 0) ? 0 : ret);
}
return ret;
}
static void free_irq_routing_table(struct kvm_irq_routing_table *rt)
{
int i;
if (!rt)
return;
for (i = 0; i < rt->nr_rt_entries; ++i) {
struct kvm_kernel_irq_routing_entry *e;
struct hlist_node *n;
hlist_for_each_entry_safe(e, n, &rt->map[i], link) {
hlist_del(&e->link);
kfree(e);
}
}
kfree(rt);
}
void kvm_free_irq_routing(struct kvm *kvm)
{
	/*
	 * Called only during vm destruction.  Nobody can use the pointer
	 * at this stage.
	 */
struct kvm_irq_routing_table *rt = rcu_access_pointer(kvm->irq_routing);
free_irq_routing_table(rt);
}
static int setup_routing_entry(struct kvm *kvm,
struct kvm_irq_routing_table *rt,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
struct kvm_kernel_irq_routing_entry *ei;
int r;
u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES);
	/*
	 * Do not allow a GSI to be mapped to the same irqchip more than once.
	 * Allow only a one-to-one mapping between GSI and non-irqchip routing.
	 */
hlist_for_each_entry(ei, &rt->map[gsi], link)
if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
ue->u.irqchip.irqchip == ei->irqchip.irqchip)
return -EINVAL;
e->gsi = gsi;
e->type = ue->type;
r = kvm_set_routing_entry(kvm, e, ue);
if (r)
return r;
if (e->type == KVM_IRQ_ROUTING_IRQCHIP)
rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi;
hlist_add_head(&e->link, &rt->map[e->gsi]);
return 0;
}
void __attribute__((weak)) kvm_arch_irq_routing_update(struct kvm *kvm)
{
}
bool __weak kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
return true;
}
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *ue,
unsigned nr,
unsigned flags)
{
struct kvm_irq_routing_table *new, *old;
struct kvm_kernel_irq_routing_entry *e;
u32 i, j, nr_rt_entries = 0;
int r;
for (i = 0; i < nr; ++i) {
if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES)
return -EINVAL;
nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
}
nr_rt_entries += 1;
new = kzalloc(struct_size(new, map, nr_rt_entries), GFP_KERNEL_ACCOUNT);
if (!new)
return -ENOMEM;
new->nr_rt_entries = nr_rt_entries;
for (i = 0; i < KVM_NR_IRQCHIPS; i++)
for (j = 0; j < KVM_IRQCHIP_NUM_PINS; j++)
new->chip[i][j] = -1;
for (i = 0; i < nr; ++i) {
r = -ENOMEM;
e = kzalloc(sizeof(*e), GFP_KERNEL_ACCOUNT);
if (!e)
goto out;
r = -EINVAL;
switch (ue->type) {
case KVM_IRQ_ROUTING_MSI:
if (ue->flags & ~KVM_MSI_VALID_DEVID)
goto free_entry;
break;
default:
if (ue->flags)
goto free_entry;
break;
}
r = setup_routing_entry(kvm, new, e, ue);
if (r)
goto free_entry;
++ue;
}
mutex_lock(&kvm->irq_lock);
old = rcu_dereference_protected(kvm->irq_routing, 1);
rcu_assign_pointer(kvm->irq_routing, new);
kvm_irq_routing_update(kvm);
kvm_arch_irq_routing_update(kvm);
mutex_unlock(&kvm->irq_lock);
kvm_arch_post_irq_routing_update(kvm);
synchronize_srcu_expedited(&kvm->irq_srcu);
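	/* Reuse 'new' so the common exit path below frees the old table. */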
new = old;
r = 0;
goto out;
free_entry:
kfree(e);
out:
free_irq_routing_table(new);
return r;
}
| linux-master | virt/kvm/irqchip.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* kvm eventfd support - use eventfd objects to signal various KVM events
*
* Copyright 2009 Novell. All Rights Reserved.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Author:
* Gregory Haskins <[email protected]>
*/
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>
#include <kvm/iodev.h>
#ifdef CONFIG_HAVE_KVM_IRQFD
static struct workqueue_struct *irqfd_cleanup_wq;
bool __attribute__((weak))
kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
return true;
}
static void
irqfd_inject(struct work_struct *work)
{
struct kvm_kernel_irqfd *irqfd =
container_of(work, struct kvm_kernel_irqfd, inject);
struct kvm *kvm = irqfd->kvm;
if (!irqfd->resampler) {
kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
false);
kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
false);
} else
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
irqfd->gsi, 1, false);
}
static void irqfd_resampler_notify(struct kvm_kernel_irqfd_resampler *resampler)
{
struct kvm_kernel_irqfd *irqfd;
list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
srcu_read_lock_held(&resampler->kvm->irq_srcu))
eventfd_signal(irqfd->resamplefd, 1);
}
/*
* Since resampler irqfds share an IRQ source ID, we de-assert once
* then notify all of the resampler irqfds using this GSI. We can't
* do multiple de-asserts or we risk racing with incoming re-asserts.
*/
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
struct kvm_kernel_irqfd_resampler *resampler;
struct kvm *kvm;
int idx;
resampler = container_of(kian,
struct kvm_kernel_irqfd_resampler, notifier);
kvm = resampler->kvm;
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
resampler->notifier.gsi, 0, false);
idx = srcu_read_lock(&kvm->irq_srcu);
irqfd_resampler_notify(resampler);
srcu_read_unlock(&kvm->irq_srcu, idx);
}
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
struct kvm *kvm = resampler->kvm;
mutex_lock(&kvm->irqfds.resampler_lock);
list_del_rcu(&irqfd->resampler_link);
synchronize_srcu(&kvm->irq_srcu);
if (list_empty(&resampler->list)) {
list_del_rcu(&resampler->link);
kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
/*
* synchronize_srcu(&kvm->irq_srcu) already called
* in kvm_unregister_irq_ack_notifier().
*/
kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
resampler->notifier.gsi, 0, false);
kfree(resampler);
}
mutex_unlock(&kvm->irqfds.resampler_lock);
}
/*
* Race-free decouple logic (ordering is critical)
*/
static void
irqfd_shutdown(struct work_struct *work)
{
struct kvm_kernel_irqfd *irqfd =
container_of(work, struct kvm_kernel_irqfd, shutdown);
struct kvm *kvm = irqfd->kvm;
u64 cnt;
/* Make sure irqfd has been initialized in assign path. */
synchronize_srcu(&kvm->irq_srcu);
/*
* Synchronize with the wait-queue and unhook ourselves to prevent
* further events.
*/
eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
/*
* We know no new events will be scheduled at this point, so block
* until all previously outstanding events have completed
*/
flush_work(&irqfd->inject);
if (irqfd->resampler) {
irqfd_resampler_shutdown(irqfd);
eventfd_ctx_put(irqfd->resamplefd);
}
/*
* It is now safe to release the object's resources
*/
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
eventfd_ctx_put(irqfd->eventfd);
kfree(irqfd);
}
/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}
/*
* Mark the irqfd as inactive and schedule it for removal
*
* assumes kvm->irqfds.lock is held
*/
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
BUG_ON(!irqfd_is_active(irqfd));
list_del_init(&irqfd->list);
queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
int __attribute__((weak)) kvm_arch_set_irq_inatomic(
struct kvm_kernel_irq_routing_entry *irq,
struct kvm *kvm, int irq_source_id,
int level,
bool line_status)
{
return -EWOULDBLOCK;
}
/*
* Called with wqh->lock held and interrupts disabled
*/
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
struct kvm_kernel_irqfd *irqfd =
container_of(wait, struct kvm_kernel_irqfd, wait);
__poll_t flags = key_to_poll(key);
struct kvm_kernel_irq_routing_entry irq;
struct kvm *kvm = irqfd->kvm;
unsigned seq;
int idx;
int ret = 0;
if (flags & EPOLLIN) {
u64 cnt;
eventfd_ctx_do_read(irqfd->eventfd, &cnt);
idx = srcu_read_lock(&kvm->irq_srcu);
do {
seq = read_seqcount_begin(&irqfd->irq_entry_sc);
irq = irqfd->irq_entry;
} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
/* An event has been signaled, inject an interrupt */
if (kvm_arch_set_irq_inatomic(&irq, kvm,
KVM_USERSPACE_IRQ_SOURCE_ID, 1,
false) == -EWOULDBLOCK)
schedule_work(&irqfd->inject);
srcu_read_unlock(&kvm->irq_srcu, idx);
ret = 1;
}
if (flags & EPOLLHUP) {
/* The eventfd is closing, detach from KVM */
unsigned long iflags;
spin_lock_irqsave(&kvm->irqfds.lock, iflags);
/*
* We must check if someone deactivated the irqfd before
* we could acquire the irqfds.lock since the item is
* deactivated from the KVM side before it is unhooked from
* the wait-queue. If it is already deactivated, we can
* simply return knowing the other side will cleanup for us.
* We cannot race against the irqfd going away since the
* other side is required to acquire wqh->lock, which we hold
*/
if (irqfd_is_active(irqfd))
irqfd_deactivate(irqfd);
spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
}
return ret;
}
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
struct kvm_kernel_irqfd *irqfd =
container_of(pt, struct kvm_kernel_irqfd, pt);
add_wait_queue_priority(wqh, &irqfd->wait);
}
/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
int n_entries;
n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
write_seqcount_begin(&irqfd->irq_entry_sc);
e = entries;
if (n_entries == 1)
irqfd->irq_entry = *e;
else
irqfd->irq_entry.type = 0;
write_seqcount_end(&irqfd->irq_entry_sc);
}
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
struct irq_bypass_consumer *cons)
{
}
void __attribute__((weak)) kvm_arch_irq_bypass_start(
struct irq_bypass_consumer *cons)
{
}
int __attribute__((weak)) kvm_arch_update_irqfd_routing(
struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set)
{
return 0;
}
bool __attribute__((weak)) kvm_arch_irqfd_route_changed(
struct kvm_kernel_irq_routing_entry *old,
struct kvm_kernel_irq_routing_entry *new)
{
return true;
}
#endif
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
struct kvm_kernel_irqfd *irqfd, *tmp;
struct fd f;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
int ret;
__poll_t events;
int idx;
if (!kvm_arch_intc_initialized(kvm))
return -EAGAIN;
if (!kvm_arch_irqfd_allowed(kvm, args))
return -EINVAL;
irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
if (!irqfd)
return -ENOMEM;
irqfd->kvm = kvm;
irqfd->gsi = args->gsi;
INIT_LIST_HEAD(&irqfd->list);
INIT_WORK(&irqfd->inject, irqfd_inject);
INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);
f = fdget(args->fd);
if (!f.file) {
ret = -EBADF;
goto out;
}
eventfd = eventfd_ctx_fileget(f.file);
if (IS_ERR(eventfd)) {
ret = PTR_ERR(eventfd);
goto fail;
}
irqfd->eventfd = eventfd;
if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
struct kvm_kernel_irqfd_resampler *resampler;
resamplefd = eventfd_ctx_fdget(args->resamplefd);
if (IS_ERR(resamplefd)) {
ret = PTR_ERR(resamplefd);
goto fail;
}
irqfd->resamplefd = resamplefd;
INIT_LIST_HEAD(&irqfd->resampler_link);
mutex_lock(&kvm->irqfds.resampler_lock);
list_for_each_entry(resampler,
&kvm->irqfds.resampler_list, link) {
if (resampler->notifier.gsi == irqfd->gsi) {
irqfd->resampler = resampler;
break;
}
}
if (!irqfd->resampler) {
resampler = kzalloc(sizeof(*resampler),
GFP_KERNEL_ACCOUNT);
if (!resampler) {
ret = -ENOMEM;
mutex_unlock(&kvm->irqfds.resampler_lock);
goto fail;
}
resampler->kvm = kvm;
INIT_LIST_HEAD(&resampler->list);
resampler->notifier.gsi = irqfd->gsi;
resampler->notifier.irq_acked = irqfd_resampler_ack;
INIT_LIST_HEAD(&resampler->link);
list_add_rcu(&resampler->link, &kvm->irqfds.resampler_list);
kvm_register_irq_ack_notifier(kvm,
&resampler->notifier);
irqfd->resampler = resampler;
}
list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
synchronize_srcu(&kvm->irq_srcu);
mutex_unlock(&kvm->irqfds.resampler_lock);
}
/*
* Install our own custom wake-up handling so we are notified via
* a callback whenever someone signals the underlying eventfd
*/
init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);
spin_lock_irq(&kvm->irqfds.lock);
ret = 0;
list_for_each_entry(tmp, &kvm->irqfds.items, list) {
if (irqfd->eventfd != tmp->eventfd)
continue;
/* This fd is used for another irq already. */
ret = -EBUSY;
spin_unlock_irq(&kvm->irqfds.lock);
goto fail;
}
idx = srcu_read_lock(&kvm->irq_srcu);
irqfd_update(kvm, irqfd);
list_add_tail(&irqfd->list, &kvm->irqfds.items);
spin_unlock_irq(&kvm->irqfds.lock);
/*
* Check if there was an event already pending on the eventfd
* before we registered, and trigger it as if we didn't miss it.
*/
events = vfs_poll(f.file, &irqfd->pt);
if (events & EPOLLIN)
schedule_work(&irqfd->inject);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
if (kvm_arch_has_irq_bypass()) {
irqfd->consumer.token = (void *)irqfd->eventfd;
irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
irqfd->consumer.start = kvm_arch_irq_bypass_start;
ret = irq_bypass_register_consumer(&irqfd->consumer);
if (ret)
pr_info("irq bypass consumer (token %p) registration fails: %d\n",
irqfd->consumer.token, ret);
}
#endif
srcu_read_unlock(&kvm->irq_srcu, idx);
/*
* do not drop the file until the irqfd is fully initialized, otherwise
* we might race against the EPOLLHUP
*/
fdput(f);
return 0;
fail:
if (irqfd->resampler)
irqfd_resampler_shutdown(irqfd);
if (resamplefd && !IS_ERR(resamplefd))
eventfd_ctx_put(resamplefd);
if (eventfd && !IS_ERR(eventfd))
eventfd_ctx_put(eventfd);
fdput(f);
out:
kfree(irqfd);
return ret;
}
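/*
 * Illustrative userspace sketch, not part of the original file: a VMM
 * typically wires an eventfd to a guest interrupt line with KVM_IRQFD
 * (the GSI number here is made up):
 *
 *	struct kvm_irqfd args = { .fd = efd, .gsi = 5 };
 *	ioctl(vm_fd, KVM_IRQFD, &args);
 *
 * After this, every write to the eventfd injects GSI 5 entirely in the
 * kernel, with no round trip through userspace.
 */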
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
struct kvm_irq_ack_notifier *kian;
int gsi, idx;
idx = srcu_read_lock(&kvm->irq_srcu);
gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
if (gsi != -1)
hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
link, srcu_read_lock_held(&kvm->irq_srcu))
if (kian->gsi == gsi) {
srcu_read_unlock(&kvm->irq_srcu, idx);
return true;
}
srcu_read_unlock(&kvm->irq_srcu, idx);
return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
struct kvm_irq_ack_notifier *kian;
hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
link, srcu_read_lock_held(&kvm->irq_srcu))
if (kian->gsi == gsi)
kian->irq_acked(kian);
}
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
int gsi, idx;
trace_kvm_ack_irq(irqchip, pin);
idx = srcu_read_lock(&kvm->irq_srcu);
gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
if (gsi != -1)
kvm_notify_acked_gsi(kvm, gsi);
srcu_read_unlock(&kvm->irq_srcu, idx);
}
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian)
{
mutex_lock(&kvm->irq_lock);
hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
mutex_unlock(&kvm->irq_lock);
kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian)
{
mutex_lock(&kvm->irq_lock);
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
synchronize_srcu(&kvm->irq_srcu);
kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
#endif
void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
spin_lock_init(&kvm->irqfds.lock);
INIT_LIST_HEAD(&kvm->irqfds.items);
INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
mutex_init(&kvm->irqfds.resampler_lock);
#endif
INIT_LIST_HEAD(&kvm->ioeventfds);
}
#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi.
*/
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
struct kvm_kernel_irqfd *irqfd, *tmp;
struct eventfd_ctx *eventfd;
eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
spin_lock_irq(&kvm->irqfds.lock);
list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
/*
* This clearing of irq_entry.type is needed for when
* another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
* kvm_irq_routing_update using irqfds.lock).
*/
write_seqcount_begin(&irqfd->irq_entry_sc);
irqfd->irq_entry.type = 0;
write_seqcount_end(&irqfd->irq_entry_sc);
irqfd_deactivate(irqfd);
}
}
spin_unlock_irq(&kvm->irqfds.lock);
eventfd_ctx_put(eventfd);
/*
* Block until we know all outstanding shutdown jobs have completed
* so that we guarantee there will not be any more interrupts on this
* gsi once this deassign function returns.
*/
flush_workqueue(irqfd_cleanup_wq);
return 0;
}
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
return -EINVAL;
if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
return kvm_irqfd_deassign(kvm, args);
return kvm_irqfd_assign(kvm, args);
}
/*
 * This function is called as the kvm VM fd is being released. Shut down
 * all irqfds that still remain open.
*/
void
kvm_irqfd_release(struct kvm *kvm)
{
struct kvm_kernel_irqfd *irqfd, *tmp;
spin_lock_irq(&kvm->irqfds.lock);
list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
irqfd_deactivate(irqfd);
spin_unlock_irq(&kvm->irqfds.lock);
/*
* Block until we know all outstanding shutdown jobs have completed
* since we do not take a kvm* reference.
*/
flush_workqueue(irqfd_cleanup_wq);
}
/*
* Take note of a change in irq routing.
* Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
*/
void kvm_irq_routing_update(struct kvm *kvm)
{
struct kvm_kernel_irqfd *irqfd;
spin_lock_irq(&kvm->irqfds.lock);
list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
/* Under irqfds.lock, so can read irq_entry safely */
struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;
#endif
irqfd_update(kvm, irqfd);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
if (irqfd->producer &&
kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
int ret = kvm_arch_update_irqfd_routing(
irqfd->kvm, irqfd->producer->irq,
irqfd->gsi, 1);
WARN_ON(ret);
}
#endif
}
spin_unlock_irq(&kvm->irqfds.lock);
}
bool kvm_notify_irqfd_resampler(struct kvm *kvm,
unsigned int irqchip,
unsigned int pin)
{
struct kvm_kernel_irqfd_resampler *resampler;
int gsi, idx;
idx = srcu_read_lock(&kvm->irq_srcu);
gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
if (gsi != -1) {
list_for_each_entry_srcu(resampler,
&kvm->irqfds.resampler_list, link,
srcu_read_lock_held(&kvm->irq_srcu)) {
if (resampler->notifier.gsi == gsi) {
irqfd_resampler_notify(resampler);
srcu_read_unlock(&kvm->irq_srcu, idx);
return true;
}
}
}
srcu_read_unlock(&kvm->irq_srcu, idx);
return false;
}
/*
* create a host-wide workqueue for issuing deferred shutdown requests
* aggregated from all vm* instances. We need our own isolated
* queue to ease flushing work items when a VM exits.
*/
int kvm_irqfd_init(void)
{
irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
if (!irqfd_cleanup_wq)
return -ENOMEM;
return 0;
}
void kvm_irqfd_exit(void)
{
destroy_workqueue(irqfd_cleanup_wq);
}
#endif
/*
* --------------------------------------------------------------------
* ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
*
* userspace can register a PIO/MMIO address with an eventfd for receiving
* notification when the memory has been touched.
* --------------------------------------------------------------------
*/
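/*
 * Illustrative userspace sketch, not part of the original file: a VMM
 * registers a doorbell with KVM_IOEVENTFD so that guest writes signal an
 * eventfd instead of exiting to userspace (the address is made up):
 *
 *	struct kvm_ioeventfd args = {
 *		.addr = 0xfe000000, .len = 4, .fd = efd,
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &args);
 */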
struct _ioeventfd {
struct list_head list;
u64 addr;
int length;
struct eventfd_ctx *eventfd;
u64 datamatch;
struct kvm_io_device dev;
u8 bus_idx;
bool wildcard;
};
static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
return container_of(dev, struct _ioeventfd, dev);
}
static void
ioeventfd_release(struct _ioeventfd *p)
{
eventfd_ctx_put(p->eventfd);
list_del(&p->list);
kfree(p);
}
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
u64 _val;
if (addr != p->addr)
/* address must be precise for a hit */
return false;
if (!p->length)
/* length = 0 means only look at the address, so always a hit */
return true;
if (len != p->length)
/* address-range must be precise for a hit */
return false;
if (p->wildcard)
/* all else equal, wildcard is always a hit */
return true;
/* otherwise, we have to actually compare the data */
BUG_ON(!IS_ALIGNED((unsigned long)val, len));
switch (len) {
case 1:
_val = *(u8 *)val;
break;
case 2:
_val = *(u16 *)val;
break;
case 4:
_val = *(u32 *)val;
break;
case 8:
_val = *(u64 *)val;
break;
default:
return false;
}
return _val == p->datamatch;
}
/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
int len, const void *val)
{
struct _ioeventfd *p = to_ioeventfd(this);
if (!ioeventfd_in_range(p, addr, len, val))
return -EOPNOTSUPP;
eventfd_signal(p->eventfd, 1);
return 0;
}
/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
struct _ioeventfd *p = to_ioeventfd(this);
ioeventfd_release(p);
}
static const struct kvm_io_device_ops ioeventfd_ops = {
.write = ioeventfd_write,
.destructor = ioeventfd_destructor,
};
/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
struct _ioeventfd *_p;
list_for_each_entry(_p, &kvm->ioeventfds, list)
if (_p->bus_idx == p->bus_idx &&
_p->addr == p->addr &&
(!_p->length || !p->length ||
(_p->length == p->length &&
(_p->wildcard || p->wildcard ||
_p->datamatch == p->datamatch))))
return true;
return false;
}
static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
if (flags & KVM_IOEVENTFD_FLAG_PIO)
return KVM_PIO_BUS;
if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
return KVM_VIRTIO_CCW_NOTIFY_BUS;
return KVM_MMIO_BUS;
}
static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
enum kvm_bus bus_idx,
struct kvm_ioeventfd *args)
{
struct eventfd_ctx *eventfd;
struct _ioeventfd *p;
int ret;
eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
p = kzalloc(sizeof(*p), GFP_KERNEL_ACCOUNT);
if (!p) {
ret = -ENOMEM;
goto fail;
}
INIT_LIST_HEAD(&p->list);
p->addr = args->addr;
p->bus_idx = bus_idx;
p->length = args->len;
p->eventfd = eventfd;
/* The datamatch feature is optional, otherwise this is a wildcard */
if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
p->datamatch = args->datamatch;
else
p->wildcard = true;
mutex_lock(&kvm->slots_lock);
/* Verify that there isn't a match already */
if (ioeventfd_check_collision(kvm, p)) {
ret = -EEXIST;
goto unlock_fail;
}
kvm_iodevice_init(&p->dev, &ioeventfd_ops);
ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
&p->dev);
if (ret < 0)
goto unlock_fail;
kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
list_add_tail(&p->list, &kvm->ioeventfds);
mutex_unlock(&kvm->slots_lock);
return 0;
unlock_fail:
mutex_unlock(&kvm->slots_lock);
kfree(p);
fail:
eventfd_ctx_put(eventfd);
return ret;
}
static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_ioeventfd *args)
{
struct _ioeventfd *p;
struct eventfd_ctx *eventfd;
struct kvm_io_bus *bus;
int ret = -ENOENT;
bool wildcard;
eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
mutex_lock(&kvm->slots_lock);
list_for_each_entry(p, &kvm->ioeventfds, list) {
if (p->bus_idx != bus_idx ||
p->eventfd != eventfd ||
p->addr != args->addr ||
p->length != args->len ||
p->wildcard != wildcard)
continue;
if (!p->wildcard && p->datamatch != args->datamatch)
continue;
kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
bus = kvm_get_bus(kvm, bus_idx);
if (bus)
bus->ioeventfd_count--;
ret = 0;
break;
}
mutex_unlock(&kvm->slots_lock);
eventfd_ctx_put(eventfd);
return ret;
}
static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
if (!args->len && bus_idx == KVM_MMIO_BUS)
kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
return ret;
}
static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
enum kvm_bus bus_idx;
int ret;
bus_idx = ioeventfd_bus_from_flags(args->flags);
/* must be natural-word sized, or 0 to ignore length */
switch (args->len) {
case 0:
case 1:
case 2:
case 4:
case 8:
break;
default:
return -EINVAL;
}
/* check for range overflow */
if (args->addr + args->len < args->addr)
return -EINVAL;
/* check for extra flags that we don't understand */
if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
return -EINVAL;
/* ioeventfd with no length can't be combined with DATAMATCH */
if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
return -EINVAL;
ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
if (ret)
goto fail;
/* When length is ignored, MMIO is also put on a separate bus, for
* faster lookups.
*/
if (!args->len && bus_idx == KVM_MMIO_BUS) {
ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
if (ret < 0)
goto fast_fail;
}
return 0;
fast_fail:
kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
return ret;
}
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
return kvm_deassign_ioeventfd(kvm, args);
return kvm_assign_ioeventfd(kvm, args);
}
| linux-master | virt/kvm/eventfd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* KVM dirty ring implementation
*
* Copyright 2019 Red Hat, Inc.
*/
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "kvm_mm.h"
int __weak kvm_cpu_dirty_log_size(void)
{
return 0;
}
u32 kvm_dirty_ring_get_rsvd_entries(void)
{
return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}
bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
lockdep_assert_held(&kvm->slots_lock);
return !kvm->dirty_ring_size || kvm->dirty_ring_with_bitmap;
}
#ifndef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
return false;
}
#endif
static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}
static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}
static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
return kvm_dirty_ring_used(ring) >= ring->size;
}
static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
struct kvm_memory_slot *memslot;
int as_id, id;
as_id = slot >> 16;
id = (u16)slot;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
return;
memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
if (!memslot || (offset + __fls(mask)) >= memslot->npages)
return;
KVM_MMU_LOCK(kvm);
kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
KVM_MMU_UNLOCK(kvm);
}
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
{
ring->dirty_gfns = vzalloc(size);
if (!ring->dirty_gfns)
return -ENOMEM;
ring->size = size / sizeof(struct kvm_dirty_gfn);
ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
ring->dirty_index = 0;
ring->reset_index = 0;
ring->index = index;
return 0;
}
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
smp_store_release(&gfn->flags, 0);
}
static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}
static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
u32 cur_slot, next_slot;
u64 cur_offset, next_offset;
unsigned long mask;
int count = 0;
struct kvm_dirty_gfn *entry;
bool first_round = true;
/* This is only needed to make compilers happy */
cur_slot = cur_offset = mask = 0;
while (true) {
entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];
if (!kvm_dirty_gfn_harvested(entry))
break;
next_slot = READ_ONCE(entry->slot);
next_offset = READ_ONCE(entry->offset);
/* Update the flags to reflect that this GFN is reset */
kvm_dirty_gfn_set_invalid(entry);
ring->reset_index++;
count++;
/*
* Try to coalesce the reset operations when the guest is
* scanning pages in the same slot.
*/
if (!first_round && next_slot == cur_slot) {
s64 delta = next_offset - cur_offset;
if (delta >= 0 && delta < BITS_PER_LONG) {
mask |= 1ull << delta;
continue;
}
/* Backwards visit, careful about overflows! */
if (delta > -BITS_PER_LONG && delta < 0 &&
(mask << -delta >> -delta) == mask) {
cur_offset = next_offset;
mask = (mask << -delta) | 1;
continue;
}
}
kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
cur_slot = next_slot;
cur_offset = next_offset;
mask = 1;
first_round = false;
}
kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
/*
* The request KVM_REQ_DIRTY_RING_SOFT_FULL will be cleared
	 * by the VCPU thread the next time it enters the guest.
*/
trace_kvm_dirty_ring_reset(ring);
return count;
}
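/*
 * Worked example of the coalescing above (illustrative, not part of the
 * original file): harvesting offsets 16, 17 and 19 of the same slot
 * leaves cur_offset == 16 and mask == 0b1011, so a single
 * kvm_reset_dirty_gfn() call covers all three GFNs.
 */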
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
{
struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
struct kvm_dirty_gfn *entry;
/* It should never get full */
WARN_ON_ONCE(kvm_dirty_ring_full(ring));
entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];
entry->slot = slot;
entry->offset = offset;
/*
* Make sure the data is filled in before we publish this to
* the userspace program. There's no paired kernel-side reader.
*/
smp_wmb();
kvm_dirty_gfn_set_dirtied(entry);
ring->dirty_index++;
trace_kvm_dirty_ring_push(ring, slot, offset);
if (kvm_dirty_ring_soft_full(ring))
kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
}
bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
{
/*
* The VCPU isn't runnable when the dirty ring becomes soft full.
* The KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent
* the VCPU from running until the dirty pages are harvested and
* the dirty ring is reset by userspace.
*/
if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
trace_kvm_dirty_ring_exit(vcpu);
return true;
}
return false;
}
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}
void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
vfree(ring->dirty_gfns);
ring->dirty_gfns = NULL;
}
| linux-master | virt/kvm/dirty_ring.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IRQ offload/bypass manager
*
* Copyright (C) 2015 Red Hat, Inc.
* Copyright (c) 2015 Linaro Ltd.
*
* Various virtualization hardware acceleration techniques allow bypassing or
* offloading interrupts received from devices around the host kernel. Posted
* Interrupts on Intel VT-d systems can allow interrupts to be received
* directly by a virtual machine. ARM IRQ Forwarding allows forwarded physical
* interrupts to be directly deactivated by the guest. This manager allows
* interrupt producers and consumers to find each other to enable this sort of
* bypass.
*/
#include <linux/irqbypass.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IRQ bypass manager utility module");
static LIST_HEAD(producers);
static LIST_HEAD(consumers);
static DEFINE_MUTEX(lock);
/* @lock must be held when calling connect */
static int __connect(struct irq_bypass_producer *prod,
struct irq_bypass_consumer *cons)
{
int ret = 0;
if (prod->stop)
prod->stop(prod);
if (cons->stop)
cons->stop(cons);
if (prod->add_consumer)
ret = prod->add_consumer(prod, cons);
if (!ret) {
ret = cons->add_producer(cons, prod);
if (ret && prod->del_consumer)
prod->del_consumer(prod, cons);
}
if (cons->start)
cons->start(cons);
if (prod->start)
prod->start(prod);
return ret;
}
/* @lock must be held when calling disconnect */
static void __disconnect(struct irq_bypass_producer *prod,
struct irq_bypass_consumer *cons)
{
if (prod->stop)
prod->stop(prod);
if (cons->stop)
cons->stop(cons);
cons->del_producer(cons, prod);
if (prod->del_consumer)
prod->del_consumer(prod, cons);
if (cons->start)
cons->start(cons);
if (prod->start)
prod->start(prod);
}
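/*
 * Illustrative sketch, not part of the original file: a producer and a
 * consumer pair up by registering the same opaque token, typically the
 * eventfd context shared between a VFIO producer and a KVM consumer.
 * The consumer is assumed to have add_producer/del_producer set.
 */
#if 0
static int example_pair(struct irq_bypass_producer *prod,
			struct irq_bypass_consumer *cons,
			void *shared_token)
{
	int ret;

	prod->token = shared_token;
	cons->token = shared_token;
	ret = irq_bypass_register_producer(prod);
	if (ret)
		return ret;
	ret = irq_bypass_register_consumer(cons);	/* __connect() runs here */
	if (ret)
		irq_bypass_unregister_producer(prod);
	return ret;
}
#endif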
/**
* irq_bypass_register_producer - register IRQ bypass producer
* @producer: pointer to producer structure
*
* Add the provided IRQ producer to the list of producers and connect
* with any matching token found on the IRQ consumers list.
*/
int irq_bypass_register_producer(struct irq_bypass_producer *producer)
{
struct irq_bypass_producer *tmp;
struct irq_bypass_consumer *consumer;
int ret;
if (!producer->token)
return -EINVAL;
might_sleep();
if (!try_module_get(THIS_MODULE))
return -ENODEV;
mutex_lock(&lock);
list_for_each_entry(tmp, &producers, node) {
if (tmp->token == producer->token) {
ret = -EBUSY;
goto out_err;
}
}
list_for_each_entry(consumer, &consumers, node) {
if (consumer->token == producer->token) {
ret = __connect(producer, consumer);
if (ret)
goto out_err;
break;
}
}
list_add(&producer->node, &producers);
mutex_unlock(&lock);
return 0;
out_err:
mutex_unlock(&lock);
module_put(THIS_MODULE);
return ret;
}
EXPORT_SYMBOL_GPL(irq_bypass_register_producer);
/**
* irq_bypass_unregister_producer - unregister IRQ bypass producer
* @producer: pointer to producer structure
*
* Remove a previously registered IRQ producer from the list of producers
* and disconnect it from any connected IRQ consumer.
*/
void irq_bypass_unregister_producer(struct irq_bypass_producer *producer)
{
struct irq_bypass_producer *tmp;
struct irq_bypass_consumer *consumer;
if (!producer->token)
return;
might_sleep();
if (!try_module_get(THIS_MODULE))
return; /* nothing in the list anyway */
mutex_lock(&lock);
list_for_each_entry(tmp, &producers, node) {
if (tmp->token != producer->token)
continue;
list_for_each_entry(consumer, &consumers, node) {
if (consumer->token == producer->token) {
__disconnect(producer, consumer);
break;
}
}
list_del(&producer->node);
module_put(THIS_MODULE);
break;
}
mutex_unlock(&lock);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(irq_bypass_unregister_producer);
/**
* irq_bypass_register_consumer - register IRQ bypass consumer
* @consumer: pointer to consumer structure
*
* Add the provided IRQ consumer to the list of consumers and connect
* with any matching token found on the IRQ producer list.
*/
int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
{
struct irq_bypass_consumer *tmp;
struct irq_bypass_producer *producer;
int ret;
if (!consumer->token ||
!consumer->add_producer || !consumer->del_producer)
return -EINVAL;
might_sleep();
if (!try_module_get(THIS_MODULE))
return -ENODEV;
mutex_lock(&lock);
list_for_each_entry(tmp, &consumers, node) {
if (tmp->token == consumer->token || tmp == consumer) {
ret = -EBUSY;
goto out_err;
}
}
list_for_each_entry(producer, &producers, node) {
if (producer->token == consumer->token) {
ret = __connect(producer, consumer);
if (ret)
goto out_err;
break;
}
}
list_add(&consumer->node, &consumers);
mutex_unlock(&lock);
return 0;
out_err:
mutex_unlock(&lock);
module_put(THIS_MODULE);
return ret;
}
EXPORT_SYMBOL_GPL(irq_bypass_register_consumer);
/**
* irq_bypass_unregister_consumer - unregister IRQ bypass consumer
* @consumer: pointer to consumer structure
*
* Remove a previously registered IRQ consumer from the list of consumers
* and disconnect it from any connected IRQ producer.
*/
void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
{
struct irq_bypass_consumer *tmp;
struct irq_bypass_producer *producer;
if (!consumer->token)
return;
might_sleep();
if (!try_module_get(THIS_MODULE))
return; /* nothing in the list anyway */
mutex_lock(&lock);
list_for_each_entry(tmp, &consumers, node) {
if (tmp != consumer)
continue;
list_for_each_entry(producer, &producers, node) {
if (producer->token == consumer->token) {
__disconnect(producer, consumer);
break;
}
}
list_del(&consumer->node);
module_put(THIS_MODULE);
break;
}
mutex_unlock(&lock);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(irq_bypass_unregister_consumer);
| linux-master | virt/lib/irqbypass.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Remote processor messaging - sample client driver
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
*
* Ohad Ben-Cohen <[email protected]>
* Brian Swetland <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rpmsg.h>
#define MSG "hello world!"
static int count = 100;
module_param(count, int, 0644);
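/*
 * Illustrative note, not part of the original file: the ping-pong count
 * can be set at load time, e.g. "insmod rpmsg_client_sample.ko count=10",
 * or adjusted at runtime through
 * /sys/module/rpmsg_client_sample/parameters/count (mode 0644 above).
 */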
struct instance_data {
int rx_count;
};
static int rpmsg_sample_cb(struct rpmsg_device *rpdev, void *data, int len,
void *priv, u32 src)
{
int ret;
struct instance_data *idata = dev_get_drvdata(&rpdev->dev);
dev_info(&rpdev->dev, "incoming msg %d (src: 0x%x)\n",
++idata->rx_count, src);
print_hex_dump_debug(__func__, DUMP_PREFIX_NONE, 16, 1, data, len,
true);
/* samples should not live forever */
if (idata->rx_count >= count) {
dev_info(&rpdev->dev, "goodbye!\n");
return 0;
}
/* send a new message now */
ret = rpmsg_send(rpdev->ept, MSG, strlen(MSG));
if (ret)
dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
return 0;
}
static int rpmsg_sample_probe(struct rpmsg_device *rpdev)
{
int ret;
struct instance_data *idata;
dev_info(&rpdev->dev, "new channel: 0x%x -> 0x%x!\n",
rpdev->src, rpdev->dst);
idata = devm_kzalloc(&rpdev->dev, sizeof(*idata), GFP_KERNEL);
if (!idata)
return -ENOMEM;
dev_set_drvdata(&rpdev->dev, idata);
/* send a message to our remote processor */
ret = rpmsg_send(rpdev->ept, MSG, strlen(MSG));
if (ret) {
dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
return ret;
}
return 0;
}
static void rpmsg_sample_remove(struct rpmsg_device *rpdev)
{
dev_info(&rpdev->dev, "rpmsg sample client driver is removed\n");
}
static struct rpmsg_device_id rpmsg_driver_sample_id_table[] = {
{ .name = "rpmsg-client-sample" },
{ },
};
MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_sample_id_table);
static struct rpmsg_driver rpmsg_sample_client = {
.drv.name = KBUILD_MODNAME,
.id_table = rpmsg_driver_sample_id_table,
.probe = rpmsg_sample_probe,
.callback = rpmsg_sample_cb,
.remove = rpmsg_sample_remove,
};
module_rpmsg_driver(rpmsg_sample_client);
MODULE_DESCRIPTION("Remote processor messaging sample client driver");
MODULE_LICENSE("GPL v2");
| linux-master | samples/rpmsg/rpmsg_client_sample.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UHID Example
*
* Copyright (c) 2012-2013 David Herrmann <[email protected]>
*
* The code may be used by anyone for any purpose,
* and can serve as a starting point for developing
* applications using uhid.
*/
/*
* UHID Example
 * This example emulates a basic 3-button mouse with wheel over UHID. Run this
* program as root and then use the following keys to control the mouse:
* q: Quit the application
* 1: Toggle left button (down, up, ...)
* 2: Toggle right button
* 3: Toggle middle button
* a: Move mouse left
* d: Move mouse right
* w: Move mouse up
* s: Move mouse down
* r: Move wheel up
* f: Move wheel down
*
 * In addition to the 3-button mouse, 3 keyboard LEDs are also supported (LED_NUML,
* LED_CAPSL and LED_SCROLLL). The device doesn't generate any related keyboard
* events, though. You need to manually write the EV_LED/LED_XY/1 activation
* input event to the evdev device to see it being sent to this device.
*
* If uhid is not available as /dev/uhid, then you can pass a different path as
* first argument.
* If <linux/uhid.h> is not installed in /usr, then compile this with:
* gcc -o ./uhid_test -Wall -I./include ./samples/uhid/uhid-example.c
* And ignore the warning about kernel headers. However, it is recommended to
* use the installed uhid.h if available.
*/
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>
#include <linux/uhid.h>
/*
 * HID Report Descriptor
 * We emulate a basic 3-button mouse with wheel and 3 keyboard LEDs. This is
* the report-descriptor as the kernel will parse it:
*
* INPUT(1)[INPUT]
* Field(0)
* Physical(GenericDesktop.Pointer)
* Application(GenericDesktop.Mouse)
* Usage(3)
* Button.0001
* Button.0002
* Button.0003
* Logical Minimum(0)
* Logical Maximum(1)
* Report Size(1)
* Report Count(3)
* Report Offset(0)
* Flags( Variable Absolute )
* Field(1)
* Physical(GenericDesktop.Pointer)
* Application(GenericDesktop.Mouse)
* Usage(3)
* GenericDesktop.X
* GenericDesktop.Y
* GenericDesktop.Wheel
* Logical Minimum(-128)
* Logical Maximum(127)
* Report Size(8)
* Report Count(3)
* Report Offset(8)
* Flags( Variable Relative )
* OUTPUT(2)[OUTPUT]
* Field(0)
* Application(GenericDesktop.Keyboard)
* Usage(3)
* LED.NumLock
* LED.CapsLock
* LED.ScrollLock
* Logical Minimum(0)
* Logical Maximum(1)
* Report Size(1)
* Report Count(3)
* Report Offset(0)
* Flags( Variable Absolute )
*
* This is the mapping that we expect:
* Button.0001 ---> Key.LeftBtn
* Button.0002 ---> Key.RightBtn
* Button.0003 ---> Key.MiddleBtn
* GenericDesktop.X ---> Relative.X
* GenericDesktop.Y ---> Relative.Y
* GenericDesktop.Wheel ---> Relative.Wheel
* LED.NumLock ---> LED.NumLock
* LED.CapsLock ---> LED.CapsLock
* LED.ScrollLock ---> LED.ScrollLock
*
* This information can be verified by reading /sys/kernel/debug/hid/<dev>/rdesc
 * This file should print the same information as shown above.
*/
static unsigned char rdesc[] = {
0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
0x09, 0x02, /* USAGE (Mouse) */
0xa1, 0x01, /* COLLECTION (Application) */
0x09, 0x01, /* USAGE (Pointer) */
0xa1, 0x00, /* COLLECTION (Physical) */
0x85, 0x01, /* REPORT_ID (1) */
0x05, 0x09, /* USAGE_PAGE (Button) */
0x19, 0x01, /* USAGE_MINIMUM (Button 1) */
0x29, 0x03, /* USAGE_MAXIMUM (Button 3) */
0x15, 0x00, /* LOGICAL_MINIMUM (0) */
0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
0x95, 0x03, /* REPORT_COUNT (3) */
0x75, 0x01, /* REPORT_SIZE (1) */
0x81, 0x02, /* INPUT (Data,Var,Abs) */
0x95, 0x01, /* REPORT_COUNT (1) */
0x75, 0x05, /* REPORT_SIZE (5) */
0x81, 0x01, /* INPUT (Cnst,Var,Abs) */
0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
0x09, 0x30, /* USAGE (X) */
0x09, 0x31, /* USAGE (Y) */
0x09, 0x38, /* USAGE (WHEEL) */
0x15, 0x81, /* LOGICAL_MINIMUM (-127) */
0x25, 0x7f, /* LOGICAL_MAXIMUM (127) */
0x75, 0x08, /* REPORT_SIZE (8) */
0x95, 0x03, /* REPORT_COUNT (3) */
0x81, 0x06, /* INPUT (Data,Var,Rel) */
0xc0, /* END_COLLECTION */
0xc0, /* END_COLLECTION */
0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
0x09, 0x06, /* USAGE (Keyboard) */
0xa1, 0x01, /* COLLECTION (Application) */
0x85, 0x02, /* REPORT_ID (2) */
0x05, 0x08, /* USAGE_PAGE (Led) */
0x19, 0x01, /* USAGE_MINIMUM (1) */
0x29, 0x03, /* USAGE_MAXIMUM (3) */
0x15, 0x00, /* LOGICAL_MINIMUM (0) */
0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
0x95, 0x03, /* REPORT_COUNT (3) */
0x75, 0x01, /* REPORT_SIZE (1) */
0x91, 0x02, /* Output (Data,Var,Abs) */
0x95, 0x01, /* REPORT_COUNT (1) */
0x75, 0x05, /* REPORT_SIZE (5) */
0x91, 0x01, /* Output (Cnst,Var,Abs) */
0xc0, /* END_COLLECTION */
};
static int uhid_write(int fd, const struct uhid_event *ev)
{
ssize_t ret;
ret = write(fd, ev, sizeof(*ev));
if (ret < 0) {
fprintf(stderr, "Cannot write to uhid: %m\n");
return -errno;
} else if (ret != sizeof(*ev)) {
fprintf(stderr, "Wrong size written to uhid: %zd != %zu\n",
			ret, sizeof(*ev));
return -EFAULT;
} else {
return 0;
}
}
static int create(int fd)
{
struct uhid_event ev;
memset(&ev, 0, sizeof(ev));
ev.type = UHID_CREATE;
strcpy((char*)ev.u.create.name, "test-uhid-device");
ev.u.create.rd_data = rdesc;
ev.u.create.rd_size = sizeof(rdesc);
ev.u.create.bus = BUS_USB;
ev.u.create.vendor = 0x15d9;
ev.u.create.product = 0x0a37;
ev.u.create.version = 0;
ev.u.create.country = 0;
return uhid_write(fd, &ev);
}
static void destroy(int fd)
{
struct uhid_event ev;
memset(&ev, 0, sizeof(ev));
ev.type = UHID_DESTROY;
uhid_write(fd, &ev);
}
/* This parses raw output reports sent by the kernel to the device. A normal
* uhid program shouldn't do this but instead just forward the raw report.
 * However, for documentation purposes, we try to detect LED events here and
 * print debug messages for them. */
static void handle_output(struct uhid_event *ev)
{
	/* LED messages are advertised via OUTPUT reports; ignore the rest */
if (ev->u.output.rtype != UHID_OUTPUT_REPORT)
return;
/* LED reports have length 2 bytes */
if (ev->u.output.size != 2)
return;
/* first byte is report-id which is 0x02 for LEDs in our rdesc */
if (ev->u.output.data[0] != 0x2)
return;
/* print flags payload */
fprintf(stderr, "LED output report received with flags %x\n",
ev->u.output.data[1]);
}
static int event(int fd)
{
struct uhid_event ev;
ssize_t ret;
memset(&ev, 0, sizeof(ev));
ret = read(fd, &ev, sizeof(ev));
if (ret == 0) {
fprintf(stderr, "Read HUP on uhid-cdev\n");
return -EFAULT;
} else if (ret < 0) {
fprintf(stderr, "Cannot read uhid-cdev: %m\n");
return -errno;
} else if (ret != sizeof(ev)) {
fprintf(stderr, "Invalid size read from uhid-dev: %zd != %zu\n",
ret, sizeof(ev));
return -EFAULT;
}
switch (ev.type) {
case UHID_START:
fprintf(stderr, "UHID_START from uhid-dev\n");
break;
case UHID_STOP:
fprintf(stderr, "UHID_STOP from uhid-dev\n");
break;
case UHID_OPEN:
fprintf(stderr, "UHID_OPEN from uhid-dev\n");
break;
case UHID_CLOSE:
fprintf(stderr, "UHID_CLOSE from uhid-dev\n");
break;
case UHID_OUTPUT:
fprintf(stderr, "UHID_OUTPUT from uhid-dev\n");
handle_output(&ev);
break;
case UHID_OUTPUT_EV:
fprintf(stderr, "UHID_OUTPUT_EV from uhid-dev\n");
break;
default:
fprintf(stderr, "Invalid event from uhid-dev: %u\n", ev.type);
}
return 0;
}
static bool btn1_down;
static bool btn2_down;
static bool btn3_down;
static signed char abs_hor;
static signed char abs_ver;
static signed char wheel;
static int send_event(int fd)
{
struct uhid_event ev;
memset(&ev, 0, sizeof(ev));
ev.type = UHID_INPUT;
ev.u.input.size = 5;
ev.u.input.data[0] = 0x1;
if (btn1_down)
ev.u.input.data[1] |= 0x1;
if (btn2_down)
ev.u.input.data[1] |= 0x2;
if (btn3_down)
ev.u.input.data[1] |= 0x4;
ev.u.input.data[2] = abs_hor;
ev.u.input.data[3] = abs_ver;
ev.u.input.data[4] = wheel;
return uhid_write(fd, &ev);
}
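/*
 * Illustrative note, not part of the original file: with the descriptor
 * above, holding the left button while moving right by 20 makes
 * send_event() emit the 5-byte input report { 0x01, 0x01, 0x14, 0x00, 0x00 }:
 * report-id 1, button bitmask, then X, Y and wheel as signed 8-bit deltas.
 */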
static int keyboard(int fd)
{
char buf[128];
ssize_t ret, i;
ret = read(STDIN_FILENO, buf, sizeof(buf));
if (ret == 0) {
fprintf(stderr, "Read HUP on stdin\n");
return -EFAULT;
} else if (ret < 0) {
fprintf(stderr, "Cannot read stdin: %m\n");
return -errno;
}
for (i = 0; i < ret; ++i) {
switch (buf[i]) {
case '1':
btn1_down = !btn1_down;
ret = send_event(fd);
if (ret)
return ret;
break;
case '2':
btn2_down = !btn2_down;
ret = send_event(fd);
if (ret)
return ret;
break;
case '3':
btn3_down = !btn3_down;
ret = send_event(fd);
if (ret)
return ret;
break;
case 'a':
abs_hor = -20;
ret = send_event(fd);
abs_hor = 0;
if (ret)
return ret;
break;
case 'd':
abs_hor = 20;
ret = send_event(fd);
abs_hor = 0;
if (ret)
return ret;
break;
case 'w':
abs_ver = -20;
ret = send_event(fd);
abs_ver = 0;
if (ret)
return ret;
break;
case 's':
abs_ver = 20;
ret = send_event(fd);
abs_ver = 0;
if (ret)
return ret;
break;
case 'r':
wheel = 1;
ret = send_event(fd);
wheel = 0;
if (ret)
return ret;
break;
case 'f':
wheel = -1;
ret = send_event(fd);
wheel = 0;
if (ret)
return ret;
break;
case 'q':
return -ECANCELED;
default:
fprintf(stderr, "Invalid input: %c\n", buf[i]);
}
}
return 0;
}
int main(int argc, char **argv)
{
int fd;
const char *path = "/dev/uhid";
struct pollfd pfds[2];
int ret;
struct termios state;
ret = tcgetattr(STDIN_FILENO, &state);
if (ret) {
fprintf(stderr, "Cannot get tty state\n");
} else {
state.c_lflag &= ~ICANON;
state.c_cc[VMIN] = 1;
ret = tcsetattr(STDIN_FILENO, TCSANOW, &state);
if (ret)
fprintf(stderr, "Cannot set tty state\n");
}
if (argc >= 2) {
if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
fprintf(stderr, "Usage: %s [%s]\n", argv[0], path);
return EXIT_SUCCESS;
} else {
path = argv[1];
}
}
fprintf(stderr, "Open uhid-cdev %s\n", path);
fd = open(path, O_RDWR | O_CLOEXEC);
if (fd < 0) {
fprintf(stderr, "Cannot open uhid-cdev %s: %m\n", path);
return EXIT_FAILURE;
}
fprintf(stderr, "Create uhid device\n");
ret = create(fd);
if (ret) {
close(fd);
return EXIT_FAILURE;
}
pfds[0].fd = STDIN_FILENO;
pfds[0].events = POLLIN;
pfds[1].fd = fd;
pfds[1].events = POLLIN;
fprintf(stderr, "Press 'q' to quit...\n");
while (1) {
ret = poll(pfds, 2, -1);
if (ret < 0) {
fprintf(stderr, "Cannot poll for fds: %m\n");
break;
}
if (pfds[0].revents & POLLHUP) {
fprintf(stderr, "Received HUP on stdin\n");
break;
}
if (pfds[1].revents & POLLHUP) {
fprintf(stderr, "Received HUP on uhid-cdev\n");
break;
}
if (pfds[0].revents & POLLIN) {
ret = keyboard(fd);
if (ret)
break;
}
if (pfds[1].revents & POLLIN) {
ret = event(fd);
if (ret)
break;
}
}
fprintf(stderr, "Destroy uhid device\n");
destroy(fd);
return EXIT_SUCCESS;
}
| linux-master | samples/uhid/uhid-example.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* fd-based mount test.
*
* Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <linux/mount.h>
#include <linux/unistd.h>
#define E(x) do { if ((x) == -1) { perror(#x); exit(1); } } while(0)
static void check_messages(int fd)
{
char buf[4096];
int err, n;
err = errno;
for (;;) {
n = read(fd, buf, sizeof(buf));
if (n < 0)
break;
n -= 2;
switch (buf[0]) {
case 'e':
fprintf(stderr, "Error: %*.*s\n", n, n, buf + 2);
break;
case 'w':
fprintf(stderr, "Warning: %*.*s\n", n, n, buf + 2);
break;
case 'i':
fprintf(stderr, "Info: %*.*s\n", n, n, buf + 2);
break;
}
}
errno = err;
}
static __attribute__((noreturn))
void mount_error(int fd, const char *s)
{
check_messages(fd);
fprintf(stderr, "%s: %m\n", s);
exit(1);
}
/* Hope -1 isn't a syscall */
#ifndef __NR_fsopen
#define __NR_fsopen -1
#endif
#ifndef __NR_fsmount
#define __NR_fsmount -1
#endif
#ifndef __NR_fsconfig
#define __NR_fsconfig -1
#endif
#ifndef __NR_move_mount
#define __NR_move_mount -1
#endif
static inline int fsopen(const char *fs_name, unsigned int flags)
{
return syscall(__NR_fsopen, fs_name, flags);
}
static inline int fsmount(int fsfd, unsigned int flags, unsigned int ms_flags)
{
return syscall(__NR_fsmount, fsfd, flags, ms_flags);
}
static inline int fsconfig(int fsfd, unsigned int cmd,
const char *key, const void *val, int aux)
{
return syscall(__NR_fsconfig, fsfd, cmd, key, val, aux);
}
static inline int move_mount(int from_dfd, const char *from_pathname,
int to_dfd, const char *to_pathname,
unsigned int flags)
{
return syscall(__NR_move_mount,
from_dfd, from_pathname,
to_dfd, to_pathname, flags);
}
#define E_fsconfig(fd, cmd, key, val, aux) \
do { \
if (fsconfig(fd, cmd, key, val, aux) == -1) \
mount_error(fd, key ?: "create"); \
} while (0)
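/*
 * Illustrative note, not part of the original file: the sequence in
 * main() below is roughly the new-API equivalent of
 *
 *	mount -t afs -o ro "#grand.central.org:root.cell." /mnt
 *
 * split into fsopen/fsconfig/fsmount/move_mount steps so each stage can
 * report detailed errors through the fs context's message log.
 */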
int main(int argc, char *argv[])
{
int fsfd, mfd;
	/* Mount a publicly available AFS filesystem */
fsfd = fsopen("afs", 0);
if (fsfd == -1) {
perror("fsopen");
exit(1);
}
E_fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "#grand.central.org:root.cell.", 0);
E_fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
mfd = fsmount(fsfd, 0, MOUNT_ATTR_RDONLY);
if (mfd < 0)
mount_error(fsfd, "fsmount");
E(close(fsfd));
if (move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH) < 0) {
perror("move_mount");
exit(1);
}
E(close(mfd));
exit(0);
}
| linux-master | samples/vfs/test-fsmount.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Test the statx() system call.
*
* Note that the output of this program is intended to look like the output of
* /bin/stat where possible.
*
* Copyright (C) 2015 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define _GNU_SOURCE
#define _ATFILE_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <errno.h>
#include <time.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#define statx foo
#define statx_timestamp foo_timestamp
struct statx;
struct statx_timestamp;
#include <sys/stat.h>
#undef statx
#undef statx_timestamp
#define AT_STATX_SYNC_TYPE 0x6000
#define AT_STATX_SYNC_AS_STAT 0x0000
#define AT_STATX_FORCE_SYNC 0x2000
#define AT_STATX_DONT_SYNC 0x4000
#ifndef __NR_statx
#define __NR_statx -1
#endif
static __attribute__((unused))
ssize_t statx(int dfd, const char *filename, unsigned flags,
unsigned int mask, struct statx *buffer)
{
return syscall(__NR_statx, dfd, filename, flags, mask, buffer);
}
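/*
 * Illustrative note, not part of the original file: a typical invocation
 * mirrors what main() does below, e.g.
 *
 *	statx(AT_FDCWD, "/etc/passwd", AT_SYMLINK_NOFOLLOW,
 *	      STATX_BASIC_STATS | STATX_BTIME, &stx);
 *
 * where the kernel fills stx and sets stx_mask to the fields it actually
 * provided.
 */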
static void print_time(const char *field, struct statx_timestamp *ts)
{
struct tm tm;
time_t tim;
char buffer[100];
int len;
tim = ts->tv_sec;
if (!localtime_r(&tim, &tm)) {
perror("localtime_r");
exit(1);
}
len = strftime(buffer, 100, "%F %T", &tm);
if (len == 0) {
perror("strftime");
exit(1);
}
printf("%s", field);
fwrite(buffer, 1, len, stdout);
printf(".%09u", ts->tv_nsec);
len = strftime(buffer, 100, "%z", &tm);
if (len == 0) {
perror("strftime2");
exit(1);
}
fwrite(buffer, 1, len, stdout);
printf("\n");
}
static void dump_statx(struct statx *stx)
{
char buffer[256], ft = '?';
printf("results=%x\n", stx->stx_mask);
printf(" ");
if (stx->stx_mask & STATX_SIZE)
printf(" Size: %-15llu", (unsigned long long)stx->stx_size);
if (stx->stx_mask & STATX_BLOCKS)
printf(" Blocks: %-10llu", (unsigned long long)stx->stx_blocks);
printf(" IO Block: %-6llu", (unsigned long long)stx->stx_blksize);
if (stx->stx_mask & STATX_TYPE) {
switch (stx->stx_mode & S_IFMT) {
case S_IFIFO: printf(" FIFO\n"); ft = 'p'; break;
case S_IFCHR: printf(" character special file\n"); ft = 'c'; break;
case S_IFDIR: printf(" directory\n"); ft = 'd'; break;
case S_IFBLK: printf(" block special file\n"); ft = 'b'; break;
case S_IFREG: printf(" regular file\n"); ft = '-'; break;
case S_IFLNK: printf(" symbolic link\n"); ft = 'l'; break;
case S_IFSOCK: printf(" socket\n"); ft = 's'; break;
default:
printf(" unknown type (%o)\n", stx->stx_mode & S_IFMT);
break;
}
} else {
printf(" no type\n");
}
sprintf(buffer, "%02x:%02x", stx->stx_dev_major, stx->stx_dev_minor);
printf("Device: %-15s", buffer);
if (stx->stx_mask & STATX_INO)
printf(" Inode: %-11llu", (unsigned long long) stx->stx_ino);
if (stx->stx_mask & STATX_NLINK)
printf(" Links: %-5u", stx->stx_nlink);
if (stx->stx_mask & STATX_TYPE) {
switch (stx->stx_mode & S_IFMT) {
case S_IFBLK:
case S_IFCHR:
printf(" Device type: %u,%u",
stx->stx_rdev_major, stx->stx_rdev_minor);
break;
}
}
printf("\n");
if (stx->stx_mask & STATX_MODE)
printf("Access: (%04o/%c%c%c%c%c%c%c%c%c%c) ",
stx->stx_mode & 07777,
ft,
stx->stx_mode & S_IRUSR ? 'r' : '-',
stx->stx_mode & S_IWUSR ? 'w' : '-',
stx->stx_mode & S_IXUSR ? 'x' : '-',
stx->stx_mode & S_IRGRP ? 'r' : '-',
stx->stx_mode & S_IWGRP ? 'w' : '-',
stx->stx_mode & S_IXGRP ? 'x' : '-',
stx->stx_mode & S_IROTH ? 'r' : '-',
stx->stx_mode & S_IWOTH ? 'w' : '-',
stx->stx_mode & S_IXOTH ? 'x' : '-');
if (stx->stx_mask & STATX_UID)
printf("Uid: %5d ", stx->stx_uid);
if (stx->stx_mask & STATX_GID)
printf("Gid: %5d\n", stx->stx_gid);
if (stx->stx_mask & STATX_ATIME)
print_time("Access: ", &stx->stx_atime);
if (stx->stx_mask & STATX_MTIME)
print_time("Modify: ", &stx->stx_mtime);
if (stx->stx_mask & STATX_CTIME)
print_time("Change: ", &stx->stx_ctime);
if (stx->stx_mask & STATX_BTIME)
print_time(" Birth: ", &stx->stx_btime);
if (stx->stx_attributes_mask) {
unsigned char bits, mbits;
int loop, byte;
static char attr_representation[64 + 1] =
/* STATX_ATTR_ flags: */
"????????" /* 63-56 */
"????????" /* 55-48 */
"????????" /* 47-40 */
"????????" /* 39-32 */
"????????" /* 31-24 0x00000000-ff000000 */
"????????" /* 23-16 0x00000000-00ff0000 */
"???me???" /* 15- 8 0x00000000-0000ff00 */
"?dai?c??" /* 7- 0 0x00000000-000000ff */
;
printf("Attributes: %016llx (",
(unsigned long long)stx->stx_attributes);
for (byte = 64 - 8; byte >= 0; byte -= 8) {
bits = stx->stx_attributes >> byte;
mbits = stx->stx_attributes_mask >> byte;
for (loop = 7; loop >= 0; loop--) {
int bit = byte + loop;
if (!(mbits & 0x80))
putchar('.'); /* Not supported */
else if (bits & 0x80)
putchar(attr_representation[63 - bit]);
else
putchar('-'); /* Not set */
bits <<= 1;
mbits <<= 1;
}
if (byte)
putchar(' ');
}
printf(")\n");
}
}
static void dump_hex(unsigned long long *data, int from, int to)
{
unsigned offset, print_offset = 1, col = 0;
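	/* Convert the byte offsets into 8-byte word indices, rounding the end up. */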
from /= 8;
to = (to + 7) / 8;
for (offset = from; offset < to; offset++) {
if (print_offset) {
printf("%04x: ", offset * 8);
print_offset = 0;
}
printf("%016llx", data[offset]);
col++;
if ((col & 3) == 0) {
printf("\n");
print_offset = 1;
} else {
printf(" ");
}
}
if (!print_offset)
printf("\n");
}
int main(int argc, char **argv)
{
struct statx stx;
int ret, raw = 0, atflag = AT_SYMLINK_NOFOLLOW;
unsigned int mask = STATX_BASIC_STATS | STATX_BTIME;
for (argv++; *argv; argv++) {
if (strcmp(*argv, "-F") == 0) {
atflag &= ~AT_STATX_SYNC_TYPE;
atflag |= AT_STATX_FORCE_SYNC;
continue;
}
if (strcmp(*argv, "-D") == 0) {
atflag &= ~AT_STATX_SYNC_TYPE;
atflag |= AT_STATX_DONT_SYNC;
continue;
}
if (strcmp(*argv, "-L") == 0) {
atflag &= ~AT_SYMLINK_NOFOLLOW;
continue;
}
if (strcmp(*argv, "-O") == 0) {
mask &= ~STATX_BASIC_STATS;
continue;
}
if (strcmp(*argv, "-A") == 0) {
atflag |= AT_NO_AUTOMOUNT;
continue;
}
if (strcmp(*argv, "-R") == 0) {
raw = 1;
continue;
}
memset(&stx, 0xbf, sizeof(stx));
ret = statx(AT_FDCWD, *argv, atflag, mask, &stx);
printf("statx(%s) = %d\n", *argv, ret);
if (ret < 0) {
perror(*argv);
exit(1);
}
if (raw)
dump_hex((unsigned long long *)&stx, 0, sizeof(stx));
dump_statx(&stx);
}
return 0;
}
| linux-master | samples/vfs/test-statx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*/
/**
* DOC: Sample flow of using the ioctl interface provided by the Nitro Enclaves (NE)
* kernel driver.
*
* Usage
* -----
*
 * Load the nitro_enclaves module, also setting the enclave CPU pool. The
* enclave CPUs need to be full cores from the same NUMA node. CPU 0 and its
* siblings have to remain available for the primary / parent VM, so they
* cannot be included in the enclave CPU pool.
*
* See the cpu list section from the kernel documentation.
* https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html#cpu-lists
*
* insmod drivers/virt/nitro_enclaves/nitro_enclaves.ko
* lsmod
*
* The CPU pool can be set at runtime, after the kernel module is loaded.
*
* echo <cpu-list> > /sys/module/nitro_enclaves/parameters/ne_cpus
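 *
 * e.g. to put CPUs 2 and 3 into the pool (adjust to full cores, with
 * their siblings, on the actual instance):
 *
 * echo 2,3 > /sys/module/nitro_enclaves/parameters/ne_cpus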
*
* NUMA and CPU siblings information can be found using:
*
* lscpu
* /proc/cpuinfo
*
* Check the online / offline CPU list. The CPUs from the pool should be
* offlined.
*
* lscpu
*
* Check dmesg for any warnings / errors through the NE driver lifetime / usage.
* The NE logs contain the "nitro_enclaves" or "pci 0000:00:02.0" pattern.
*
* dmesg
*
 * Set up hugetlbfs huge pages. The memory needs to be from the same NUMA node as
* the enclave CPUs.
*
* https://www.kernel.org/doc/html/latest/admin-guide/mm/hugetlbpage.html
*
 * By default, the allocation of hugetlb pages is distributed across all possible
* NUMA nodes. Use the following configuration files to set the number of huge
* pages from a NUMA node:
*
* /sys/devices/system/node/node<X>/hugepages/hugepages-2048kB/nr_hugepages
* /sys/devices/system/node/node<X>/hugepages/hugepages-1048576kB/nr_hugepages
*
 * or, on a system without multiple NUMA nodes, the number of 2 MiB / 1 GiB
 * huge pages can also be set using
*
* /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
* /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
*
* In this example 256 hugepages of 2 MiB are used.
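 *
 * e.g. when allocating from the default 2 MiB pool:
 *
 * echo 256 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages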
*
* Build and run the NE sample.
*
* make -C samples/nitro_enclaves clean
* make -C samples/nitro_enclaves
* ./samples/nitro_enclaves/ne_ioctl_sample <path_to_enclave_image>
*
* Unload the nitro_enclaves module.
*
* rmmod nitro_enclaves
* lsmod
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/mman.h>
#include <linux/nitro_enclaves.h>
#include <linux/vm_sockets.h>
/**
* NE_DEV_NAME - Nitro Enclaves (NE) misc device that provides the ioctl interface.
*/
#define NE_DEV_NAME "/dev/nitro_enclaves"
/**
* NE_POLL_WAIT_TIME - Timeout in seconds for each poll event.
*/
#define NE_POLL_WAIT_TIME (60)
/**
* NE_POLL_WAIT_TIME_MS - Timeout in milliseconds for each poll event.
*/
#define NE_POLL_WAIT_TIME_MS (NE_POLL_WAIT_TIME * 1000)
/**
* NE_SLEEP_TIME - Amount of time in seconds for the process to keep the enclave alive.
*/
#define NE_SLEEP_TIME (300)
/**
* NE_DEFAULT_NR_VCPUS - Default number of vCPUs set for an enclave.
*/
#define NE_DEFAULT_NR_VCPUS (2)
/**
* NE_MIN_MEM_REGION_SIZE - Minimum size of a memory region - 2 MiB.
*/
#define NE_MIN_MEM_REGION_SIZE (2 * 1024 * 1024)
/**
* NE_DEFAULT_NR_MEM_REGIONS - Default number of memory regions of 2 MiB set for
* an enclave.
*/
#define NE_DEFAULT_NR_MEM_REGIONS (256)
/**
* NE_IMAGE_LOAD_HEARTBEAT_CID - Vsock CID for enclave image loading heartbeat logic.
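 * This is the CID of the primary / parent VM in the Nitro Enclaves vsock setup.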
*/
#define NE_IMAGE_LOAD_HEARTBEAT_CID (3)
/**
* NE_IMAGE_LOAD_HEARTBEAT_PORT - Vsock port for enclave image loading heartbeat logic.
*/
#define NE_IMAGE_LOAD_HEARTBEAT_PORT (9000)
/**
* NE_IMAGE_LOAD_HEARTBEAT_VALUE - Heartbeat value for enclave image loading.
*/
#define NE_IMAGE_LOAD_HEARTBEAT_VALUE (0xb7)
/**
* struct ne_user_mem_region - User space memory region set for an enclave.
* @userspace_addr: Address of the user space memory region.
* @memory_size: Size of the user space memory region.
*/
struct ne_user_mem_region {
void *userspace_addr;
size_t memory_size;
};
/**
* ne_create_vm() - Create a slot for the enclave VM.
* @ne_dev_fd: The file descriptor of the NE misc device.
* @slot_uid: The generated slot uid for the enclave.
 * @enclave_fd: The generated file descriptor for the enclave.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_create_vm(int ne_dev_fd, unsigned long *slot_uid, int *enclave_fd)
{
int rc = -EINVAL;
*enclave_fd = ioctl(ne_dev_fd, NE_CREATE_VM, slot_uid);
if (*enclave_fd < 0) {
rc = *enclave_fd;
switch (errno) {
case NE_ERR_NO_CPUS_AVAIL_IN_POOL: {
printf("Error in create VM, no CPUs available in the NE CPU pool\n");
break;
}
default:
printf("Error in create VM [%m]\n");
}
return rc;
}
return 0;
}
/**
* ne_poll_enclave_fd() - Thread function for polling the enclave fd.
* @data: Argument provided for the polling function.
*
* Context: Process context.
* Return:
* * NULL on success / failure.
*/
void *ne_poll_enclave_fd(void *data)
{
int enclave_fd = *(int *)data;
struct pollfd fds[1] = {};
int i = 0;
int rc = -EINVAL;
printf("Running from poll thread, enclave fd %d\n", enclave_fd);
fds[0].fd = enclave_fd;
fds[0].events = POLLIN | POLLERR | POLLHUP;
/* Keep on polling until the current process is terminated. */
while (1) {
printf("[iter %d] Polling ...\n", i);
rc = poll(fds, 1, NE_POLL_WAIT_TIME_MS);
if (rc < 0) {
printf("Error in poll [%m]\n");
return NULL;
}
i++;
if (!rc) {
printf("Poll: %d seconds elapsed\n",
i * NE_POLL_WAIT_TIME);
continue;
}
printf("Poll received value 0x%x\n", fds[0].revents);
if (fds[0].revents & POLLHUP) {
printf("Received POLLHUP\n");
return NULL;
}
if (fds[0].revents & POLLNVAL) {
printf("Received POLLNVAL\n");
return NULL;
}
}
return NULL;
}
/**
* ne_alloc_user_mem_region() - Allocate a user space memory region for an enclave.
* @ne_user_mem_region: User space memory region allocated using hugetlbfs.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_alloc_user_mem_region(struct ne_user_mem_region *ne_user_mem_region)
{
/**
* Check available hugetlb encodings for different huge page sizes in
* include/uapi/linux/mman.h.
*/
ne_user_mem_region->userspace_addr = mmap(NULL, ne_user_mem_region->memory_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS |
MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
if (ne_user_mem_region->userspace_addr == MAP_FAILED) {
printf("Error in mmap memory [%m]\n");
return -1;
}
return 0;
}
/**
* ne_load_enclave_image() - Place the enclave image in the enclave memory.
 * @enclave_fd: The file descriptor associated with the enclave.
 * @ne_user_mem_regions: User space memory regions allocated for the enclave.
 * @enclave_image_path: The file path of the enclave image.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_load_enclave_image(int enclave_fd, struct ne_user_mem_region ne_user_mem_regions[],
char *enclave_image_path)
{
unsigned char *enclave_image = NULL;
int enclave_image_fd = -1;
size_t enclave_image_size = 0;
size_t enclave_memory_size = 0;
unsigned long i = 0;
size_t image_written_bytes = 0;
struct ne_image_load_info image_load_info = {
.flags = NE_EIF_IMAGE,
};
struct stat image_stat_buf = {};
int rc = -EINVAL;
size_t temp_image_offset = 0;
for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++)
enclave_memory_size += ne_user_mem_regions[i].memory_size;
rc = stat(enclave_image_path, &image_stat_buf);
if (rc < 0) {
printf("Error in get image stat info [%m]\n");
return rc;
}
enclave_image_size = image_stat_buf.st_size;
if (enclave_memory_size < enclave_image_size) {
printf("The enclave memory is smaller than the enclave image size\n");
return -ENOMEM;
}
rc = ioctl(enclave_fd, NE_GET_IMAGE_LOAD_INFO, &image_load_info);
if (rc < 0) {
switch (errno) {
case NE_ERR_NOT_IN_INIT_STATE: {
printf("Error in get image load info, enclave not in init state\n");
break;
}
case NE_ERR_INVALID_FLAG_VALUE: {
printf("Error in get image load info, provided invalid flag\n");
break;
}
default:
printf("Error in get image load info [%m]\n");
}
return rc;
}
printf("Enclave image offset in enclave memory is %lld\n",
image_load_info.memory_offset);
enclave_image_fd = open(enclave_image_path, O_RDONLY);
if (enclave_image_fd < 0) {
printf("Error in open enclave image file [%m]\n");
return enclave_image_fd;
}
enclave_image = mmap(NULL, enclave_image_size, PROT_READ,
MAP_PRIVATE, enclave_image_fd, 0);
	if (enclave_image == MAP_FAILED) {
		printf("Error in mmap enclave image [%m]\n");
		close(enclave_image_fd);
		return -1;
	}
temp_image_offset = image_load_info.memory_offset;
for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++) {
size_t bytes_to_write = 0;
size_t memory_offset = 0;
size_t memory_size = ne_user_mem_regions[i].memory_size;
size_t remaining_bytes = 0;
void *userspace_addr = ne_user_mem_regions[i].userspace_addr;
if (temp_image_offset >= memory_size) {
temp_image_offset -= memory_size;
continue;
} else if (temp_image_offset != 0) {
memory_offset = temp_image_offset;
memory_size -= temp_image_offset;
temp_image_offset = 0;
}
remaining_bytes = enclave_image_size - image_written_bytes;
bytes_to_write = memory_size < remaining_bytes ?
memory_size : remaining_bytes;
memcpy(userspace_addr + memory_offset,
enclave_image + image_written_bytes, bytes_to_write);
image_written_bytes += bytes_to_write;
if (image_written_bytes == enclave_image_size)
break;
}
munmap(enclave_image, enclave_image_size);
close(enclave_image_fd);
return 0;
}
/**
* ne_set_user_mem_region() - Set a user space memory region for the given enclave.
 * @enclave_fd: The file descriptor associated with the enclave.
 * @ne_user_mem_region: User space memory region to be set for the enclave.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_set_user_mem_region(int enclave_fd, struct ne_user_mem_region ne_user_mem_region)
{
struct ne_user_memory_region mem_region = {
.flags = NE_DEFAULT_MEMORY_REGION,
.memory_size = ne_user_mem_region.memory_size,
.userspace_addr = (__u64)ne_user_mem_region.userspace_addr,
};
int rc = -EINVAL;
rc = ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &mem_region);
if (rc < 0) {
switch (errno) {
case NE_ERR_NOT_IN_INIT_STATE: {
printf("Error in set user memory region, enclave not in init state\n");
break;
}
case NE_ERR_INVALID_MEM_REGION_SIZE: {
printf("Error in set user memory region, mem size not multiple of 2 MiB\n");
break;
}
case NE_ERR_INVALID_MEM_REGION_ADDR: {
printf("Error in set user memory region, invalid user space address\n");
break;
}
case NE_ERR_UNALIGNED_MEM_REGION_ADDR: {
printf("Error in set user memory region, unaligned user space address\n");
break;
}
case NE_ERR_MEM_REGION_ALREADY_USED: {
printf("Error in set user memory region, memory region already used\n");
break;
}
case NE_ERR_MEM_NOT_HUGE_PAGE: {
printf("Error in set user memory region, not backed by huge pages\n");
break;
}
case NE_ERR_MEM_DIFFERENT_NUMA_NODE: {
printf("Error in set user memory region, different NUMA node than CPUs\n");
break;
}
case NE_ERR_MEM_MAX_REGIONS: {
printf("Error in set user memory region, max memory regions reached\n");
break;
}
case NE_ERR_INVALID_PAGE_SIZE: {
printf("Error in set user memory region, has page not multiple of 2 MiB\n");
break;
}
case NE_ERR_INVALID_FLAG_VALUE: {
printf("Error in set user memory region, provided invalid flag\n");
break;
}
default:
printf("Error in set user memory region [%m]\n");
}
return rc;
}
return 0;
}
/**
* ne_free_mem_regions() - Unmap all the user space memory regions that were set
* aside for the enclave.
* @ne_user_mem_regions: The user space memory regions associated with an enclave.
*
* Context: Process context.
*/
static void ne_free_mem_regions(struct ne_user_mem_region ne_user_mem_regions[])
{
unsigned int i = 0;
for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++)
munmap(ne_user_mem_regions[i].userspace_addr,
ne_user_mem_regions[i].memory_size);
}
/**
* ne_add_vcpu() - Add a vCPU to the given enclave.
 * @enclave_fd: The file descriptor associated with the enclave.
* @vcpu_id: vCPU id to be set for the enclave, either provided or
* auto-generated (if provided vCPU id is 0).
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_add_vcpu(int enclave_fd, unsigned int *vcpu_id)
{
int rc = -EINVAL;
rc = ioctl(enclave_fd, NE_ADD_VCPU, vcpu_id);
if (rc < 0) {
switch (errno) {
case NE_ERR_NO_CPUS_AVAIL_IN_POOL: {
printf("Error in add vcpu, no CPUs available in the NE CPU pool\n");
break;
}
case NE_ERR_VCPU_ALREADY_USED: {
printf("Error in add vcpu, the provided vCPU is already used\n");
break;
}
case NE_ERR_VCPU_NOT_IN_CPU_POOL: {
printf("Error in add vcpu, the provided vCPU is not in the NE CPU pool\n");
break;
}
case NE_ERR_VCPU_INVALID_CPU_CORE: {
printf("Error in add vcpu, the core id of the provided vCPU is invalid\n");
break;
}
case NE_ERR_NOT_IN_INIT_STATE: {
printf("Error in add vcpu, enclave not in init state\n");
break;
}
case NE_ERR_INVALID_VCPU: {
printf("Error in add vcpu, the provided vCPU is out of avail CPUs range\n");
break;
}
default:
printf("Error in add vcpu [%m]\n");
}
return rc;
}
return 0;
}
/**
* ne_start_enclave() - Start the given enclave.
 * @enclave_fd: The file descriptor associated with the enclave.
 * @enclave_start_info: Enclave metadata used for starting, e.g. the vsock CID.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_start_enclave(int enclave_fd, struct ne_enclave_start_info *enclave_start_info)
{
int rc = -EINVAL;
rc = ioctl(enclave_fd, NE_START_ENCLAVE, enclave_start_info);
if (rc < 0) {
switch (errno) {
case NE_ERR_NOT_IN_INIT_STATE: {
printf("Error in start enclave, enclave not in init state\n");
break;
}
case NE_ERR_NO_MEM_REGIONS_ADDED: {
printf("Error in start enclave, no memory regions have been added\n");
break;
}
case NE_ERR_NO_VCPUS_ADDED: {
printf("Error in start enclave, no vCPUs have been added\n");
break;
}
case NE_ERR_FULL_CORES_NOT_USED: {
printf("Error in start enclave, enclave has no full cores set\n");
break;
}
case NE_ERR_ENCLAVE_MEM_MIN_SIZE: {
printf("Error in start enclave, enclave memory is less than min size\n");
break;
}
case NE_ERR_INVALID_FLAG_VALUE: {
printf("Error in start enclave, provided invalid flag\n");
break;
}
case NE_ERR_INVALID_ENCLAVE_CID: {
printf("Error in start enclave, provided invalid enclave CID\n");
break;
}
default:
printf("Error in start enclave [%m]\n");
}
return rc;
}
return 0;
}
/**
* ne_start_enclave_check_booted() - Start the enclave and wait for a heartbeat
* from it, on a newly created vsock channel,
* to check it has booted.
 * @enclave_fd: The file descriptor associated with the enclave.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_start_enclave_check_booted(int enclave_fd)
{
struct sockaddr_vm client_vsock_addr = {};
int client_vsock_fd = -1;
socklen_t client_vsock_len = sizeof(client_vsock_addr);
struct ne_enclave_start_info enclave_start_info = {};
struct pollfd fds[1] = {};
int rc = -EINVAL;
unsigned char recv_buf = 0;
struct sockaddr_vm server_vsock_addr = {
.svm_family = AF_VSOCK,
.svm_cid = NE_IMAGE_LOAD_HEARTBEAT_CID,
.svm_port = NE_IMAGE_LOAD_HEARTBEAT_PORT,
};
int server_vsock_fd = -1;
server_vsock_fd = socket(AF_VSOCK, SOCK_STREAM, 0);
if (server_vsock_fd < 0) {
rc = server_vsock_fd;
printf("Error in socket [%m]\n");
return rc;
}
rc = bind(server_vsock_fd, (struct sockaddr *)&server_vsock_addr,
sizeof(server_vsock_addr));
if (rc < 0) {
printf("Error in bind [%m]\n");
goto out;
}
rc = listen(server_vsock_fd, 1);
if (rc < 0) {
printf("Error in listen [%m]\n");
goto out;
}
rc = ne_start_enclave(enclave_fd, &enclave_start_info);
if (rc < 0)
goto out;
printf("Enclave started, CID %llu\n", enclave_start_info.enclave_cid);
fds[0].fd = server_vsock_fd;
fds[0].events = POLLIN;
rc = poll(fds, 1, NE_POLL_WAIT_TIME_MS);
if (rc < 0) {
printf("Error in poll [%m]\n");
goto out;
}
if (!rc) {
printf("Poll timeout, %d seconds elapsed\n", NE_POLL_WAIT_TIME);
rc = -ETIMEDOUT;
goto out;
}
if ((fds[0].revents & POLLIN) == 0) {
printf("Poll received value %d\n", fds[0].revents);
rc = -EINVAL;
goto out;
}
rc = accept(server_vsock_fd, (struct sockaddr *)&client_vsock_addr,
&client_vsock_len);
if (rc < 0) {
printf("Error in accept [%m]\n");
goto out;
}
client_vsock_fd = rc;
/*
* Read the heartbeat value that the init process in the enclave sends
* after vsock connect.
*/
rc = read(client_vsock_fd, &recv_buf, sizeof(recv_buf));
if (rc < 0) {
printf("Error in read [%m]\n");
goto out;
}
	if (rc != sizeof(recv_buf) || recv_buf != NE_IMAGE_LOAD_HEARTBEAT_VALUE) {
		printf("Read %d instead of %d\n", recv_buf,
		       NE_IMAGE_LOAD_HEARTBEAT_VALUE);
		rc = -EINVAL;
		goto out;
	}
/* Write the heartbeat value back. */
rc = write(client_vsock_fd, &recv_buf, sizeof(recv_buf));
if (rc < 0) {
printf("Error in write [%m]\n");
goto out;
}
	rc = 0;
out:
	if (client_vsock_fd >= 0)
		close(client_vsock_fd);
	close(server_vsock_fd);
	return rc;
}
int main(int argc, char *argv[])
{
int enclave_fd = -1;
unsigned int i = 0;
int ne_dev_fd = -1;
struct ne_user_mem_region ne_user_mem_regions[NE_DEFAULT_NR_MEM_REGIONS] = {};
unsigned int ne_vcpus[NE_DEFAULT_NR_VCPUS] = {};
int rc = -EINVAL;
pthread_t thread_id = 0;
unsigned long slot_uid = 0;
if (argc != 2) {
printf("Usage: %s <path_to_enclave_image>\n", argv[0]);
exit(EXIT_FAILURE);
}
if (strlen(argv[1]) >= PATH_MAX) {
printf("The size of the path to enclave image is higher than max path\n");
exit(EXIT_FAILURE);
}
ne_dev_fd = open(NE_DEV_NAME, O_RDWR | O_CLOEXEC);
if (ne_dev_fd < 0) {
printf("Error in open NE device [%m]\n");
exit(EXIT_FAILURE);
}
printf("Creating enclave slot ...\n");
rc = ne_create_vm(ne_dev_fd, &slot_uid, &enclave_fd);
close(ne_dev_fd);
if (rc < 0)
exit(EXIT_FAILURE);
printf("Enclave fd %d\n", enclave_fd);
rc = pthread_create(&thread_id, NULL, ne_poll_enclave_fd, (void *)&enclave_fd);
if (rc < 0) {
printf("Error in thread create [%m]\n");
close(enclave_fd);
exit(EXIT_FAILURE);
}
for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++) {
ne_user_mem_regions[i].memory_size = NE_MIN_MEM_REGION_SIZE;
rc = ne_alloc_user_mem_region(&ne_user_mem_regions[i]);
if (rc < 0) {
printf("Error in alloc userspace memory region, iter %d\n", i);
goto release_enclave_fd;
}
}
rc = ne_load_enclave_image(enclave_fd, ne_user_mem_regions, argv[1]);
if (rc < 0)
goto release_enclave_fd;
for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++) {
rc = ne_set_user_mem_region(enclave_fd, ne_user_mem_regions[i]);
if (rc < 0) {
printf("Error in set memory region, iter %d\n", i);
goto release_enclave_fd;
}
}
printf("Enclave memory regions were added\n");
for (i = 0; i < NE_DEFAULT_NR_VCPUS; i++) {
/*
* The vCPU is chosen from the enclave vCPU pool, if the value
* of the vcpu_id is 0.
*/
ne_vcpus[i] = 0;
rc = ne_add_vcpu(enclave_fd, &ne_vcpus[i]);
if (rc < 0) {
printf("Error in add vcpu, iter %d\n", i);
goto release_enclave_fd;
}
printf("Added vCPU %d to the enclave\n", ne_vcpus[i]);
}
printf("Enclave vCPUs were added\n");
rc = ne_start_enclave_check_booted(enclave_fd);
if (rc < 0) {
printf("Error in the enclave start / image loading heartbeat logic [rc=%d]\n", rc);
goto release_enclave_fd;
}
printf("Entering sleep for %d seconds ...\n", NE_SLEEP_TIME);
sleep(NE_SLEEP_TIME);
close(enclave_fd);
ne_free_mem_regions(ne_user_mem_regions);
exit(EXIT_SUCCESS);
release_enclave_fd:
close(enclave_fd);
ne_free_mem_regions(ne_user_mem_regions);
exit(EXIT_FAILURE);
}
| linux-master | samples/nitro_enclaves/ne_ioctl_sample.c |
// SPDX-License-Identifier: BSD-3-Clause
/*
* Simple Landlock sandbox manager able to launch a process restricted by a
* user-defined filesystem access control policy.
*
* Copyright © 2017-2020 Mickaël Salaün <[email protected]>
* Copyright © 2020 ANSSI
*/
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <linux/landlock.h>
#include <linux/prctl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>
#ifndef landlock_create_ruleset
static inline int
landlock_create_ruleset(const struct landlock_ruleset_attr *const attr,
const size_t size, const __u32 flags)
{
return syscall(__NR_landlock_create_ruleset, attr, size, flags);
}
#endif
#ifndef landlock_add_rule
static inline int landlock_add_rule(const int ruleset_fd,
const enum landlock_rule_type rule_type,
const void *const rule_attr,
const __u32 flags)
{
return syscall(__NR_landlock_add_rule, ruleset_fd, rule_type, rule_attr,
flags);
}
#endif
#ifndef landlock_restrict_self
static inline int landlock_restrict_self(const int ruleset_fd,
const __u32 flags)
{
return syscall(__NR_landlock_restrict_self, ruleset_fd, flags);
}
#endif
#define ENV_FS_RO_NAME "LL_FS_RO"
#define ENV_FS_RW_NAME "LL_FS_RW"
#define ENV_PATH_TOKEN ":"
static int parse_path(char *env_path, const char ***const path_list)
{
int i, num_paths = 0;
if (env_path) {
num_paths++;
for (i = 0; env_path[i]; i++) {
if (env_path[i] == ENV_PATH_TOKEN[0])
num_paths++;
}
}
	*path_list = malloc(num_paths * sizeof(**path_list));
	if (!*path_list)
		return -1;
for (i = 0; i < num_paths; i++)
(*path_list)[i] = strsep(&env_path, ENV_PATH_TOKEN);
return num_paths;
}
/* clang-format off */
#define ACCESS_FILE ( \
LANDLOCK_ACCESS_FS_EXECUTE | \
LANDLOCK_ACCESS_FS_WRITE_FILE | \
LANDLOCK_ACCESS_FS_READ_FILE | \
LANDLOCK_ACCESS_FS_TRUNCATE)
/* clang-format on */
static int populate_ruleset(const char *const env_var, const int ruleset_fd,
const __u64 allowed_access)
{
int num_paths, i, ret = 1;
char *env_path_name;
const char **path_list = NULL;
struct landlock_path_beneath_attr path_beneath = {
.parent_fd = -1,
};
env_path_name = getenv(env_var);
if (!env_path_name) {
		/* Prevent users from forgetting a setting. */
fprintf(stderr, "Missing environment variable %s\n", env_var);
return 1;
}
env_path_name = strdup(env_path_name);
unsetenv(env_var);
	num_paths = parse_path(env_path_name, &path_list);
	if (num_paths < 0)
		goto out_free_name;
if (num_paths == 1 && path_list[0][0] == '\0') {
/*
		 * Allow not using all possible restrictions (e.g. use
* LL_FS_RO without LL_FS_RW).
*/
ret = 0;
goto out_free_name;
}
for (i = 0; i < num_paths; i++) {
struct stat statbuf;
path_beneath.parent_fd = open(path_list[i], O_PATH | O_CLOEXEC);
if (path_beneath.parent_fd < 0) {
fprintf(stderr, "Failed to open \"%s\": %s\n",
path_list[i], strerror(errno));
goto out_free_name;
}
if (fstat(path_beneath.parent_fd, &statbuf)) {
close(path_beneath.parent_fd);
goto out_free_name;
}
path_beneath.allowed_access = allowed_access;
if (!S_ISDIR(statbuf.st_mode))
path_beneath.allowed_access &= ACCESS_FILE;
if (landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
&path_beneath, 0)) {
fprintf(stderr,
"Failed to update the ruleset with \"%s\": %s\n",
path_list[i], strerror(errno));
close(path_beneath.parent_fd);
goto out_free_name;
}
close(path_beneath.parent_fd);
}
ret = 0;
out_free_name:
free(path_list);
free(env_path_name);
return ret;
}
/* clang-format off */
#define ACCESS_FS_ROUGHLY_READ ( \
LANDLOCK_ACCESS_FS_EXECUTE | \
LANDLOCK_ACCESS_FS_READ_FILE | \
LANDLOCK_ACCESS_FS_READ_DIR)
#define ACCESS_FS_ROUGHLY_WRITE ( \
LANDLOCK_ACCESS_FS_WRITE_FILE | \
LANDLOCK_ACCESS_FS_REMOVE_DIR | \
LANDLOCK_ACCESS_FS_REMOVE_FILE | \
LANDLOCK_ACCESS_FS_MAKE_CHAR | \
LANDLOCK_ACCESS_FS_MAKE_DIR | \
LANDLOCK_ACCESS_FS_MAKE_REG | \
LANDLOCK_ACCESS_FS_MAKE_SOCK | \
LANDLOCK_ACCESS_FS_MAKE_FIFO | \
LANDLOCK_ACCESS_FS_MAKE_BLOCK | \
LANDLOCK_ACCESS_FS_MAKE_SYM | \
LANDLOCK_ACCESS_FS_REFER | \
LANDLOCK_ACCESS_FS_TRUNCATE)
/* clang-format on */
#define LANDLOCK_ABI_LAST 3
int main(const int argc, char *const argv[], char *const *const envp)
{
const char *cmd_path;
char *const *cmd_argv;
int ruleset_fd, abi;
__u64 access_fs_ro = ACCESS_FS_ROUGHLY_READ,
access_fs_rw = ACCESS_FS_ROUGHLY_READ | ACCESS_FS_ROUGHLY_WRITE;
struct landlock_ruleset_attr ruleset_attr = {
.handled_access_fs = access_fs_rw,
};
if (argc < 2) {
fprintf(stderr,
"usage: %s=\"...\" %s=\"...\" %s <cmd> [args]...\n\n",
ENV_FS_RO_NAME, ENV_FS_RW_NAME, argv[0]);
fprintf(stderr,
"Launch a command in a restricted environment.\n\n");
fprintf(stderr, "Environment variables containing paths, "
"each separated by a colon:\n");
fprintf(stderr,
"* %s: list of paths allowed to be used in a read-only way.\n",
ENV_FS_RO_NAME);
fprintf(stderr,
"* %s: list of paths allowed to be used in a read-write way.\n",
ENV_FS_RW_NAME);
fprintf(stderr,
"\nexample:\n"
"%s=\"/bin:/lib:/usr:/proc:/etc:/dev/urandom\" "
"%s=\"/dev/null:/dev/full:/dev/zero:/dev/pts:/tmp\" "
"%s bash -i\n\n",
ENV_FS_RO_NAME, ENV_FS_RW_NAME, argv[0]);
fprintf(stderr,
"This sandboxer can use Landlock features "
"up to ABI version %d.\n",
LANDLOCK_ABI_LAST);
return 1;
}
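	/*
	 * Probe the highest Landlock ABI supported by the running kernel.
	 * Passing a NULL attribute with the VERSION flag queries the ABI
	 * version without creating a ruleset.
	 */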
abi = landlock_create_ruleset(NULL, 0, LANDLOCK_CREATE_RULESET_VERSION);
if (abi < 0) {
const int err = errno;
perror("Failed to check Landlock compatibility");
switch (err) {
case ENOSYS:
fprintf(stderr,
"Hint: Landlock is not supported by the current kernel. "
"To support it, build the kernel with "
"CONFIG_SECURITY_LANDLOCK=y and prepend "
"\"landlock,\" to the content of CONFIG_LSM.\n");
break;
case EOPNOTSUPP:
fprintf(stderr,
"Hint: Landlock is currently disabled. "
"It can be enabled in the kernel configuration by "
"prepending \"landlock,\" to the content of CONFIG_LSM, "
"or at boot time by setting the same content to the "
"\"lsm\" kernel parameter.\n");
break;
}
return 1;
}
/* Best-effort security. */
switch (abi) {
case 1:
/*
* Removes LANDLOCK_ACCESS_FS_REFER for ABI < 2
*
* Note: The "refer" operations (file renaming and linking
* across different directories) are always forbidden when using
* Landlock with ABI 1.
*
* If only ABI 1 is available, this sandboxer knowingly forbids
* refer operations.
*
* If a program *needs* to do refer operations after enabling
* Landlock, it can not use Landlock at ABI level 1. To be
* compatible with different kernel versions, such programs
* should then fall back to not restrict themselves at all if
* the running kernel only supports ABI 1.
*/
ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_REFER;
__attribute__((fallthrough));
case 2:
/* Removes LANDLOCK_ACCESS_FS_TRUNCATE for ABI < 3 */
ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_TRUNCATE;
fprintf(stderr,
"Hint: You should update the running kernel "
"to leverage Landlock features "
"provided by ABI version %d (instead of %d).\n",
LANDLOCK_ABI_LAST, abi);
__attribute__((fallthrough));
case LANDLOCK_ABI_LAST:
break;
default:
fprintf(stderr,
"Hint: You should update this sandboxer "
"to leverage Landlock features "
"provided by ABI version %d (instead of %d).\n",
abi, LANDLOCK_ABI_LAST);
}
access_fs_ro &= ruleset_attr.handled_access_fs;
access_fs_rw &= ruleset_attr.handled_access_fs;
ruleset_fd =
landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
if (ruleset_fd < 0) {
perror("Failed to create a ruleset");
return 1;
}
if (populate_ruleset(ENV_FS_RO_NAME, ruleset_fd, access_fs_ro)) {
goto err_close_ruleset;
}
if (populate_ruleset(ENV_FS_RW_NAME, ruleset_fd, access_fs_rw)) {
goto err_close_ruleset;
}
if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
perror("Failed to restrict privileges");
goto err_close_ruleset;
}
if (landlock_restrict_self(ruleset_fd, 0)) {
perror("Failed to enforce ruleset");
goto err_close_ruleset;
}
close(ruleset_fd);
cmd_path = argv[1];
cmd_argv = argv + 1;
execvpe(cmd_path, cmd_argv, envp);
fprintf(stderr, "Failed to execute \"%s\": %s\n", cmd_path,
strerror(errno));
fprintf(stderr, "Hint: access to the binary, the interpreter or "
"shared libraries may be denied.\n");
return 1;
err_close_ruleset:
close(ruleset_fd);
return 1;
}
| linux-master | samples/landlock/sandboxer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Joe Lawrence <[email protected]>
*/
/*
* livepatch-shadow-mod.c - Shadow variables, buggy module demo
*
* Purpose
* -------
*
* As a demonstration of livepatch shadow variable API, this module
* introduces memory leak behavior that livepatch modules
* livepatch-shadow-fix1.ko and livepatch-shadow-fix2.ko correct and
* enhance.
*
* WARNING - even though the livepatch-shadow-fix modules patch the
* memory leak, please load these modules at your own risk -- some
 * amount of memory may be leaked before the bug is patched.
*
*
* Usage
* -----
*
* Step 1 - Load the buggy demonstration module:
*
* insmod samples/livepatch/livepatch-shadow-mod.ko
*
* Watch dmesg output for a few moments to see new dummy being allocated
* and a periodic cleanup check. (Note: a small amount of memory is
* being leaked.)
*
*
* Step 2 - Load livepatch fix1:
*
* insmod samples/livepatch/livepatch-shadow-fix1.ko
*
* Continue watching dmesg and note that now livepatch_fix1_dummy_free()
* and livepatch_fix1_dummy_alloc() are logging messages about leaked
* memory and eventually leaks prevented.
*
*
* Step 3 - Load livepatch fix2 (on top of fix1):
*
* insmod samples/livepatch/livepatch-shadow-fix2.ko
*
* This module extends functionality through shadow variables, as a new
* "check" counter is added to the dummy structure. Periodic dmesg
* messages will log these as dummies are cleaned up.
*
*
* Step 4 - Cleanup
*
* Unwind the demonstration by disabling the livepatch fix modules, then
* removing them and the demo module:
*
* echo 0 > /sys/kernel/livepatch/livepatch_shadow_fix2/enabled
* echo 0 > /sys/kernel/livepatch/livepatch_shadow_fix1/enabled
* rmmod livepatch-shadow-fix2
* rmmod livepatch-shadow-fix1
* rmmod livepatch-shadow-mod
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/workqueue.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joe Lawrence <[email protected]>");
MODULE_DESCRIPTION("Buggy module for shadow variable demo");
/* Allocate new dummies every second */
#define ALLOC_PERIOD 1
/* Check for expired dummies after a few new ones have been allocated */
#define CLEANUP_PERIOD (3 * ALLOC_PERIOD)
/* Dummies expire after a few cleanup instances */
#define EXPIRE_PERIOD (4 * CLEANUP_PERIOD)
/*
* Keep a list of all the dummies so we can clean up any residual ones
* on module exit
*/
static LIST_HEAD(dummy_list);
static DEFINE_MUTEX(dummy_list_mutex);
struct dummy {
struct list_head list;
unsigned long jiffies_expire;
};
static __used noinline struct dummy *dummy_alloc(void)
{
struct dummy *d;
int *leak;
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return NULL;
d->jiffies_expire = jiffies +
msecs_to_jiffies(1000 * EXPIRE_PERIOD);
/* Oops, forgot to save leak! */
leak = kzalloc(sizeof(*leak), GFP_KERNEL);
if (!leak) {
kfree(d);
return NULL;
}
pr_info("%s: dummy @ %p, expires @ %lx\n",
__func__, d, d->jiffies_expire);
return d;
}
static __used noinline void dummy_free(struct dummy *d)
{
pr_info("%s: dummy @ %p, expired = %lx\n",
__func__, d, d->jiffies_expire);
kfree(d);
}
static __used noinline bool dummy_check(struct dummy *d,
unsigned long jiffies)
{
return time_after(jiffies, d->jiffies_expire);
}
/*
 * alloc_work_func: allocates new dummy structures and additional
 *                  memory, aptly named "leak", but does not keep a
 *                  permanent record of it.
*/
static void alloc_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(alloc_dwork, alloc_work_func);
static void alloc_work_func(struct work_struct *work)
{
struct dummy *d;
d = dummy_alloc();
if (!d)
return;
mutex_lock(&dummy_list_mutex);
list_add(&d->list, &dummy_list);
mutex_unlock(&dummy_list_mutex);
schedule_delayed_work(&alloc_dwork,
msecs_to_jiffies(1000 * ALLOC_PERIOD));
}
/*
 * cleanup_work_func: frees dummy structures. Without knowledge of
* "leak", it leaks the additional memory that
* alloc_work_func created.
*/
static void cleanup_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(cleanup_dwork, cleanup_work_func);
static void cleanup_work_func(struct work_struct *work)
{
struct dummy *d, *tmp;
unsigned long j;
j = jiffies;
pr_info("%s: jiffies = %lx\n", __func__, j);
mutex_lock(&dummy_list_mutex);
list_for_each_entry_safe(d, tmp, &dummy_list, list) {
/* Kick out and free any expired dummies */
if (dummy_check(d, j)) {
list_del(&d->list);
dummy_free(d);
}
}
mutex_unlock(&dummy_list_mutex);
schedule_delayed_work(&cleanup_dwork,
msecs_to_jiffies(1000 * CLEANUP_PERIOD));
}
static int livepatch_shadow_mod_init(void)
{
schedule_delayed_work(&alloc_dwork,
msecs_to_jiffies(1000 * ALLOC_PERIOD));
schedule_delayed_work(&cleanup_dwork,
msecs_to_jiffies(1000 * CLEANUP_PERIOD));
return 0;
}
static void livepatch_shadow_mod_exit(void)
{
struct dummy *d, *tmp;
/* Wait for any dummies at work */
cancel_delayed_work_sync(&alloc_dwork);
cancel_delayed_work_sync(&cleanup_dwork);
/* Cleanup residual dummies */
list_for_each_entry_safe(d, tmp, &dummy_list, list) {
list_del(&d->list);
dummy_free(d);
}
}
module_init(livepatch_shadow_mod_init);
module_exit(livepatch_shadow_mod_exit);
| linux-master | samples/livepatch/livepatch-shadow-mod.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Joe Lawrence <[email protected]>
*/
/*
* livepatch-shadow-fix1.c - Shadow variables, livepatch demo
*
* Purpose
* -------
*
* Fixes the memory leak introduced in livepatch-shadow-mod through the
* use of a shadow variable. This fix demonstrates the "extending" of
* short-lived data structures by patching its allocation and release
* functions.
*
*
* Usage
* -----
*
* This module is not intended to be standalone. See the "Usage"
* section of livepatch-shadow-mod.c.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/slab.h>
/* Shadow variable enums */
#define SV_LEAK 1
/* Allocate new dummies every second */
#define ALLOC_PERIOD 1
/* Check for expired dummies after a few new ones have been allocated */
#define CLEANUP_PERIOD (3 * ALLOC_PERIOD)
/* Dummies expire after a few cleanup instances */
#define EXPIRE_PERIOD (4 * CLEANUP_PERIOD)
struct dummy {
struct list_head list;
unsigned long jiffies_expire;
};
/*
* The constructor makes more sense together with klp_shadow_get_or_alloc().
* In this example, it would be safe to assign the pointer also to the shadow
* variable returned by klp_shadow_alloc(). But we wanted to show the more
* complicated use of the API.
*/
static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data)
{
int **shadow_leak = shadow_data;
int **leak = ctor_data;
if (!ctor_data)
return -EINVAL;
*shadow_leak = *leak;
return 0;
}
static struct dummy *livepatch_fix1_dummy_alloc(void)
{
struct dummy *d;
int *leak;
int **shadow_leak;
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return NULL;
d->jiffies_expire = jiffies +
msecs_to_jiffies(1000 * EXPIRE_PERIOD);
/*
* Patch: save the extra memory location into a SV_LEAK shadow
* variable. A patched dummy_free routine can later fetch this
* pointer to handle resource release.
*/
leak = kzalloc(sizeof(*leak), GFP_KERNEL);
if (!leak)
goto err_leak;
shadow_leak = klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
shadow_leak_ctor, &leak);
if (!shadow_leak) {
pr_err("%s: failed to allocate shadow variable for the leaking pointer: dummy @ %p, leak @ %p\n",
__func__, d, leak);
goto err_shadow;
}
pr_info("%s: dummy @ %p, expires @ %lx\n",
__func__, d, d->jiffies_expire);
return d;
err_shadow:
kfree(leak);
err_leak:
kfree(d);
return NULL;
}
static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data)
{
void *d = obj;
int **shadow_leak = shadow_data;
pr_info("%s: dummy @ %p, prevented leak @ %p\n",
__func__, d, *shadow_leak);
kfree(*shadow_leak);
}
static void livepatch_fix1_dummy_free(struct dummy *d)
{
int **shadow_leak;
/*
* Patch: fetch the saved SV_LEAK shadow variable, detach and
* free it. Note: handle cases where this shadow variable does
* not exist (ie, dummy structures allocated before this livepatch
* was loaded.)
*/
shadow_leak = klp_shadow_get(d, SV_LEAK);
if (shadow_leak)
klp_shadow_free(d, SV_LEAK, livepatch_fix1_dummy_leak_dtor);
else
pr_info("%s: dummy @ %p leaked!\n", __func__, d);
kfree(d);
}
static struct klp_func funcs[] = {
{
.old_name = "dummy_alloc",
.new_func = livepatch_fix1_dummy_alloc,
},
{
.old_name = "dummy_free",
.new_func = livepatch_fix1_dummy_free,
}, { }
};
static struct klp_object objs[] = {
{
.name = "livepatch_shadow_mod",
.funcs = funcs,
}, { }
};
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
};
static int livepatch_shadow_fix1_init(void)
{
return klp_enable_patch(&patch);
}
static void livepatch_shadow_fix1_exit(void)
{
/* Cleanup any existing SV_LEAK shadow variables */
klp_shadow_free_all(SV_LEAK, livepatch_fix1_dummy_leak_dtor);
}
module_init(livepatch_shadow_fix1_init);
module_exit(livepatch_shadow_fix1_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
| linux-master | samples/livepatch/livepatch-shadow-fix1.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Joe Lawrence <[email protected]>
*/
/*
* livepatch-callbacks-mod.c - (un)patching callbacks demo support module
*
*
* Purpose
* -------
*
* Simple module to demonstrate livepatch (un)patching callbacks.
*
*
* Usage
* -----
*
* This module is not intended to be standalone. See the "Usage"
* section of livepatch-callbacks-demo.c.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
static int livepatch_callbacks_mod_init(void)
{
pr_info("%s\n", __func__);
return 0;
}
static void livepatch_callbacks_mod_exit(void)
{
pr_info("%s\n", __func__);
}
module_init(livepatch_callbacks_mod_init);
module_exit(livepatch_callbacks_mod_exit);
MODULE_LICENSE("GPL");
| linux-master | samples/livepatch/livepatch-callbacks-mod.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Joe Lawrence <[email protected]>
*/
/*
* livepatch-shadow-fix2.c - Shadow variables, livepatch demo
*
* Purpose
* -------
*
* Adds functionality to livepatch-shadow-mod's in-flight data
* structures through a shadow variable. The livepatch patches a
* routine that periodically inspects data structures, incrementing a
* per-data-structure counter, creating the counter if needed.
*
*
* Usage
* -----
*
* This module is not intended to be standalone. See the "Usage"
* section of livepatch-shadow-mod.c.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/slab.h>
/* Shadow variable enums */
#define SV_LEAK 1
#define SV_COUNTER 2
struct dummy {
struct list_head list;
unsigned long jiffies_expire;
};
static bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies)
{
int *shadow_count;
/*
* Patch: handle in-flight dummy structures, if they do not
* already have a SV_COUNTER shadow variable, then attach a
* new one.
*/
shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER,
sizeof(*shadow_count), GFP_NOWAIT,
NULL, NULL);
if (shadow_count)
*shadow_count += 1;
return time_after(jiffies, d->jiffies_expire);
}
static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data)
{
void *d = obj;
int **shadow_leak = shadow_data;
pr_info("%s: dummy @ %p, prevented leak @ %p\n",
__func__, d, *shadow_leak);
kfree(*shadow_leak);
}
static void livepatch_fix2_dummy_free(struct dummy *d)
{
int **shadow_leak;
int *shadow_count;
/* Patch: copy the memory leak patch from the fix1 module. */
shadow_leak = klp_shadow_get(d, SV_LEAK);
if (shadow_leak)
klp_shadow_free(d, SV_LEAK, livepatch_fix2_dummy_leak_dtor);
else
pr_info("%s: dummy @ %p leaked!\n", __func__, d);
/*
* Patch: fetch the SV_COUNTER shadow variable and display
* the final count. Detach the shadow variable.
*/
shadow_count = klp_shadow_get(d, SV_COUNTER);
if (shadow_count) {
pr_info("%s: dummy @ %p, check counter = %d\n",
__func__, d, *shadow_count);
klp_shadow_free(d, SV_COUNTER, NULL);
}
kfree(d);
}
static struct klp_func funcs[] = {
{
.old_name = "dummy_check",
.new_func = livepatch_fix2_dummy_check,
},
{
.old_name = "dummy_free",
.new_func = livepatch_fix2_dummy_free,
}, { }
};
static struct klp_object objs[] = {
{
.name = "livepatch_shadow_mod",
.funcs = funcs,
}, { }
};
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
};
static int livepatch_shadow_fix2_init(void)
{
return klp_enable_patch(&patch);
}
static void livepatch_shadow_fix2_exit(void)
{
/* Cleanup any existing SV_COUNTER shadow variables */
klp_shadow_free_all(SV_COUNTER, NULL);
}
module_init(livepatch_shadow_fix2_init);
module_exit(livepatch_shadow_fix2_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
| linux-master | samples/livepatch/livepatch-shadow-fix2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Joe Lawrence <[email protected]>
*/
/*
* livepatch-callbacks-busymod.c - (un)patching callbacks demo support module
*
*
* Purpose
* -------
*
* Simple module to demonstrate livepatch (un)patching callbacks.
*
*
* Usage
* -----
*
* This module is not intended to be standalone. See the "Usage"
 * section of livepatch-callbacks-demo.c.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
static int sleep_secs;
module_param(sleep_secs, int, 0644);
MODULE_PARM_DESC(sleep_secs, "sleep_secs (default=0)");
static void busymod_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(work, busymod_work_func);
static void busymod_work_func(struct work_struct *work)
{
pr_info("%s, sleeping %d seconds ...\n", __func__, sleep_secs);
msleep(sleep_secs * 1000);
pr_info("%s exit\n", __func__);
}
static int livepatch_callbacks_mod_init(void)
{
pr_info("%s\n", __func__);
schedule_delayed_work(&work,
msecs_to_jiffies(1000 * 0));
return 0;
}
static void livepatch_callbacks_mod_exit(void)
{
cancel_delayed_work_sync(&work);
pr_info("%s\n", __func__);
}
module_init(livepatch_callbacks_mod_init);
module_exit(livepatch_callbacks_mod_exit);
MODULE_LICENSE("GPL");
| linux-master | samples/livepatch/livepatch-callbacks-busymod.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Joe Lawrence <[email protected]>
*/
/*
* livepatch-callbacks-demo.c - (un)patching callbacks livepatch demo
*
*
* Purpose
* -------
*
* Demonstration of registering livepatch (un)patching callbacks.
*
*
* Usage
* -----
*
* Step 1 - load the simple module
*
* insmod samples/livepatch/livepatch-callbacks-mod.ko
*
*
* Step 2 - load the demonstration livepatch (with callbacks)
*
* insmod samples/livepatch/livepatch-callbacks-demo.ko
*
*
* Step 3 - cleanup
*
* echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
* rmmod livepatch_callbacks_demo
* rmmod livepatch_callbacks_mod
*
* Watch dmesg output to see livepatch enablement, callback execution
* and patching operations for both vmlinux and module targets.
*
* NOTE: swap the insmod order of livepatch-callbacks-mod.ko and
* livepatch-callbacks-demo.ko to observe what happens when a
* target module is loaded after a livepatch with callbacks.
*
* NOTE: 'pre_patch_ret' is a module parameter that sets the pre-patch
* callback return status. Try setting up a non-zero status
* such as -19 (-ENODEV):
*
* # Load demo livepatch, vmlinux is patched
* insmod samples/livepatch/livepatch-callbacks-demo.ko
*
* # Setup next pre-patch callback to return -ENODEV
* echo -19 > /sys/module/livepatch_callbacks_demo/parameters/pre_patch_ret
*
* # Module loader refuses to load the target module
* insmod samples/livepatch/livepatch-callbacks-mod.ko
* insmod: ERROR: could not insert module samples/livepatch/livepatch-callbacks-mod.ko: No such device
*
* NOTE: There is a second target module,
* livepatch-callbacks-busymod.ko, available for experimenting
* with livepatch (un)patch callbacks. This module contains
* a 'sleep_secs' parameter that parks the module on one of the
* functions that the livepatch demo module wants to patch.
* Modifying this value and tweaking the order of module loads can
* effectively demonstrate stalled patch transitions:
*
* # Load a target module, let it park on 'busymod_work_func' for
* # thirty seconds
* insmod samples/livepatch/livepatch-callbacks-busymod.ko sleep_secs=30
*
* # Meanwhile load the livepatch
* insmod samples/livepatch/livepatch-callbacks-demo.ko
*
* # ... then load and unload another target module while the
* # transition is in progress
* insmod samples/livepatch/livepatch-callbacks-mod.ko
* rmmod samples/livepatch/livepatch-callbacks-mod.ko
*
* # Finally cleanup
* echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
* rmmod samples/livepatch/livepatch-callbacks-demo.ko
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
static int pre_patch_ret;
module_param(pre_patch_ret, int, 0644);
MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)");
static const char *const module_state[] = {
[MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
	[MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init",
[MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
[MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
};
static void callback_info(const char *callback, struct klp_object *obj)
{
if (obj->mod)
pr_info("%s: %s -> %s\n", callback, obj->mod->name,
module_state[obj->mod->state]);
else
pr_info("%s: vmlinux\n", callback);
}
/* Executed on object patching (ie, patch enablement) */
static int pre_patch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
return pre_patch_ret;
}
/* Executed on object patching (ie, patch enablement) */
static void post_patch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
}
/* Executed on object unpatching (ie, patch disablement) */
static void pre_unpatch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
}
/* Executed on object unpatching (ie, patch disablement) */
static void post_unpatch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
}
static void patched_work_func(struct work_struct *work)
{
pr_info("%s\n", __func__);
}
static struct klp_func no_funcs[] = {
{ }
};
static struct klp_func busymod_funcs[] = {
{
.old_name = "busymod_work_func",
.new_func = patched_work_func,
}, { }
};
static struct klp_object objs[] = {
{
.name = NULL, /* vmlinux */
.funcs = no_funcs,
.callbacks = {
.pre_patch = pre_patch_callback,
.post_patch = post_patch_callback,
.pre_unpatch = pre_unpatch_callback,
.post_unpatch = post_unpatch_callback,
},
}, {
.name = "livepatch_callbacks_mod",
.funcs = no_funcs,
.callbacks = {
.pre_patch = pre_patch_callback,
.post_patch = post_patch_callback,
.pre_unpatch = pre_unpatch_callback,
.post_unpatch = post_unpatch_callback,
},
}, {
.name = "livepatch_callbacks_busymod",
.funcs = busymod_funcs,
.callbacks = {
.pre_patch = pre_patch_callback,
.post_patch = post_patch_callback,
.pre_unpatch = pre_unpatch_callback,
.post_unpatch = post_unpatch_callback,
},
}, { }
};
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
};
static int livepatch_callbacks_demo_init(void)
{
return klp_enable_patch(&patch);
}
static void livepatch_callbacks_demo_exit(void)
{
}
module_init(livepatch_callbacks_demo_init);
module_exit(livepatch_callbacks_demo_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
| linux-master | samples/livepatch/livepatch-callbacks-demo.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* livepatch-sample.c - Kernel Live Patching Sample Module
*
* Copyright (C) 2014 Seth Jennings <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
/*
* This (dumb) live patch overrides the function that prints the
* kernel boot cmdline when /proc/cmdline is read.
*
* Example:
*
* $ cat /proc/cmdline
* <your cmdline>
*
* $ insmod livepatch-sample.ko
* $ cat /proc/cmdline
* this has been live patched
*
* $ echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
* $ cat /proc/cmdline
* <your cmdline>
*/
#include <linux/seq_file.h>
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%s\n", "this has been live patched");
return 0;
}
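/*
 * Each klp_func entry names the function to be replaced by its symbol
 * name in the target object (here vmlinux) and provides the new
 * implementation; the livepatch core redirects callers via ftrace once
 * the patch is enabled.
 */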
static struct klp_func funcs[] = {
{
.old_name = "cmdline_proc_show",
.new_func = livepatch_cmdline_proc_show,
}, { }
};
static struct klp_object objs[] = {
{
/* name being NULL means vmlinux */
.funcs = funcs,
}, { }
};
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
};
static int livepatch_init(void)
{
return klp_enable_patch(&patch);
}
static void livepatch_exit(void)
{
}
module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
| linux-master | samples/livepatch/livepatch-sample.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, Microsoft Corporation.
*
* Authors:
* Beau Belgrave <[email protected]>
*/
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/user_events.h>
const char *data_file = "/sys/kernel/tracing/user_events_data";
int enabled = 0;
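/*
 * The kernel sets or clears bit 31 (reg.enable_bit below) at the address
 * registered via reg.enable_addr whenever a tracer attaches to or detaches
 * from the event, so a plain non-zero check on "enabled" is enough to know
 * whether anyone is listening.
 */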
static int event_reg(int fd, const char *command, int *write, int *enabled)
{
struct user_reg reg = {0};
reg.size = sizeof(reg);
reg.enable_bit = 31;
reg.enable_size = sizeof(*enabled);
reg.enable_addr = (__u64)enabled;
reg.name_args = (__u64)command;
if (ioctl(fd, DIAG_IOCSREG, ®) == -1)
return -1;
*write = reg.write_index;
return 0;
}
int main(int argc, char **argv)
{
int data_fd, write;
struct iovec io[2];
__u32 count = 0;
	data_fd = open(data_file, O_RDWR);

	if (data_fd == -1) {
		printf("Failed to open %s: %m\n", data_file);
		return errno;
	}
if (event_reg(data_fd, "test u32 count", &write, &enabled) == -1)
return errno;
/* Setup iovec */
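	/* The user_events ABI expects the write index as the first element. */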
io[0].iov_base = &write;
io[0].iov_len = sizeof(write);
io[1].iov_base = &count;
io[1].iov_len = sizeof(count);
ask:
printf("Press enter to check status...\n");
getchar();
/* Check if anyone is listening */
if (enabled) {
/* Yep, trace out our data */
writev(data_fd, (const struct iovec *)io, 2);
/* Increase the count */
count++;
printf("Something was attached, wrote data\n");
}
goto ask;
return 0;
}
| linux-master | samples/user_events/example.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <memory.h>
#include <malloc.h>
#include <time.h>
#include <ctype.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <errno.h>
#include <sys/time.h>
#include <linux/hpet.h>
extern void hpet_open_close(int, const char **);
extern void hpet_info(int, const char **);
extern void hpet_poll(int, const char **);
extern void hpet_fasync(int, const char **);
extern void hpet_read(int, const char **);
#include <sys/poll.h>
#include <sys/ioctl.h>
struct hpet_command {
char *command;
void (*func)(int argc, const char ** argv);
} hpet_command[] = {
{
"open-close",
hpet_open_close
},
{
"info",
hpet_info
},
{
"poll",
hpet_poll
},
{
"fasync",
hpet_fasync
},
};
int
main(int argc, const char ** argv)
{
unsigned int i;
argc--;
argv++;
if (!argc) {
fprintf(stderr, "-hpet: requires command\n");
return -1;
}
for (i = 0; i < (sizeof (hpet_command) / sizeof (hpet_command[0])); i++)
if (!strcmp(argv[0], hpet_command[i].command)) {
argc--;
argv++;
fprintf(stderr, "-hpet: executing %s\n",
hpet_command[i].command);
hpet_command[i].func(argc, argv);
return 0;
}
fprintf(stderr, "do_hpet: command %s not implemented\n", argv[0]);
return -1;
}
void
hpet_open_close(int argc, const char **argv)
{
int fd;
if (argc != 1) {
fprintf(stderr, "hpet_open_close: device-name\n");
return;
}
fd = open(argv[0], O_RDONLY);
if (fd < 0)
fprintf(stderr, "hpet_open_close: open failed\n");
else
close(fd);
return;
}
void
hpet_info(int argc, const char **argv)
{
struct hpet_info info;
int fd;
if (argc != 1) {
fprintf(stderr, "hpet_info: device-name\n");
return;
}
fd = open(argv[0], O_RDONLY);
if (fd < 0) {
fprintf(stderr, "hpet_info: open of %s failed\n", argv[0]);
return;
}
if (ioctl(fd, HPET_INFO, &info) < 0) {
fprintf(stderr, "hpet_info: failed to get info\n");
goto out;
}
fprintf(stderr, "hpet_info: hi_irqfreq 0x%lx hi_flags 0x%lx ",
info.hi_ireqfreq, info.hi_flags);
fprintf(stderr, "hi_hpet %d hi_timer %d\n",
info.hi_hpet, info.hi_timer);
out:
close(fd);
return;
}
void
hpet_poll(int argc, const char **argv)
{
unsigned long freq;
int iterations, i, fd;
struct pollfd pfd;
struct hpet_info info;
struct timeval stv, etv;
struct timezone tz;
long usec;
if (argc != 3) {
fprintf(stderr, "hpet_poll: device-name freq iterations\n");
return;
}
freq = atoi(argv[1]);
iterations = atoi(argv[2]);
fd = open(argv[0], O_RDONLY);
if (fd < 0) {
fprintf(stderr, "hpet_poll: open of %s failed\n", argv[0]);
return;
}
if (ioctl(fd, HPET_IRQFREQ, freq) < 0) {
fprintf(stderr, "hpet_poll: HPET_IRQFREQ failed\n");
goto out;
}
if (ioctl(fd, HPET_INFO, &info) < 0) {
fprintf(stderr, "hpet_poll: failed to get info\n");
goto out;
}
fprintf(stderr, "hpet_poll: info.hi_flags 0x%lx\n", info.hi_flags);
if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) {
fprintf(stderr, "hpet_poll: HPET_EPI failed\n");
goto out;
}
if (ioctl(fd, HPET_IE_ON, 0) < 0) {
fprintf(stderr, "hpet_poll, HPET_IE_ON failed\n");
goto out;
}
pfd.fd = fd;
pfd.events = POLLIN;
for (i = 0; i < iterations; i++) {
pfd.revents = 0;
gettimeofday(&stv, &tz);
if (poll(&pfd, 1, -1) < 0)
fprintf(stderr, "hpet_poll: poll failed\n");
else {
long data;
gettimeofday(&etv, &tz);
usec = stv.tv_sec * 1000000 + stv.tv_usec;
usec = (etv.tv_sec * 1000000 + etv.tv_usec) - usec;
fprintf(stderr,
"hpet_poll: expired time = 0x%lx\n", usec);
fprintf(stderr, "hpet_poll: revents = 0x%x\n",
pfd.revents);
if (read(fd, &data, sizeof(data)) != sizeof(data)) {
fprintf(stderr, "hpet_poll: read failed\n");
}
else
fprintf(stderr, "hpet_poll: data 0x%lx\n",
data);
}
}
out:
close(fd);
return;
}
static int hpet_sigio_count;
static void
hpet_sigio(int val)
{
fprintf(stderr, "hpet_sigio: called\n");
hpet_sigio_count++;
}
void
hpet_fasync(int argc, const char **argv)
{
unsigned long freq;
int iterations, i, fd, value;
sig_t oldsig;
struct hpet_info info;
hpet_sigio_count = 0;
fd = -1;
if ((oldsig = signal(SIGIO, hpet_sigio)) == SIG_ERR) {
fprintf(stderr, "hpet_fasync: failed to set signal handler\n");
return;
}
if (argc != 3) {
fprintf(stderr, "hpet_fasync: device-name freq iterations\n");
goto out;
}
fd = open(argv[0], O_RDONLY);
if (fd < 0) {
fprintf(stderr, "hpet_fasync: failed to open %s\n", argv[0]);
		goto out;
}
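	/*
	 * Route SIGIO to this process and mark the timer fd O_ASYNC so
	 * that each HPET interrupt delivers a signal to hpet_sigio().
	 */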
	if ((fcntl(fd, F_SETOWN, getpid()) == -1) ||
	    ((value = fcntl(fd, F_GETFL)) == -1) ||
	    (fcntl(fd, F_SETFL, value | O_ASYNC) == -1)) {
fprintf(stderr, "hpet_fasync: fcntl failed\n");
goto out;
}
freq = atoi(argv[1]);
iterations = atoi(argv[2]);
if (ioctl(fd, HPET_IRQFREQ, freq) < 0) {
fprintf(stderr, "hpet_fasync: HPET_IRQFREQ failed\n");
goto out;
}
if (ioctl(fd, HPET_INFO, &info) < 0) {
fprintf(stderr, "hpet_fasync: failed to get info\n");
goto out;
}
fprintf(stderr, "hpet_fasync: info.hi_flags 0x%lx\n", info.hi_flags);
if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) {
fprintf(stderr, "hpet_fasync: HPET_EPI failed\n");
goto out;
}
if (ioctl(fd, HPET_IE_ON, 0) < 0) {
fprintf(stderr, "hpet_fasync, HPET_IE_ON failed\n");
goto out;
}
for (i = 0; i < iterations; i++) {
(void) pause();
fprintf(stderr, "hpet_fasync: count = %d\n", hpet_sigio_count);
}
out:
signal(SIGIO, oldsig);
if (fd >= 0)
close(fd);
return;
}
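/*
 * Example invocations (a sketch; assumes the command dispatch table
 * earlier in this file maps these handlers to like-named commands and
 * that the HPET char device is exposed as /dev/hpet):
 *
 *   $ ./hpet_example info /dev/hpet
 *   $ ./hpet_example poll /dev/hpet 10 100
 *   $ ./hpet_example fasync /dev/hpet 10 100
 */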
| linux-master | samples/timers/hpet_example.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* samples/kmemleak/kmemleak-test.c
*
* Copyright (C) 2008 ARM Limited
* Written by Catalin Marinas <[email protected]>
*/
#define pr_fmt(fmt) "kmemleak: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/fdtable.h>
#include <linux/kmemleak.h>
struct test_node {
long header[25];
struct list_head list;
long footer[25];
};
static LIST_HEAD(test_list);
static DEFINE_PER_CPU(void *, kmemleak_test_pointer);
/*
* Some very simple testing. This function needs to be extended for
* proper testing.
*/
static int kmemleak_test_init(void)
{
struct test_node *elem;
int i;
pr_info("Kmemleak testing\n");
/* make some orphan objects */
pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL));
pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL));
pr_info("kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL));
pr_info("kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL));
pr_info("kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL));
pr_info("kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL));
pr_info("kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL));
pr_info("kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL));
#ifndef CONFIG_MODULES
pr_info("kmem_cache_alloc(files_cachep) = %p\n",
kmem_cache_alloc(files_cachep, GFP_KERNEL));
pr_info("kmem_cache_alloc(files_cachep) = %p\n",
kmem_cache_alloc(files_cachep, GFP_KERNEL));
#endif
pr_info("vmalloc(64) = %p\n", vmalloc(64));
pr_info("vmalloc(64) = %p\n", vmalloc(64));
pr_info("vmalloc(64) = %p\n", vmalloc(64));
pr_info("vmalloc(64) = %p\n", vmalloc(64));
pr_info("vmalloc(64) = %p\n", vmalloc(64));
/*
* Add elements to a list. They should only appear as orphans
* after the module is removed.
*/
for (i = 0; i < 10; i++) {
elem = kzalloc(sizeof(*elem), GFP_KERNEL);
pr_info("kzalloc(sizeof(*elem)) = %p\n", elem);
if (!elem)
return -ENOMEM;
INIT_LIST_HEAD(&elem->list);
list_add_tail(&elem->list, &test_list);
}
for_each_possible_cpu(i) {
per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
pr_info("kmalloc(129) = %p\n",
per_cpu(kmemleak_test_pointer, i));
}
return 0;
}
module_init(kmemleak_test_init);
static void __exit kmemleak_test_exit(void)
{
struct test_node *elem, *tmp;
/*
* Remove the list elements without actually freeing the
* memory.
*/
list_for_each_entry_safe(elem, tmp, &test_list, list)
list_del(&elem->list);
}
module_exit(kmemleak_test_exit);
MODULE_LICENSE("GPL");
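/*
 * Example session (assumes CONFIG_DEBUG_KMEMLEAK=y and debugfs mounted
 * at /sys/kernel/debug; the module name is an assumption based on the
 * file name):
 *
 *   # modprobe kmemleak-test
 *   # echo scan > /sys/kernel/debug/kmemleak
 *   # cat /sys/kernel/debug/kmemleak
 *
 * The kmalloc()/vmalloc() objects above should be reported as orphans;
 * the list elements only after the module has been removed.
 */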
| linux-master | samples/kmemleak/kmemleak-test.c |
// SPDX-License-Identifier: GPL-2.0
/* Use watch_queue API to watch for notifications.
*
* Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define _GNU_SOURCE
#include <stdbool.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <limits.h>
#include <linux/watch_queue.h>
#include <linux/unistd.h>
#include <linux/keyctl.h>
#ifndef KEYCTL_WATCH_KEY
#define KEYCTL_WATCH_KEY -1
#endif
#ifndef __NR_keyctl
#define __NR_keyctl -1
#endif
#define BUF_SIZE 256
static long keyctl_watch_key(int key, int watch_fd, int watch_id)
{
return syscall(__NR_keyctl, KEYCTL_WATCH_KEY, key, watch_fd, watch_id);
}
static const char *key_subtypes[256] = {
[NOTIFY_KEY_INSTANTIATED] = "instantiated",
[NOTIFY_KEY_UPDATED] = "updated",
[NOTIFY_KEY_LINKED] = "linked",
[NOTIFY_KEY_UNLINKED] = "unlinked",
[NOTIFY_KEY_CLEARED] = "cleared",
[NOTIFY_KEY_REVOKED] = "revoked",
[NOTIFY_KEY_INVALIDATED] = "invalidated",
[NOTIFY_KEY_SETATTR] = "setattr",
};
static void saw_key_change(struct watch_notification *n, size_t len)
{
struct key_notification *k = (struct key_notification *)n;
if (len != sizeof(struct key_notification)) {
fprintf(stderr, "Incorrect key message length\n");
return;
}
printf("KEY %08x change=%u[%s] aux=%u\n",
k->key_id, n->subtype, key_subtypes[n->subtype], k->aux);
}
/*
* Consume and display events.
*/
static void consumer(int fd)
{
unsigned char buffer[433], *p, *end;
union {
struct watch_notification n;
unsigned char buf1[128];
} n;
ssize_t buf_len;
for (;;) {
buf_len = read(fd, buffer, sizeof(buffer));
if (buf_len == -1) {
perror("read");
exit(1);
}
if (buf_len == 0) {
printf("-- END --\n");
return;
}
if (buf_len > sizeof(buffer)) {
fprintf(stderr, "Read buffer overrun: %zd\n", buf_len);
return;
}
printf("read() = %zd\n", buf_len);
p = buffer;
end = buffer + buf_len;
while (p < end) {
size_t largest, len;
largest = end - p;
if (largest > 128)
largest = 128;
if (largest < sizeof(struct watch_notification)) {
fprintf(stderr, "Short message header: %zu\n", largest);
return;
}
memcpy(&n, p, largest);
printf("NOTIFY[%03zx]: ty=%06x sy=%02x i=%08x\n",
p - buffer, n.n.type, n.n.subtype, n.n.info);
len = n.n.info & WATCH_INFO_LENGTH;
if (len < sizeof(n.n) || len > largest) {
fprintf(stderr, "Bad message length: %zu/%zu\n", len, largest);
exit(1);
}
switch (n.n.type) {
case WATCH_TYPE_META:
switch (n.n.subtype) {
case WATCH_META_REMOVAL_NOTIFICATION:
printf("REMOVAL of watchpoint %08x\n",
(n.n.info & WATCH_INFO_ID) >>
WATCH_INFO_ID__SHIFT);
break;
case WATCH_META_LOSS_NOTIFICATION:
printf("-- LOSS --\n");
break;
default:
printf("other meta record\n");
break;
}
break;
case WATCH_TYPE_KEY_NOTIFY:
saw_key_change(&n.n, len);
break;
default:
printf("other type\n");
break;
}
p += len;
}
}
}
static struct watch_notification_filter filter = {
.nr_filters = 1,
.filters = {
[0] = {
.type = WATCH_TYPE_KEY_NOTIFY,
.subtype_filter[0] = UINT_MAX,
},
},
};
int main(int argc, char **argv)
{
int pipefd[2], fd;
if (pipe2(pipefd, O_NOTIFICATION_PIPE) == -1) {
perror("pipe2");
exit(1);
}
fd = pipefd[0];
if (ioctl(fd, IOC_WATCH_QUEUE_SET_SIZE, BUF_SIZE) == -1) {
perror("watch_queue(size)");
exit(1);
}
if (ioctl(fd, IOC_WATCH_QUEUE_SET_FILTER, &filter) == -1) {
perror("watch_queue(filter)");
exit(1);
}
if (keyctl_watch_key(KEY_SPEC_SESSION_KEYRING, fd, 0x01) == -1) {
perror("keyctl");
exit(1);
}
if (keyctl_watch_key(KEY_SPEC_USER_KEYRING, fd, 0x02) == -1) {
perror("keyctl");
exit(1);
}
consumer(fd);
exit(0);
}
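/*
 * Example of triggering notifications while this program runs (a
 * sketch; assumes the keyutils "keyctl" tool is installed). Adding and
 * then unlinking a key in the watched session keyring should produce
 * "linked"/"unlinked" events on watch id 0x01:
 *
 *   $ keyctl add user debug:foo bar @s
 *   $ keyctl unlink debug:foo @s
 */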
| linux-master | samples/watch_queue/watch_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Here's a sample kernel module showing the use of fprobe to dump a
* stack trace and selected registers when kernel_clone() is called.
*
* For more information on theory of operation of kprobes, see
* Documentation/trace/kprobes.rst
*
* You will see the trace data in /var/log/messages and on the console
* whenever kernel_clone() is invoked to create a new process.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fprobe.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#define BACKTRACE_DEPTH 16
#define MAX_SYMBOL_LEN 4096
static struct fprobe sample_probe;
static unsigned long nhit;
static char symbol[MAX_SYMBOL_LEN] = "kernel_clone";
module_param_string(symbol, symbol, sizeof(symbol), 0644);
MODULE_PARM_DESC(symbol, "Probed symbol(s), given by comma separated symbols or a wildcard pattern.");
static char nosymbol[MAX_SYMBOL_LEN] = "";
module_param_string(nosymbol, nosymbol, sizeof(nosymbol), 0644);
MODULE_PARM_DESC(nosymbol, "Not-probed symbols, given by a wildcard pattern.");
static bool stackdump = true;
module_param(stackdump, bool, 0644);
MODULE_PARM_DESC(stackdump, "Enable stackdump.");
static bool use_trace = false;
module_param(use_trace, bool, 0644);
MODULE_PARM_DESC(use_trace, "Use trace_printk instead of printk. This is only for debugging.");
static void show_backtrace(void)
{
unsigned long stacks[BACKTRACE_DEPTH];
unsigned int len;
len = stack_trace_save(stacks, BACKTRACE_DEPTH, 2);
stack_trace_print(stacks, len, 24);
}
static int sample_entry_handler(struct fprobe *fp, unsigned long ip,
unsigned long ret_ip,
struct pt_regs *regs, void *data)
{
if (use_trace)
/*
* This is just an example, no kernel code should call
* trace_printk() except when actively debugging.
*/
trace_printk("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
else
pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
nhit++;
if (stackdump)
show_backtrace();
return 0;
}
static void sample_exit_handler(struct fprobe *fp, unsigned long ip,
unsigned long ret_ip, struct pt_regs *regs,
void *data)
{
unsigned long rip = ret_ip;
if (use_trace)
/*
* This is just an example, no kernel code should call
* trace_printk() except when actively debugging.
*/
trace_printk("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
(void *)ip, (void *)ip, (void *)rip, (void *)rip);
else
pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
(void *)ip, (void *)ip, (void *)rip, (void *)rip);
nhit++;
if (stackdump)
show_backtrace();
}
static int __init fprobe_init(void)
{
char *p, *symbuf = NULL;
const char **syms;
int ret, count, i;
sample_probe.entry_handler = sample_entry_handler;
sample_probe.exit_handler = sample_exit_handler;
if (strchr(symbol, '*')) {
/* filter based fprobe */
ret = register_fprobe(&sample_probe, symbol,
nosymbol[0] == '\0' ? NULL : nosymbol);
goto out;
} else if (!strchr(symbol, ',')) {
symbuf = symbol;
ret = register_fprobe_syms(&sample_probe, (const char **)&symbuf, 1);
goto out;
}
/* Comma separated symbols */
symbuf = kstrdup(symbol, GFP_KERNEL);
if (!symbuf)
return -ENOMEM;
p = symbuf;
count = 1;
while ((p = strchr(++p, ',')) != NULL)
count++;
pr_info("%d symbols found\n", count);
syms = kcalloc(count, sizeof(char *), GFP_KERNEL);
if (!syms) {
kfree(symbuf);
return -ENOMEM;
}
p = symbuf;
for (i = 0; i < count; i++)
syms[i] = strsep(&p, ",");
ret = register_fprobe_syms(&sample_probe, syms, count);
kfree(syms);
kfree(symbuf);
out:
if (ret < 0)
pr_err("register_fprobe failed, returned %d\n", ret);
else
pr_info("Planted fprobe at %s\n", symbol);
return ret;
}
static void __exit fprobe_exit(void)
{
unregister_fprobe(&sample_probe);
pr_info("fprobe at %s unregistered. %ld times hit, %ld times missed\n",
symbol, nhit, sample_probe.nmissed);
}
module_init(fprobe_init)
module_exit(fprobe_exit)
MODULE_LICENSE("GPL");
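/*
 * Example invocations (module name assumed from the file name; the
 * symbols shown are illustrative). A wildcard pattern takes the
 * filter-based path in fprobe_init(), while a comma separated list
 * takes the register_fprobe_syms() path:
 *
 *   # insmod fprobe_example.ko symbol='vfs_*' nosymbol='vfs_read'
 *   # insmod fprobe_example.ko symbol=kernel_clone,schedule_timeout
 */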
| linux-master | samples/fprobe/fprobe_example.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(C) 2020 Linaro Limited. All rights reserved.
* Author: Mike Leach <[email protected]>
*/
#include "coresight-config.h"
#include "coresight-syscfg.h"
/* create an alternate autofdo configuration */
/* we will provide 4 sets of preset parameter values */
#define AFDO2_NR_PRESETS 4
/* the total number of parameters in used features - strobing has 2 */
#define AFDO2_NR_PARAM_SUM 2
static const char *afdo2_ref_names[] = {
"strobing",
};
/*
* set of presets leaves strobing window constant while varying period to allow
* experimentation with mark / space ratios for various workloads
*/
static u64 afdo2_presets[AFDO2_NR_PRESETS][AFDO2_NR_PARAM_SUM] = {
{ 1000, 100 },
{ 1000, 1000 },
{ 1000, 5000 },
{ 1000, 10000 },
};
struct cscfg_config_desc afdo2 = {
.name = "autofdo2",
.description = "Setup ETMs with strobing for autofdo\n"
"Supplied presets allow experimentation with mark-space ratio for various loads\n",
.nr_feat_refs = ARRAY_SIZE(afdo2_ref_names),
.feat_ref_names = afdo2_ref_names,
.nr_presets = AFDO2_NR_PRESETS,
.nr_total_params = AFDO2_NR_PARAM_SUM,
.presets = &afdo2_presets[0][0],
};
static struct cscfg_feature_desc *sample_feats[] = {
NULL
};
static struct cscfg_config_desc *sample_cfgs[] = {
&afdo2,
NULL
};
static struct cscfg_load_owner_info mod_owner = {
.type = CSCFG_OWNER_MODULE,
.owner_handle = THIS_MODULE,
};
/* module init and exit - just load and unload configs */
static int __init cscfg_sample_init(void)
{
return cscfg_load_config_sets(sample_cfgs, sample_feats, &mod_owner);
}
static void __exit cscfg_sample_exit(void)
{
cscfg_unload_config_sets(&mod_owner);
}
module_init(cscfg_sample_init);
module_exit(cscfg_sample_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mike Leach <[email protected]>");
MODULE_DESCRIPTION("CoreSight Syscfg Example");
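/*
 * Example use once loaded (an assumption based on the CoreSight
 * configuration documentation; the exact perf event syntax may differ
 * by kernel and perf version). The preset index selects one row of
 * afdo2_presets[], i.e. one strobing window/period pair:
 *
 *   # insmod coresight-cfg-sample.ko
 *   # perf record -e cs_etm/autofdo2,preset=1/u -- <workload>
 */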
| linux-master | samples/coresight/coresight-cfg-sample.c |
/******************************************************************************
* Intel Management Engine Interface (Intel MEI) Linux driver
* Intel MEI Interface Header
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Corporation.
* [email protected]
* http://www.intel.com
*
* BSD LICENSE
*
* Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>
#include <bits/wordsize.h>
#include <linux/mei.h>
/*****************************************************************************
* Intel Management Engine Interface
*****************************************************************************/
#define mei_msg(_me, fmt, ARGS...) do { \
if (_me->verbose) \
fprintf(stderr, fmt, ##ARGS); \
} while (0)
#define mei_err(_me, fmt, ARGS...) do { \
fprintf(stderr, "Error: " fmt, ##ARGS); \
} while (0)
struct mei {
uuid_le guid;
bool initialized;
bool verbose;
unsigned int buf_size;
unsigned char prot_ver;
int fd;
};
static void mei_deinit(struct mei *cl)
{
if (cl->fd != -1)
close(cl->fd);
cl->fd = -1;
cl->buf_size = 0;
cl->prot_ver = 0;
cl->initialized = false;
}
static bool mei_init(struct mei *me, const uuid_le *guid,
unsigned char req_protocol_version, bool verbose)
{
int result;
struct mei_client *cl;
struct mei_connect_client_data data;
me->verbose = verbose;
me->fd = open("/dev/mei0", O_RDWR);
if (me->fd == -1) {
mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
goto err;
}
memcpy(&me->guid, guid, sizeof(*guid));
memset(&data, 0, sizeof(data));
me->initialized = true;
memcpy(&data.in_client_uuid, &me->guid, sizeof(me->guid));
result = ioctl(me->fd, IOCTL_MEI_CONNECT_CLIENT, &data);
if (result) {
mei_err(me, "IOCTL_MEI_CONNECT_CLIENT receive message. err=%d\n", result);
goto err;
}
cl = &data.out_client_properties;
mei_msg(me, "max_message_length %d\n", cl->max_msg_length);
mei_msg(me, "protocol_version %d\n", cl->protocol_version);
if ((req_protocol_version > 0) &&
(cl->protocol_version != req_protocol_version)) {
mei_err(me, "Intel MEI protocol version not supported\n");
goto err;
}
me->buf_size = cl->max_msg_length;
me->prot_ver = cl->protocol_version;
return true;
err:
mei_deinit(me);
return false;
}
static ssize_t mei_recv_msg(struct mei *me, unsigned char *buffer,
ssize_t len, unsigned long timeout)
{
struct timeval tv;
fd_set set;
ssize_t rc;
tv.tv_sec = timeout / 1000;
tv.tv_usec = (timeout % 1000) * 1000;
mei_msg(me, "call read length = %zd\n", len);
FD_ZERO(&set);
FD_SET(me->fd, &set);
rc = select(me->fd + 1, &set, NULL, NULL, &tv);
if (rc > 0 && FD_ISSET(me->fd, &set)) {
mei_msg(me, "have reply\n");
} else if (rc == 0) {
rc = -1;
mei_err(me, "read failed on timeout\n");
goto out;
} else { /* rc < 0 */
rc = -errno;
mei_err(me, "read failed on select with status %zd %s\n",
rc, strerror(errno));
goto out;
}
rc = read(me->fd, buffer, len);
if (rc < 0) {
mei_err(me, "read failed with status %zd %s\n",
rc, strerror(errno));
goto out;
}
mei_msg(me, "read succeeded with result %zd\n", rc);
out:
if (rc < 0)
mei_deinit(me);
return rc;
}
static ssize_t mei_send_msg(struct mei *me, const unsigned char *buffer,
ssize_t len, unsigned long timeout)
{
ssize_t written;
ssize_t rc;
mei_msg(me, "call write length = %zd\n", len);
written = write(me->fd, buffer, len);
if (written < 0) {
rc = -errno;
mei_err(me, "write failed with status %zd %s\n",
written, strerror(errno));
goto out;
}
mei_msg(me, "write success\n");
rc = written;
out:
if (rc < 0)
mei_deinit(me);
return rc;
}
/***************************************************************************
* Intel Advanced Management Technology ME Client
***************************************************************************/
#define AMT_MAJOR_VERSION 1
#define AMT_MINOR_VERSION 1
#define AMT_STATUS_SUCCESS 0x0
#define AMT_STATUS_INTERNAL_ERROR 0x1
#define AMT_STATUS_NOT_READY 0x2
#define AMT_STATUS_INVALID_AMT_MODE 0x3
#define AMT_STATUS_INVALID_MESSAGE_LENGTH 0x4
#define AMT_STATUS_HOST_IF_EMPTY_RESPONSE 0x4000
#define AMT_STATUS_SDK_RESOURCES 0x1004
#define AMT_BIOS_VERSION_LEN 65
#define AMT_VERSIONS_NUMBER 50
#define AMT_UNICODE_STRING_LEN 20
struct amt_unicode_string {
uint16_t length;
char string[AMT_UNICODE_STRING_LEN];
} __attribute__((packed));
struct amt_version_type {
struct amt_unicode_string description;
struct amt_unicode_string version;
} __attribute__((packed));
struct amt_version {
uint8_t major;
uint8_t minor;
} __attribute__((packed));
struct amt_code_versions {
uint8_t bios[AMT_BIOS_VERSION_LEN];
uint32_t count;
struct amt_version_type versions[AMT_VERSIONS_NUMBER];
} __attribute__((packed));
/***************************************************************************
* Intel Advanced Management Technology Host Interface
***************************************************************************/
struct amt_host_if_msg_header {
struct amt_version version;
uint16_t _reserved;
uint32_t command;
uint32_t length;
} __attribute__((packed));
struct amt_host_if_resp_header {
struct amt_host_if_msg_header header;
uint32_t status;
unsigned char data[];
} __attribute__((packed));
const uuid_le MEI_IAMTHIF = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, \
0xac, 0xa8, 0x46, 0xe0, 0xff, 0x65, 0x81, 0x4c);
#define AMT_HOST_IF_CODE_VERSIONS_REQUEST 0x0400001A
#define AMT_HOST_IF_CODE_VERSIONS_RESPONSE 0x0480001A
const struct amt_host_if_msg_header CODE_VERSION_REQ = {
.version = {AMT_MAJOR_VERSION, AMT_MINOR_VERSION},
._reserved = 0,
.command = AMT_HOST_IF_CODE_VERSIONS_REQUEST,
.length = 0
};
struct amt_host_if {
struct mei mei_cl;
unsigned long send_timeout;
bool initialized;
};
static bool amt_host_if_init(struct amt_host_if *acmd,
unsigned long send_timeout, bool verbose)
{
acmd->send_timeout = (send_timeout) ? send_timeout : 20000;
acmd->initialized = mei_init(&acmd->mei_cl, &MEI_IAMTHIF, 0, verbose);
return acmd->initialized;
}
static void amt_host_if_deinit(struct amt_host_if *acmd)
{
mei_deinit(&acmd->mei_cl);
acmd->initialized = false;
}
static uint32_t amt_verify_code_versions(const struct amt_host_if_resp_header *resp)
{
uint32_t status = AMT_STATUS_SUCCESS;
struct amt_code_versions *code_ver;
size_t code_ver_len;
uint32_t ver_type_cnt;
uint32_t len;
uint32_t i;
code_ver = (struct amt_code_versions *)resp->data;
/* length - sizeof(status) */
code_ver_len = resp->header.length - sizeof(uint32_t);
ver_type_cnt = code_ver_len -
sizeof(code_ver->bios) -
sizeof(code_ver->count);
if (code_ver->count != ver_type_cnt / sizeof(struct amt_version_type)) {
status = AMT_STATUS_INTERNAL_ERROR;
goto out;
}
for (i = 0; i < code_ver->count; i++) {
len = code_ver->versions[i].description.length;
if (len >= AMT_UNICODE_STRING_LEN) {
status = AMT_STATUS_INTERNAL_ERROR;
goto out;
}
len = code_ver->versions[i].version.length;
if (len >= AMT_UNICODE_STRING_LEN ||
code_ver->versions[i].version.string[len] != '\0' ||
len != strlen(code_ver->versions[i].version.string)) {
status = AMT_STATUS_INTERNAL_ERROR;
goto out;
}
}
out:
return status;
}
static uint32_t amt_verify_response_header(uint32_t command,
const struct amt_host_if_msg_header *resp_hdr,
uint32_t response_size)
{
if (response_size < sizeof(struct amt_host_if_resp_header)) {
return AMT_STATUS_INTERNAL_ERROR;
} else if (response_size != (resp_hdr->length +
sizeof(struct amt_host_if_msg_header))) {
return AMT_STATUS_INTERNAL_ERROR;
} else if (resp_hdr->command != command) {
return AMT_STATUS_INTERNAL_ERROR;
} else if (resp_hdr->_reserved != 0) {
return AMT_STATUS_INTERNAL_ERROR;
} else if (resp_hdr->version.major != AMT_MAJOR_VERSION ||
resp_hdr->version.minor < AMT_MINOR_VERSION) {
return AMT_STATUS_INTERNAL_ERROR;
}
return AMT_STATUS_SUCCESS;
}
static uint32_t amt_host_if_call(struct amt_host_if *acmd,
const unsigned char *command, ssize_t command_sz,
uint8_t **read_buf, uint32_t rcmd,
unsigned int expected_sz)
{
uint32_t in_buf_sz;
ssize_t out_buf_sz;
ssize_t written;
uint32_t status;
struct amt_host_if_resp_header *msg_hdr;
in_buf_sz = acmd->mei_cl.buf_size;
*read_buf = (uint8_t *)malloc(sizeof(uint8_t) * in_buf_sz);
if (*read_buf == NULL)
return AMT_STATUS_SDK_RESOURCES;
memset(*read_buf, 0, in_buf_sz);
msg_hdr = (struct amt_host_if_resp_header *)*read_buf;
written = mei_send_msg(&acmd->mei_cl,
command, command_sz, acmd->send_timeout);
if (written != command_sz)
return AMT_STATUS_INTERNAL_ERROR;
out_buf_sz = mei_recv_msg(&acmd->mei_cl, *read_buf, in_buf_sz, 2000);
if (out_buf_sz <= 0)
return AMT_STATUS_HOST_IF_EMPTY_RESPONSE;
status = msg_hdr->status;
if (status != AMT_STATUS_SUCCESS)
return status;
status = amt_verify_response_header(rcmd,
&msg_hdr->header, out_buf_sz);
if (status != AMT_STATUS_SUCCESS)
return status;
if (expected_sz && expected_sz != out_buf_sz)
return AMT_STATUS_INTERNAL_ERROR;
return AMT_STATUS_SUCCESS;
}
static uint32_t amt_get_code_versions(struct amt_host_if *cmd,
struct amt_code_versions *versions)
{
struct amt_host_if_resp_header *response = NULL;
uint32_t status;
status = amt_host_if_call(cmd,
(const unsigned char *)&CODE_VERSION_REQ,
sizeof(CODE_VERSION_REQ),
(uint8_t **)&response,
AMT_HOST_IF_CODE_VERSIONS_RESPONSE, 0);
if (status != AMT_STATUS_SUCCESS)
goto out;
status = amt_verify_code_versions(response);
if (status != AMT_STATUS_SUCCESS)
goto out;
memcpy(versions, response->data, sizeof(struct amt_code_versions));
out:
if (response != NULL)
free(response);
return status;
}
/************************** end of amt_host_if_command ***********************/
int main(int argc, char **argv)
{
struct amt_code_versions ver;
struct amt_host_if acmd;
unsigned int i;
uint32_t status;
int ret;
bool verbose;
verbose = (argc > 1 && strcmp(argv[1], "-v") == 0);
if (!amt_host_if_init(&acmd, 5000, verbose)) {
ret = 1;
goto out;
}
status = amt_get_code_versions(&acmd, &ver);
amt_host_if_deinit(&acmd);
switch (status) {
case AMT_STATUS_HOST_IF_EMPTY_RESPONSE:
printf("Intel AMT: DISABLED\n");
ret = 0;
break;
case AMT_STATUS_SUCCESS:
printf("Intel AMT: ENABLED\n");
for (i = 0; i < ver.count; i++) {
printf("%s:\t%s\n", ver.versions[i].description.string,
ver.versions[i].version.string);
}
ret = 0;
break;
default:
printf("An error has occurred\n");
ret = 1;
break;
}
out:
return ret;
}
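/*
 * Example run (illustrative; the version lines depend entirely on the
 * platform's ME firmware, and "Flash" is a hypothetical description
 * string taken as an example of a versions[] entry):
 *
 *   # ./mei-amt-version
 *   Intel AMT: ENABLED
 *   Flash:	11.8.50
 *
 * On platforms without AMT provisioned the tool prints
 * "Intel AMT: DISABLED" instead.
 */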
| linux-master | samples/mei/mei-amt-version.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Here's a sample kernel module showing the use of kprobes to dump a
* stack trace and selected registers when kernel_clone() is called.
*
* For more information on theory of operation of kprobes, see
* Documentation/trace/kprobes.rst
*
* You will see the trace data in /var/log/messages and on the console
* whenever kernel_clone() is invoked to create a new process.
*/
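/*
 * The probed symbol can be changed at load time (the symbol below is
 * illustrative and must be a kprobe-able function on your kernel):
 *
 *   # insmod kprobe_example.ko symbol=do_sys_openat2
 */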
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
static char symbol[KSYM_NAME_LEN] = "kernel_clone";
module_param_string(symbol, symbol, KSYM_NAME_LEN, 0644);
/* For each probe you need to allocate a kprobe structure */
static struct kprobe kp = {
.symbol_name = symbol,
};
/* kprobe pre_handler: called just before the probed instruction is executed */
static int __kprobes handler_pre(struct kprobe *p, struct pt_regs *regs)
{
#ifdef CONFIG_X86
pr_info("<%s> p->addr = 0x%p, ip = %lx, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->ip, regs->flags);
#endif
#ifdef CONFIG_PPC
pr_info("<%s> p->addr = 0x%p, nip = 0x%lx, msr = 0x%lx\n",
p->symbol_name, p->addr, regs->nip, regs->msr);
#endif
#ifdef CONFIG_MIPS
pr_info("<%s> p->addr = 0x%p, epc = 0x%lx, status = 0x%lx\n",
p->symbol_name, p->addr, regs->cp0_epc, regs->cp0_status);
#endif
#ifdef CONFIG_ARM64
pr_info("<%s> p->addr = 0x%p, pc = 0x%lx, pstate = 0x%lx\n",
p->symbol_name, p->addr, (long)regs->pc, (long)regs->pstate);
#endif
#ifdef CONFIG_ARM
pr_info("<%s> p->addr = 0x%p, pc = 0x%lx, cpsr = 0x%lx\n",
p->symbol_name, p->addr, (long)regs->ARM_pc, (long)regs->ARM_cpsr);
#endif
#ifdef CONFIG_RISCV
pr_info("<%s> p->addr = 0x%p, pc = 0x%lx, status = 0x%lx\n",
p->symbol_name, p->addr, regs->epc, regs->status);
#endif
#ifdef CONFIG_S390
pr_info("<%s> p->addr, 0x%p, ip = 0x%lx, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->psw.addr, regs->flags);
#endif
#ifdef CONFIG_LOONGARCH
pr_info("<%s> p->addr = 0x%p, era = 0x%lx, estat = 0x%lx\n",
p->symbol_name, p->addr, regs->csr_era, regs->csr_estat);
#endif
/* A dump_stack() here will give a stack backtrace */
return 0;
}
/* kprobe post_handler: called after the probed instruction is executed */
static void __kprobes handler_post(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
#ifdef CONFIG_X86
pr_info("<%s> p->addr = 0x%p, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->flags);
#endif
#ifdef CONFIG_PPC
pr_info("<%s> p->addr = 0x%p, msr = 0x%lx\n",
p->symbol_name, p->addr, regs->msr);
#endif
#ifdef CONFIG_MIPS
pr_info("<%s> p->addr = 0x%p, status = 0x%lx\n",
p->symbol_name, p->addr, regs->cp0_status);
#endif
#ifdef CONFIG_ARM64
pr_info("<%s> p->addr = 0x%p, pstate = 0x%lx\n",
p->symbol_name, p->addr, (long)regs->pstate);
#endif
#ifdef CONFIG_ARM
pr_info("<%s> p->addr = 0x%p, cpsr = 0x%lx\n",
p->symbol_name, p->addr, (long)regs->ARM_cpsr);
#endif
#ifdef CONFIG_RISCV
pr_info("<%s> p->addr = 0x%p, status = 0x%lx\n",
p->symbol_name, p->addr, regs->status);
#endif
#ifdef CONFIG_S390
pr_info("<%s> p->addr, 0x%p, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->flags);
#endif
#ifdef CONFIG_LOONGARCH
pr_info("<%s> p->addr = 0x%p, estat = 0x%lx\n",
p->symbol_name, p->addr, regs->csr_estat);
#endif
}
static int __init kprobe_init(void)
{
int ret;
kp.pre_handler = handler_pre;
kp.post_handler = handler_post;
ret = register_kprobe(&kp);
if (ret < 0) {
pr_err("register_kprobe failed, returned %d\n", ret);
return ret;
}
pr_info("Planted kprobe at %p\n", kp.addr);
return 0;
}
static void __exit kprobe_exit(void)
{
unregister_kprobe(&kp);
pr_info("kprobe at %p unregistered\n", kp.addr);
}
module_init(kprobe_init)
module_exit(kprobe_exit)
MODULE_LICENSE("GPL");
| linux-master | samples/kprobes/kprobe_example.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* kretprobe_example.c
*
* Here's a sample kernel module showing the use of return probes to
* report the return value and total time taken for probed function
* to run.
*
* usage: insmod kretprobe_example.ko func=<func_name>
*
* If no func_name is specified, kernel_clone is instrumented
*
* For more information on theory of operation of kretprobes, see
* Documentation/trace/kprobes.rst
*
* Build and insert the kernel module as done in the kprobe example.
* You will see the trace data in /var/log/messages and on the console
* whenever the probed function returns. (Some messages may be suppressed
* if syslogd is configured to eliminate duplicate messages.)
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ktime.h>
#include <linux/sched.h>
static char func_name[KSYM_NAME_LEN] = "kernel_clone";
module_param_string(func, func_name, KSYM_NAME_LEN, 0644);
MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the"
" function's execution time");
/* per-instance private data */
struct my_data {
ktime_t entry_stamp;
};
/* Here we use the entry_handler to timestamp function entry */
static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
struct my_data *data;
if (!current->mm)
return 1; /* Skip kernel threads */
data = (struct my_data *)ri->data;
data->entry_stamp = ktime_get();
return 0;
}
NOKPROBE_SYMBOL(entry_handler);
/*
* Return-probe handler: Log the return value and duration. Duration may turn
* out to be zero consistently, depending upon the granularity of time
* accounting on the platform.
*/
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long retval = regs_return_value(regs);
struct my_data *data = (struct my_data *)ri->data;
s64 delta;
ktime_t now;
now = ktime_get();
delta = ktime_to_ns(ktime_sub(now, data->entry_stamp));
pr_info("%s returned %lu and took %lld ns to execute\n",
func_name, retval, (long long)delta);
return 0;
}
NOKPROBE_SYMBOL(ret_handler);
static struct kretprobe my_kretprobe = {
.handler = ret_handler,
.entry_handler = entry_handler,
.data_size = sizeof(struct my_data),
/* Probe up to 20 instances concurrently. */
.maxactive = 20,
};
static int __init kretprobe_init(void)
{
int ret;
my_kretprobe.kp.symbol_name = func_name;
ret = register_kretprobe(&my_kretprobe);
if (ret < 0) {
pr_err("register_kretprobe failed, returned %d\n", ret);
return ret;
}
pr_info("Planted return probe at %s: %p\n",
my_kretprobe.kp.symbol_name, my_kretprobe.kp.addr);
return 0;
}
static void __exit kretprobe_exit(void)
{
unregister_kretprobe(&my_kretprobe);
pr_info("kretprobe at %p unregistered\n", my_kretprobe.kp.addr);
/* nmissed > 0 suggests that maxactive was set too low. */
pr_info("Missed probing %d instances of %s\n",
my_kretprobe.nmissed, my_kretprobe.kp.symbol_name);
}
module_init(kretprobe_init)
module_exit(kretprobe_exit)
MODULE_LICENSE("GPL");
| linux-master | samples/kprobes/kretprobe_example.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* data_breakpoint.c - Sample HW Breakpoint file to watch kernel data address
*
* usage: insmod data_breakpoint.ko ksym=<ksym_name>
*
* This file is a kernel module that places a breakpoint over ksym_name kernel
* variable using Hardware Breakpoint register. The corresponding handler which
* prints a backtrace is invoked every time a write operation is performed on
* that variable.
*
* Copyright (C) IBM Corporation, 2009
*
* Author: K.Prasad <[email protected]>
*/
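/*
 * Example: the default watches "jiffies", which is written on every
 * timer tick, so a backtrace should appear almost immediately after
 * loading; any other writable kernel symbol can be named instead:
 *
 *   # insmod data_breakpoint.ko
 *   # insmod data_breakpoint.ko ksym=<some_written_symbol>
 */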
#include <linux/module.h> /* Needed by all modules */
#include <linux/kernel.h> /* Needed for KERN_INFO */
#include <linux/init.h> /* Needed for the macros */
#include <linux/kallsyms.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
static struct perf_event * __percpu *sample_hbp;
static char ksym_name[KSYM_NAME_LEN] = "jiffies";
module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO);
MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any"
" write operations on the kernel symbol");
static void sample_hbp_handler(struct perf_event *bp,
struct perf_sample_data *data,
struct pt_regs *regs)
{
printk(KERN_INFO "%s value is changed\n", ksym_name);
dump_stack();
printk(KERN_INFO "Dump stack from sample_hbp_handler\n");
}
static int __init hw_break_module_init(void)
{
int ret;
struct perf_event_attr attr;
void *addr = __symbol_get(ksym_name);
if (!addr)
return -ENXIO;
hw_breakpoint_init(&attr);
attr.bp_addr = (unsigned long)addr;
attr.bp_len = HW_BREAKPOINT_LEN_4;
attr.bp_type = HW_BREAKPOINT_W;
sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler, NULL);
if (IS_ERR((void __force *)sample_hbp)) {
ret = PTR_ERR((void __force *)sample_hbp);
goto fail;
}
printk(KERN_INFO "HW Breakpoint for %s write installed\n", ksym_name);
return 0;
fail:
printk(KERN_INFO "Breakpoint registration failed\n");
return ret;
}
static void __exit hw_break_module_exit(void)
{
unregister_wide_hw_breakpoint(sample_hbp);
#ifdef CONFIG_MODULE_UNLOAD
__symbol_put(ksym_name);
#endif
printk(KERN_INFO "HW Breakpoint for %s write uninstalled\n", ksym_name);
}
module_init(hw_break_module_init);
module_exit(hw_break_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("K.Prasad");
MODULE_DESCRIPTION("ksym breakpoint");
| linux-master | samples/hw_breakpoint/data_breakpoint.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/android/binder.h>
#include <linux/android/binderfs.h>
int main(int argc, char *argv[])
{
int fd, ret, saved_errno;
struct binderfs_device device = { 0 };
ret = unshare(CLONE_NEWNS);
if (ret < 0) {
fprintf(stderr, "%s - Failed to unshare mount namespace\n",
strerror(errno));
exit(EXIT_FAILURE);
}
ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
if (ret < 0) {
fprintf(stderr, "%s - Failed to mount / as private\n",
strerror(errno));
exit(EXIT_FAILURE);
}
ret = mkdir("/dev/binderfs", 0755);
if (ret < 0 && errno != EEXIST) {
fprintf(stderr, "%s - Failed to create binderfs mountpoint\n",
strerror(errno));
exit(EXIT_FAILURE);
}
ret = mount(NULL, "/dev/binderfs", "binder", 0, 0);
if (ret < 0) {
fprintf(stderr, "%s - Failed to mount binderfs\n",
strerror(errno));
exit(EXIT_FAILURE);
}
memcpy(device.name, "my-binder", strlen("my-binder"));
fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
if (fd < 0) {
fprintf(stderr, "%s - Failed to open binder-control device\n",
strerror(errno));
exit(EXIT_FAILURE);
}
ret = ioctl(fd, BINDER_CTL_ADD, &device);
saved_errno = errno;
close(fd);
errno = saved_errno;
if (ret < 0) {
fprintf(stderr, "%s - Failed to allocate new binder device\n",
strerror(errno));
exit(EXIT_FAILURE);
}
printf("Allocated new binder device with major %d, minor %d, and name %s\n",
device.major, device.minor, device.name);
ret = unlink("/dev/binderfs/my-binder");
if (ret < 0) {
fprintf(stderr, "%s - Failed to delete binder device\n",
strerror(errno));
exit(EXIT_FAILURE);
}
/* Cleanup happens when the mount namespace dies. */
exit(EXIT_SUCCESS);
}
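/*
 * Example output (illustrative; the major/minor numbers are assigned
 * dynamically by the kernel):
 *
 *   Allocated new binder device with major 511, minor 3, and name my-binder
 */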
| linux-master | samples/binderfs/binderfs_example.c |
/*
* Created by: Jason Wessel <[email protected]>
*
* Copyright (c) 2010 Wind River Systems, Inc. All Rights Reserved.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/kdb.h>
/*
* All kdb shell command call backs receive argc and argv, where
* argv[0] is the command the end user typed
*/
static int kdb_hello_cmd(int argc, const char **argv)
{
if (argc > 1)
return KDB_ARGCOUNT;
if (argc)
kdb_printf("Hello %s.\n", argv[1]);
else
kdb_printf("Hello world!\n");
return 0;
}
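/*
 * Example kdb session once the module is loaded (entered e.g. via
 * SysRq-g on a kdb-enabled kernel):
 *
 *   kdb> hello
 *   Hello world!
 *   kdb> hello Linus
 *   Hello Linus.
 */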
static kdbtab_t hello_cmd = {
.name = "hello",
.func = kdb_hello_cmd,
.usage = "[string]",
.help = "Say Hello World or Hello [string]",
};
static int __init kdb_hello_cmd_init(void)
{
/*
* Registration of a dynamically added kdb command is done with
* kdb_register().
*/
kdb_register(&hello_cmd);
return 0;
}
static void __exit kdb_hello_cmd_exit(void)
{
kdb_unregister(&hello_cmd);
}
module_init(kdb_hello_cmd_init);
module_exit(kdb_hello_cmd_exit);
MODULE_AUTHOR("WindRiver");
MODULE_DESCRIPTION("KDB example to add a hello command");
MODULE_LICENSE("GPL");
| linux-master | samples/kdb/kdb_hello.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
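/*
 * Minimal watchdog feeder: writes one byte to /dev/watchdog every 10
 * seconds so the hardware timer never expires. Note (a caveat about
 * the watchdog API, not behaviour added by this code): drivers that
 * support "magic close" only disarm the timer if 'V' is written before
 * close(), which this sample does not do, so exiting the loop may
 * still lead to a reset.
 */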
int main(void)
{
int fd = open("/dev/watchdog", O_WRONLY);
int ret = 0;
if (fd == -1) {
perror("watchdog");
exit(EXIT_FAILURE);
}
while (1) {
ret = write(fd, "\0", 1);
if (ret != 1) {
ret = -1;
break;
}
sleep(10);
}
close(fd);
return ret;
}
| linux-master | samples/watchdog/watchdog-simple.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This is a V4L2 PCI Skeleton Driver. It gives an initial skeleton source
* for use with other PCI drivers.
*
* This skeleton PCI driver assumes that the card has an S-Video connector as
* input 0 and an HDMI connector as input 1.
*
* Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
MODULE_DESCRIPTION("V4L2 PCI Skeleton Driver");
MODULE_AUTHOR("Hans Verkuil");
MODULE_LICENSE("GPL v2");
/**
* struct skeleton - All internal data for one instance of device
* @pdev: PCI device
* @v4l2_dev: top-level v4l2 device struct
* @vdev: video node structure
* @ctrl_handler: control handler structure
* @lock: ioctl serialization mutex
* @std: current SDTV standard
* @timings: current HDTV timings
* @format: current pix format
* @input: current video input (0 = SDTV, 1 = HDTV)
* @queue: vb2 video capture queue
* @qlock: spinlock controlling access to buf_list and sequence
* @buf_list: list of buffers queued for DMA
* @field: the field (TOP/BOTTOM/other) of the current buffer
* @sequence: frame sequence counter
*/
struct skeleton {
struct pci_dev *pdev;
struct v4l2_device v4l2_dev;
struct video_device vdev;
struct v4l2_ctrl_handler ctrl_handler;
struct mutex lock;
v4l2_std_id std;
struct v4l2_dv_timings timings;
struct v4l2_pix_format format;
unsigned input;
struct vb2_queue queue;
spinlock_t qlock;
struct list_head buf_list;
unsigned field;
unsigned sequence;
};
struct skel_buffer {
struct vb2_v4l2_buffer vb;
struct list_head list;
};
static inline struct skel_buffer *to_skel_buffer(struct vb2_v4l2_buffer *vbuf)
{
return container_of(vbuf, struct skel_buffer, vb);
}
static const struct pci_device_id skeleton_pci_tbl[] = {
/* { PCI_DEVICE(PCI_VENDOR_ID_, PCI_DEVICE_ID_) }, */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, skeleton_pci_tbl);
/*
* HDTV: this structure has the capabilities of the HDTV receiver.
* It is used to constrain the huge list of possible formats based
* upon the hardware capabilities.
*/
static const struct v4l2_dv_timings_cap skel_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
V4L2_INIT_BT_TIMINGS(
720, 1920, /* min/max width */
480, 1080, /* min/max height */
27000000, 74250000, /* min/max pixelclock*/
V4L2_DV_BT_STD_CEA861, /* Supported standards */
/* capabilities */
V4L2_DV_BT_CAP_INTERLACED | V4L2_DV_BT_CAP_PROGRESSIVE
)
};
/*
* Supported SDTV standards. This does the same job as skel_timings_cap, but
* for standard TV formats.
*/
#define SKEL_TVNORMS V4L2_STD_ALL
/*
* Interrupt handler: typically interrupts happen after a new frame has been
* captured. It is the job of the handler to remove the new frame from the
* internal list and give it back to the vb2 framework, updating the sequence
* counter, field and timestamp at the same time.
*/
static irqreturn_t skeleton_irq(int irq, void *dev_id)
{
#ifdef TODO
struct skeleton *skel = dev_id;
/* handle interrupt */
/* Once a new frame has been captured, mark it as done like this: */
if (captured_new_frame) {
...
spin_lock(&skel->qlock);
list_del(&new_buf->list);
spin_unlock(&skel->qlock);
new_buf->vb.vb2_buf.timestamp = ktime_get_ns();
new_buf->vb.sequence = skel->sequence++;
new_buf->vb.field = skel->field;
if (skel->format.field == V4L2_FIELD_ALTERNATE) {
if (skel->field == V4L2_FIELD_BOTTOM)
skel->field = V4L2_FIELD_TOP;
else if (skel->field == V4L2_FIELD_TOP)
skel->field = V4L2_FIELD_BOTTOM;
}
vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
#endif
return IRQ_HANDLED;
}
/*
* Setup the constraints of the queue: besides setting the number of planes
* per buffer and the size and allocation context of each plane, it also
* checks if sufficient buffers have been allocated. Usually 3 is a good
* minimum number: many DMA engines need a minimum of 2 buffers in the
* queue and you need to have another available for userspace processing.
*/
static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct skeleton *skel = vb2_get_drv_priv(vq);
skel->field = skel->format.field;
if (skel->field == V4L2_FIELD_ALTERNATE) {
/*
* You cannot use read() with FIELD_ALTERNATE since the field
* information (TOP/BOTTOM) cannot be passed back to the user.
*/
if (vb2_fileio_is_active(vq))
return -EINVAL;
skel->field = V4L2_FIELD_TOP;
}
if (vq->num_buffers + *nbuffers < 3)
*nbuffers = 3 - vq->num_buffers;
if (*nplanes)
return sizes[0] < skel->format.sizeimage ? -EINVAL : 0;
*nplanes = 1;
sizes[0] = skel->format.sizeimage;
return 0;
}
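/*
 * Worked example of the minimum-buffer check above: if one buffer is
 * already allocated and userspace requests one more, *nbuffers is
 * raised to 2 so the queue ends up with the minimum of three buffers.
 */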
/*
* Prepare the buffer for queueing to the DMA engine: check and set the
* payload size.
*/
static int buffer_prepare(struct vb2_buffer *vb)
{
struct skeleton *skel = vb2_get_drv_priv(vb->vb2_queue);
unsigned long size = skel->format.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
dev_err(&skel->pdev->dev, "buffer too small (%lu < %lu)\n",
vb2_plane_size(vb, 0), size);
return -EINVAL;
}
vb2_set_plane_payload(vb, 0, size);
return 0;
}
/*
* Queue this buffer to the DMA engine.
*/
static void buffer_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct skeleton *skel = vb2_get_drv_priv(vb->vb2_queue);
struct skel_buffer *buf = to_skel_buffer(vbuf);
unsigned long flags;
spin_lock_irqsave(&skel->qlock, flags);
list_add_tail(&buf->list, &skel->buf_list);
/* TODO: Update any DMA pointers if necessary */
spin_unlock_irqrestore(&skel->qlock, flags);
}
static void return_all_buffers(struct skeleton *skel,
enum vb2_buffer_state state)
{
struct skel_buffer *buf, *node;
unsigned long flags;
spin_lock_irqsave(&skel->qlock, flags);
list_for_each_entry_safe(buf, node, &skel->buf_list, list) {
vb2_buffer_done(&buf->vb.vb2_buf, state);
list_del(&buf->list);
}
spin_unlock_irqrestore(&skel->qlock, flags);
}
/*
* Start streaming. First check if the minimum number of buffers have been
* queued. If not, then return -ENOBUFS and the vb2 framework will call
* this function again the next time a buffer has been queued until enough
* buffers are available to actually start the DMA engine.
*/
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct skeleton *skel = vb2_get_drv_priv(vq);
int ret = 0;
skel->sequence = 0;
/* TODO: start DMA */
if (ret) {
/*
* In case of an error, return all active buffers to the
* QUEUED state
*/
return_all_buffers(skel, VB2_BUF_STATE_QUEUED);
}
return ret;
}
/*
* Stop the DMA engine. Any remaining buffers in the DMA queue are dequeued
* and passed on to the vb2 framework marked as STATE_ERROR.
*/
static void stop_streaming(struct vb2_queue *vq)
{
struct skeleton *skel = vb2_get_drv_priv(vq);
/* TODO: stop DMA */
/* Release all active buffers */
return_all_buffers(skel, VB2_BUF_STATE_ERROR);
}
/*
* The vb2 queue ops. Note that since q->lock is set we can use the standard
* vb2_ops_wait_prepare/finish helper functions. If q->lock would be NULL,
* then this driver would have to provide these ops.
*/
static const struct vb2_ops skel_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
/*
* Required ioctl querycap. Note that the version field is prefilled with
* the version of the kernel.
*/
static int skeleton_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct skeleton *skel = video_drvdata(file);
strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
strlcpy(cap->card, "V4L2 PCI Skeleton", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
pci_name(skel->pdev));
return 0;
}
/*
* Helper function to check and correct struct v4l2_pix_format. It's used
* not only in VIDIOC_TRY/S_FMT, but also elsewhere if changes to the SDTV
* standard, HDTV timings or the video input would require updating the
* current format.
*/
static void skeleton_fill_pix_format(struct skeleton *skel,
struct v4l2_pix_format *pix)
{
pix->pixelformat = V4L2_PIX_FMT_YUYV;
if (skel->input == 0) {
/* S-Video input */
pix->width = 720;
pix->height = (skel->std & V4L2_STD_525_60) ? 480 : 576;
pix->field = V4L2_FIELD_INTERLACED;
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
} else {
/* HDMI input */
pix->width = skel->timings.bt.width;
pix->height = skel->timings.bt.height;
if (skel->timings.bt.interlaced) {
pix->field = V4L2_FIELD_ALTERNATE;
pix->height /= 2;
} else {
pix->field = V4L2_FIELD_NONE;
}
pix->colorspace = V4L2_COLORSPACE_REC709;
}
/*
* The YUYV format is four bytes for every two pixels, so bytesperline
* is width * 2.
*/
pix->bytesperline = pix->width * 2;
pix->sizeimage = pix->bytesperline * pix->height;
pix->priv = 0;
}
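/*
 * Worked example for the S-Video input with the 625-line default:
 * 720 x 576 YUYV gives bytesperline = 720 * 2 = 1440 and
 * sizeimage = 1440 * 576 = 829440 bytes per frame.
 */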
static int skeleton_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct skeleton *skel = video_drvdata(file);
struct v4l2_pix_format *pix = &f->fmt.pix;
/*
* Due to historical reasons providing try_fmt with an unsupported
* pixelformat will return -EINVAL for video receivers. Webcam drivers,
* however, will silently correct the pixelformat. Some video capture
* applications rely on this behavior...
*/
if (pix->pixelformat != V4L2_PIX_FMT_YUYV)
return -EINVAL;
skeleton_fill_pix_format(skel, pix);
return 0;
}
static int skeleton_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct skeleton *skel = video_drvdata(file);
int ret;
ret = skeleton_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
/*
* It is not allowed to change the format while buffers for use with
* streaming have already been allocated.
*/
if (vb2_is_busy(&skel->queue))
return -EBUSY;
/* TODO: change format */
skel->format = f->fmt.pix;
return 0;
}
static int skeleton_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct skeleton *skel = video_drvdata(file);
f->fmt.pix = skel->format;
return 0;
}
static int skeleton_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (f->index != 0)
return -EINVAL;
f->pixelformat = V4L2_PIX_FMT_YUYV;
return 0;
}
static int skeleton_s_std(struct file *file, void *priv, v4l2_std_id std)
{
struct skeleton *skel = video_drvdata(file);
/* S_STD is not supported on the HDMI input */
if (skel->input)
return -ENODATA;
/*
* No change, so just return. Some applications call S_STD again after
* the buffers for streaming have been set up, so we have to allow for
* this behavior.
*/
if (std == skel->std)
return 0;
/*
* Changing the standard implies a format change, which is not allowed
* while buffers for use with streaming have already been allocated.
*/
if (vb2_is_busy(&skel->queue))
return -EBUSY;
/* TODO: handle changing std */
skel->std = std;
/* Update the internal format */
skeleton_fill_pix_format(skel, &skel->format);
return 0;
}
static int skeleton_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
struct skeleton *skel = video_drvdata(file);
/* G_STD is not supported on the HDMI input */
if (skel->input)
return -ENODATA;
*std = skel->std;
return 0;
}
/*
* Query the current standard as seen by the hardware. This function shall
* never actually change the standard, it just detects and reports.
* The framework will initially set *std to tvnorms (i.e. the set of
* supported standards by this input), and this function should just AND
* this value. If there is no signal, then *std should be set to 0.
*/
static int skeleton_querystd(struct file *file, void *priv, v4l2_std_id *std)
{
struct skeleton *skel = video_drvdata(file);
/* QUERY_STD is not supported on the HDMI input */
if (skel->input)
return -ENODATA;
#ifdef TODO
/*
* Query currently seen standard. Initial value of *std is
* V4L2_STD_ALL. This function should look something like this:
*/
get_signal_info();
if (no_signal) {
*std = 0;
return 0;
}
/* Use signal information to reduce the number of possible standards */
if (signal_has_525_lines)
*std &= V4L2_STD_525_60;
else
*std &= V4L2_STD_625_50;
#endif
return 0;
}
static int skeleton_s_dv_timings(struct file *file, void *_fh,
struct v4l2_dv_timings *timings)
{
struct skeleton *skel = video_drvdata(file);
/* S_DV_TIMINGS is not supported on the S-Video input */
if (skel->input == 0)
return -ENODATA;
/* Quick sanity check */
if (!v4l2_valid_dv_timings(timings, &skel_timings_cap, NULL, NULL))
return -EINVAL;
/* Check if the timings are part of the CEA-861 timings. */
if (!v4l2_find_dv_timings_cap(timings, &skel_timings_cap,
0, NULL, NULL))
return -EINVAL;
/* Return 0 if the new timings are the same as the current timings. */
if (v4l2_match_dv_timings(timings, &skel->timings, 0, false))
return 0;
/*
* Changing the timings implies a format change, which is not allowed
* while buffers for use with streaming have already been allocated.
*/
if (vb2_is_busy(&skel->queue))
return -EBUSY;
/* TODO: Configure new timings */
/* Save timings */
skel->timings = *timings;
/* Update the internal format */
skeleton_fill_pix_format(skel, &skel->format);
return 0;
}
static int skeleton_g_dv_timings(struct file *file, void *_fh,
struct v4l2_dv_timings *timings)
{
struct skeleton *skel = video_drvdata(file);
/* G_DV_TIMINGS is not supported on the S-Video input */
if (skel->input == 0)
return -ENODATA;
*timings = skel->timings;
return 0;
}
static int skeleton_enum_dv_timings(struct file *file, void *_fh,
struct v4l2_enum_dv_timings *timings)
{
struct skeleton *skel = video_drvdata(file);
/* ENUM_DV_TIMINGS is not supported on the S-Video input */
if (skel->input == 0)
return -ENODATA;
return v4l2_enum_dv_timings_cap(timings, &skel_timings_cap,
NULL, NULL);
}
/*
* Query the current timings as seen by the hardware. This function shall
* never actually change the timings, it just detects and reports.
* If no signal is detected, then return -ENOLINK. If the hardware cannot
* lock to the signal, then return -ENOLCK. If the signal is out of range
* of the capabilities of the system (e.g., it is possible that the receiver
* can lock but that the DMA engine it is connected to cannot handle
* pixelclocks above a certain frequency), then -ERANGE is returned.
*/
static int skeleton_query_dv_timings(struct file *file, void *_fh,
struct v4l2_dv_timings *timings)
{
struct skeleton *skel = video_drvdata(file);
/* QUERY_DV_TIMINGS is not supported on the S-Video input */
if (skel->input == 0)
return -ENODATA;
#ifdef TODO
/*
* Query currently seen timings. This function should look
* something like this:
*/
detect_timings();
if (no_signal)
return -ENOLINK;
if (cannot_lock_to_signal)
return -ENOLCK;
if (signal_out_of_range_of_capabilities)
return -ERANGE;
/* Useful for debugging */
v4l2_print_dv_timings(skel->v4l2_dev.name, "query_dv_timings:",
timings, true);
#endif
return 0;
}
static int skeleton_dv_timings_cap(struct file *file, void *fh,
struct v4l2_dv_timings_cap *cap)
{
struct skeleton *skel = video_drvdata(file);
/* DV_TIMINGS_CAP is not supported on the S-Video input */
if (skel->input == 0)
return -ENODATA;
*cap = skel_timings_cap;
return 0;
}
static int skeleton_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
if (i->index > 1)
return -EINVAL;
i->type = V4L2_INPUT_TYPE_CAMERA;
if (i->index == 0) {
i->std = SKEL_TVNORMS;
strlcpy(i->name, "S-Video", sizeof(i->name));
i->capabilities = V4L2_IN_CAP_STD;
} else {
i->std = 0;
strlcpy(i->name, "HDMI", sizeof(i->name));
i->capabilities = V4L2_IN_CAP_DV_TIMINGS;
}
return 0;
}
static int skeleton_s_input(struct file *file, void *priv, unsigned int i)
{
struct skeleton *skel = video_drvdata(file);
if (i > 1)
return -EINVAL;
/*
* Changing the input implies a format change, which is not allowed
* while buffers for use with streaming have already been allocated.
*/
if (vb2_is_busy(&skel->queue))
return -EBUSY;
skel->input = i;
/*
* Update tvnorms. The tvnorms value is used by the core to implement
* VIDIOC_ENUMSTD so it has to be correct. If tvnorms == 0, then
* ENUMSTD will return -ENODATA.
*/
skel->vdev.tvnorms = i ? 0 : SKEL_TVNORMS;
/* Update the internal format */
skeleton_fill_pix_format(skel, &skel->format);
return 0;
}
static int skeleton_g_input(struct file *file, void *priv, unsigned int *i)
{
struct skeleton *skel = video_drvdata(file);
*i = skel->input;
return 0;
}
/* The control handler. */
static int skeleton_s_ctrl(struct v4l2_ctrl *ctrl)
{
/*struct skeleton *skel =
container_of(ctrl->handler, struct skeleton, ctrl_handler);*/
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
/* TODO: set brightness to ctrl->val */
break;
case V4L2_CID_CONTRAST:
/* TODO: set contrast to ctrl->val */
break;
case V4L2_CID_SATURATION:
/* TODO: set saturation to ctrl->val */
break;
case V4L2_CID_HUE:
/* TODO: set hue to ctrl->val */
break;
default:
return -EINVAL;
}
return 0;
}
/* ------------------------------------------------------------------
File operations for the device
------------------------------------------------------------------*/
static const struct v4l2_ctrl_ops skel_ctrl_ops = {
.s_ctrl = skeleton_s_ctrl,
};
/*
* The set of all supported ioctls. Note that all the streaming ioctls
* use the vb2 helper functions that take care of all the locking and
* that also do ownership tracking (i.e. only the filehandle that requested
* the buffers can call the streaming ioctls, all other filehandles will
* receive -EBUSY if they attempt to call the same streaming ioctls).
*
* The last three ioctls also use standard helper functions: these implement
* standard behavior for drivers with controls.
*/
static const struct v4l2_ioctl_ops skel_ioctl_ops = {
.vidioc_querycap = skeleton_querycap,
.vidioc_try_fmt_vid_cap = skeleton_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = skeleton_s_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = skeleton_g_fmt_vid_cap,
.vidioc_enum_fmt_vid_cap = skeleton_enum_fmt_vid_cap,
.vidioc_g_std = skeleton_g_std,
.vidioc_s_std = skeleton_s_std,
.vidioc_querystd = skeleton_querystd,
.vidioc_s_dv_timings = skeleton_s_dv_timings,
.vidioc_g_dv_timings = skeleton_g_dv_timings,
.vidioc_enum_dv_timings = skeleton_enum_dv_timings,
.vidioc_query_dv_timings = skeleton_query_dv_timings,
.vidioc_dv_timings_cap = skeleton_dv_timings_cap,
.vidioc_enum_input = skeleton_enum_input,
.vidioc_g_input = skeleton_g_input,
.vidioc_s_input = skeleton_s_input,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = v4l2_ctrl_log_status,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/*
* The set of file operations. Note that all these ops are standard core
* helper functions.
*/
static const struct v4l2_file_operations skel_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
.unlocked_ioctl = video_ioctl2,
.read = vb2_fop_read,
.mmap = vb2_fop_mmap,
.poll = vb2_fop_poll,
};
/*
* The initial setup of this device instance. Note that the initial state of
* the driver should be complete. So the initial format, standard, timings
* and video input should all be initialized to some reasonable value.
*/
static int skeleton_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
/* The initial timings are chosen to be 720p60. */
static const struct v4l2_dv_timings timings_def =
V4L2_DV_BT_CEA_1280X720P60;
struct skeleton *skel;
struct video_device *vdev;
struct v4l2_ctrl_handler *hdl;
struct vb2_queue *q;
int ret;
/* Enable PCI */
ret = pci_enable_device(pdev);
if (ret)
return ret;
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "no suitable DMA available.\n");
goto disable_pci;
}
/* Allocate a new instance */
skel = devm_kzalloc(&pdev->dev, sizeof(struct skeleton), GFP_KERNEL);
if (!skel) {
ret = -ENOMEM;
goto disable_pci;
}
/* Allocate the interrupt */
ret = devm_request_irq(&pdev->dev, pdev->irq,
skeleton_irq, 0, KBUILD_MODNAME, skel);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
goto disable_pci;
}
skel->pdev = pdev;
/* Fill in the initial format-related settings */
skel->timings = timings_def;
skel->std = V4L2_STD_625_50;
skeleton_fill_pix_format(skel, &skel->format);
/* Initialize the top-level structure */
ret = v4l2_device_register(&pdev->dev, &skel->v4l2_dev);
if (ret)
goto disable_pci;
mutex_init(&skel->lock);
/* Add the controls */
hdl = &skel->ctrl_handler;
v4l2_ctrl_handler_init(hdl, 4);
v4l2_ctrl_new_std(hdl, &skel_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
v4l2_ctrl_new_std(hdl, &skel_ctrl_ops,
V4L2_CID_CONTRAST, 0, 255, 1, 16);
v4l2_ctrl_new_std(hdl, &skel_ctrl_ops,
V4L2_CID_SATURATION, 0, 255, 1, 127);
v4l2_ctrl_new_std(hdl, &skel_ctrl_ops,
V4L2_CID_HUE, -128, 127, 1, 0);
if (hdl->error) {
ret = hdl->error;
goto free_hdl;
}
skel->v4l2_dev.ctrl_handler = hdl;
/* Initialize the vb2 queue */
q = &skel->queue;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
q->dev = &pdev->dev;
q->drv_priv = skel;
q->buf_struct_size = sizeof(struct skel_buffer);
q->ops = &skel_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
/*
* Assume that this DMA engine needs to have at least two buffers
* available before it can be started. The start_streaming() op
* won't be called until at least this many buffers are queued up.
*/
q->min_buffers_needed = 2;
/*
* The serialization lock for the streaming ioctls. This is the same
* as the main serialization lock, but if some of the non-streaming
* ioctls could take a long time to execute, then you might want to
* have a different lock here to prevent VIDIOC_DQBUF from being
* blocked while waiting for another action to finish. This is
* generally not needed for PCI devices, but USB devices usually do
* want a separate lock here.
*/
q->lock = &skel->lock;
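/*
 * A sketch (not part of this driver) of the separate-lock variant the
 * comment above describes, using a hypothetical 'queue_lock' member so
 * VIDIOC_DQBUF is not blocked by a slow non-streaming ioctl:
 *
 *	mutex_init(&skel->queue_lock);
 *	q->lock = &skel->queue_lock;
 */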
/*
* Since this driver can only do 32-bit DMA we must make sure that
* the vb2 core will allocate the buffers in 32-bit DMA memory.
*/
q->gfp_flags = GFP_DMA32;
ret = vb2_queue_init(q);
if (ret)
goto free_hdl;
INIT_LIST_HEAD(&skel->buf_list);
spin_lock_init(&skel->qlock);
/* Initialize the video_device structure */
vdev = &skel->vdev;
strscpy(vdev->name, KBUILD_MODNAME, sizeof(vdev->name));
/*
* There is nothing to clean up, so release is set to an empty release
* function. The release callback must be non-NULL.
*/
vdev->release = video_device_release_empty;
vdev->fops = &skel_fops;
vdev->ioctl_ops = &skel_ioctl_ops;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
/*
* The main serialization lock. All ioctls are serialized by this
* lock. Exception: if q->lock is set, then the streaming ioctls
* are serialized by that separate lock.
*/
vdev->lock = &skel->lock;
vdev->queue = q;
vdev->v4l2_dev = &skel->v4l2_dev;
/* Supported SDTV standards, if any */
vdev->tvnorms = SKEL_TVNORMS;
video_set_drvdata(vdev, skel);
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret)
goto free_hdl;
dev_info(&pdev->dev, "V4L2 PCI Skeleton Driver loaded\n");
return 0;
free_hdl:
v4l2_ctrl_handler_free(&skel->ctrl_handler);
v4l2_device_unregister(&skel->v4l2_dev);
disable_pci:
pci_disable_device(pdev);
return ret;
}
static void skeleton_remove(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
struct skeleton *skel = container_of(v4l2_dev, struct skeleton, v4l2_dev);
video_unregister_device(&skel->vdev);
v4l2_ctrl_handler_free(&skel->ctrl_handler);
v4l2_device_unregister(&skel->v4l2_dev);
pci_disable_device(skel->pdev);
}
static struct pci_driver skeleton_driver = {
.name = KBUILD_MODNAME,
.probe = skeleton_probe,
.remove = skeleton_remove,
.id_table = skeleton_pci_tbl,
};
module_pci_driver(skeleton_driver);
| linux-master | samples/v4l/v4l2-pci-skeleton.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Sample in-kernel QMI client driver
*
* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/qrtr.h>
#include <linux/net.h>
#include <linux/completion.h>
#include <linux/idr.h>
#include <linux/string.h>
#include <net/sock.h>
#include <linux/soc/qcom/qmi.h>
#define PING_REQ1_TLV_TYPE 0x1
#define PING_RESP1_TLV_TYPE 0x2
#define PING_OPT1_TLV_TYPE 0x10
#define PING_OPT2_TLV_TYPE 0x11
#define DATA_REQ1_TLV_TYPE 0x1
#define DATA_RESP1_TLV_TYPE 0x2
#define DATA_OPT1_TLV_TYPE 0x10
#define DATA_OPT2_TLV_TYPE 0x11
#define TEST_MED_DATA_SIZE_V01 8192
#define TEST_MAX_NAME_SIZE_V01 255
#define TEST_PING_REQ_MSG_ID_V01 0x20
#define TEST_DATA_REQ_MSG_ID_V01 0x21
#define TEST_PING_REQ_MAX_MSG_LEN_V01 266
#define TEST_DATA_REQ_MAX_MSG_LEN_V01 8456
struct test_name_type_v01 {
u32 name_len;
char name[TEST_MAX_NAME_SIZE_V01];
};
static const struct qmi_elem_info test_name_type_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
.offset = offsetof(struct test_name_type_v01,
name_len),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = TEST_MAX_NAME_SIZE_V01,
.elem_size = sizeof(char),
.array_type = VAR_LEN_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
.offset = offsetof(struct test_name_type_v01,
name),
},
{}
};
struct test_ping_req_msg_v01 {
char ping[4];
u8 client_name_valid;
struct test_name_type_v01 client_name;
};
static const struct qmi_elem_info test_ping_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 4,
.elem_size = sizeof(char),
.array_type = STATIC_ARRAY,
.tlv_type = PING_REQ1_TLV_TYPE,
.offset = offsetof(struct test_ping_req_msg_v01,
ping),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = PING_OPT1_TLV_TYPE,
.offset = offsetof(struct test_ping_req_msg_v01,
client_name_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct test_name_type_v01),
.array_type = NO_ARRAY,
.tlv_type = PING_OPT1_TLV_TYPE,
.offset = offsetof(struct test_ping_req_msg_v01,
client_name),
.ei_array = test_name_type_v01_ei,
},
{}
};
struct test_ping_resp_msg_v01 {
struct qmi_response_type_v01 resp;
u8 pong_valid;
char pong[4];
u8 service_name_valid;
struct test_name_type_v01 service_name;
};
static const struct qmi_elem_info test_ping_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = PING_RESP1_TLV_TYPE,
.offset = offsetof(struct test_ping_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = PING_OPT1_TLV_TYPE,
.offset = offsetof(struct test_ping_resp_msg_v01,
pong_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 4,
.elem_size = sizeof(char),
.array_type = STATIC_ARRAY,
.tlv_type = PING_OPT1_TLV_TYPE,
.offset = offsetof(struct test_ping_resp_msg_v01,
pong),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = PING_OPT2_TLV_TYPE,
.offset = offsetof(struct test_ping_resp_msg_v01,
service_name_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct test_name_type_v01),
.array_type = NO_ARRAY,
.tlv_type = PING_OPT2_TLV_TYPE,
.offset = offsetof(struct test_ping_resp_msg_v01,
service_name),
.ei_array = test_name_type_v01_ei,
},
{}
};
struct test_data_req_msg_v01 {
u32 data_len;
u8 data[TEST_MED_DATA_SIZE_V01];
u8 client_name_valid;
struct test_name_type_v01 client_name;
};
static const struct qmi_elem_info test_data_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = DATA_REQ1_TLV_TYPE,
.offset = offsetof(struct test_data_req_msg_v01,
data_len),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = TEST_MED_DATA_SIZE_V01,
.elem_size = sizeof(u8),
.array_type = VAR_LEN_ARRAY,
.tlv_type = DATA_REQ1_TLV_TYPE,
.offset = offsetof(struct test_data_req_msg_v01,
data),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = DATA_OPT1_TLV_TYPE,
.offset = offsetof(struct test_data_req_msg_v01,
client_name_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct test_name_type_v01),
.array_type = NO_ARRAY,
.tlv_type = DATA_OPT1_TLV_TYPE,
.offset = offsetof(struct test_data_req_msg_v01,
client_name),
.ei_array = test_name_type_v01_ei,
},
{}
};
struct test_data_resp_msg_v01 {
struct qmi_response_type_v01 resp;
u8 data_valid;
u32 data_len;
u8 data[TEST_MED_DATA_SIZE_V01];
u8 service_name_valid;
struct test_name_type_v01 service_name;
};
static const struct qmi_elem_info test_data_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = DATA_RESP1_TLV_TYPE,
.offset = offsetof(struct test_data_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = DATA_OPT1_TLV_TYPE,
.offset = offsetof(struct test_data_resp_msg_v01,
data_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = DATA_OPT1_TLV_TYPE,
.offset = offsetof(struct test_data_resp_msg_v01,
data_len),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = TEST_MED_DATA_SIZE_V01,
.elem_size = sizeof(u8),
.array_type = VAR_LEN_ARRAY,
.tlv_type = DATA_OPT1_TLV_TYPE,
.offset = offsetof(struct test_data_resp_msg_v01,
data),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = DATA_OPT2_TLV_TYPE,
.offset = offsetof(struct test_data_resp_msg_v01,
service_name_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct test_name_type_v01),
.array_type = NO_ARRAY,
.tlv_type = DATA_OPT2_TLV_TYPE,
.offset = offsetof(struct test_data_resp_msg_v01,
service_name),
.ei_array = test_name_type_v01_ei,
},
{}
};
/*
* ping_write() - ping_pong debugfs file write handler
* @file: debugfs file context
* @user_buf: reference to the user data (ignored)
* @count: number of bytes in @user_buf
* @ppos: offset in @file to write
*
* This function allows user space to send out a ping_pong QMI encoded message
* to the associated remote test service and will return with the result of the
* transaction. It serves as an example of how to provide a custom response
* handler.
*
* Return: @count, or negative errno on failure.
*/
static ssize_t ping_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct qmi_handle *qmi = file->private_data;
struct test_ping_req_msg_v01 req = {};
struct qmi_txn txn;
int ret;
memcpy(req.ping, "ping", sizeof(req.ping));
ret = qmi_txn_init(qmi, &txn, NULL, NULL);
if (ret < 0)
return ret;
ret = qmi_send_request(qmi, NULL, &txn,
TEST_PING_REQ_MSG_ID_V01,
TEST_PING_REQ_MAX_MSG_LEN_V01,
test_ping_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
return ret;
}
ret = qmi_txn_wait(&txn, 5 * HZ);
if (ret < 0)
count = ret;
return count;
}
static const struct file_operations ping_fops = {
.open = simple_open,
.write = ping_write,
};
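/*
 * A hedged user-space sketch of driving ping_write() (assuming debugfs
 * is mounted at /sys/kernel/debug and a hypothetical service instance
 * directory "1:25"); the payload itself is ignored, any write pings:
 *
 *	int fd = open("/sys/kernel/debug/qmi_sample/1:25/ping", O_WRONLY);
 *
 *	if (fd >= 0 && write(fd, "1", 1) < 0)
 *		perror("ping");
 */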
static void ping_pong_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, const void *data)
{
const struct test_ping_resp_msg_v01 *resp = data;
if (!txn) {
pr_err("spurious ping response\n");
return;
}
if (resp->resp.result == QMI_RESULT_FAILURE_V01)
txn->result = -ENXIO;
else if (!resp->pong_valid || memcmp(resp->pong, "pong", 4))
txn->result = -EINVAL;
complete(&txn->completion);
}
/*
* data_write() - data debugfs file write handler
* @file: debugfs file context
* @user_buf: reference to the user data
* @count: number of bytes in @user_buf
* @ppos: offset in @file to write
*
* This function allows user space to send out a data QMI encoded message to
* the associated remote test service and will return with the result of the
* transaction. It serves as an example of how to have the QMI helpers decode a
* transaction response into a provided object automatically.
*
* Return: @count, or negative errno on failure.
*/
static ssize_t data_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct qmi_handle *qmi = file->private_data;
struct test_data_resp_msg_v01 *resp;
struct test_data_req_msg_v01 *req;
struct qmi_txn txn;
int ret;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
resp = kzalloc(sizeof(*resp), GFP_KERNEL);
if (!resp) {
kfree(req);
return -ENOMEM;
}
req->data_len = min_t(size_t, sizeof(req->data), count);
if (copy_from_user(req->data, user_buf, req->data_len)) {
ret = -EFAULT;
goto out;
}
ret = qmi_txn_init(qmi, &txn, test_data_resp_msg_v01_ei, resp);
if (ret < 0)
goto out;
ret = qmi_send_request(qmi, NULL, &txn,
TEST_DATA_REQ_MSG_ID_V01,
TEST_DATA_REQ_MAX_MSG_LEN_V01,
test_data_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
goto out;
}
ret = qmi_txn_wait(&txn, 5 * HZ);
if (ret < 0) {
goto out;
} else if (!resp->data_valid ||
resp->data_len != req->data_len ||
memcmp(resp->data, req->data, req->data_len)) {
pr_err("response data doesn't match expectation\n");
ret = -EINVAL;
goto out;
}
ret = count;
out:
kfree(resp);
kfree(req);
return ret;
}
static const struct file_operations data_fops = {
.open = simple_open,
.write = data_write,
};
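/*
 * The "data" file in the same hypothetical debugfs directory is driven
 * the same way, except the bytes written form the request payload and
 * are expected back verbatim from the remote service, e.g.:
 *
 *	write(fd, "hello", 5);
 */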
static const struct qmi_msg_handler qmi_sample_handlers[] = {
{
.type = QMI_RESPONSE,
.msg_id = TEST_PING_REQ_MSG_ID_V01,
.ei = test_ping_resp_msg_v01_ei,
.decoded_size = sizeof(struct test_ping_resp_msg_v01),
.fn = ping_pong_cb
},
{}
};
struct qmi_sample {
struct qmi_handle qmi;
struct dentry *de_dir;
struct dentry *de_data;
struct dentry *de_ping;
};
static struct dentry *qmi_debug_dir;
static int qmi_sample_probe(struct platform_device *pdev)
{
struct sockaddr_qrtr *sq;
struct qmi_sample *sample;
char path[20];
int ret;
sample = devm_kzalloc(&pdev->dev, sizeof(*sample), GFP_KERNEL);
if (!sample)
return -ENOMEM;
ret = qmi_handle_init(&sample->qmi, TEST_DATA_REQ_MAX_MSG_LEN_V01,
NULL,
qmi_sample_handlers);
if (ret < 0)
return ret;
sq = dev_get_platdata(&pdev->dev);
ret = kernel_connect(sample->qmi.sock, (struct sockaddr *)sq,
sizeof(*sq), 0);
if (ret < 0) {
pr_err("failed to connect to remote service port\n");
goto err_release_qmi_handle;
}
snprintf(path, sizeof(path), "%d:%d", sq->sq_node, sq->sq_port);
sample->de_dir = debugfs_create_dir(path, qmi_debug_dir);
if (IS_ERR(sample->de_dir)) {
ret = PTR_ERR(sample->de_dir);
goto err_release_qmi_handle;
}
sample->de_data = debugfs_create_file("data", 0600, sample->de_dir,
sample, &data_fops);
if (IS_ERR(sample->de_data)) {
ret = PTR_ERR(sample->de_data);
goto err_remove_de_dir;
}
sample->de_ping = debugfs_create_file("ping", 0600, sample->de_dir,
sample, &ping_fops);
if (IS_ERR(sample->de_ping)) {
ret = PTR_ERR(sample->de_ping);
goto err_remove_de_data;
}
platform_set_drvdata(pdev, sample);
return 0;
err_remove_de_data:
debugfs_remove(sample->de_data);
err_remove_de_dir:
debugfs_remove(sample->de_dir);
err_release_qmi_handle:
qmi_handle_release(&sample->qmi);
return ret;
}
static int qmi_sample_remove(struct platform_device *pdev)
{
struct qmi_sample *sample = platform_get_drvdata(pdev);
debugfs_remove(sample->de_ping);
debugfs_remove(sample->de_data);
debugfs_remove(sample->de_dir);
qmi_handle_release(&sample->qmi);
return 0;
}
static struct platform_driver qmi_sample_driver = {
.probe = qmi_sample_probe,
.remove = qmi_sample_remove,
.driver = {
.name = "qmi_sample_client",
},
};
static int qmi_sample_new_server(struct qmi_handle *qmi,
struct qmi_service *service)
{
struct platform_device *pdev;
struct sockaddr_qrtr sq = { AF_QIPCRTR, service->node, service->port };
int ret;
pdev = platform_device_alloc("qmi_sample_client", PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
ret = platform_device_add_data(pdev, &sq, sizeof(sq));
if (ret)
goto err_put_device;
ret = platform_device_add(pdev);
if (ret)
goto err_put_device;
service->priv = pdev;
return 0;
err_put_device:
platform_device_put(pdev);
return ret;
}
static void qmi_sample_del_server(struct qmi_handle *qmi,
struct qmi_service *service)
{
struct platform_device *pdev = service->priv;
platform_device_unregister(pdev);
}
static struct qmi_handle lookup_client;
static const struct qmi_ops lookup_ops = {
.new_server = qmi_sample_new_server,
.del_server = qmi_sample_del_server,
};
static int qmi_sample_init(void)
{
int ret;
qmi_debug_dir = debugfs_create_dir("qmi_sample", NULL);
if (IS_ERR(qmi_debug_dir)) {
pr_err("failed to create qmi_sample dir\n");
return PTR_ERR(qmi_debug_dir);
}
ret = platform_driver_register(&qmi_sample_driver);
if (ret)
goto err_remove_debug_dir;
ret = qmi_handle_init(&lookup_client, 0, &lookup_ops, NULL);
if (ret < 0)
goto err_unregister_driver;
qmi_add_lookup(&lookup_client, 15, 0, 0);
return 0;
err_unregister_driver:
platform_driver_unregister(&qmi_sample_driver);
err_remove_debug_dir:
debugfs_remove(qmi_debug_dir);
return ret;
}
static void qmi_sample_exit(void)
{
qmi_handle_release(&lookup_client);
platform_driver_unregister(&qmi_sample_driver);
debugfs_remove(qmi_debug_dir);
}
module_init(qmi_sample_init);
module_exit(qmi_sample_exit);
MODULE_DESCRIPTION("Sample QMI client driver");
MODULE_LICENSE("GPL v2");
| linux-master | samples/qmi/qmi_sample_client.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Seccomp BPF helper functions
*
* Copyright (c) 2012 The Chromium OS Authors <[email protected]>
* Author: Will Drewry <[email protected]>
*
* The code may be used by anyone for any purpose,
* and can serve as a starting point for developing
* applications using prctl(PR_ATTACH_SECCOMP_FILTER).
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "bpf-helper.h"
int bpf_resolve_jumps(struct bpf_labels *labels,
struct sock_filter *filter, size_t count)
{
size_t i;
if (count < 1 || count > BPF_MAXINSNS)
return -1;
/*
* Walk it once, backwards, to build the label table and do fixups.
* Since backward jumps are disallowed by BPF, this is easy.
*/
for (i = 0; i < count; ++i) {
size_t offset = count - i - 1;
struct sock_filter *instr = &filter[offset];
if (instr->code != (BPF_JMP+BPF_JA))
continue;
switch ((instr->jt<<8)|instr->jf) {
case (JUMP_JT<<8)|JUMP_JF:
if (labels->labels[instr->k].location == 0xffffffff) {
fprintf(stderr, "Unresolved label: '%s'\n",
labels->labels[instr->k].label);
return 1;
}
instr->k = labels->labels[instr->k].location -
(offset + 1);
instr->jt = 0;
instr->jf = 0;
continue;
case (LABEL_JT<<8)|LABEL_JF:
if (labels->labels[instr->k].location != 0xffffffff) {
fprintf(stderr, "Duplicate label use: '%s'\n",
labels->labels[instr->k].label);
return 1;
}
labels->labels[instr->k].location = offset;
instr->k = 0; /* fall through */
instr->jt = 0;
instr->jf = 0;
continue;
}
}
return 0;
}
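/*
 * A minimal sketch of the pattern this function resolves (using the
 * JUMP/LABEL/ALLOW/DENY macros from bpf-helper.h): forward jumps are
 * emitted with a label id in 'k' and sentinel jt/jf values, then fixed
 * up here into relative offsets.
 *
 *	struct bpf_labels l = { .count = 0 };
 *	struct sock_filter f[] = {
 *		JUMP(&l, done),
 *		DENY,
 *		LABEL(&l, done),
 *		ALLOW,
 *	};
 *	bpf_resolve_jumps(&l, f, sizeof(f)/sizeof(*f));
 */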
/* Simple lookup table for labels. */
__u32 seccomp_bpf_label(struct bpf_labels *labels, const char *label)
{
struct __bpf_label *begin = labels->labels, *end;
int id;
if (labels->count == BPF_LABELS_MAX) {
fprintf(stderr, "Too many labels\n");
exit(1);
}
if (labels->count == 0) {
begin->label = label;
begin->location = 0xffffffff;
labels->count++;
return 0;
}
end = begin + labels->count;
for (id = 0; begin < end; ++begin, ++id) {
if (!strcmp(label, begin->label))
return id;
}
begin->label = label;
begin->location = 0xffffffff;
labels->count++;
return id;
}
void seccomp_bpf_print(struct sock_filter *filter, size_t count)
{
struct sock_filter *end = filter + count;
for ( ; filter < end; ++filter)
printf("{ code=%u,jt=%u,jf=%u,k=%u },\n",
filter->code, filter->jt, filter->jf, filter->k);
}
| linux-master | samples/seccomp/bpf-helper.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Seccomp filter example for x86 (32-bit and 64-bit) with BPF macros
*
* Copyright (c) 2012 The Chromium OS Authors <[email protected]>
* Author: Will Drewry <[email protected]>
*
* The code may be used by anyone for any purpose,
* and can serve as a starting point for developing
* applications using prctl(PR_SET_SECCOMP, 2, ...).
*/
#if defined(__i386__) || defined(__x86_64__)
#define SUPPORTED_ARCH 1
#endif
#if defined(SUPPORTED_ARCH)
#define __USE_GNU 1
#define _GNU_SOURCE 1
#include <linux/types.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <linux/unistd.h>
#include <signal.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <sys/prctl.h>
#include <unistd.h>
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
#define syscall_nr (offsetof(struct seccomp_data, nr))
#if defined(__i386__)
#define REG_RESULT REG_EAX
#define REG_SYSCALL REG_EAX
#define REG_ARG0 REG_EBX
#define REG_ARG1 REG_ECX
#define REG_ARG2 REG_EDX
#define REG_ARG3 REG_ESI
#define REG_ARG4 REG_EDI
#define REG_ARG5 REG_EBP
#elif defined(__x86_64__)
#define REG_RESULT REG_RAX
#define REG_SYSCALL REG_RAX
#define REG_ARG0 REG_RDI
#define REG_ARG1 REG_RSI
#define REG_ARG2 REG_RDX
#define REG_ARG3 REG_R10
#define REG_ARG4 REG_R8
#define REG_ARG5 REG_R9
#endif
#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#endif
#ifndef SYS_SECCOMP
#define SYS_SECCOMP 1
#endif
static void emulator(int nr, siginfo_t *info, void *void_context)
{
ucontext_t *ctx = (ucontext_t *)(void_context);
int syscall;
char *buf;
ssize_t bytes;
size_t len;
if (info->si_code != SYS_SECCOMP)
return;
if (!ctx)
return;
syscall = ctx->uc_mcontext.gregs[REG_SYSCALL];
buf = (char *) ctx->uc_mcontext.gregs[REG_ARG1];
len = (size_t) ctx->uc_mcontext.gregs[REG_ARG2];
if (syscall != __NR_write)
return;
if (ctx->uc_mcontext.gregs[REG_ARG0] != STDERR_FILENO)
return;
/* Redirect stderr messages to stdout. Doesn't handle EINTR, etc */
ctx->uc_mcontext.gregs[REG_RESULT] = -1;
if (write(STDOUT_FILENO, "[ERR] ", 6) > 0) {
bytes = write(STDOUT_FILENO, buf, len);
ctx->uc_mcontext.gregs[REG_RESULT] = bytes;
}
return;
}
static int install_emulator(void)
{
struct sigaction act;
sigset_t mask;
memset(&act, 0, sizeof(act));
sigemptyset(&mask);
sigaddset(&mask, SIGSYS);
act.sa_sigaction = &emulator;
act.sa_flags = SA_SIGINFO;
if (sigaction(SIGSYS, &act, NULL) < 0) {
perror("sigaction");
return -1;
}
if (sigprocmask(SIG_UNBLOCK, &mask, NULL)) {
perror("sigprocmask");
return -1;
}
return 0;
}
static int install_filter(void)
{
struct sock_filter filter[] = {
/* Grab the system call number */
BPF_STMT(BPF_LD+BPF_W+BPF_ABS, syscall_nr),
/* Jump table for the allowed syscalls */
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_rt_sigreturn, 0, 1),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
#ifdef __NR_sigreturn
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_sigreturn, 0, 1),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
#endif
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_exit_group, 0, 1),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_exit, 0, 1),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_read, 1, 0),
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_write, 3, 2),
/* Check that read is only using stdin. */
BPF_STMT(BPF_LD+BPF_W+BPF_ABS, syscall_arg(0)),
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, STDIN_FILENO, 4, 0),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL),
/* Check that write is only using stdout */
BPF_STMT(BPF_LD+BPF_W+BPF_ABS, syscall_arg(0)),
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, STDOUT_FILENO, 1, 0),
/* Trap attempts to write to stderr */
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, STDERR_FILENO, 1, 2),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_TRAP),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL),
};
struct sock_fprog prog = {
.len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
.filter = filter,
};
if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
perror("prctl(NO_NEW_PRIVS)");
return 1;
}
if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
perror("prctl");
return 1;
}
return 0;
}
#define payload(_c) (_c), sizeof((_c))
int main(int argc, char **argv)
{
char buf[4096];
ssize_t bytes = 0;
if (install_emulator())
return 1;
if (install_filter())
return 1;
syscall(__NR_write, STDOUT_FILENO,
payload("OHAI! WHAT IS YOUR NAME? "));
bytes = syscall(__NR_read, STDIN_FILENO, buf, sizeof(buf));
syscall(__NR_write, STDOUT_FILENO, payload("HELLO, "));
syscall(__NR_write, STDOUT_FILENO, buf, bytes);
syscall(__NR_write, STDERR_FILENO,
payload("Error message going to STDERR\n"));
return 0;
}
#else /* SUPPORTED_ARCH */
/*
* This sample is x86-only. Since kernel samples are compiled with the
* host toolchain, a non-x86 host will result in using only the main()
* below.
*/
int main(void)
{
return 1;
}
#endif /* SUPPORTED_ARCH */
| linux-master | samples/seccomp/bpf-direct.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Seccomp BPF example using a macro-based generator.
*
* Copyright (c) 2012 The Chromium OS Authors <[email protected]>
* Author: Will Drewry <[email protected]>
*
* The code may be used by anyone for any purpose,
* and can serve as a starting point for developing
* applications using prctl(PR_ATTACH_SECCOMP_FILTER).
*/
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <linux/unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <unistd.h>
#include "bpf-helper.h"
#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#endif
int main(int argc, char **argv)
{
struct bpf_labels l = {
.count = 0,
};
static const char msg1[] = "Please type something: ";
static const char msg2[] = "You typed: ";
char buf[256];
struct sock_filter filter[] = {
/* TODO: LOAD_SYSCALL_NR(arch) and enforce an arch */
LOAD_SYSCALL_NR,
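/*
 * A sketch of the arch check the TODO above refers to (assuming
 * x86-64; the AUDIT_ARCH_* values come from <linux/audit.h>),
 * mirroring the filter in samples/seccomp/dropper.c:
 *
 *	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, offsetof(struct seccomp_data, arch)),
 *	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, AUDIT_ARCH_X86_64, 1, 0),
 *	DENY,
 */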
SYSCALL(__NR_exit, ALLOW),
SYSCALL(__NR_exit_group, ALLOW),
SYSCALL(__NR_write, JUMP(&l, write_fd)),
SYSCALL(__NR_read, JUMP(&l, read)),
DENY, /* Don't passthrough into a label */
LABEL(&l, read),
ARG(0),
JNE(STDIN_FILENO, DENY),
ARG(1),
JNE((unsigned long)buf, DENY),
ARG(2),
JGE(sizeof(buf), DENY),
ALLOW,
LABEL(&l, write_fd),
ARG(0),
JEQ(STDOUT_FILENO, JUMP(&l, write_buf)),
JEQ(STDERR_FILENO, JUMP(&l, write_buf)),
DENY,
LABEL(&l, write_buf),
ARG(1),
JEQ((unsigned long)msg1, JUMP(&l, msg1_len)),
JEQ((unsigned long)msg2, JUMP(&l, msg2_len)),
JEQ((unsigned long)buf, JUMP(&l, buf_len)),
DENY,
LABEL(&l, msg1_len),
ARG(2),
JLT(sizeof(msg1), ALLOW),
DENY,
LABEL(&l, msg2_len),
ARG(2),
JLT(sizeof(msg2), ALLOW),
DENY,
LABEL(&l, buf_len),
ARG(2),
JLT(sizeof(buf), ALLOW),
DENY,
};
struct sock_fprog prog = {
.filter = filter,
.len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
};
ssize_t bytes;
bpf_resolve_jumps(&l, filter, sizeof(filter)/sizeof(*filter));
if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
perror("prctl(NO_NEW_PRIVS)");
return 1;
}
if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
perror("prctl(SECCOMP)");
return 1;
}
syscall(__NR_write, STDOUT_FILENO, msg1, strlen(msg1));
bytes = syscall(__NR_read, STDIN_FILENO, buf, sizeof(buf)-1);
bytes = (bytes > 0 ? bytes : 0);
syscall(__NR_write, STDERR_FILENO, msg2, strlen(msg2));
syscall(__NR_write, STDERR_FILENO, buf, bytes);
/* Now get killed */
syscall(__NR_write, STDERR_FILENO, msg2, strlen(msg2)+2);
return 0;
}
| linux-master | samples/seccomp/bpf-fancy.c |
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <stddef.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/ptrace.h>
#include <sys/mount.h>
#include <linux/limits.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
static int seccomp(unsigned int op, unsigned int flags, void *args)
{
errno = 0;
return syscall(__NR_seccomp, op, flags, args);
}
static int send_fd(int sock, int fd)
{
struct msghdr msg = {};
struct cmsghdr *cmsg;
char buf[CMSG_SPACE(sizeof(int))] = {0}, c = 'c';
struct iovec io = {
.iov_base = &c,
.iov_len = 1,
};
msg.msg_iov = &io;
msg.msg_iovlen = 1;
msg.msg_control = buf;
msg.msg_controllen = sizeof(buf);
cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
*((int *)CMSG_DATA(cmsg)) = fd;
msg.msg_controllen = cmsg->cmsg_len;
if (sendmsg(sock, &msg, 0) < 0) {
perror("sendmsg");
return -1;
}
return 0;
}
static int recv_fd(int sock)
{
struct msghdr msg = {};
struct cmsghdr *cmsg;
char buf[CMSG_SPACE(sizeof(int))] = {0}, c = 'c';
struct iovec io = {
.iov_base = &c,
.iov_len = 1,
};
msg.msg_iov = &io;
msg.msg_iovlen = 1;
msg.msg_control = buf;
msg.msg_controllen = sizeof(buf);
if (recvmsg(sock, &msg, 0) < 0) {
perror("recvmsg");
return -1;
}
cmsg = CMSG_FIRSTHDR(&msg);
return *((int *)CMSG_DATA(cmsg));
}
static int user_trap_syscall(int nr, unsigned int flags)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD+BPF_W+BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, nr, 0, 1),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_USER_NOTIF),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog);
}
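/*
 * Note: when SECCOMP_FILTER_FLAG_NEW_LISTENER is passed, a successful
 * seccomp(2) call returns the notification file descriptor rather than
 * 0, which is why callers of user_trap_syscall() treat the return
 * value as the listener fd.
 */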
static int handle_req(struct seccomp_notif *req,
struct seccomp_notif_resp *resp, int listener)
{
char path[PATH_MAX], source[PATH_MAX], target[PATH_MAX];
int ret = -1, mem;
resp->id = req->id;
resp->error = -EPERM;
resp->val = 0;
if (req->data.nr != __NR_mount) {
fprintf(stderr, "huh? trapped something besides mount? %d\n", req->data.nr);
return -1;
}
/* Only allow bind mounts. */
if (!(req->data.args[3] & MS_BIND))
return 0;
/*
* Ok, let's read the task's memory to see where they wanted their
* mount to go.
*/
snprintf(path, sizeof(path), "/proc/%d/mem", req->pid);
mem = open(path, O_RDONLY);
if (mem < 0) {
perror("open mem");
return -1;
}
/*
* Now we avoid a TOCTOU: we referred to the task by its pid, but that
* task may have died since making the syscall, so we need to confirm
* that the notification is still valid after we open its
* /proc/pid/mem file. We can
* ask the listener fd this as follows.
*
* Note that this check should occur *after* any task-specific
* resources are opened, to make sure that the task has not died and
* we're not wrongly reading someone else's state in order to make
* decisions.
*/
if (ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req->id) < 0) {
fprintf(stderr, "task died before we could map its memory\n");
goto out;
}
/*
* Phew, we've got the right /proc/pid/mem. Now we can read it. Note
* that to avoid another TOCTOU, we should read all of the pointer args
* before we decide to allow the syscall.
*/
if (lseek(mem, req->data.args[0], SEEK_SET) < 0) {
perror("seek");
goto out;
}
ret = read(mem, source, sizeof(source));
if (ret < 0) {
perror("read");
goto out;
}
if (lseek(mem, req->data.args[1], SEEK_SET) < 0) {
perror("seek");
goto out;
}
ret = read(mem, target, sizeof(target));
if (ret < 0) {
perror("read");
goto out;
}
/*
* Our policy is to only allow bind mounts inside /tmp. This isn't very
* interesting, because we could do unprivileged bind mounts with user
* namespaces already, but you get the idea.
*/
if (!strncmp(source, "/tmp/", 5) && !strncmp(target, "/tmp/", 5)) {
if (mount(source, target, NULL, req->data.args[3], NULL) < 0) {
ret = -1;
perror("actual mount");
goto out;
}
resp->error = 0;
}
/* Even if we didn't allow it because of policy, generating the
 * response is still a success, because we want to tell the worker EPERM.
 */
ret = 0;
out:
close(mem);
return ret;
}
int main(void)
{
int sk_pair[2], ret = 1, status, listener;
pid_t worker = 0, tracer = 0;
if (socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair) < 0) {
perror("socketpair");
return 1;
}
worker = fork();
if (worker < 0) {
perror("fork");
goto close_pair;
}
if (worker == 0) {
listener = user_trap_syscall(__NR_mount,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
if (listener < 0) {
perror("seccomp");
exit(1);
}
/*
* Drop privileges. We definitely can't mount as uid 1000.
*/
if (setuid(1000) < 0) {
perror("setuid");
exit(1);
}
/*
* Send the listener to the parent; also serves as
* synchronization.
*/
if (send_fd(sk_pair[1], listener) < 0)
exit(1);
close(listener);
if (mkdir("/tmp/foo", 0755) < 0) {
perror("mkdir");
exit(1);
}
/*
* Try a bad mount just for grins.
*/
if (mount("/dev/sda", "/tmp/foo", NULL, 0, NULL) != -1) {
fprintf(stderr, "huh? mounted /dev/sda?\n");
exit(1);
}
if (errno != EPERM) {
perror("bad error from mount");
exit(1);
}
/*
* Ok, we expect this one to succeed.
*/
if (mount("/tmp/foo", "/tmp/foo", NULL, MS_BIND, NULL) < 0) {
perror("mount");
exit(1);
}
exit(0);
}
/*
* Get the listener from the child.
*/
listener = recv_fd(sk_pair[0]);
if (listener < 0)
goto out_kill;
/*
* Fork a task to handle the requests. This isn't strictly necessary,
* but it keeps the structure of this sample simpler, since we can just
* wait for the worker to exit and then kill the tracer.
*/
tracer = fork();
if (tracer < 0) {
perror("fork");
goto out_kill;
}
if (tracer == 0) {
struct seccomp_notif *req;
struct seccomp_notif_resp *resp;
struct seccomp_notif_sizes sizes;
if (seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes) < 0) {
perror("seccomp(GET_NOTIF_SIZES)");
goto out_close;
}
req = malloc(sizes.seccomp_notif);
if (!req)
goto out_close;
resp = malloc(sizes.seccomp_notif_resp);
if (!resp)
goto out_req;
memset(resp, 0, sizes.seccomp_notif_resp);
while (1) {
memset(req, 0, sizes.seccomp_notif);
if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req)) {
perror("ioctl recv");
goto out_resp;
}
if (handle_req(req, resp, listener) < 0)
goto out_resp;
/*
* ENOENT here means that the task may have gotten a
* signal and restarted the syscall. It's up to the
* handler to decide what to do in this case, but for
* the sample code, we just ignore it. Probably
* something better should happen, like undoing the
* mount, or keeping track of the args to make sure we
* don't do it again.
*/
if (ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, resp) < 0 &&
errno != ENOENT) {
perror("ioctl send");
goto out_resp;
}
}
out_resp:
free(resp);
out_req:
free(req);
out_close:
close(listener);
exit(1);
}
close(listener);
if (waitpid(worker, &status, 0) != worker) {
perror("waitpid");
goto out_kill;
}
if (umount2("/tmp/foo", MNT_DETACH) < 0 && errno != EINVAL) {
perror("umount2");
goto out_kill;
}
if (remove("/tmp/foo") < 0 && errno != ENOENT) {
perror("remove");
exit(1);
}
if (!WIFEXITED(status) || WEXITSTATUS(status)) {
fprintf(stderr, "worker exited nonzero\n");
goto out_kill;
}
ret = 0;
out_kill:
if (tracer > 0)
kill(tracer, SIGKILL);
if (worker > 0)
kill(worker, SIGKILL);
close_pair:
close(sk_pair[0]);
close(sk_pair[1]);
return ret;
}
| linux-master | samples/seccomp/user-trap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Naive system call dropper built on seccomp_filter.
*
* Copyright (c) 2012 The Chromium OS Authors <[email protected]>
* Author: Will Drewry <[email protected]>
*
* The code may be used by anyone for any purpose,
* and can serve as a starting point for developing
* applications using prctl(PR_SET_SECCOMP, 2, ...).
*
* When run, returns the specified errno for the specified
* system call number against the given architecture.
*
*/
#include <errno.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <linux/unistd.h>
#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <unistd.h>
static int install_filter(int arch, int nr, int error)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD+BPF_W+BPF_ABS,
(offsetof(struct seccomp_data, arch))),
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, arch, 0, 3),
BPF_STMT(BPF_LD+BPF_W+BPF_ABS,
(offsetof(struct seccomp_data, nr))),
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, nr, 0, 1),
BPF_STMT(BPF_RET+BPF_K,
SECCOMP_RET_ERRNO|(error & SECCOMP_RET_DATA)),
BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
.filter = filter,
};
if (error == -1) {
struct sock_filter kill = BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL);
filter[4] = kill;
}
if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
perror("prctl(NO_NEW_PRIVS)");
return 1;
}
if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
perror("prctl(PR_SET_SECCOMP)");
return 1;
}
return 0;
}
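/*
 * Example invocation (x86-64, where 39 is __NR_getpid; the errno value
 * is arbitrary):
 *
 *	./dropper 0xC000003E 39 99 /bin/true
 *
 * Any getpid() call made by /bin/true would then fail with errno 99.
 */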
int main(int argc, char **argv)
{
if (argc < 5) {
fprintf(stderr, "Usage:\n"
"dropper <arch> <syscall_nr> <errno> <prog> [<args>]\n"
"Hint: AUDIT_ARCH_I386: 0x%X\n"
" AUDIT_ARCH_X86_64: 0x%X\n"
" errno == -1 means SECCOMP_RET_KILL\n"
"\n", AUDIT_ARCH_I386, AUDIT_ARCH_X86_64);
return 1;
}
if (install_filter(strtol(argv[1], NULL, 0), strtol(argv[2], NULL, 0),
strtol(argv[3], NULL, 0)))
return 1;
execv(argv[4], &argv[4]);
printf("Failed to execv\n");
return 255;
}
| linux-master | samples/seccomp/dropper.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ucon.c
*
* Copyright (c) 2004+ Evgeniy Polyakov <[email protected]>
*/
#include <asm/types.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <getopt.h>
#include <linux/connector.h>
#define DEBUG
#define NETLINK_CONNECTOR 11
/* Hopefully your userspace connector.h matches this kernel */
#define CN_TEST_IDX (CN_NETLINK_USERS + 3)
#define CN_TEST_VAL 0x456
#ifdef DEBUG
#define ulog(f, a...) fprintf(stdout, f, ##a)
#else
#define ulog(f, a...) do {} while (0)
#endif
static int need_exit;
static __u32 seq;
static int netlink_send(int s, struct cn_msg *msg)
{
struct nlmsghdr *nlh;
unsigned int size;
int err;
char buf[128];
struct cn_msg *m;
size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len);
nlh = (struct nlmsghdr *)buf;
nlh->nlmsg_seq = seq++;
nlh->nlmsg_pid = getpid();
nlh->nlmsg_type = NLMSG_DONE;
nlh->nlmsg_len = size;
nlh->nlmsg_flags = 0;
m = NLMSG_DATA(nlh);
#if 0
ulog("%s: [%08x.%08x] len=%u, seq=%u, ack=%u.\n",
__func__, msg->id.idx, msg->id.val, msg->len, msg->seq, msg->ack);
#endif
memcpy(m, msg, sizeof(*m) + msg->len);
err = send(s, nlh, size, 0);
if (err == -1)
ulog("Failed to send: %s [%d].\n",
strerror(errno), errno);
return err;
}
static void usage(void)
{
printf(
"Usage: ucon [options] [output file]\n"
"\n"
"\t-h\tthis help screen\n"
"\t-s\tsend buffers to the test module\n"
"\n"
"The default behavior of ucon is to subscribe to the test module\n"
"and wait for state messages. Any ones received are dumped to the\n"
"specified output file (or stdout). The test module is assumed to\n"
"have an id of {%u.%u}\n"
"\n"
"If you get no output, then verify the cn_test module id matches\n"
"the expected id above.\n"
, CN_TEST_IDX, CN_TEST_VAL
);
}
int main(int argc, char *argv[])
{
int s;
char buf[1024];
int len;
struct nlmsghdr *reply;
struct sockaddr_nl l_local;
struct cn_msg *data;
FILE *out;
time_t tm;
struct pollfd pfd;
bool send_msgs = false;
while ((s = getopt(argc, argv, "hs")) != -1) {
switch (s) {
case 's':
send_msgs = true;
break;
case 'h':
usage();
return 0;
default:
/* getopt() outputs an error for us */
usage();
return 1;
}
}
if (argc != optind) {
out = fopen(argv[optind], "a+");
if (!out) {
ulog("Unable to open %s for writing: %s\n",
argv[1], strerror(errno));
out = stdout;
}
} else
out = stdout;
memset(buf, 0, sizeof(buf));
s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
if (s == -1) {
perror("socket");
return -1;
}
l_local.nl_family = AF_NETLINK;
l_local.nl_groups = -1; /* bitmask of requested groups */
l_local.nl_pid = 0;
ulog("subscribing to %u.%u\n", CN_TEST_IDX, CN_TEST_VAL);
if (bind(s, (struct sockaddr *)&l_local, sizeof(struct sockaddr_nl)) == -1) {
perror("bind");
close(s);
return -1;
}
#if 0
{
int on = 0x57; /* Additional group number */
setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &on, sizeof(on));
}
#endif
if (send_msgs) {
int i, j;
memset(buf, 0, sizeof(buf));
data = (struct cn_msg *)buf;
data->id.idx = CN_TEST_IDX;
data->id.val = CN_TEST_VAL;
data->seq = seq++;
data->ack = 0;
data->len = 0;
for (j = 0; j < 10; ++j) {
for (i = 0; i < 1000; ++i) {
len = netlink_send(s, data);
}
ulog("%d messages have been sent to %08x.%08x.\n", i, data->id.idx, data->id.val);
}
return 0;
}
pfd.fd = s;
while (!need_exit) {
pfd.events = POLLIN;
pfd.revents = 0;
switch (poll(&pfd, 1, -1)) {
case 0:
need_exit = 1;
break;
case -1:
if (errno != EINTR) {
need_exit = 1;
break;
}
continue;
}
if (need_exit)
break;
memset(buf, 0, sizeof(buf));
len = recv(s, buf, sizeof(buf), 0);
if (len == -1) {
perror("recv buf");
close(s);
return -1;
}
reply = (struct nlmsghdr *)buf;
switch (reply->nlmsg_type) {
case NLMSG_ERROR:
fprintf(out, "Error message received.\n");
fflush(out);
break;
case NLMSG_DONE:
data = (struct cn_msg *)NLMSG_DATA(reply);
time(&tm);
fprintf(out, "%.24s : [%x.%x] [%08u.%08u].\n",
ctime(&tm), data->id.idx, data->id.val, data->seq, data->ack);
fflush(out);
break;
default:
break;
}
}
close(s);
return 0;
}
| linux-master | samples/connector/ucon.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* cn_test.c
*
* 2004+ Copyright (c) Evgeniy Polyakov <[email protected]>
* All rights reserved.
*/
#define pr_fmt(fmt) "cn_test: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/connector.h>
static struct cb_id cn_test_id = { CN_NETLINK_USERS + 3, 0x456 };
static char cn_test_name[] = "cn_test";
static struct sock *nls;
static struct timer_list cn_test_timer;
static void cn_test_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
pr_info("%s: %lu: idx=%x, val=%x, seq=%u, ack=%u, len=%d: %s.\n",
__func__, jiffies, msg->id.idx, msg->id.val,
msg->seq, msg->ack, msg->len,
msg->len ? (char *)msg->data : "");
}
/*
* Do not remove this function even if no one is using it as
* this is an example of how to get notifications about new
* connector user registration
*/
#if 0
static int cn_test_want_notify(void)
{
struct cn_ctl_msg *ctl;
struct cn_notify_req *req;
struct cn_msg *msg = NULL;
int size, size0;
struct sk_buff *skb;
struct nlmsghdr *nlh;
u32 group = 1;
size0 = sizeof(*msg) + sizeof(*ctl) + 3 * sizeof(*req);
size = NLMSG_SPACE(size0);
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb) {
pr_err("failed to allocate new skb with size=%u\n", size);
return -ENOMEM;
}
nlh = nlmsg_put(skb, 0, 0x123, NLMSG_DONE, size - sizeof(*nlh), 0);
if (!nlh) {
kfree_skb(skb);
return -EMSGSIZE;
}
msg = nlmsg_data(nlh);
memset(msg, 0, size0);
msg->id.idx = -1;
msg->id.val = -1;
msg->seq = 0x123;
msg->ack = 0x345;
msg->len = size0 - sizeof(*msg);
ctl = (struct cn_ctl_msg *)(msg + 1);
ctl->idx_notify_num = 1;
ctl->val_notify_num = 2;
ctl->group = group;
ctl->len = msg->len - sizeof(*ctl);
req = (struct cn_notify_req *)(ctl + 1);
/*
* Idx.
*/
req->first = cn_test_id.idx;
req->range = 10;
/*
* Val 0.
*/
req++;
req->first = cn_test_id.val;
req->range = 10;
/*
* Val 1.
*/
req++;
req->first = cn_test_id.val + 20;
req->range = 10;
NETLINK_CB(skb).dst_group = ctl->group;
//netlink_broadcast(nls, skb, 0, ctl->group, GFP_ATOMIC);
netlink_unicast(nls, skb, 0, 0);
pr_info("request was sent: group=0x%x\n", ctl->group);
return 0;
}
#endif
static u32 cn_test_timer_counter;
static void cn_test_timer_func(struct timer_list *unused)
{
struct cn_msg *m;
char data[32];
pr_debug("%s: timer fired\n", __func__);
m = kzalloc(sizeof(*m) + sizeof(data), GFP_ATOMIC);
if (m) {
memcpy(&m->id, &cn_test_id, sizeof(m->id));
m->seq = cn_test_timer_counter;
m->len = scnprintf(data, sizeof(data), "counter = %u",
cn_test_timer_counter) + 1;
memcpy(m + 1, data, m->len);
cn_netlink_send(m, 0, 0, GFP_ATOMIC);
kfree(m);
}
cn_test_timer_counter++;
mod_timer(&cn_test_timer, jiffies + msecs_to_jiffies(1000));
}
static int cn_test_init(void)
{
int err;
err = cn_add_callback(&cn_test_id, cn_test_name, cn_test_callback);
if (err)
goto err_out;
cn_test_id.val++;
err = cn_add_callback(&cn_test_id, cn_test_name, cn_test_callback);
if (err) {
cn_del_callback(&cn_test_id);
goto err_out;
}
timer_setup(&cn_test_timer, cn_test_timer_func, 0);
mod_timer(&cn_test_timer, jiffies + msecs_to_jiffies(1000));
pr_info("initialized with id={%u.%u}\n",
cn_test_id.idx, cn_test_id.val);
return 0;
err_out:
if (nls && nls->sk_socket)
sock_release(nls->sk_socket);
return err;
}
static void cn_test_fini(void)
{
del_timer_sync(&cn_test_timer);
cn_del_callback(&cn_test_id);
cn_test_id.val--;
cn_del_callback(&cn_test_id);
if (nls && nls->sk_socket)
sock_release(nls->sk_socket);
}
module_init(cn_test_init);
module_exit(cn_test_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <[email protected]>");
MODULE_DESCRIPTION("Connector's test module");
| linux-master | samples/connector/cn_test.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/mm.h> /* for handle_mm_fault() */
#include <linux/ftrace.h>
#ifndef CONFIG_ARM64
#include <asm/asm-offsets.h>
#endif
extern void my_direct_func(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, struct pt_regs *regs);
void my_direct_func(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, struct pt_regs *regs)
{
trace_printk("handle mm fault vma=%p address=%lx flags=%x regs=%p\n",
vma, address, flags, regs);
}
extern void my_tramp(void *);
#ifdef CONFIG_X86_64
#include <asm/ibt.h>
#include <asm/nospec-branch.h>
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
CALL_DEPTH_ACCOUNT
" pushq %rdi\n"
" pushq %rsi\n"
" pushq %rdx\n"
" pushq %rcx\n"
" call my_direct_func\n"
" popq %rcx\n"
" popq %rdx\n"
" popq %rsi\n"
" popq %rdi\n"
" leave\n"
ASM_RET
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_S390
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
" lgr %r1,%r15\n"
" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
" brasl %r14,my_direct_func\n"
" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" lgr %r1,%r0\n"
" br %r1\n"
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_S390 */
#ifdef CONFIG_ARM64
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
" hint 34\n" // bti c
" sub sp, sp, #48\n"
" stp x9, x30, [sp]\n"
" stp x0, x1, [sp, #16]\n"
" stp x2, x3, [sp, #32]\n"
" bl my_direct_func\n"
" ldp x30, x9, [sp]\n"
" ldp x0, x1, [sp, #16]\n"
" ldp x2, x3, [sp, #32]\n"
" add sp, sp, #48\n"
" ret x9\n"
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_ARM64 */
#ifdef CONFIG_LOONGARCH
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:\n"
" addi.d $sp, $sp, -48\n"
" st.d $a0, $sp, 0\n"
" st.d $a1, $sp, 8\n"
" st.d $a2, $sp, 16\n"
" st.d $t0, $sp, 24\n"
" st.d $ra, $sp, 32\n"
" bl my_direct_func\n"
" ld.d $a0, $sp, 0\n"
" ld.d $a1, $sp, 8\n"
" ld.d $a2, $sp, 16\n"
" ld.d $t0, $sp, 24\n"
" ld.d $ra, $sp, 32\n"
" addi.d $sp, $sp, 48\n"
" jr $t0\n"
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_LOONGARCH */
static struct ftrace_ops direct;
static int __init ftrace_direct_init(void)
{
ftrace_set_filter_ip(&direct, (unsigned long) handle_mm_fault, 0, 0);
return register_ftrace_direct(&direct, (unsigned long) my_tramp);
}
static void __exit ftrace_direct_exit(void)
{
unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);
}
module_init(ftrace_direct_init);
module_exit(ftrace_direct_exit);
MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("Another example use case of using register_ftrace_direct()");
MODULE_LICENSE("GPL");
| linux-master | samples/ftrace/ftrace-direct-too.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/trace.h>
#include <linux/trace_events.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
/*
* Any file that uses trace points, must include the header.
* But only one file, must include the header by defining
* CREATE_TRACE_POINTS first. This will make the C code that
* creates the handles for the trace points.
*/
#define CREATE_TRACE_POINTS
#include "sample-trace-array.h"
struct trace_array *tr;
static void mytimer_handler(struct timer_list *unused);
static struct task_struct *simple_tsk;
static void trace_work_fn(struct work_struct *work)
{
/*
* Disable tracing for event "sample_event".
*/
trace_array_set_clr_event(tr, "sample-subsystem", "sample_event",
false);
}
static DECLARE_WORK(trace_work, trace_work_fn);
/*
* mytimer: Timer setup to disable tracing for event "sample_event". This
* timer is only for the purposes of the sample module to demonstrate access of
* Ftrace instances from within kernel.
*/
static DEFINE_TIMER(mytimer, mytimer_handler);
static void mytimer_handler(struct timer_list *unused)
{
schedule_work(&trace_work);
}
static void simple_thread_func(int count)
{
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ);
/*
* Printing count value using trace_array_printk() - trace_printk()
* equivalent for the instance buffers.
*/
trace_array_printk(tr, _THIS_IP_, "trace_array_printk: count=%d\n",
count);
/*
* Tracepoint for event "sample_event". This will print the
* current value of count and current jiffies.
*/
trace_sample_event(count, jiffies);
}
static int simple_thread(void *arg)
{
int count = 0;
unsigned long delay = msecs_to_jiffies(5000);
/*
* Enable tracing for "sample_event".
*/
trace_array_set_clr_event(tr, "sample-subsystem", "sample_event", true);
/*
* Adding timer - mytimer. This timer will disable tracing after
* delay seconds.
*
*/
add_timer(&mytimer);
mod_timer(&mytimer, jiffies+delay);
while (!kthread_should_stop())
simple_thread_func(count++);
del_timer(&mytimer);
cancel_work_sync(&trace_work);
/*
* trace_array_put() decrements the reference counter associated with
* the trace array - "tr". We are done using the trace array, hence
* decrement the reference counter so that it can be destroyed using
* trace_array_destroy().
*/
trace_array_put(tr);
return 0;
}
static int __init sample_trace_array_init(void)
{
/*
* Return a pointer to the trace array with name "sample-instance" if it
* exists, else create a new trace array.
*
* NOTE: This function increments the reference counter
* associated with the trace array - "tr".
*/
tr = trace_array_get_by_name("sample-instance");
if (!tr)
return -1;
/*
* Allocate the context-specific per-CPU trace_printk() buffers if
* they haven't already been allocated.
*/
trace_printk_init_buffers();
simple_tsk = kthread_run(simple_thread, NULL, "sample-instance");
if (IS_ERR(simple_tsk)) {
trace_array_put(tr);
trace_array_destroy(tr);
return -1;
}
return 0;
}
static void __exit sample_trace_array_exit(void)
{
kthread_stop(simple_tsk);
/*
* We are unloading our module and no longer require the trace array.
* Remove/destroy "tr" using trace_array_destroy()
*/
trace_array_destroy(tr);
}
module_init(sample_trace_array_init);
module_exit(sample_trace_array_exit);
MODULE_AUTHOR("Divya Indi");
MODULE_DESCRIPTION("Sample module for kernel access to Ftrace instances");
MODULE_LICENSE("GPL");
| linux-master | samples/ftrace/sample-trace-array.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/ftrace.h>
#ifndef CONFIG_ARM64
#include <asm/asm-offsets.h>
#endif
extern void my_direct_func1(void);
extern void my_direct_func2(void);
void my_direct_func1(void)
{
trace_printk("my direct func1\n");
}
void my_direct_func2(void)
{
trace_printk("my direct func2\n");
}
extern void my_tramp1(void *);
extern void my_tramp2(void *);
static unsigned long my_ip = (unsigned long)schedule;
#ifdef CONFIG_X86_64
#include <asm/ibt.h>
#include <asm/nospec-branch.h>
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp1, @function\n"
" .globl my_tramp1\n"
" my_tramp1:"
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
CALL_DEPTH_ACCOUNT
" call my_direct_func1\n"
" leave\n"
" .size my_tramp1, .-my_tramp1\n"
ASM_RET
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
" my_tramp2:"
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
CALL_DEPTH_ACCOUNT
" call my_direct_func2\n"
" leave\n"
ASM_RET
" .size my_tramp2, .-my_tramp2\n"
" .popsection\n"
);
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_S390
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp1, @function\n"
" .globl my_tramp1\n"
" my_tramp1:"
" lgr %r1,%r15\n"
" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
" brasl %r14,my_direct_func1\n"
" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" lgr %r1,%r0\n"
" br %r1\n"
" .size my_tramp1, .-my_tramp1\n"
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
" my_tramp2:"
" lgr %r1,%r15\n"
" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
" brasl %r14,my_direct_func2\n"
" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" lgr %r1,%r0\n"
" br %r1\n"
" .size my_tramp2, .-my_tramp2\n"
" .popsection\n"
);
#endif /* CONFIG_S390 */
#ifdef CONFIG_ARM64
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp1, @function\n"
" .globl my_tramp1\n"
" my_tramp1:"
" hint 34\n" // bti c
" sub sp, sp, #16\n"
" stp x9, x30, [sp]\n"
" bl my_direct_func1\n"
" ldp x30, x9, [sp]\n"
" add sp, sp, #16\n"
" ret x9\n"
" .size my_tramp1, .-my_tramp1\n"
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
" my_tramp2:"
" hint 34\n" // bti c
" sub sp, sp, #16\n"
" stp x9, x30, [sp]\n"
" bl my_direct_func2\n"
" ldp x30, x9, [sp]\n"
" add sp, sp, #16\n"
" ret x9\n"
" .size my_tramp2, .-my_tramp2\n"
" .popsection\n"
);
#endif /* CONFIG_ARM64 */
#ifdef CONFIG_LOONGARCH
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp1, @function\n"
" .globl my_tramp1\n"
" my_tramp1:\n"
" addi.d $sp, $sp, -16\n"
" st.d $t0, $sp, 0\n"
" st.d $ra, $sp, 8\n"
" bl my_direct_func1\n"
" ld.d $t0, $sp, 0\n"
" ld.d $ra, $sp, 8\n"
" addi.d $sp, $sp, 16\n"
" jr $t0\n"
" .size my_tramp1, .-my_tramp1\n"
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
" my_tramp2:\n"
" addi.d $sp, $sp, -16\n"
" st.d $t0, $sp, 0\n"
" st.d $ra, $sp, 8\n"
" bl my_direct_func2\n"
" ld.d $t0, $sp, 0\n"
" ld.d $ra, $sp, 8\n"
" addi.d $sp, $sp, 16\n"
" jr $t0\n"
" .size my_tramp2, .-my_tramp2\n"
" .popsection\n"
);
#endif /* CONFIG_LOONGARCH */
static struct ftrace_ops direct;
static unsigned long my_tramp = (unsigned long)my_tramp1;
static unsigned long tramps[2] = {
(unsigned long)my_tramp1,
(unsigned long)my_tramp2,
};
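/*
 * Kthread body: every ~2 seconds, flip the direct call attached to my_ip
 * between my_tramp1 and my_tramp2 via modify_ftrace_direct(). Once a
 * modification fails, ret stays non-zero, WARN_ON_ONCE() fires once, and
 * the loop stops switching.
 */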
static int simple_thread(void *arg)
{
static int t;
int ret = 0;
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(2 * HZ);
if (ret)
continue;
t ^= 1;
ret = modify_ftrace_direct(&direct, tramps[t]);
if (!ret)
my_tramp = tramps[t];
WARN_ON_ONCE(ret);
}
return 0;
}
static struct task_struct *simple_tsk;
static int __init ftrace_direct_init(void)
{
int ret;
ftrace_set_filter_ip(&direct, (unsigned long) my_ip, 0, 0);
ret = register_ftrace_direct(&direct, my_tramp);
if (!ret)
simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
return ret;
}
static void __exit ftrace_direct_exit(void)
{
kthread_stop(simple_tsk);
unregister_ftrace_direct(&direct, my_tramp, true);
}
module_init(ftrace_direct_init);
module_exit(ftrace_direct_exit);
MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct()");
MODULE_LICENSE("GPL");
| linux-master | samples/ftrace/ftrace-direct-modify.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/sched.h> /* for wake_up_process() */
#include <linux/ftrace.h>
#ifndef CONFIG_ARM64
#include <asm/asm-offsets.h>
#endif
extern void my_direct_func(struct task_struct *p);
void my_direct_func(struct task_struct *p)
{
trace_printk("waking up %s-%d\n", p->comm, p->pid);
}
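/*
 * The trampolines below run in place of the patched ftrace call site in
 * wake_up_process(), so they must preserve that function's incoming
 * arguments: each architecture variant saves and restores the first
 * argument register (%rdi, x0, $a0, ...) along with the return-address
 * registers around the call to my_direct_func().
 */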
extern void my_tramp(void *);
#ifdef CONFIG_X86_64
#include <asm/ibt.h>
#include <asm/nospec-branch.h>
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
CALL_DEPTH_ACCOUNT
" pushq %rdi\n"
" call my_direct_func\n"
" popq %rdi\n"
" leave\n"
ASM_RET
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_S390
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
" lgr %r1,%r15\n"
" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
" brasl %r14,my_direct_func\n"
" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" lgr %r1,%r0\n"
" br %r1\n"
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_S390 */
#ifdef CONFIG_ARM64
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
" hint 34\n" // bti c
" sub sp, sp, #32\n"
" stp x9, x30, [sp]\n"
" str x0, [sp, #16]\n"
" bl my_direct_func\n"
" ldp x30, x9, [sp]\n"
" ldr x0, [sp, #16]\n"
" add sp, sp, #32\n"
" ret x9\n"
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_ARM64 */
#ifdef CONFIG_LOONGARCH
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:\n"
" addi.d $sp, $sp, -32\n"
" st.d $a0, $sp, 0\n"
" st.d $t0, $sp, 8\n"
" st.d $ra, $sp, 16\n"
" bl my_direct_func\n"
" ld.d $a0, $sp, 0\n"
" ld.d $t0, $sp, 8\n"
" ld.d $ra, $sp, 16\n"
" addi.d $sp, $sp, 32\n"
" jr $t0\n"
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_LOONGARCH */
static struct ftrace_ops direct;
static int __init ftrace_direct_init(void)
{
ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
return register_ftrace_direct(&direct, (unsigned long) my_tramp);
}
static void __exit ftrace_direct_exit(void)
{
unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);
}
module_init(ftrace_direct_init);
module_exit(ftrace_direct_exit);
MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("Example use case of using register_ftrace_direct()");
MODULE_LICENSE("GPL");
| linux-master | samples/ftrace/ftrace-direct.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/mm.h> /* for handle_mm_fault() */
#include <linux/ftrace.h>
#include <linux/sched/stat.h>
#ifndef CONFIG_ARM64
#include <asm/asm-offsets.h>
#endif
extern void my_direct_func(unsigned long ip);
void my_direct_func(unsigned long ip)
{
trace_printk("ip %lx\n", ip);
}
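/*
 * The "ip" reported here comes from the return address the ftrace call
 * site leaves behind (8(%rbp) on x86-64, x30 on arm64, $t0 on LoongArch,
 * %r0 on s390), so a single handler attached to several functions can
 * tell which tracee fired.
 */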
extern void my_tramp(void *);
#ifdef CONFIG_X86_64
#include <asm/ibt.h>
#include <asm/nospec-branch.h>
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
CALL_DEPTH_ACCOUNT
" pushq %rdi\n"
" movq 8(%rbp), %rdi\n"
" call my_direct_func\n"
" popq %rdi\n"
" leave\n"
ASM_RET
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_S390
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
" lgr %r1,%r15\n"
" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
" lgr %r2,%r0\n"
" brasl %r14,my_direct_func\n"
" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" lgr %r1,%r0\n"
" br %r1\n"
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_S390 */
#ifdef CONFIG_ARM64
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
" hint 34\n" // bti c
" sub sp, sp, #32\n"
" stp x9, x30, [sp]\n"
" str x0, [sp, #16]\n"
" mov x0, x30\n"
" bl my_direct_func\n"
" ldp x30, x9, [sp]\n"
" ldr x0, [sp, #16]\n"
" add sp, sp, #32\n"
" ret x9\n"
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_ARM64 */
#ifdef CONFIG_LOONGARCH
#include <asm/asm.h>
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:\n"
" addi.d $sp, $sp, -32\n"
" st.d $a0, $sp, 0\n"
" st.d $t0, $sp, 8\n"
" st.d $ra, $sp, 16\n"
" move $a0, $t0\n"
" bl my_direct_func\n"
" ld.d $a0, $sp, 0\n"
" ld.d $t0, $sp, 8\n"
" ld.d $ra, $sp, 16\n"
" addi.d $sp, $sp, 32\n"
" jr $t0\n"
" .size my_tramp, .-my_tramp\n"
" .popsection\n"
);
#endif /* CONFIG_LOONGARCH */
static struct ftrace_ops direct;
static int __init ftrace_direct_multi_init(void)
{
ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);
return register_ftrace_direct(&direct, (unsigned long) my_tramp);
}
static void __exit ftrace_direct_multi_exit(void)
{
unregister_ftrace_direct(&direct, (unsigned long) my_tramp, true);
}
module_init(ftrace_direct_multi_init);
module_exit(ftrace_direct_multi_exit);
MODULE_AUTHOR("Jiri Olsa");
MODULE_DESCRIPTION("Example use case of using register_ftrace_direct_multi()");
MODULE_LICENSE("GPL");
| linux-master | samples/ftrace/ftrace-direct-multi.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/ftrace.h>
#ifndef CONFIG_ARM64
#include <asm/asm-offsets.h>
#endif
extern void my_direct_func1(unsigned long ip);
extern void my_direct_func2(unsigned long ip);
void my_direct_func1(unsigned long ip)
{
trace_printk("my direct func1 ip %lx\n", ip);
}
void my_direct_func2(unsigned long ip)
{
trace_printk("my direct func2 ip %lx\n", ip);
}
extern void my_tramp1(void *);
extern void my_tramp2(void *);
#ifdef CONFIG_X86_64
#include <asm/ibt.h>
#include <asm/nospec-branch.h>
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp1, @function\n"
" .globl my_tramp1\n"
" my_tramp1:"
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
CALL_DEPTH_ACCOUNT
" pushq %rdi\n"
" movq 8(%rbp), %rdi\n"
" call my_direct_func1\n"
" popq %rdi\n"
" leave\n"
ASM_RET
" .size my_tramp1, .-my_tramp1\n"
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
" my_tramp2:"
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
CALL_DEPTH_ACCOUNT
" pushq %rdi\n"
" movq 8(%rbp), %rdi\n"
" call my_direct_func2\n"
" popq %rdi\n"
" leave\n"
ASM_RET
" .size my_tramp2, .-my_tramp2\n"
" .popsection\n"
);
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_S390
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp1, @function\n"
" .globl my_tramp1\n"
" my_tramp1:"
" lgr %r1,%r15\n"
" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
" lgr %r2,%r0\n"
" brasl %r14,my_direct_func1\n"
" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" lgr %r1,%r0\n"
" br %r1\n"
" .size my_tramp1, .-my_tramp1\n"
"\n"
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
" my_tramp2:"
" lgr %r1,%r15\n"
" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
" lgr %r2,%r0\n"
" brasl %r14,my_direct_func2\n"
" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
" lgr %r1,%r0\n"
" br %r1\n"
" .size my_tramp2, .-my_tramp2\n"
" .popsection\n"
);
#endif /* CONFIG_S390 */
#ifdef CONFIG_ARM64
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp1, @function\n"
" .globl my_tramp1\n"
" my_tramp1:"
" hint 34\n" // bti c
" sub sp, sp, #32\n"
" stp x9, x30, [sp]\n"
" str x0, [sp, #16]\n"
" mov x0, x30\n"
" bl my_direct_func1\n"
" ldp x30, x9, [sp]\n"
" ldr x0, [sp, #16]\n"
" add sp, sp, #32\n"
" ret x9\n"
" .size my_tramp1, .-my_tramp1\n"
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
" my_tramp2:"
" hint 34\n" // bti c
" sub sp, sp, #32\n"
" stp x9, x30, [sp]\n"
" str x0, [sp, #16]\n"
" mov x0, x30\n"
" bl my_direct_func2\n"
" ldp x30, x9, [sp]\n"
" ldr x0, [sp, #16]\n"
" add sp, sp, #32\n"
" ret x9\n"
" .size my_tramp2, .-my_tramp2\n"
" .popsection\n"
);
#endif /* CONFIG_ARM64 */
#ifdef CONFIG_LOONGARCH
#include <asm/asm.h>
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp1, @function\n"
" .globl my_tramp1\n"
" my_tramp1:\n"
" addi.d $sp, $sp, -32\n"
" st.d $a0, $sp, 0\n"
" st.d $t0, $sp, 8\n"
" st.d $ra, $sp, 16\n"
" move $a0, $t0\n"
" bl my_direct_func1\n"
" ld.d $a0, $sp, 0\n"
" ld.d $t0, $sp, 8\n"
" ld.d $ra, $sp, 16\n"
" addi.d $sp, $sp, 32\n"
" jr $t0\n"
" .size my_tramp1, .-my_tramp1\n"
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
" my_tramp2:\n"
" addi.d $sp, $sp, -32\n"
" st.d $a0, $sp, 0\n"
" st.d $t0, $sp, 8\n"
" st.d $ra, $sp, 16\n"
" move $a0, $t0\n"
" bl my_direct_func2\n"
" ld.d $a0, $sp, 0\n"
" ld.d $t0, $sp, 8\n"
" ld.d $ra, $sp, 16\n"
" addi.d $sp, $sp, 32\n"
" jr $t0\n"
" .size my_tramp2, .-my_tramp2\n"
" .popsection\n"
);
#endif /* CONFIG_LOONGARCH */
static unsigned long my_tramp = (unsigned long)my_tramp1;
static unsigned long tramps[2] = {
(unsigned long)my_tramp1,
(unsigned long)my_tramp2,
};
static struct ftrace_ops direct;
static int simple_thread(void *arg)
{
static int t;
int ret = 0;
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(2 * HZ);
if (ret)
continue;
t ^= 1;
ret = modify_ftrace_direct(&direct, tramps[t]);
if (!ret)
my_tramp = tramps[t];
WARN_ON_ONCE(ret);
}
return 0;
}
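/*
 * As in ftrace-direct-modify.c, the trampoline is swapped while the ops is
 * live; modify_ftrace_direct() manages the transition so each traced call
 * lands in either the old or the new trampoline, never in limbo.
 */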
static struct task_struct *simple_tsk;
static int __init ftrace_direct_multi_init(void)
{
int ret;
ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);
ret = register_ftrace_direct(&direct, my_tramp);
if (!ret)
simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
return ret;
}
static void __exit ftrace_direct_multi_exit(void)
{
kthread_stop(simple_tsk);
unregister_ftrace_direct(&direct, my_tramp, true);
}
module_init(ftrace_direct_multi_init);
module_exit(ftrace_direct_multi_exit);
MODULE_AUTHOR("Jiri Olsa");
MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct()");
MODULE_LICENSE("GPL");
| linux-master | samples/ftrace/ftrace-direct-multi-modify.c |
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ftrace.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <asm/barrier.h>
/*
 * Arbitrary value chosen to be large enough to minimize timing noise but
 * small enough for the test to complete quickly.
*/
static unsigned int nr_function_calls = 100000;
module_param(nr_function_calls, uint, 0);
MODULE_PARM_DESC(nr_function_calls, "How many times to call the relevant tracee");
/*
* The number of ops associated with a call site affects whether a tracer can
* be called directly or whether it's necessary to go via the list func, which
* can be significantly more expensive.
*/
static unsigned int nr_ops_relevant = 1;
module_param(nr_ops_relevant, uint, 0);
MODULE_PARM_DESC(nr_ops_relevant, "How many ftrace_ops to associate with the relevant tracee");
/*
* On architectures where all call sites share the same trampoline, having
* tracers enabled for distinct functions can force the use of the list func
* and incur overhead for all call sites.
*/
static unsigned int nr_ops_irrelevant;
module_param(nr_ops_irrelevant, uint, 0);
MODULE_PARM_DESC(nr_ops_irrelevant, "How many ftrace_ops to associate with the irrelevant tracee");
/*
* On architectures with DYNAMIC_FTRACE_WITH_REGS, saving the full pt_regs can
* be more expensive than only saving the minimal necessary regs.
*/
static bool save_regs;
module_param(save_regs, bool, 0);
MODULE_PARM_DESC(save_regs, "Register ops with FTRACE_OPS_FL_SAVE_REGS (save all registers in the trampoline)");
static bool assist_recursion;
module_param(assist_recursion, bool, 0);
MODULE_PARM_DESC(assist_recursion, "Register ops with FTRACE_OPS_FL_RECURSION");
static bool assist_rcu;
module_param(assist_rcu, bool, 0);
MODULE_PARM_DESC(assist_rcu, "Register ops with FTRACE_OPS_FL_RCU");
/*
 * By default, a trivial tracer is used which immediately returns to minimize
 * overhead. Sometimes a consistency check using a more expensive tracer is
 * desirable.
*/
static bool check_count;
module_param(check_count, bool, 0);
MODULE_PARM_DESC(check_count, "Check that tracers are called the expected number of times\n");
/*
* Usually it's not interesting to leave the ops registered after the test
* runs, but sometimes it can be useful to leave them registered so that they
* can be inspected through the tracefs 'enabled_functions' file.
*/
static bool persist;
module_param(persist, bool, 0);
MODULE_PARM_DESC(persist, "Successfully load module and leave ftrace ops registered after test completes\n");
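/*
 * Example invocation (illustrative), stressing the list-func slow path by
 * stacking multiple ops on the same tracee:
 *
 *   # insmod ftrace-ops.ko nr_function_calls=1000000 nr_ops_relevant=2
 *
 * Unless persist=1 is given, the module prints its results and then
 * intentionally fails to load, so no manual unload is needed.
 */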
/*
* Marked as noinline to ensure that an out-of-line traceable copy is
* generated by the compiler.
*
* The barrier() ensures the compiler won't elide calls by determining there
* are no side-effects.
*/
static noinline void tracee_relevant(void)
{
barrier();
}
/*
* Marked as noinline to ensure that an out-of-line traceable copy is
* generated by the compiler.
*
* The barrier() ensures the compiler won't elide calls by determining there
* are no side-effects.
*/
static noinline void tracee_irrelevant(void)
{
barrier();
}
struct sample_ops {
struct ftrace_ops ops;
unsigned int count;
};
static void ops_func_nop(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op,
struct ftrace_regs *fregs)
{
/* do nothing */
}
static void ops_func_count(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op,
struct ftrace_regs *fregs)
{
struct sample_ops *self;
self = container_of(op, struct sample_ops, ops);
self->count++;
}
static struct sample_ops *ops_relevant;
static struct sample_ops *ops_irrelevant;
static struct sample_ops *ops_alloc_init(void *tracee, ftrace_func_t func,
unsigned long flags, int nr)
{
struct sample_ops *ops;
ops = kcalloc(nr, sizeof(*ops), GFP_KERNEL);
if (WARN_ON_ONCE(!ops))
return NULL;
for (unsigned int i = 0; i < nr; i++) {
ops[i].ops.func = func;
ops[i].ops.flags = flags;
WARN_ON_ONCE(ftrace_set_filter_ip(&ops[i].ops, (unsigned long)tracee, 0, 0));
WARN_ON_ONCE(register_ftrace_function(&ops[i].ops));
}
return ops;
}
static void ops_destroy(struct sample_ops *ops, int nr)
{
if (!ops)
return;
for (unsigned int i = 0; i < nr; i++) {
WARN_ON_ONCE(unregister_ftrace_function(&ops[i].ops));
ftrace_free_filter(&ops[i].ops);
}
kfree(ops);
}
static void ops_check(struct sample_ops *ops, int nr,
unsigned int expected_count)
{
if (!ops || !check_count)
return;
for (unsigned int i = 0; i < nr; i++) {
		if (ops[i].count == expected_count)
			continue;
		pr_warn("Counter called %u times (expected %u)\n",
			ops[i].count, expected_count);
}
}
static ftrace_func_t tracer_relevant = ops_func_nop;
static ftrace_func_t tracer_irrelevant = ops_func_nop;
static int __init ftrace_ops_sample_init(void)
{
unsigned long flags = 0;
ktime_t start, end;
u64 period;
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && save_regs) {
pr_info("this kernel does not support saving registers\n");
save_regs = false;
} else if (save_regs) {
flags |= FTRACE_OPS_FL_SAVE_REGS;
}
if (assist_recursion)
flags |= FTRACE_OPS_FL_RECURSION;
if (assist_rcu)
flags |= FTRACE_OPS_FL_RCU;
if (check_count) {
tracer_relevant = ops_func_count;
tracer_irrelevant = ops_func_count;
}
pr_info("registering:\n"
" relevant ops: %u\n"
" tracee: %ps\n"
" tracer: %ps\n"
" irrelevant ops: %u\n"
" tracee: %ps\n"
" tracer: %ps\n"
" saving registers: %s\n"
" assist recursion: %s\n"
" assist RCU: %s\n",
nr_ops_relevant, tracee_relevant, tracer_relevant,
nr_ops_irrelevant, tracee_irrelevant, tracer_irrelevant,
save_regs ? "YES" : "NO",
assist_recursion ? "YES" : "NO",
assist_rcu ? "YES" : "NO");
ops_relevant = ops_alloc_init(tracee_relevant, tracer_relevant,
flags, nr_ops_relevant);
ops_irrelevant = ops_alloc_init(tracee_irrelevant, tracer_irrelevant,
flags, nr_ops_irrelevant);
start = ktime_get();
for (unsigned int i = 0; i < nr_function_calls; i++)
tracee_relevant();
end = ktime_get();
ops_check(ops_relevant, nr_ops_relevant, nr_function_calls);
ops_check(ops_irrelevant, nr_ops_irrelevant, 0);
period = ktime_to_ns(ktime_sub(end, start));
pr_info("Attempted %u calls to %ps in %lluns (%lluns / call)\n",
nr_function_calls, tracee_relevant,
period, div_u64(period, nr_function_calls));
if (persist)
return 0;
ops_destroy(ops_relevant, nr_ops_relevant);
ops_destroy(ops_irrelevant, nr_ops_irrelevant);
/*
 * The benchmark completed successfully, but there's no reason to keep
 * the module around. Return an error so the user doesn't have to
* manually unload the module.
*/
return -EINVAL;
}
module_init(ftrace_ops_sample_init);
static void __exit ftrace_ops_sample_exit(void)
{
ops_destroy(ops_relevant, nr_ops_relevant);
ops_destroy(ops_irrelevant, nr_ops_irrelevant);
}
module_exit(ftrace_ops_sample_exit);
MODULE_AUTHOR("Mark Rutland");
MODULE_DESCRIPTION("Example of using custom ftrace_ops");
MODULE_LICENSE("GPL");
| linux-master | samples/ftrace/ftrace-ops.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2021, Collabora Ltd.
*/
#define _GNU_SOURCE
#include <errno.h>
#include <err.h>
#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/fanotify.h>
#include <sys/types.h>
#include <unistd.h>
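/*
 * Fallback definitions so this sample still builds against older uapi
 * headers that lack FAN_FS_ERROR; newer headers already provide them.
 */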
#ifndef FAN_FS_ERROR
#define FAN_FS_ERROR 0x00008000
#define FAN_EVENT_INFO_TYPE_ERROR 5
struct fanotify_event_info_error {
struct fanotify_event_info_header hdr;
__s32 error;
__u32 error_count;
};
#endif
#ifndef FILEID_INO32_GEN
#define FILEID_INO32_GEN 1
#endif
#ifndef FILEID_INVALID
#define FILEID_INVALID 0xff
#endif
static void print_fh(struct file_handle *fh)
{
int i;
uint32_t *h = (uint32_t *) fh->f_handle;
printf("\tfh: ");
for (i = 0; i < fh->handle_bytes; i++)
printf("%hhx", fh->f_handle[i]);
printf("\n");
printf("\tdecoded fh: ");
if (fh->handle_type == FILEID_INO32_GEN)
printf("inode=%u gen=%u\n", h[0], h[1]);
else if (fh->handle_type == FILEID_INVALID && !fh->handle_bytes)
printf("Type %d (Superblock error)\n", fh->handle_type);
else
printf("Type %d (Unknown)\n", fh->handle_type);
}
static void handle_notifications(char *buffer, int len)
{
struct fanotify_event_metadata *event =
(struct fanotify_event_metadata *) buffer;
struct fanotify_event_info_header *info;
struct fanotify_event_info_error *err;
struct fanotify_event_info_fid *fid;
int off;
for (; FAN_EVENT_OK(event, len); event = FAN_EVENT_NEXT(event, len)) {
if (event->mask != FAN_FS_ERROR) {
printf("unexpected FAN MARK: %llx\n",
(unsigned long long)event->mask);
goto next_event;
}
if (event->fd != FAN_NOFD) {
printf("Unexpected fd (!= FAN_NOFD)\n");
goto next_event;
}
printf("FAN_FS_ERROR (len=%d)\n", event->event_len);
		for (off = sizeof(*event); off < event->event_len;
off += info->len) {
info = (struct fanotify_event_info_header *)
((char *) event + off);
switch (info->info_type) {
case FAN_EVENT_INFO_TYPE_ERROR:
err = (struct fanotify_event_info_error *) info;
printf("\tGeneric Error Record: len=%d\n",
err->hdr.len);
printf("\terror: %d\n", err->error);
printf("\terror_count: %d\n", err->error_count);
break;
case FAN_EVENT_INFO_TYPE_FID:
fid = (struct fanotify_event_info_fid *) info;
printf("\tfsid: %x%x\n",
fid->fsid.val[0], fid->fsid.val[1]);
print_fh((struct file_handle *) &fid->handle);
break;
default:
printf("\tUnknown info type=%d len=%d:\n",
info->info_type, info->len);
}
}
next_event:
printf("---\n\n");
}
}
int main(int argc, char **argv)
{
int fd;
char buffer[BUFSIZ];
if (argc < 2) {
printf("Missing path argument\n");
return 1;
}
fd = fanotify_init(FAN_CLASS_NOTIF|FAN_REPORT_FID, O_RDONLY);
if (fd < 0)
errx(1, "fanotify_init");
if (fanotify_mark(fd, FAN_MARK_ADD|FAN_MARK_FILESYSTEM,
FAN_FS_ERROR, AT_FDCWD, argv[1])) {
errx(1, "fanotify_mark");
}
while (1) {
int n = read(fd, buffer, BUFSIZ);
if (n < 0)
errx(1, "read");
handle_notifications(buffer, n);
}
return 0;
}
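/*
 * Build/run sketch (illustrative):
 *
 *   $ gcc -o fs-monitor fs-monitor.c
 *   # ./fs-monitor /mnt
 *
 * Needs CAP_SYS_ADMIN and a kernel with FAN_FS_ERROR support; an event is
 * printed whenever the filesystem backing the given path reports an error.
 */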
| linux-master | samples/fanotify/fs-monitor.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Mediated virtual PCI serial host device driver
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Author: Neo Jia <[email protected]>
* Kirti Wankhede <[email protected]>
*
* Sample driver that creates mdev device that simulates serial port over PCI
* card.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <linux/serial.h>
#include <uapi/linux/serial_reg.h>
#include <linux/eventfd.h>
/*
* #defines
*/
#define VERSION_STRING "0.1"
#define DRIVER_AUTHOR "NVIDIA Corporation"
#define MTTY_CLASS_NAME "mtty"
#define MTTY_NAME "mtty"
#define MTTY_STRING_LEN 16
#define MTTY_CONFIG_SPACE_SIZE 0xff
#define MTTY_IO_BAR_SIZE 0x8
#define MTTY_MMIO_BAR_SIZE 0x100000
#define STORE_LE16(addr, val) (*(u16 *)addr = val)
#define STORE_LE32(addr, val) (*(u32 *)addr = val)
#define MAX_FIFO_SIZE 16
#define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
#define MTTY_VFIO_PCI_OFFSET_SHIFT 40
#define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
#define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
#define MTTY_VFIO_PCI_OFFSET_MASK \
(((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
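/*
 * VFIO region offsets encode the region index in bits 40 and up and the
 * offset within the region in the low 40 bits, so a single read/write
 * handler can demultiplex accesses to config space and the BARs.
 */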
#define MAX_MTTYS 24
/*
* Global Structures
*/
static struct mtty_dev {
dev_t vd_devt;
struct class *vd_class;
struct cdev vd_cdev;
struct idr vd_idr;
struct device dev;
struct mdev_parent parent;
} mtty_dev;
struct mdev_region_info {
u64 start;
u64 phys_start;
u32 size;
u64 vfio_offset;
};
#if defined(DEBUG_REGS)
static const char *wr_reg[] = {
"TX",
"IER",
"FCR",
"LCR",
"MCR",
"LSR",
"MSR",
"SCR"
};
static const char *rd_reg[] = {
"RX",
"IER",
"IIR",
"LCR",
"MCR",
"LSR",
"MSR",
"SCR"
};
#endif
/* loop back buffer */
struct rxtx {
u8 fifo[MAX_FIFO_SIZE];
u8 head, tail;
u8 count;
};
struct serial_port {
u8 uart_reg[8]; /* 8 registers */
struct rxtx rxtx; /* loop back buffer */
bool dlab;
bool overrun;
u16 divisor;
u8 fcr; /* FIFO control register */
u8 max_fifo_size;
u8 intr_trigger_level; /* interrupt trigger level */
};
/* State of each mdev device */
struct mdev_state {
struct vfio_device vdev;
int irq_fd;
struct eventfd_ctx *intx_evtfd;
struct eventfd_ctx *msi_evtfd;
int irq_index;
u8 *vconfig;
struct mutex ops_lock;
struct mdev_device *mdev;
struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
u32 bar_mask[VFIO_PCI_NUM_REGIONS];
struct list_head next;
struct serial_port s[2];
struct mutex rxtx_lock;
struct vfio_device_info dev_info;
int nr_ports;
};
static struct mtty_type {
struct mdev_type type;
int nr_ports;
} mtty_types[2] = {
{ .nr_ports = 1, .type.sysfs_name = "1",
.type.pretty_name = "Single port serial" },
{ .nr_ports = 2, .type.sysfs_name = "2",
.type.pretty_name = "Dual port serial" },
};
static struct mdev_type *mtty_mdev_types[] = {
&mtty_types[0].type,
&mtty_types[1].type,
};
static atomic_t mdev_avail_ports = ATOMIC_INIT(MAX_MTTYS);
static const struct file_operations vd_fops = {
.owner = THIS_MODULE,
};
static const struct vfio_device_ops mtty_dev_ops;
/* function prototypes */
static int mtty_trigger_interrupt(struct mdev_state *mdev_state);
/* Helper functions */
static void dump_buffer(u8 *buf, uint32_t count)
{
#if defined(DEBUG)
int i;
pr_info("Buffer:\n");
for (i = 0; i < count; i++) {
pr_info("%2x ", *(buf + i));
if ((i + 1) % 16 == 0)
pr_info("\n");
}
#endif
}
static void mtty_create_config_space(struct mdev_state *mdev_state)
{
/* PCI dev ID */
STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);
/* Control: I/O+, Mem-, BusMaster- */
STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);
/* Status: capabilities list absent */
STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);
/* Rev ID */
mdev_state->vconfig[0x8] = 0x10;
/* programming interface class : 16550-compatible serial controller */
mdev_state->vconfig[0x9] = 0x02;
/* Sub class : 00 */
mdev_state->vconfig[0xa] = 0x00;
/* Base class : Simple Communication controllers */
mdev_state->vconfig[0xb] = 0x07;
/* base address registers */
/* BAR0: IO space */
STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;
if (mdev_state->nr_ports == 2) {
/* BAR1: IO space */
STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
}
/* Subsystem ID */
STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);
mdev_state->vconfig[0x34] = 0x00; /* Cap Ptr */
mdev_state->vconfig[0x3d] = 0x01; /* interrupt pin (INTA#) */
/* Vendor specific data */
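	/* The bytes at 0x60-0x6e below spell "PCI Serial/UART" in ASCII. */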
mdev_state->vconfig[0x40] = 0x23;
mdev_state->vconfig[0x43] = 0x80;
mdev_state->vconfig[0x44] = 0x23;
mdev_state->vconfig[0x48] = 0x23;
mdev_state->vconfig[0x4c] = 0x23;
mdev_state->vconfig[0x60] = 0x50;
mdev_state->vconfig[0x61] = 0x43;
mdev_state->vconfig[0x62] = 0x49;
mdev_state->vconfig[0x63] = 0x20;
mdev_state->vconfig[0x64] = 0x53;
mdev_state->vconfig[0x65] = 0x65;
mdev_state->vconfig[0x66] = 0x72;
mdev_state->vconfig[0x67] = 0x69;
mdev_state->vconfig[0x68] = 0x61;
mdev_state->vconfig[0x69] = 0x6c;
mdev_state->vconfig[0x6a] = 0x2f;
mdev_state->vconfig[0x6b] = 0x55;
mdev_state->vconfig[0x6c] = 0x41;
mdev_state->vconfig[0x6d] = 0x52;
mdev_state->vconfig[0x6e] = 0x54;
}
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
u8 *buf, u32 count)
{
u32 cfg_addr, bar_mask, bar_index = 0;
switch (offset) {
case 0x04: /* device control */
case 0x06: /* device status */
/* do nothing */
break;
case 0x3c: /* interrupt line */
mdev_state->vconfig[0x3c] = buf[0];
break;
case 0x3d:
/*
* Interrupt Pin is hardwired to INTA.
* This field is write protected by hardware
*/
break;
case 0x10: /* BAR0 */
case 0x14: /* BAR1 */
if (offset == 0x10)
bar_index = 0;
else if (offset == 0x14)
bar_index = 1;
if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
STORE_LE32(&mdev_state->vconfig[offset], 0);
break;
}
cfg_addr = *(u32 *)buf;
pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);
if (cfg_addr == 0xffffffff) {
bar_mask = mdev_state->bar_mask[bar_index];
cfg_addr = (cfg_addr & bar_mask);
}
cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
break;
case 0x18: /* BAR2 */
case 0x1c: /* BAR3 */
case 0x20: /* BAR4 */
STORE_LE32(&mdev_state->vconfig[offset], 0);
break;
default:
pr_info("PCI config write @0x%x of %d bytes not handled\n",
offset, count);
break;
}
}
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
u16 offset, u8 *buf, u32 count)
{
u8 data = *buf;
/* Handle data written by guest */
switch (offset) {
case UART_TX:
/* if DLAB set, data is LSB of divisor */
if (mdev_state->s[index].dlab) {
mdev_state->s[index].divisor |= data;
break;
}
mutex_lock(&mdev_state->rxtx_lock);
/* save in TX buffer */
if (mdev_state->s[index].rxtx.count <
mdev_state->s[index].max_fifo_size) {
mdev_state->s[index].rxtx.fifo[
mdev_state->s[index].rxtx.head] = data;
mdev_state->s[index].rxtx.count++;
CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
mdev_state->s[index].overrun = false;
/*
* Trigger interrupt if receive data interrupt is
* enabled and fifo reached trigger level
*/
if ((mdev_state->s[index].uart_reg[UART_IER] &
UART_IER_RDI) &&
(mdev_state->s[index].rxtx.count ==
mdev_state->s[index].intr_trigger_level)) {
/* trigger interrupt */
#if defined(DEBUG_INTR)
pr_err("Serial port %d: Fifo level trigger\n",
index);
#endif
mtty_trigger_interrupt(mdev_state);
}
} else {
#if defined(DEBUG_INTR)
pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
mdev_state->s[index].overrun = true;
/*
* Trigger interrupt if receiver line status interrupt
* is enabled
*/
if (mdev_state->s[index].uart_reg[UART_IER] &
UART_IER_RLSI)
mtty_trigger_interrupt(mdev_state);
}
mutex_unlock(&mdev_state->rxtx_lock);
break;
case UART_IER:
/* if DLAB set, data is MSB of divisor */
if (mdev_state->s[index].dlab)
mdev_state->s[index].divisor |= (u16)data << 8;
else {
mdev_state->s[index].uart_reg[offset] = data;
mutex_lock(&mdev_state->rxtx_lock);
if ((data & UART_IER_THRI) &&
(mdev_state->s[index].rxtx.head ==
mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
pr_err("Serial port %d: IER_THRI write\n",
index);
#endif
mtty_trigger_interrupt(mdev_state);
}
mutex_unlock(&mdev_state->rxtx_lock);
}
break;
case UART_FCR:
mdev_state->s[index].fcr = data;
mutex_lock(&mdev_state->rxtx_lock);
if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
/* clear loop back FIFO */
mdev_state->s[index].rxtx.count = 0;
mdev_state->s[index].rxtx.head = 0;
mdev_state->s[index].rxtx.tail = 0;
}
mutex_unlock(&mdev_state->rxtx_lock);
switch (data & UART_FCR_TRIGGER_MASK) {
case UART_FCR_TRIGGER_1:
mdev_state->s[index].intr_trigger_level = 1;
break;
case UART_FCR_TRIGGER_4:
mdev_state->s[index].intr_trigger_level = 4;
break;
case UART_FCR_TRIGGER_8:
mdev_state->s[index].intr_trigger_level = 8;
break;
case UART_FCR_TRIGGER_14:
mdev_state->s[index].intr_trigger_level = 14;
break;
}
/*
 * Otherwise set the trigger level to 1, or implement a timer with a
 * four-character timeout that, on expiry, sets the "received data
 * timeout" indication in the IIR register.
*/
mdev_state->s[index].intr_trigger_level = 1;
if (data & UART_FCR_ENABLE_FIFO)
mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
else {
mdev_state->s[index].max_fifo_size = 1;
mdev_state->s[index].intr_trigger_level = 1;
}
break;
case UART_LCR:
if (data & UART_LCR_DLAB) {
mdev_state->s[index].dlab = true;
mdev_state->s[index].divisor = 0;
} else
mdev_state->s[index].dlab = false;
mdev_state->s[index].uart_reg[offset] = data;
break;
case UART_MCR:
mdev_state->s[index].uart_reg[offset] = data;
if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
(data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
mtty_trigger_interrupt(mdev_state);
}
if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
(data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
mtty_trigger_interrupt(mdev_state);
}
break;
case UART_LSR:
case UART_MSR:
/* do nothing */
break;
case UART_SCR:
mdev_state->s[index].uart_reg[offset] = data;
break;
default:
break;
}
}
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
u16 offset, u8 *buf, u32 count)
{
/* Handle read requests by guest */
switch (offset) {
case UART_RX:
/* if DLAB set, data is LSB of divisor */
if (mdev_state->s[index].dlab) {
*buf = (u8)mdev_state->s[index].divisor;
break;
}
mutex_lock(&mdev_state->rxtx_lock);
/* return data in tx buffer */
if (mdev_state->s[index].rxtx.head !=
mdev_state->s[index].rxtx.tail) {
*buf = mdev_state->s[index].rxtx.fifo[
mdev_state->s[index].rxtx.tail];
mdev_state->s[index].rxtx.count--;
CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
}
if (mdev_state->s[index].rxtx.head ==
mdev_state->s[index].rxtx.tail) {
/*
* Trigger interrupt if tx buffer empty interrupt is
* enabled and fifo is empty
*/
#if defined(DEBUG_INTR)
pr_err("Serial port %d: Buffer Empty\n", index);
#endif
if (mdev_state->s[index].uart_reg[UART_IER] &
UART_IER_THRI)
mtty_trigger_interrupt(mdev_state);
}
mutex_unlock(&mdev_state->rxtx_lock);
break;
case UART_IER:
if (mdev_state->s[index].dlab) {
*buf = (u8)(mdev_state->s[index].divisor >> 8);
break;
}
*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
break;
case UART_IIR:
{
u8 ier = mdev_state->s[index].uart_reg[UART_IER];
*buf = 0;
mutex_lock(&mdev_state->rxtx_lock);
/* Interrupt priority 1: Parity, overrun, framing or break */
if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
*buf |= UART_IIR_RLSI;
/* Interrupt priority 2: Fifo trigger level reached */
if ((ier & UART_IER_RDI) &&
(mdev_state->s[index].rxtx.count >=
mdev_state->s[index].intr_trigger_level))
*buf |= UART_IIR_RDI;
		/* Interrupt priority 3: transmitter holding register empty */
if ((ier & UART_IER_THRI) &&
(mdev_state->s[index].rxtx.head ==
mdev_state->s[index].rxtx.tail))
*buf |= UART_IIR_THRI;
		/* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
if ((ier & UART_IER_MSI) &&
(mdev_state->s[index].uart_reg[UART_MCR] &
(UART_MCR_RTS | UART_MCR_DTR)))
*buf |= UART_IIR_MSI;
/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
if (*buf == 0)
*buf = UART_IIR_NO_INT;
/* set bit 6 & 7 to be 16550 compatible */
*buf |= 0xC0;
mutex_unlock(&mdev_state->rxtx_lock);
}
break;
case UART_LCR:
case UART_MCR:
*buf = mdev_state->s[index].uart_reg[offset];
break;
case UART_LSR:
{
u8 lsr = 0;
mutex_lock(&mdev_state->rxtx_lock);
		/* at least one char in FIFO */
if (mdev_state->s[index].rxtx.head !=
mdev_state->s[index].rxtx.tail)
lsr |= UART_LSR_DR;
/* if FIFO overrun */
if (mdev_state->s[index].overrun)
lsr |= UART_LSR_OE;
		/* transmit FIFO empty and transmitter empty */
if (mdev_state->s[index].rxtx.head ==
mdev_state->s[index].rxtx.tail)
lsr |= UART_LSR_TEMT | UART_LSR_THRE;
mutex_unlock(&mdev_state->rxtx_lock);
*buf = lsr;
break;
}
case UART_MSR:
*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;
mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and the FIFO has space, set the CTS bit */
if (mdev_state->s[index].uart_reg[UART_MCR] &
UART_MCR_AFE) {
if (mdev_state->s[index].rxtx.count <
mdev_state->s[index].max_fifo_size)
*buf |= UART_MSR_CTS | UART_MSR_DCTS;
} else
*buf |= UART_MSR_CTS | UART_MSR_DCTS;
mutex_unlock(&mdev_state->rxtx_lock);
break;
case UART_SCR:
*buf = mdev_state->s[index].uart_reg[offset];
break;
default:
break;
}
}
static void mdev_read_base(struct mdev_state *mdev_state)
{
int index, pos;
u32 start_lo, start_hi;
u32 mem_type;
pos = PCI_BASE_ADDRESS_0;
for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {
if (!mdev_state->region_info[index].size)
continue;
start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
PCI_BASE_ADDRESS_MEM_MASK;
mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
PCI_BASE_ADDRESS_MEM_TYPE_MASK;
switch (mem_type) {
case PCI_BASE_ADDRESS_MEM_TYPE_64:
start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
pos += 4;
break;
case PCI_BASE_ADDRESS_MEM_TYPE_32:
case PCI_BASE_ADDRESS_MEM_TYPE_1M:
/* 1M mem BAR treated as 32-bit BAR */
default:
/* mem unknown type treated as 32-bit BAR */
start_hi = 0;
break;
}
pos += 4;
mdev_state->region_info[index].start = ((u64)start_hi << 32) |
start_lo;
}
}
static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count,
loff_t pos, bool is_write)
{
unsigned int index;
loff_t offset;
int ret = 0;
if (!buf)
return -EINVAL;
mutex_lock(&mdev_state->ops_lock);
index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
switch (index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
#if defined(DEBUG)
pr_info("%s: PCI config space %s at offset 0x%llx\n",
__func__, is_write ? "write" : "read", offset);
#endif
if (is_write) {
dump_buffer(buf, count);
handle_pci_cfg_write(mdev_state, offset, buf, count);
} else {
memcpy(buf, (mdev_state->vconfig + offset), count);
dump_buffer(buf, count);
}
break;
case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
if (!mdev_state->region_info[index].start)
mdev_read_base(mdev_state);
if (is_write) {
dump_buffer(buf, count);
#if defined(DEBUG_REGS)
pr_info("%s: BAR%d WR @0x%llx %s val:0x%02x dlab:%d\n",
__func__, index, offset, wr_reg[offset],
*buf, mdev_state->s[index].dlab);
#endif
handle_bar_write(index, mdev_state, offset, buf, count);
} else {
handle_bar_read(index, mdev_state, offset, buf, count);
dump_buffer(buf, count);
#if defined(DEBUG_REGS)
pr_info("%s: BAR%d RD @0x%llx %s val:0x%02x dlab:%d\n",
__func__, index, offset, rd_reg[offset],
*buf, mdev_state->s[index].dlab);
#endif
}
break;
default:
ret = -1;
goto accessfailed;
}
ret = count;
accessfailed:
mutex_unlock(&mdev_state->ops_lock);
return ret;
}
static int mtty_init_dev(struct vfio_device *vdev)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
struct mdev_device *mdev = to_mdev_device(vdev->dev);
struct mtty_type *type =
container_of(mdev->type, struct mtty_type, type);
int avail_ports = atomic_read(&mdev_avail_ports);
int ret;
do {
if (avail_ports < type->nr_ports)
return -ENOSPC;
} while (!atomic_try_cmpxchg(&mdev_avail_ports,
&avail_ports,
avail_ports - type->nr_ports));
mdev_state->nr_ports = type->nr_ports;
mdev_state->irq_index = -1;
mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
mutex_init(&mdev_state->rxtx_lock);
mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);
if (!mdev_state->vconfig) {
ret = -ENOMEM;
goto err_nr_ports;
}
mutex_init(&mdev_state->ops_lock);
mdev_state->mdev = mdev;
mtty_create_config_space(mdev_state);
return 0;
err_nr_ports:
atomic_add(type->nr_ports, &mdev_avail_ports);
return ret;
}
static int mtty_probe(struct mdev_device *mdev)
{
struct mdev_state *mdev_state;
int ret;
mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
&mtty_dev_ops);
if (IS_ERR(mdev_state))
return PTR_ERR(mdev_state);
ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
if (ret)
goto err_put_vdev;
dev_set_drvdata(&mdev->dev, mdev_state);
return 0;
err_put_vdev:
vfio_put_device(&mdev_state->vdev);
return ret;
}
static void mtty_release_dev(struct vfio_device *vdev)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
atomic_add(mdev_state->nr_ports, &mdev_avail_ports);
kfree(mdev_state->vconfig);
}
static void mtty_remove(struct mdev_device *mdev)
{
struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
vfio_unregister_group_dev(&mdev_state->vdev);
vfio_put_device(&mdev_state->vdev);
}
static int mtty_reset(struct mdev_state *mdev_state)
{
pr_info("%s: called\n", __func__);
return 0;
}
static ssize_t mtty_read(struct vfio_device *vdev, char __user *buf,
size_t count, loff_t *ppos)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
while (count) {
size_t filled;
if (count >= 4 && !(*ppos % 4)) {
u32 val;
ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 4;
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 2;
} else {
u8 val;
ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 1;
}
count -= filled;
done += filled;
*ppos += filled;
buf += filled;
}
return done;
read_err:
return -EFAULT;
}
static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf,
size_t count, loff_t *ppos)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
while (count) {
size_t filled;
if (count >= 4 && !(*ppos % 4)) {
u32 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 4;
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 2;
} else {
u8 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 1;
}
count -= filled;
done += filled;
*ppos += filled;
buf += filled;
}
return done;
write_err:
return -EFAULT;
}
static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
unsigned int index, unsigned int start,
unsigned int count, void *data)
{
int ret = 0;
mutex_lock(&mdev_state->ops_lock);
switch (index) {
case VFIO_PCI_INTX_IRQ_INDEX:
switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
case VFIO_IRQ_SET_ACTION_MASK:
case VFIO_IRQ_SET_ACTION_UNMASK:
break;
case VFIO_IRQ_SET_ACTION_TRIGGER:
{
if (flags & VFIO_IRQ_SET_DATA_NONE) {
pr_info("%s: disable INTx\n", __func__);
if (mdev_state->intx_evtfd)
eventfd_ctx_put(mdev_state->intx_evtfd);
break;
}
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int fd = *(int *)data;
if (fd > 0) {
struct eventfd_ctx *evt;
evt = eventfd_ctx_fdget(fd);
if (IS_ERR(evt)) {
ret = PTR_ERR(evt);
break;
}
mdev_state->intx_evtfd = evt;
mdev_state->irq_fd = fd;
mdev_state->irq_index = index;
break;
}
}
break;
}
}
break;
case VFIO_PCI_MSI_IRQ_INDEX:
switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
case VFIO_IRQ_SET_ACTION_MASK:
case VFIO_IRQ_SET_ACTION_UNMASK:
break;
case VFIO_IRQ_SET_ACTION_TRIGGER:
if (flags & VFIO_IRQ_SET_DATA_NONE) {
if (mdev_state->msi_evtfd)
eventfd_ctx_put(mdev_state->msi_evtfd);
pr_info("%s: disable MSI\n", __func__);
mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
break;
}
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int fd = *(int *)data;
struct eventfd_ctx *evt;
if (fd <= 0)
break;
if (mdev_state->msi_evtfd)
break;
evt = eventfd_ctx_fdget(fd);
if (IS_ERR(evt)) {
ret = PTR_ERR(evt);
break;
}
mdev_state->msi_evtfd = evt;
mdev_state->irq_fd = fd;
mdev_state->irq_index = index;
}
break;
}
break;
case VFIO_PCI_MSIX_IRQ_INDEX:
pr_info("%s: MSIX_IRQ\n", __func__);
break;
case VFIO_PCI_ERR_IRQ_INDEX:
pr_info("%s: ERR_IRQ\n", __func__);
break;
case VFIO_PCI_REQ_IRQ_INDEX:
pr_info("%s: REQ_IRQ\n", __func__);
break;
}
mutex_unlock(&mdev_state->ops_lock);
return ret;
}
static int mtty_trigger_interrupt(struct mdev_state *mdev_state)
{
int ret = -1;
if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
(!mdev_state->msi_evtfd))
return -EINVAL;
else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
(!mdev_state->intx_evtfd)) {
pr_info("%s: Intr eventfd not found\n", __func__);
return -EINVAL;
}
if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
ret = eventfd_signal(mdev_state->msi_evtfd, 1);
else
ret = eventfd_signal(mdev_state->intx_evtfd, 1);
#if defined(DEBUG_INTR)
pr_info("Intx triggered\n");
#endif
if (ret != 1)
pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);
return ret;
}
static int mtty_get_region_info(struct mdev_state *mdev_state,
struct vfio_region_info *region_info,
u16 *cap_type_id, void **cap_type)
{
unsigned int size = 0;
u32 bar_index;
bar_index = region_info->index;
if (bar_index >= VFIO_PCI_NUM_REGIONS)
return -EINVAL;
mutex_lock(&mdev_state->ops_lock);
switch (bar_index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
size = MTTY_CONFIG_SPACE_SIZE;
break;
case VFIO_PCI_BAR0_REGION_INDEX:
size = MTTY_IO_BAR_SIZE;
break;
case VFIO_PCI_BAR1_REGION_INDEX:
if (mdev_state->nr_ports == 2)
size = MTTY_IO_BAR_SIZE;
break;
default:
size = 0;
break;
}
mdev_state->region_info[bar_index].size = size;
mdev_state->region_info[bar_index].vfio_offset =
MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
region_info->size = size;
region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
region_info->flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
mutex_unlock(&mdev_state->ops_lock);
return 0;
}
static int mtty_get_irq_info(struct vfio_irq_info *irq_info)
{
switch (irq_info->index) {
case VFIO_PCI_INTX_IRQ_INDEX:
case VFIO_PCI_MSI_IRQ_INDEX:
case VFIO_PCI_REQ_IRQ_INDEX:
break;
default:
return -EINVAL;
}
irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
irq_info->count = 1;
if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
VFIO_IRQ_INFO_AUTOMASKED);
else
irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
return 0;
}
static int mtty_get_device_info(struct vfio_device_info *dev_info)
{
dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
return 0;
}
static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd,
unsigned long arg)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
int ret = 0;
unsigned long minsz;
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
{
struct vfio_device_info info;
minsz = offsetofend(struct vfio_device_info, num_irqs);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
ret = mtty_get_device_info(&info);
if (ret)
return ret;
memcpy(&mdev_state->dev_info, &info, sizeof(info));
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_GET_REGION_INFO:
{
struct vfio_region_info info;
u16 cap_type_id = 0;
void *cap_type = NULL;
minsz = offsetofend(struct vfio_region_info, offset);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
ret = mtty_get_region_info(mdev_state, &info, &cap_type_id,
&cap_type);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_GET_IRQ_INFO:
{
struct vfio_irq_info info;
minsz = offsetofend(struct vfio_irq_info, count);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if ((info.argsz < minsz) ||
(info.index >= mdev_state->dev_info.num_irqs))
return -EINVAL;
ret = mtty_get_irq_info(&info);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_SET_IRQS:
{
struct vfio_irq_set hdr;
u8 *data = NULL, *ptr = NULL;
size_t data_size = 0;
minsz = offsetofend(struct vfio_irq_set, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
ret = vfio_set_irqs_validate_and_prepare(&hdr,
mdev_state->dev_info.num_irqs,
VFIO_PCI_NUM_IRQS,
&data_size);
if (ret)
return ret;
if (data_size) {
ptr = data = memdup_user((void __user *)(arg + minsz),
data_size);
if (IS_ERR(data))
return PTR_ERR(data);
}
ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start,
hdr.count, data);
kfree(ptr);
return ret;
}
case VFIO_DEVICE_RESET:
return mtty_reset(mdev_state);
}
return -ENOTTY;
}
static ssize_t
sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(sample_mdev_dev);
static struct attribute *mdev_dev_attrs[] = {
&dev_attr_sample_mdev_dev.attr,
NULL,
};
static const struct attribute_group mdev_dev_group = {
.name = "vendor",
.attrs = mdev_dev_attrs,
};
static const struct attribute_group *mdev_dev_groups[] = {
&mdev_dev_group,
NULL,
};
static unsigned int mtty_get_available(struct mdev_type *mtype)
{
struct mtty_type *type = container_of(mtype, struct mtty_type, type);
return atomic_read(&mdev_avail_ports) / type->nr_ports;
}
static const struct vfio_device_ops mtty_dev_ops = {
.name = "vfio-mtty",
.init = mtty_init_dev,
.release = mtty_release_dev,
.read = mtty_read,
.write = mtty_write,
.ioctl = mtty_ioctl,
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
.detach_ioas = vfio_iommufd_emulated_detach_ioas,
};
static struct mdev_driver mtty_driver = {
.device_api = VFIO_DEVICE_API_PCI_STRING,
.driver = {
.name = "mtty",
.owner = THIS_MODULE,
.mod_name = KBUILD_MODNAME,
.dev_groups = mdev_dev_groups,
},
.probe = mtty_probe,
.remove = mtty_remove,
.get_available = mtty_get_available,
};
static void mtty_device_release(struct device *dev)
{
dev_dbg(dev, "mtty: released\n");
}
static int __init mtty_dev_init(void)
{
int ret = 0;
pr_info("mtty_dev: %s\n", __func__);
memset(&mtty_dev, 0, sizeof(mtty_dev));
idr_init(&mtty_dev.vd_idr);
ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1,
MTTY_NAME);
if (ret < 0) {
pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
return ret;
}
cdev_init(&mtty_dev.vd_cdev, &vd_fops);
cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1);
pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
ret = mdev_register_driver(&mtty_driver);
if (ret)
goto err_cdev;
mtty_dev.vd_class = class_create(MTTY_CLASS_NAME);
if (IS_ERR(mtty_dev.vd_class)) {
pr_err("Error: failed to register mtty_dev class\n");
ret = PTR_ERR(mtty_dev.vd_class);
goto err_driver;
}
mtty_dev.dev.class = mtty_dev.vd_class;
mtty_dev.dev.release = mtty_device_release;
dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
ret = device_register(&mtty_dev.dev);
if (ret)
goto err_put;
ret = mdev_register_parent(&mtty_dev.parent, &mtty_dev.dev,
&mtty_driver, mtty_mdev_types,
ARRAY_SIZE(mtty_mdev_types));
if (ret)
goto err_device;
return 0;
err_device:
device_del(&mtty_dev.dev);
err_put:
put_device(&mtty_dev.dev);
class_destroy(mtty_dev.vd_class);
err_driver:
mdev_unregister_driver(&mtty_driver);
err_cdev:
cdev_del(&mtty_dev.vd_cdev);
unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
return ret;
}
static void __exit mtty_dev_exit(void)
{
mtty_dev.dev.bus = NULL;
mdev_unregister_parent(&mtty_dev.parent);
device_unregister(&mtty_dev.dev);
idr_destroy(&mtty_dev.vd_idr);
mdev_unregister_driver(&mtty_driver);
cdev_del(&mtty_dev.vd_cdev);
unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
class_destroy(mtty_dev.vd_class);
mtty_dev.vd_class = NULL;
pr_info("mtty_dev: Unloaded!\n");
}
module_init(mtty_dev_init)
module_exit(mtty_dev_exit)
MODULE_LICENSE("GPL v2");
MODULE_INFO(supported, "Test driver that simulates a serial port over PCI");
MODULE_VERSION(VERSION_STRING);
MODULE_AUTHOR(DRIVER_AUTHOR);
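/*
 * Instantiation sketch (illustrative; the UUID is arbitrary):
 *
 *   # echo "83b8f4f2-509f-382f-3c1e-e6bfe0fa1001" > \
 *       /sys/devices/virtual/mtty/mtty/mdev_supported_types/mtty-2/create
 *
 * creates a dual-port instance that can then be handed to a VM via VFIO.
 */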
| linux-master | samples/vfio-mdev/mtty.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Mediated virtual PCI display host device driver
*
* See mdpy-defs.h for device specs
*
* (c) Gerd Hoffmann <[email protected]>
*
* based on mtty driver which is:
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Author: Neo Jia <[email protected]>
* Kirti Wankhede <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"
#define MDPY_NAME "mdpy"
#define MDPY_CLASS_NAME "mdpy"
#define MDPY_CONFIG_SPACE_SIZE 0xff
#define MDPY_MEMORY_BAR_OFFSET PAGE_SIZE
#define MDPY_DISPLAY_REGION 16
#define STORE_LE16(addr, val) (*(u16 *)addr = val)
#define STORE_LE32(addr, val) (*(u32 *)addr = val)
MODULE_LICENSE("GPL v2");
#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"
static struct mdpy_type {
struct mdev_type type;
u32 format;
u32 bytepp;
u32 width;
u32 height;
} mdpy_types[] = {
{
.type.sysfs_name = MDPY_TYPE_1,
.type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_1,
.format = DRM_FORMAT_XRGB8888,
.bytepp = 4,
.width = 640,
.height = 480,
}, {
.type.sysfs_name = MDPY_TYPE_2,
.type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_2,
.format = DRM_FORMAT_XRGB8888,
.bytepp = 4,
.width = 1024,
.height = 768,
}, {
.type.sysfs_name = MDPY_TYPE_3,
.type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_3,
.format = DRM_FORMAT_XRGB8888,
.bytepp = 4,
.width = 1920,
.height = 1080,
},
};
static struct mdev_type *mdpy_mdev_types[] = {
&mdpy_types[0].type,
&mdpy_types[1].type,
&mdpy_types[2].type,
};
static dev_t mdpy_devt;
static struct class *mdpy_class;
static struct cdev mdpy_cdev;
static struct device mdpy_dev;
static struct mdev_parent mdpy_parent;
static const struct vfio_device_ops mdpy_dev_ops;
/* State of each mdev device */
struct mdev_state {
struct vfio_device vdev;
u8 *vconfig;
u32 bar_mask;
struct mutex ops_lock;
struct mdev_device *mdev;
struct vfio_device_info dev_info;
const struct mdpy_type *type;
u32 memsize;
void *memblk;
};
static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
MDPY_PCI_VENDOR_ID);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
MDPY_PCI_DEVICE_ID);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
MDPY_PCI_SUBVENDOR_ID);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
MDPY_PCI_SUBDEVICE_ID);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
PCI_STATUS_CAP_LIST);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
PCI_CLASS_DISPLAY_OTHER);
mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;
STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_32 |
PCI_BASE_ADDRESS_MEM_PREFETCH);
mdev_state->bar_mask = ~(mdev_state->memsize) + 1;
/* vendor specific capability for the config registers */
mdev_state->vconfig[PCI_CAPABILITY_LIST] = MDPY_VENDORCAP_OFFSET;
mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
mdev_state->type->format);
STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
mdev_state->type->width);
STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
mdev_state->type->height);
}
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
char *buf, u32 count)
{
struct device *dev = mdev_dev(mdev_state->mdev);
u32 cfg_addr;
switch (offset) {
case PCI_BASE_ADDRESS_0:
cfg_addr = *(u32 *)buf;
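		/*
		 * Writing all-ones is the standard PCI BAR-sizing probe: the
		 * device answers with its size mask so the guest can derive
		 * the BAR length.
		 */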
if (cfg_addr == 0xffffffff) {
cfg_addr = (cfg_addr & mdev_state->bar_mask);
} else {
cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
if (cfg_addr)
dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
}
cfg_addr |= (mdev_state->vconfig[offset] &
~PCI_BASE_ADDRESS_MEM_MASK);
STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
break;
}
}
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
size_t count, loff_t pos, bool is_write)
{
int ret = 0;
mutex_lock(&mdev_state->ops_lock);
if (pos < MDPY_CONFIG_SPACE_SIZE) {
if (is_write)
handle_pci_cfg_write(mdev_state, pos, buf, count);
else
memcpy(buf, (mdev_state->vconfig + pos), count);
} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
(pos + count <=
MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
pos -= MDPY_MEMORY_BAR_OFFSET;
if (is_write)
memcpy(mdev_state->memblk, buf, count);
else
memcpy(buf, mdev_state->memblk, count);
} else {
dev_info(mdev_state->vdev.dev,
"%s: %s @0x%llx (unhandled)\n", __func__,
is_write ? "WR" : "RD", pos);
ret = -1;
goto accessfailed;
}
ret = count;
accessfailed:
mutex_unlock(&mdev_state->ops_lock);
return ret;
}
static int mdpy_reset(struct mdev_state *mdev_state)
{
u32 stride, i;
/* initialize with gray gradient */
stride = mdev_state->type->width * mdev_state->type->bytepp;
for (i = 0; i < mdev_state->type->height; i++)
memset(mdev_state->memblk + i * stride,
i * 255 / mdev_state->type->height,
stride);
return 0;
}
static int mdpy_init_dev(struct vfio_device *vdev)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
struct mdev_device *mdev = to_mdev_device(vdev->dev);
const struct mdpy_type *type =
container_of(mdev->type, struct mdpy_type, type);
u32 fbsize;
int ret = -ENOMEM;
mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
if (!mdev_state->vconfig)
return ret;
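	/* PCI BAR sizes must be a power of two; round the framebuffer up. */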
fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);
mdev_state->memblk = vmalloc_user(fbsize);
if (!mdev_state->memblk)
goto out_vconfig;
mutex_init(&mdev_state->ops_lock);
mdev_state->mdev = mdev;
mdev_state->type = type;
mdev_state->memsize = fbsize;
mdpy_create_config_space(mdev_state);
mdpy_reset(mdev_state);
dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->type.pretty_name,
type->width, type->height);
return 0;
out_vconfig:
kfree(mdev_state->vconfig);
return ret;
}
static int mdpy_probe(struct mdev_device *mdev)
{
struct mdev_state *mdev_state;
int ret;
mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
&mdpy_dev_ops);
if (IS_ERR(mdev_state))
return PTR_ERR(mdev_state);
ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
if (ret)
goto err_put_vdev;
dev_set_drvdata(&mdev->dev, mdev_state);
return 0;
err_put_vdev:
vfio_put_device(&mdev_state->vdev);
return ret;
}
static void mdpy_release_dev(struct vfio_device *vdev)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
vfree(mdev_state->memblk);
kfree(mdev_state->vconfig);
}
static void mdpy_remove(struct mdev_device *mdev)
{
struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
dev_info(&mdev->dev, "%s\n", __func__);
vfio_unregister_group_dev(&mdev_state->vdev);
vfio_put_device(&mdev_state->vdev);
}
static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
size_t count, loff_t *ppos)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
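	/*
	 * Split the access into naturally aligned 4-, 2- or 1-byte
	 * chunks, largest first, so config space and register accesses
	 * stay properly sized and aligned.
	 */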
while (count) {
size_t filled;
if (count >= 4 && !(*ppos % 4)) {
u32 val;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 4;
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 2;
} else {
u8 val;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 1;
}
count -= filled;
done += filled;
*ppos += filled;
buf += filled;
}
return done;
read_err:
return -EFAULT;
}
static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
size_t count, loff_t *ppos)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
while (count) {
size_t filled;
if (count >= 4 && !(*ppos % 4)) {
u32 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 4;
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 2;
} else {
u8 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 1;
}
count -= filled;
done += filled;
*ppos += filled;
buf += filled;
}
return done;
write_err:
return -EFAULT;
}
static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
return -EINVAL;
if (vma->vm_end < vma->vm_start)
return -EINVAL;
if (vma->vm_end - vma->vm_start > mdev_state->memsize)
return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
return -EINVAL;
return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}
static int mdpy_get_region_info(struct mdev_state *mdev_state,
struct vfio_region_info *region_info,
u16 *cap_type_id, void **cap_type)
{
if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
region_info->index != MDPY_DISPLAY_REGION)
return -EINVAL;
switch (region_info->index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
region_info->offset = 0;
region_info->size = MDPY_CONFIG_SPACE_SIZE;
region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE);
break;
case VFIO_PCI_BAR0_REGION_INDEX:
case MDPY_DISPLAY_REGION:
region_info->offset = MDPY_MEMORY_BAR_OFFSET;
region_info->size = mdev_state->memsize;
region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE |
VFIO_REGION_INFO_FLAG_MMAP);
break;
default:
region_info->size = 0;
region_info->offset = 0;
region_info->flags = 0;
}
return 0;
}
static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
{
irq_info->count = 0;
return 0;
}
static int mdpy_get_device_info(struct vfio_device_info *dev_info)
{
dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
return 0;
}
static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
struct vfio_device_gfx_plane_info *plane)
{
if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
VFIO_GFX_PLANE_TYPE_REGION))
return 0;
return -EINVAL;
}
if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
return -EINVAL;
plane->drm_format = mdev_state->type->format;
plane->width = mdev_state->type->width;
plane->height = mdev_state->type->height;
plane->stride = (mdev_state->type->width *
mdev_state->type->bytepp);
plane->size = mdev_state->memsize;
plane->region_index = MDPY_DISPLAY_REGION;
/* unused */
plane->drm_format_mod = 0;
plane->x_pos = 0;
plane->y_pos = 0;
plane->x_hot = 0;
plane->y_hot = 0;
return 0;
}
static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
unsigned long arg)
{
int ret = 0;
unsigned long minsz;
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
{
struct vfio_device_info info;
minsz = offsetofend(struct vfio_device_info, num_irqs);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
ret = mdpy_get_device_info(&info);
if (ret)
return ret;
memcpy(&mdev_state->dev_info, &info, sizeof(info));
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_GET_REGION_INFO:
{
struct vfio_region_info info;
u16 cap_type_id = 0;
void *cap_type = NULL;
minsz = offsetofend(struct vfio_region_info, offset);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id,
&cap_type);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_GET_IRQ_INFO:
{
struct vfio_irq_info info;
minsz = offsetofend(struct vfio_irq_info, count);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if ((info.argsz < minsz) ||
(info.index >= mdev_state->dev_info.num_irqs))
return -EINVAL;
ret = mdpy_get_irq_info(&info);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_QUERY_GFX_PLANE:
{
struct vfio_device_gfx_plane_info plane;
minsz = offsetofend(struct vfio_device_gfx_plane_info,
region_index);
if (copy_from_user(&plane, (void __user *)arg, minsz))
return -EFAULT;
if (plane.argsz < minsz)
return -EINVAL;
ret = mdpy_query_gfx_plane(mdev_state, &plane);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &plane, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_SET_IRQS:
return -EINVAL;
case VFIO_DEVICE_RESET:
return mdpy_reset(mdev_state);
}
return -ENOTTY;
}
static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mdev_state *mdev_state = dev_get_drvdata(dev);
return sprintf(buf, "%dx%d\n",
mdev_state->type->width,
mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);
static struct attribute *mdev_dev_attrs[] = {
&dev_attr_resolution.attr,
NULL,
};
static const struct attribute_group mdev_dev_group = {
.name = "vendor",
.attrs = mdev_dev_attrs,
};
static const struct attribute_group *mdev_dev_groups[] = {
&mdev_dev_group,
NULL,
};
static ssize_t mdpy_show_description(struct mdev_type *mtype, char *buf)
{
struct mdpy_type *type = container_of(mtype, struct mdpy_type, type);
return sprintf(buf, "virtual display, %dx%d framebuffer\n",
type->width, type->height);
}
static const struct vfio_device_ops mdpy_dev_ops = {
.init = mdpy_init_dev,
.release = mdpy_release_dev,
.read = mdpy_read,
.write = mdpy_write,
.ioctl = mdpy_ioctl,
.mmap = mdpy_mmap,
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
.detach_ioas = vfio_iommufd_emulated_detach_ioas,
};
static struct mdev_driver mdpy_driver = {
.device_api = VFIO_DEVICE_API_PCI_STRING,
.max_instances = 4,
.driver = {
.name = "mdpy",
.owner = THIS_MODULE,
.mod_name = KBUILD_MODNAME,
.dev_groups = mdev_dev_groups,
},
.probe = mdpy_probe,
.remove = mdpy_remove,
.show_description = mdpy_show_description,
};
static const struct file_operations vd_fops = {
.owner = THIS_MODULE,
};
static void mdpy_device_release(struct device *dev)
{
/* nothing */
}
static int __init mdpy_dev_init(void)
{
int ret = 0;
ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
if (ret < 0) {
pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
return ret;
}
cdev_init(&mdpy_cdev, &vd_fops);
cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));
ret = mdev_register_driver(&mdpy_driver);
if (ret)
goto err_cdev;
mdpy_class = class_create(MDPY_CLASS_NAME);
if (IS_ERR(mdpy_class)) {
pr_err("Error: failed to register mdpy_dev class\n");
ret = PTR_ERR(mdpy_class);
goto err_driver;
}
mdpy_dev.class = mdpy_class;
mdpy_dev.release = mdpy_device_release;
dev_set_name(&mdpy_dev, "%s", MDPY_NAME);
ret = device_register(&mdpy_dev);
if (ret)
goto err_put;
ret = mdev_register_parent(&mdpy_parent, &mdpy_dev, &mdpy_driver,
mdpy_mdev_types,
ARRAY_SIZE(mdpy_mdev_types));
if (ret)
goto err_device;
return 0;
err_device:
device_del(&mdpy_dev);
err_put:
put_device(&mdpy_dev);
class_destroy(mdpy_class);
err_driver:
mdev_unregister_driver(&mdpy_driver);
err_cdev:
cdev_del(&mdpy_cdev);
unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
return ret;
}
static void __exit mdpy_dev_exit(void)
{
mdpy_dev.bus = NULL;
mdev_unregister_parent(&mdpy_parent);
device_unregister(&mdpy_dev);
mdev_unregister_driver(&mdpy_driver);
cdev_del(&mdpy_cdev);
unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
class_destroy(mdpy_class);
mdpy_class = NULL;
}
module_param_named(count, mdpy_driver.max_instances, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");
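/*
 * Typical use from the host shell (illustrative only; the exact sysfs
 * paths and type names depend on the mdev core in the running kernel):
 *
 *   UUID=$(uuidgen)
 *   echo $UUID > /sys/class/mdpy/mdpy/mdev_supported_types/mdpy-vga/create
 *   qemu ... -device vfio-pci,sysfsdev=/sys/bus/mdev/devices/$UUID
 */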
module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)
| linux-master | samples/vfio-mdev/mdpy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Mediated virtual PCI display host device driver
*
 * Emulates enough of the qemu stdvga device to make bochs-drm.ko happy:
 * essentially the vram memory bar and the bochs dispi interface vbe
 * registers in the mmio register bar. It specifically does *not*
 * include any legacy vga support. The device looks a lot like
 * "qemu -device secondary-vga".
*
* (c) Gerd Hoffmann <[email protected]>
*
* based on mtty driver which is:
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Author: Neo Jia <[email protected]>
* Kirti Wankhede <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_property.h>
#include <drm/drm_plane.h>
#define VBE_DISPI_INDEX_ID 0x0
#define VBE_DISPI_INDEX_XRES 0x1
#define VBE_DISPI_INDEX_YRES 0x2
#define VBE_DISPI_INDEX_BPP 0x3
#define VBE_DISPI_INDEX_ENABLE 0x4
#define VBE_DISPI_INDEX_BANK 0x5
#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
#define VBE_DISPI_INDEX_X_OFFSET 0x8
#define VBE_DISPI_INDEX_Y_OFFSET 0x9
#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
#define VBE_DISPI_INDEX_COUNT 0xb
#define VBE_DISPI_ID0 0xB0C0
#define VBE_DISPI_ID1 0xB0C1
#define VBE_DISPI_ID2 0xB0C2
#define VBE_DISPI_ID3 0xB0C3
#define VBE_DISPI_ID4 0xB0C4
#define VBE_DISPI_ID5 0xB0C5
#define VBE_DISPI_DISABLED 0x00
#define VBE_DISPI_ENABLED 0x01
#define VBE_DISPI_GETCAPS 0x02
#define VBE_DISPI_8BIT_DAC 0x20
#define VBE_DISPI_LFB_ENABLED 0x40
#define VBE_DISPI_NOCLEARMEM 0x80
#define MBOCHS_NAME "mbochs"
#define MBOCHS_CLASS_NAME "mbochs"
#define MBOCHS_EDID_REGION_INDEX VFIO_PCI_NUM_REGIONS
#define MBOCHS_NUM_REGIONS (MBOCHS_EDID_REGION_INDEX+1)
#define MBOCHS_CONFIG_SPACE_SIZE 0xff
#define MBOCHS_MMIO_BAR_OFFSET PAGE_SIZE
#define MBOCHS_MMIO_BAR_SIZE PAGE_SIZE
#define MBOCHS_EDID_OFFSET (MBOCHS_MMIO_BAR_OFFSET + \
MBOCHS_MMIO_BAR_SIZE)
#define MBOCHS_EDID_SIZE PAGE_SIZE
#define MBOCHS_MEMORY_BAR_OFFSET (MBOCHS_EDID_OFFSET + \
MBOCHS_EDID_SIZE)
#define MBOCHS_EDID_BLOB_OFFSET (MBOCHS_EDID_SIZE/2)
#define STORE_LE16(addr, val) (*(u16 *)addr = val)
#define STORE_LE32(addr, val) (*(u32 *)addr = val)
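/*
 * Note: these stores write host byte order directly; the sample assumes
 * a little-endian host and makes no attempt to be endian-clean.
 */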
MODULE_LICENSE("GPL v2");
static int max_mbytes = 256;
module_param_named(count, max_mbytes, int, 0444);
MODULE_PARM_DESC(mem, "megabytes available to " MBOCHS_NAME " devices");
#define MBOCHS_TYPE_1 "small"
#define MBOCHS_TYPE_2 "medium"
#define MBOCHS_TYPE_3 "large"
static struct mbochs_type {
struct mdev_type type;
u32 mbytes;
u32 max_x;
u32 max_y;
} mbochs_types[] = {
{
.type.sysfs_name = MBOCHS_TYPE_1,
.type.pretty_name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_1,
.mbytes = 4,
.max_x = 800,
.max_y = 600,
}, {
.type.sysfs_name = MBOCHS_TYPE_2,
.type.pretty_name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_2,
.mbytes = 16,
.max_x = 1920,
.max_y = 1440,
}, {
.type.sysfs_name = MBOCHS_TYPE_3,
.type.pretty_name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_3,
.mbytes = 64,
.max_x = 0,
.max_y = 0,
},
};
static struct mdev_type *mbochs_mdev_types[] = {
&mbochs_types[0].type,
&mbochs_types[1].type,
&mbochs_types[2].type,
};
static dev_t mbochs_devt;
static struct class *mbochs_class;
static struct cdev mbochs_cdev;
static struct device mbochs_dev;
static struct mdev_parent mbochs_parent;
static atomic_t mbochs_avail_mbytes;
static const struct vfio_device_ops mbochs_dev_ops;
struct vfio_region_info_ext {
struct vfio_region_info base;
struct vfio_region_info_cap_type type;
};
struct mbochs_mode {
u32 drm_format;
u32 bytepp;
u32 width;
u32 height;
u32 stride;
u32 __pad;
u64 offset;
u64 size;
};
struct mbochs_dmabuf {
struct mbochs_mode mode;
u32 id;
struct page **pages;
pgoff_t pagecount;
struct dma_buf *buf;
struct mdev_state *mdev_state;
struct list_head next;
bool unlinked;
};
/* State of each mdev device */
struct mdev_state {
struct vfio_device vdev;
u8 *vconfig;
u64 bar_mask[3];
u32 memory_bar_mask;
struct mutex ops_lock;
struct mdev_device *mdev;
const struct mbochs_type *type;
u16 vbe[VBE_DISPI_INDEX_COUNT];
u64 memsize;
struct page **pages;
pgoff_t pagecount;
struct vfio_region_gfx_edid edid_regs;
u8 edid_blob[0x400];
struct list_head dmabufs;
u32 active_id;
u32 next_id;
};
static const char *vbe_name_list[VBE_DISPI_INDEX_COUNT] = {
[VBE_DISPI_INDEX_ID] = "id",
[VBE_DISPI_INDEX_XRES] = "xres",
[VBE_DISPI_INDEX_YRES] = "yres",
[VBE_DISPI_INDEX_BPP] = "bpp",
[VBE_DISPI_INDEX_ENABLE] = "enable",
[VBE_DISPI_INDEX_BANK] = "bank",
[VBE_DISPI_INDEX_VIRT_WIDTH] = "virt-width",
[VBE_DISPI_INDEX_VIRT_HEIGHT] = "virt-height",
[VBE_DISPI_INDEX_X_OFFSET] = "x-offset",
[VBE_DISPI_INDEX_Y_OFFSET] = "y-offset",
[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = "video-mem",
};
static const char *vbe_name(u32 index)
{
if (index < ARRAY_SIZE(vbe_name_list))
return vbe_name_list[index];
return "(invalid)";
}
static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
pgoff_t pgoff);
static struct page *mbochs_get_page(struct mdev_state *mdev_state,
pgoff_t pgoff);
static void mbochs_create_config_space(struct mdev_state *mdev_state)
{
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
0x1234);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
0x1111);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
PCI_SUBVENDOR_ID_REDHAT_QUMRANET);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
PCI_SUBDEVICE_ID_QEMU);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
PCI_CLASS_DISPLAY_OTHER);
mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;
STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_32 |
PCI_BASE_ADDRESS_MEM_PREFETCH);
mdev_state->bar_mask[0] = ~(mdev_state->memsize) + 1;
STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_2],
PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_32);
mdev_state->bar_mask[2] = ~(MBOCHS_MMIO_BAR_SIZE) + 1;
}
static int mbochs_check_framebuffer(struct mdev_state *mdev_state,
struct mbochs_mode *mode)
{
struct device *dev = mdev_dev(mdev_state->mdev);
u16 *vbe = mdev_state->vbe;
u32 virt_width;
WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
if (!(vbe[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED))
goto nofb;
memset(mode, 0, sizeof(*mode));
switch (vbe[VBE_DISPI_INDEX_BPP]) {
case 32:
mode->drm_format = DRM_FORMAT_XRGB8888;
mode->bytepp = 4;
break;
default:
dev_info_ratelimited(dev, "%s: bpp %d not supported\n",
__func__, vbe[VBE_DISPI_INDEX_BPP]);
goto nofb;
}
mode->width = vbe[VBE_DISPI_INDEX_XRES];
mode->height = vbe[VBE_DISPI_INDEX_YRES];
virt_width = vbe[VBE_DISPI_INDEX_VIRT_WIDTH];
if (virt_width < mode->width)
virt_width = mode->width;
mode->stride = virt_width * mode->bytepp;
mode->size = (u64)mode->stride * mode->height;
mode->offset = ((u64)vbe[VBE_DISPI_INDEX_X_OFFSET] * mode->bytepp +
(u64)vbe[VBE_DISPI_INDEX_Y_OFFSET] * mode->stride);
if (mode->width < 64 || mode->height < 64) {
dev_info_ratelimited(dev, "%s: invalid resolution %dx%d\n",
__func__, mode->width, mode->height);
goto nofb;
}
if (mode->offset + mode->size > mdev_state->memsize) {
dev_info_ratelimited(dev, "%s: framebuffer memory overflow\n",
__func__);
goto nofb;
}
return 0;
nofb:
memset(mode, 0, sizeof(*mode));
return -EINVAL;
}
static bool mbochs_modes_equal(struct mbochs_mode *mode1,
struct mbochs_mode *mode2)
{
return memcmp(mode1, mode2, sizeof(struct mbochs_mode)) == 0;
}
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
char *buf, u32 count)
{
struct device *dev = mdev_dev(mdev_state->mdev);
int index = (offset - PCI_BASE_ADDRESS_0) / 0x04;
u32 cfg_addr;
switch (offset) {
case PCI_BASE_ADDRESS_0:
case PCI_BASE_ADDRESS_2:
cfg_addr = *(u32 *)buf;
if (cfg_addr == 0xffffffff) {
cfg_addr = (cfg_addr & mdev_state->bar_mask[index]);
} else {
cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
if (cfg_addr)
dev_info(dev, "BAR #%d @ 0x%x\n",
index, cfg_addr);
}
cfg_addr |= (mdev_state->vconfig[offset] &
~PCI_BASE_ADDRESS_MEM_MASK);
STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
break;
}
}
static void handle_mmio_write(struct mdev_state *mdev_state, u16 offset,
char *buf, u32 count)
{
struct device *dev = mdev_dev(mdev_state->mdev);
int index;
u16 reg16;
switch (offset) {
case 0x400 ... 0x41f: /* vga ioports remapped */
goto unhandled;
case 0x500 ... 0x515: /* bochs dispi interface */
if (count != 2)
goto unhandled;
index = (offset - 0x500) / 2;
reg16 = *(u16 *)buf;
if (index < ARRAY_SIZE(mdev_state->vbe))
mdev_state->vbe[index] = reg16;
dev_dbg(dev, "%s: vbe write %d = %d (%s)\n",
__func__, index, reg16, vbe_name(index));
break;
case 0x600 ... 0x607: /* qemu extended regs */
goto unhandled;
default:
unhandled:
dev_dbg(dev, "%s: @0x%03x, count %d (unhandled)\n",
__func__, offset, count);
break;
}
}
static void handle_mmio_read(struct mdev_state *mdev_state, u16 offset,
char *buf, u32 count)
{
struct device *dev = mdev_dev(mdev_state->mdev);
struct vfio_region_gfx_edid *edid;
u16 reg16 = 0;
int index;
switch (offset) {
case 0x000 ... 0x3ff: /* edid block */
edid = &mdev_state->edid_regs;
if (edid->link_state != VFIO_DEVICE_GFX_LINK_STATE_UP ||
offset >= edid->edid_size) {
memset(buf, 0, count);
break;
}
memcpy(buf, mdev_state->edid_blob + offset, count);
break;
case 0x500 ... 0x515: /* bochs dispi interface */
if (count != 2)
goto unhandled;
index = (offset - 0x500) / 2;
if (index < ARRAY_SIZE(mdev_state->vbe))
reg16 = mdev_state->vbe[index];
dev_dbg(dev, "%s: vbe read %d = %d (%s)\n",
__func__, index, reg16, vbe_name(index));
*(u16 *)buf = reg16;
break;
default:
unhandled:
dev_dbg(dev, "%s: @0x%03x, count %d (unhandled)\n",
__func__, offset, count);
memset(buf, 0, count);
break;
}
}
static void handle_edid_regs(struct mdev_state *mdev_state, u16 offset,
char *buf, u32 count, bool is_write)
{
char *regs = (void *)&mdev_state->edid_regs;
if (offset + count > sizeof(mdev_state->edid_regs))
return;
if (count != 4)
return;
if (offset % 4)
return;
if (is_write) {
switch (offset) {
case offsetof(struct vfio_region_gfx_edid, link_state):
case offsetof(struct vfio_region_gfx_edid, edid_size):
memcpy(regs + offset, buf, count);
break;
default:
/* read-only regs */
break;
}
} else {
memcpy(buf, regs + offset, count);
}
}
static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset,
char *buf, u32 count, bool is_write)
{
if (offset + count > mdev_state->edid_regs.edid_max_size)
return;
if (is_write)
memcpy(mdev_state->edid_blob + offset, buf, count);
else
memcpy(buf, mdev_state->edid_blob + offset, count);
}
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
size_t count, loff_t pos, bool is_write)
{
struct page *pg;
loff_t poff;
char *map;
int ret = 0;
mutex_lock(&mdev_state->ops_lock);
if (pos < MBOCHS_CONFIG_SPACE_SIZE) {
if (is_write)
handle_pci_cfg_write(mdev_state, pos, buf, count);
else
memcpy(buf, (mdev_state->vconfig + pos), count);
} else if (pos >= MBOCHS_MMIO_BAR_OFFSET &&
pos + count <= (MBOCHS_MMIO_BAR_OFFSET +
MBOCHS_MMIO_BAR_SIZE)) {
pos -= MBOCHS_MMIO_BAR_OFFSET;
if (is_write)
handle_mmio_write(mdev_state, pos, buf, count);
else
handle_mmio_read(mdev_state, pos, buf, count);
} else if (pos >= MBOCHS_EDID_OFFSET &&
pos + count <= (MBOCHS_EDID_OFFSET +
MBOCHS_EDID_SIZE)) {
pos -= MBOCHS_EDID_OFFSET;
if (pos < MBOCHS_EDID_BLOB_OFFSET) {
handle_edid_regs(mdev_state, pos, buf, count, is_write);
} else {
pos -= MBOCHS_EDID_BLOB_OFFSET;
handle_edid_blob(mdev_state, pos, buf, count, is_write);
}
} else if (pos >= MBOCHS_MEMORY_BAR_OFFSET &&
pos + count <=
MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
		pos -= MBOCHS_MEMORY_BAR_OFFSET;
poff = pos & ~PAGE_MASK;
pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
map = kmap(pg);
if (is_write)
memcpy(map + poff, buf, count);
else
memcpy(buf, map + poff, count);
kunmap(pg);
put_page(pg);
} else {
dev_dbg(mdev_state->vdev.dev, "%s: %s @0x%llx (unhandled)\n",
__func__, is_write ? "WR" : "RD", pos);
ret = -1;
goto accessfailed;
}
ret = count;
accessfailed:
mutex_unlock(&mdev_state->ops_lock);
return ret;
}
static int mbochs_reset(struct mdev_state *mdev_state)
{
u32 size64k = mdev_state->memsize / (64 * 1024);
int i;
for (i = 0; i < ARRAY_SIZE(mdev_state->vbe); i++)
mdev_state->vbe[i] = 0;
mdev_state->vbe[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID5;
mdev_state->vbe[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = size64k;
return 0;
}
static int mbochs_init_dev(struct vfio_device *vdev)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
struct mdev_device *mdev = to_mdev_device(vdev->dev);
struct mbochs_type *type =
container_of(mdev->type, struct mbochs_type, type);
int avail_mbytes = atomic_read(&mbochs_avail_mbytes);
int ret = -ENOMEM;
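	/*
	 * Reserve video memory for this instance without holding a lock:
	 * retry the cmpxchg until the global budget is either updated
	 * consistently or found to be too small.
	 */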
do {
if (avail_mbytes < type->mbytes)
return -ENOSPC;
} while (!atomic_try_cmpxchg(&mbochs_avail_mbytes, &avail_mbytes,
avail_mbytes - type->mbytes));
mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL);
if (!mdev_state->vconfig)
goto err_avail;
mdev_state->memsize = type->mbytes * 1024 * 1024;
mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT;
mdev_state->pages = kcalloc(mdev_state->pagecount,
sizeof(struct page *),
GFP_KERNEL);
if (!mdev_state->pages)
goto err_vconfig;
mutex_init(&mdev_state->ops_lock);
mdev_state->mdev = mdev;
INIT_LIST_HEAD(&mdev_state->dmabufs);
mdev_state->next_id = 1;
mdev_state->type = type;
mdev_state->edid_regs.max_xres = type->max_x;
mdev_state->edid_regs.max_yres = type->max_y;
mdev_state->edid_regs.edid_offset = MBOCHS_EDID_BLOB_OFFSET;
mdev_state->edid_regs.edid_max_size = sizeof(mdev_state->edid_blob);
mbochs_create_config_space(mdev_state);
mbochs_reset(mdev_state);
dev_info(vdev->dev, "%s: %s, %d MB, %ld pages\n", __func__,
type->type.pretty_name, type->mbytes, mdev_state->pagecount);
return 0;
err_vconfig:
kfree(mdev_state->vconfig);
err_avail:
atomic_add(type->mbytes, &mbochs_avail_mbytes);
return ret;
}
static int mbochs_probe(struct mdev_device *mdev)
{
struct mdev_state *mdev_state;
int ret = -ENOMEM;
mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
&mbochs_dev_ops);
if (IS_ERR(mdev_state))
return PTR_ERR(mdev_state);
ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
if (ret)
goto err_put_vdev;
dev_set_drvdata(&mdev->dev, mdev_state);
return 0;
err_put_vdev:
vfio_put_device(&mdev_state->vdev);
return ret;
}
static void mbochs_release_dev(struct vfio_device *vdev)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes);
kfree(mdev_state->pages);
kfree(mdev_state->vconfig);
}
static void mbochs_remove(struct mdev_device *mdev)
{
struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
vfio_unregister_group_dev(&mdev_state->vdev);
vfio_put_device(&mdev_state->vdev);
}
static ssize_t mbochs_read(struct vfio_device *vdev, char __user *buf,
size_t count, loff_t *ppos)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
while (count) {
size_t filled;
if (count >= 4 && !(*ppos % 4)) {
u32 val;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 4;
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 2;
} else {
u8 val;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 1;
}
count -= filled;
done += filled;
*ppos += filled;
buf += filled;
}
return done;
read_err:
return -EFAULT;
}
static ssize_t mbochs_write(struct vfio_device *vdev, const char __user *buf,
size_t count, loff_t *ppos)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
while (count) {
size_t filled;
if (count >= 4 && !(*ppos % 4)) {
u32 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 4;
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 2;
} else {
u8 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 1;
}
count -= filled;
done += filled;
*ppos += filled;
buf += filled;
}
return done;
write_err:
return -EFAULT;
}
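/*
 * Framebuffer pages are allocated lazily, on first access, and every
 * caller gets its own reference (and must put_page() it); the creation
 * references are only dropped in bulk by mbochs_put_pages() when the
 * device is closed.
 */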
static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
pgoff_t pgoff)
{
WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
if (!mdev_state->pages[pgoff]) {
mdev_state->pages[pgoff] =
alloc_pages(GFP_HIGHUSER | __GFP_ZERO, 0);
if (!mdev_state->pages[pgoff])
return NULL;
}
get_page(mdev_state->pages[pgoff]);
return mdev_state->pages[pgoff];
}
static struct page *mbochs_get_page(struct mdev_state *mdev_state,
pgoff_t pgoff)
{
struct page *page;
if (WARN_ON(pgoff >= mdev_state->pagecount))
return NULL;
mutex_lock(&mdev_state->ops_lock);
page = __mbochs_get_page(mdev_state, pgoff);
mutex_unlock(&mdev_state->ops_lock);
return page;
}
static void mbochs_put_pages(struct mdev_state *mdev_state)
{
struct device *dev = mdev_dev(mdev_state->mdev);
int i, count = 0;
WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
for (i = 0; i < mdev_state->pagecount; i++) {
if (!mdev_state->pages[i])
continue;
put_page(mdev_state->pages[i]);
mdev_state->pages[i] = NULL;
count++;
}
dev_dbg(dev, "%s: %d pages released\n", __func__, count);
}
static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct mdev_state *mdev_state = vma->vm_private_data;
pgoff_t page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (page_offset >= mdev_state->pagecount)
return VM_FAULT_SIGBUS;
vmf->page = mbochs_get_page(mdev_state, page_offset);
if (!vmf->page)
return VM_FAULT_SIGBUS;
return 0;
}
static const struct vm_operations_struct mbochs_region_vm_ops = {
.fault = mbochs_region_vm_fault,
};
static int mbochs_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
if (vma->vm_pgoff != MBOCHS_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
return -EINVAL;
if (vma->vm_end < vma->vm_start)
return -EINVAL;
if (vma->vm_end - vma->vm_start > mdev_state->memsize)
return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
return -EINVAL;
vma->vm_ops = &mbochs_region_vm_ops;
vma->vm_private_data = mdev_state;
return 0;
}
static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct mbochs_dmabuf *dmabuf = vma->vm_private_data;
if (WARN_ON(vmf->pgoff >= dmabuf->pagecount))
return VM_FAULT_SIGBUS;
vmf->page = dmabuf->pages[vmf->pgoff];
get_page(vmf->page);
return 0;
}
static const struct vm_operations_struct mbochs_dmabuf_vm_ops = {
.fault = mbochs_dmabuf_vm_fault,
};
static int mbochs_mmap_dmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
struct mbochs_dmabuf *dmabuf = buf->priv;
struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
if ((vma->vm_flags & VM_SHARED) == 0)
return -EINVAL;
vma->vm_ops = &mbochs_dmabuf_vm_ops;
vma->vm_private_data = dmabuf;
return 0;
}
static void mbochs_print_dmabuf(struct mbochs_dmabuf *dmabuf,
const char *prefix)
{
struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
u32 fourcc = dmabuf->mode.drm_format;
dev_dbg(dev, "%s/%d: %c%c%c%c, %dx%d, stride %d, off 0x%llx, size 0x%llx, pages %ld\n",
prefix, dmabuf->id,
fourcc ? ((fourcc >> 0) & 0xff) : '-',
fourcc ? ((fourcc >> 8) & 0xff) : '-',
fourcc ? ((fourcc >> 16) & 0xff) : '-',
fourcc ? ((fourcc >> 24) & 0xff) : '-',
dmabuf->mode.width, dmabuf->mode.height, dmabuf->mode.stride,
dmabuf->mode.offset, dmabuf->mode.size, dmabuf->pagecount);
}
static struct sg_table *mbochs_map_dmabuf(struct dma_buf_attachment *at,
enum dma_data_direction direction)
{
struct mbochs_dmabuf *dmabuf = at->dmabuf->priv;
struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
struct sg_table *sg;
dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
sg = kzalloc(sizeof(*sg), GFP_KERNEL);
if (!sg)
goto err1;
if (sg_alloc_table_from_pages(sg, dmabuf->pages, dmabuf->pagecount,
0, dmabuf->mode.size, GFP_KERNEL) < 0)
goto err2;
if (dma_map_sgtable(at->dev, sg, direction, 0))
goto err3;
return sg;
err3:
sg_free_table(sg);
err2:
kfree(sg);
err1:
return ERR_PTR(-ENOMEM);
}
static void mbochs_unmap_dmabuf(struct dma_buf_attachment *at,
struct sg_table *sg,
enum dma_data_direction direction)
{
struct mbochs_dmabuf *dmabuf = at->dmabuf->priv;
struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
dma_unmap_sgtable(at->dev, sg, direction, 0);
sg_free_table(sg);
kfree(sg);
}
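/*
 * Ownership handshake with mbochs_close_device(): while the dmabuf is
 * still linked into mdev_state->dmabufs, close_device() frees it; once
 * close_device() has marked it ->unlinked, dropping the last dma_buf
 * reference here frees it instead.
 */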
static void mbochs_release_dmabuf(struct dma_buf *buf)
{
struct mbochs_dmabuf *dmabuf = buf->priv;
struct mdev_state *mdev_state = dmabuf->mdev_state;
struct device *dev = mdev_dev(mdev_state->mdev);
pgoff_t pg;
dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
for (pg = 0; pg < dmabuf->pagecount; pg++)
put_page(dmabuf->pages[pg]);
mutex_lock(&mdev_state->ops_lock);
dmabuf->buf = NULL;
if (dmabuf->unlinked)
kfree(dmabuf);
mutex_unlock(&mdev_state->ops_lock);
}
static struct dma_buf_ops mbochs_dmabuf_ops = {
.map_dma_buf = mbochs_map_dmabuf,
.unmap_dma_buf = mbochs_unmap_dmabuf,
.release = mbochs_release_dmabuf,
.mmap = mbochs_mmap_dmabuf,
};
static struct mbochs_dmabuf *mbochs_dmabuf_alloc(struct mdev_state *mdev_state,
struct mbochs_mode *mode)
{
struct mbochs_dmabuf *dmabuf;
pgoff_t page_offset, pg;
WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
dmabuf = kzalloc(sizeof(struct mbochs_dmabuf), GFP_KERNEL);
if (!dmabuf)
return NULL;
dmabuf->mode = *mode;
dmabuf->id = mdev_state->next_id++;
dmabuf->pagecount = DIV_ROUND_UP(mode->size, PAGE_SIZE);
dmabuf->pages = kcalloc(dmabuf->pagecount, sizeof(struct page *),
GFP_KERNEL);
if (!dmabuf->pages)
goto err_free_dmabuf;
page_offset = dmabuf->mode.offset >> PAGE_SHIFT;
for (pg = 0; pg < dmabuf->pagecount; pg++) {
dmabuf->pages[pg] = __mbochs_get_page(mdev_state,
page_offset + pg);
if (!dmabuf->pages[pg])
goto err_free_pages;
}
dmabuf->mdev_state = mdev_state;
list_add(&dmabuf->next, &mdev_state->dmabufs);
mbochs_print_dmabuf(dmabuf, __func__);
return dmabuf;
err_free_pages:
while (pg > 0)
put_page(dmabuf->pages[--pg]);
kfree(dmabuf->pages);
err_free_dmabuf:
kfree(dmabuf);
return NULL;
}
static struct mbochs_dmabuf *
mbochs_dmabuf_find_by_mode(struct mdev_state *mdev_state,
struct mbochs_mode *mode)
{
struct mbochs_dmabuf *dmabuf;
WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
if (mbochs_modes_equal(&dmabuf->mode, mode))
return dmabuf;
return NULL;
}
static struct mbochs_dmabuf *
mbochs_dmabuf_find_by_id(struct mdev_state *mdev_state, u32 id)
{
struct mbochs_dmabuf *dmabuf;
WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
if (dmabuf->id == id)
return dmabuf;
return NULL;
}
static int mbochs_dmabuf_export(struct mbochs_dmabuf *dmabuf)
{
struct mdev_state *mdev_state = dmabuf->mdev_state;
struct device *dev = mdev_state->vdev.dev;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *buf;
WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
if (!IS_ALIGNED(dmabuf->mode.offset, PAGE_SIZE)) {
dev_info_ratelimited(dev, "%s: framebuffer not page-aligned\n",
__func__);
return -EINVAL;
}
exp_info.ops = &mbochs_dmabuf_ops;
exp_info.size = dmabuf->mode.size;
exp_info.priv = dmabuf;
buf = dma_buf_export(&exp_info);
if (IS_ERR(buf)) {
dev_info_ratelimited(dev, "%s: dma_buf_export failed: %ld\n",
__func__, PTR_ERR(buf));
return PTR_ERR(buf);
}
dmabuf->buf = buf;
dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
return 0;
}
static int mbochs_get_region_info(struct mdev_state *mdev_state,
struct vfio_region_info_ext *ext)
{
struct vfio_region_info *region_info = &ext->base;
if (region_info->index >= MBOCHS_NUM_REGIONS)
return -EINVAL;
switch (region_info->index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
region_info->offset = 0;
region_info->size = MBOCHS_CONFIG_SPACE_SIZE;
region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE);
break;
case VFIO_PCI_BAR0_REGION_INDEX:
region_info->offset = MBOCHS_MEMORY_BAR_OFFSET;
region_info->size = mdev_state->memsize;
region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE |
VFIO_REGION_INFO_FLAG_MMAP);
break;
case VFIO_PCI_BAR2_REGION_INDEX:
region_info->offset = MBOCHS_MMIO_BAR_OFFSET;
region_info->size = MBOCHS_MMIO_BAR_SIZE;
region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE);
break;
case MBOCHS_EDID_REGION_INDEX:
ext->base.argsz = sizeof(*ext);
ext->base.offset = MBOCHS_EDID_OFFSET;
ext->base.size = MBOCHS_EDID_SIZE;
ext->base.flags = (VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE |
VFIO_REGION_INFO_FLAG_CAPS);
ext->base.cap_offset = offsetof(typeof(*ext), type);
ext->type.header.id = VFIO_REGION_INFO_CAP_TYPE;
ext->type.header.version = 1;
ext->type.header.next = 0;
ext->type.type = VFIO_REGION_TYPE_GFX;
ext->type.subtype = VFIO_REGION_SUBTYPE_GFX_EDID;
break;
default:
region_info->size = 0;
region_info->offset = 0;
region_info->flags = 0;
}
return 0;
}
static int mbochs_get_irq_info(struct vfio_irq_info *irq_info)
{
irq_info->count = 0;
return 0;
}
static int mbochs_get_device_info(struct vfio_device_info *dev_info)
{
dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
dev_info->num_regions = MBOCHS_NUM_REGIONS;
dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
return 0;
}
static int mbochs_query_gfx_plane(struct mdev_state *mdev_state,
struct vfio_device_gfx_plane_info *plane)
{
struct mbochs_dmabuf *dmabuf;
struct mbochs_mode mode;
int ret;
if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
VFIO_GFX_PLANE_TYPE_DMABUF))
return 0;
return -EINVAL;
}
if (plane->flags != VFIO_GFX_PLANE_TYPE_DMABUF)
return -EINVAL;
plane->drm_format_mod = 0;
plane->x_pos = 0;
plane->y_pos = 0;
plane->x_hot = 0;
plane->y_hot = 0;
mutex_lock(&mdev_state->ops_lock);
ret = -EINVAL;
if (plane->drm_plane_type == DRM_PLANE_TYPE_PRIMARY)
ret = mbochs_check_framebuffer(mdev_state, &mode);
if (ret < 0) {
plane->drm_format = 0;
plane->width = 0;
plane->height = 0;
plane->stride = 0;
plane->size = 0;
plane->dmabuf_id = 0;
goto done;
}
dmabuf = mbochs_dmabuf_find_by_mode(mdev_state, &mode);
	if (!dmabuf)
		dmabuf = mbochs_dmabuf_alloc(mdev_state, &mode);
if (!dmabuf) {
mutex_unlock(&mdev_state->ops_lock);
return -ENOMEM;
}
plane->drm_format = dmabuf->mode.drm_format;
plane->width = dmabuf->mode.width;
plane->height = dmabuf->mode.height;
plane->stride = dmabuf->mode.stride;
plane->size = dmabuf->mode.size;
plane->dmabuf_id = dmabuf->id;
done:
if (plane->drm_plane_type == DRM_PLANE_TYPE_PRIMARY &&
mdev_state->active_id != plane->dmabuf_id) {
dev_dbg(mdev_state->vdev.dev, "%s: primary: %d => %d\n",
__func__, mdev_state->active_id, plane->dmabuf_id);
mdev_state->active_id = plane->dmabuf_id;
}
mutex_unlock(&mdev_state->ops_lock);
return 0;
}
static int mbochs_get_gfx_dmabuf(struct mdev_state *mdev_state, u32 id)
{
struct mbochs_dmabuf *dmabuf;
mutex_lock(&mdev_state->ops_lock);
dmabuf = mbochs_dmabuf_find_by_id(mdev_state, id);
if (!dmabuf) {
mutex_unlock(&mdev_state->ops_lock);
return -ENOENT;
}
if (!dmabuf->buf)
mbochs_dmabuf_export(dmabuf);
mutex_unlock(&mdev_state->ops_lock);
if (!dmabuf->buf)
return -EINVAL;
return dma_buf_fd(dmabuf->buf, 0);
}
static long mbochs_ioctl(struct vfio_device *vdev, unsigned int cmd,
unsigned long arg)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
int ret = 0;
unsigned long minsz, outsz;
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
{
struct vfio_device_info info;
minsz = offsetofend(struct vfio_device_info, num_irqs);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
ret = mbochs_get_device_info(&info);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_GET_REGION_INFO:
{
struct vfio_region_info_ext info;
minsz = offsetofend(typeof(info), base.offset);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
outsz = info.base.argsz;
if (outsz < minsz)
return -EINVAL;
if (outsz > sizeof(info))
return -EINVAL;
ret = mbochs_get_region_info(mdev_state, &info);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &info, outsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_GET_IRQ_INFO:
{
struct vfio_irq_info info;
minsz = offsetofend(struct vfio_irq_info, count);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if ((info.argsz < minsz) ||
(info.index >= VFIO_PCI_NUM_IRQS))
return -EINVAL;
ret = mbochs_get_irq_info(&info);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_QUERY_GFX_PLANE:
{
struct vfio_device_gfx_plane_info plane;
minsz = offsetofend(struct vfio_device_gfx_plane_info,
region_index);
if (copy_from_user(&plane, (void __user *)arg, minsz))
return -EFAULT;
if (plane.argsz < minsz)
return -EINVAL;
ret = mbochs_query_gfx_plane(mdev_state, &plane);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &plane, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_GET_GFX_DMABUF:
{
u32 dmabuf_id;
if (get_user(dmabuf_id, (__u32 __user *)arg))
return -EFAULT;
return mbochs_get_gfx_dmabuf(mdev_state, dmabuf_id);
}
case VFIO_DEVICE_SET_IRQS:
return -EINVAL;
case VFIO_DEVICE_RESET:
return mbochs_reset(mdev_state);
}
return -ENOTTY;
}
static void mbochs_close_device(struct vfio_device *vdev)
{
struct mdev_state *mdev_state =
container_of(vdev, struct mdev_state, vdev);
struct mbochs_dmabuf *dmabuf, *tmp;
mutex_lock(&mdev_state->ops_lock);
list_for_each_entry_safe(dmabuf, tmp, &mdev_state->dmabufs, next) {
list_del(&dmabuf->next);
if (dmabuf->buf) {
/* free in mbochs_release_dmabuf() */
dmabuf->unlinked = true;
} else {
kfree(dmabuf);
}
}
mbochs_put_pages(mdev_state);
mutex_unlock(&mdev_state->ops_lock);
}
static ssize_t
memory_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mdev_state *mdev_state = dev_get_drvdata(dev);
return sprintf(buf, "%d MB\n", mdev_state->type->mbytes);
}
static DEVICE_ATTR_RO(memory);
static struct attribute *mdev_dev_attrs[] = {
&dev_attr_memory.attr,
NULL,
};
static const struct attribute_group mdev_dev_group = {
.name = "vendor",
.attrs = mdev_dev_attrs,
};
static const struct attribute_group *mdev_dev_groups[] = {
&mdev_dev_group,
NULL,
};
static ssize_t mbochs_show_description(struct mdev_type *mtype, char *buf)
{
struct mbochs_type *type =
container_of(mtype, struct mbochs_type, type);
return sprintf(buf, "virtual display, %d MB video memory\n",
type ? type->mbytes : 0);
}
static unsigned int mbochs_get_available(struct mdev_type *mtype)
{
struct mbochs_type *type =
container_of(mtype, struct mbochs_type, type);
return atomic_read(&mbochs_avail_mbytes) / type->mbytes;
}
static const struct vfio_device_ops mbochs_dev_ops = {
.close_device = mbochs_close_device,
.init = mbochs_init_dev,
.release = mbochs_release_dev,
.read = mbochs_read,
.write = mbochs_write,
.ioctl = mbochs_ioctl,
.mmap = mbochs_mmap,
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
.detach_ioas = vfio_iommufd_emulated_detach_ioas,
};
static struct mdev_driver mbochs_driver = {
.device_api = VFIO_DEVICE_API_PCI_STRING,
.driver = {
.name = "mbochs",
.owner = THIS_MODULE,
.mod_name = KBUILD_MODNAME,
.dev_groups = mdev_dev_groups,
},
.probe = mbochs_probe,
.remove = mbochs_remove,
.get_available = mbochs_get_available,
.show_description = mbochs_show_description,
};
static const struct file_operations vd_fops = {
.owner = THIS_MODULE,
};
static void mbochs_device_release(struct device *dev)
{
/* nothing */
}
static int __init mbochs_dev_init(void)
{
int ret = 0;
atomic_set(&mbochs_avail_mbytes, max_mbytes);
ret = alloc_chrdev_region(&mbochs_devt, 0, MINORMASK + 1, MBOCHS_NAME);
if (ret < 0) {
pr_err("Error: failed to register mbochs_dev, err: %d\n", ret);
return ret;
}
cdev_init(&mbochs_cdev, &vd_fops);
cdev_add(&mbochs_cdev, mbochs_devt, MINORMASK + 1);
pr_info("%s: major %d\n", __func__, MAJOR(mbochs_devt));
ret = mdev_register_driver(&mbochs_driver);
if (ret)
goto err_cdev;
mbochs_class = class_create(MBOCHS_CLASS_NAME);
if (IS_ERR(mbochs_class)) {
pr_err("Error: failed to register mbochs_dev class\n");
ret = PTR_ERR(mbochs_class);
goto err_driver;
}
mbochs_dev.class = mbochs_class;
mbochs_dev.release = mbochs_device_release;
dev_set_name(&mbochs_dev, "%s", MBOCHS_NAME);
ret = device_register(&mbochs_dev);
if (ret)
goto err_put;
ret = mdev_register_parent(&mbochs_parent, &mbochs_dev, &mbochs_driver,
mbochs_mdev_types,
ARRAY_SIZE(mbochs_mdev_types));
if (ret)
goto err_device;
return 0;
err_device:
device_del(&mbochs_dev);
err_put:
put_device(&mbochs_dev);
class_destroy(mbochs_class);
err_driver:
mdev_unregister_driver(&mbochs_driver);
err_cdev:
cdev_del(&mbochs_cdev);
unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
return ret;
}
static void __exit mbochs_dev_exit(void)
{
mbochs_dev.bus = NULL;
mdev_unregister_parent(&mbochs_parent);
device_unregister(&mbochs_dev);
mdev_unregister_driver(&mbochs_driver);
cdev_del(&mbochs_cdev);
unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
class_destroy(mbochs_class);
mbochs_class = NULL;
}
MODULE_IMPORT_NS(DMA_BUF);
module_init(mbochs_dev_init)
module_exit(mbochs_dev_exit)
| linux-master | samples/vfio-mdev/mbochs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Framebuffer driver for mdpy (mediated virtual pci display device).
*
* See mdpy-defs.h for device specs
*
* (c) Gerd Hoffmann <[email protected]>
*
* Using some code snippets from simplefb and cirrusfb.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"
static const struct fb_fix_screeninfo mdpy_fb_fix = {
.id = "mdpy-fb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.accel = FB_ACCEL_NONE,
};
static const struct fb_var_screeninfo mdpy_fb_var = {
.height = -1,
.width = -1,
.activate = FB_ACTIVATE_NOW,
.vmode = FB_VMODE_NONINTERLACED,
.bits_per_pixel = 32,
.transp.offset = 24,
.red.offset = 16,
.green.offset = 8,
.blue.offset = 0,
.transp.length = 8,
.red.length = 8,
.green.length = 8,
.blue.length = 8,
};
#define PSEUDO_PALETTE_SIZE 16
struct mdpy_fb_par {
u32 palette[PSEUDO_PALETTE_SIZE];
};
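/*
 * In truecolor mode the "palette" is only a 16-entry pseudo palette used
 * by the console drawing code; each entry packs the requested
 * 16-bit-per-channel color down to the visual's bit layout.
 */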
static int mdpy_fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
u32 *pal = info->pseudo_palette;
u32 cr = red >> (16 - info->var.red.length);
u32 cg = green >> (16 - info->var.green.length);
u32 cb = blue >> (16 - info->var.blue.length);
u32 value, mask;
if (regno >= PSEUDO_PALETTE_SIZE)
return -EINVAL;
value = (cr << info->var.red.offset) |
(cg << info->var.green.offset) |
(cb << info->var.blue.offset);
if (info->var.transp.length > 0) {
mask = (1 << info->var.transp.length) - 1;
mask <<= info->var.transp.offset;
value |= mask;
}
pal[regno] = value;
return 0;
}
static void mdpy_fb_destroy(struct fb_info *info)
{
if (info->screen_base)
iounmap(info->screen_base);
}
static const struct fb_ops mdpy_fb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_destroy = mdpy_fb_destroy,
.fb_setcolreg = mdpy_fb_setcolreg,
};
static int mdpy_fb_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct fb_info *info;
struct mdpy_fb_par *par;
u32 format, width, height;
int ret;
ret = pci_enable_device(pdev);
if (ret < 0)
return ret;
ret = pci_request_regions(pdev, "mdpy-fb");
if (ret < 0)
goto err_disable_dev;
pci_read_config_dword(pdev, MDPY_FORMAT_OFFSET, &format);
pci_read_config_dword(pdev, MDPY_WIDTH_OFFSET, &width);
pci_read_config_dword(pdev, MDPY_HEIGHT_OFFSET, &height);
if (format != DRM_FORMAT_XRGB8888) {
pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
format, DRM_FORMAT_XRGB8888);
ret = -EINVAL;
goto err_release_regions;
}
if (width < 100 || width > 10000) {
pci_err(pdev, "width (%d) out of range\n", width);
ret = -EINVAL;
goto err_release_regions;
}
if (height < 100 || height > 10000) {
pci_err(pdev, "height (%d) out of range\n", height);
ret = -EINVAL;
goto err_release_regions;
}
pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
width, height);
info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
if (!info) {
ret = -ENOMEM;
goto err_release_regions;
}
pci_set_drvdata(pdev, info);
par = info->par;
info->fix = mdpy_fb_fix;
info->fix.smem_start = pci_resource_start(pdev, 0);
info->fix.smem_len = pci_resource_len(pdev, 0);
info->fix.line_length = width * 4;
info->var = mdpy_fb_var;
info->var.xres = width;
info->var.yres = height;
info->var.xres_virtual = width;
info->var.yres_virtual = height;
info->screen_size = info->fix.smem_len;
info->screen_base = ioremap(info->fix.smem_start,
info->screen_size);
if (!info->screen_base) {
pci_err(pdev, "ioremap(pcibar) failed\n");
ret = -EIO;
goto err_release_fb;
}
info->fbops = &mdpy_fb_ops;
info->pseudo_palette = par->palette;
ret = register_framebuffer(info);
if (ret < 0) {
pci_err(pdev, "mdpy-fb device register failed: %d\n", ret);
goto err_unmap;
}
pci_info(pdev, "fb%d registered\n", info->node);
return 0;
err_unmap:
iounmap(info->screen_base);
err_release_fb:
framebuffer_release(info);
err_release_regions:
pci_release_regions(pdev);
err_disable_dev:
pci_disable_device(pdev);
return ret;
}
static void mdpy_fb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
unregister_framebuffer(info);
iounmap(info->screen_base);
framebuffer_release(info);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static struct pci_device_id mdpy_fb_pci_table[] = {
{
.vendor = MDPY_PCI_VENDOR_ID,
.device = MDPY_PCI_DEVICE_ID,
.subvendor = MDPY_PCI_SUBVENDOR_ID,
.subdevice = MDPY_PCI_SUBDEVICE_ID,
}, {
/* end of list */
}
};
static struct pci_driver mdpy_fb_pci_driver = {
.name = "mdpy-fb",
.id_table = mdpy_fb_pci_table,
.probe = mdpy_fb_probe,
.remove = mdpy_fb_remove,
};
static int __init mdpy_fb_init(void)
{
int ret;
ret = pci_register_driver(&mdpy_fb_pci_driver);
if (ret)
return ret;
return 0;
}
module_init(mdpy_fb_init);
MODULE_DEVICE_TABLE(pci, mdpy_fb_pci_table);
MODULE_LICENSE("GPL v2");
| linux-master | samples/vfio-mdev/mdpy-fb.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#ifndef CLONE_PIDFD
#define CLONE_PIDFD 0x00001000
#endif
#ifndef __NR_pidfd_send_signal
#define __NR_pidfd_send_signal -1
#endif
static int do_child(void *args)
{
printf("%d\n", getpid());
_exit(EXIT_SUCCESS);
}
static pid_t pidfd_clone(int flags, int *pidfd)
{
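	/*
	 * Note: "stack" is an array of 1024 pointers, so the child stack
	 * is really 1024 * sizeof(char *) bytes; stack + stack_size is
	 * its high end, as required on stacks that grow down.
	 */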
size_t stack_size = 1024;
char *stack[1024] = { 0 };
#ifdef __ia64__
return __clone2(do_child, stack, stack_size, flags | SIGCHLD, NULL, pidfd);
#else
return clone(do_child, stack + stack_size, flags | SIGCHLD, NULL, pidfd);
#endif
}
static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
unsigned int flags)
{
return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
}
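/*
 * Open /proc/<pid> first, then confirm the process is still the one the
 * pidfd refers to by sending signal 0 through the pidfd. If the pid had
 * been recycled in between, pidfd_send_signal() fails and the stale
 * procfd is discarded.
 */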
static int pidfd_metadata_fd(pid_t pid, int pidfd)
{
int procfd, ret;
char path[100];
snprintf(path, sizeof(path), "/proc/%d", pid);
procfd = open(path, O_DIRECTORY | O_RDONLY | O_CLOEXEC);
if (procfd < 0) {
warn("Failed to open %s\n", path);
return -1;
}
/*
* Verify that the pid has not been recycled and our /proc/<pid> handle
* is still valid.
*/
ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
if (ret < 0) {
switch (errno) {
case EPERM:
/* Process exists, just not allowed to signal it. */
break;
default:
warn("Failed to signal process\n");
close(procfd);
procfd = -1;
}
}
return procfd;
}
int main(int argc, char *argv[])
{
int pidfd = -1, ret = EXIT_FAILURE;
char buf[4096] = { 0 };
pid_t pid;
int procfd, statusfd;
ssize_t bytes;
pid = pidfd_clone(CLONE_PIDFD, &pidfd);
if (pid < 0)
err(ret, "CLONE_PIDFD");
if (pidfd == -1) {
warnx("CLONE_PIDFD is not supported by the kernel");
goto out;
}
procfd = pidfd_metadata_fd(pid, pidfd);
close(pidfd);
if (procfd < 0)
goto out;
statusfd = openat(procfd, "status", O_RDONLY | O_CLOEXEC);
close(procfd);
if (statusfd < 0)
goto out;
bytes = read(statusfd, buf, sizeof(buf));
if (bytes > 0)
bytes = write(STDOUT_FILENO, buf, bytes);
close(statusfd);
ret = EXIT_SUCCESS;
out:
(void)wait(NULL);
exit(ret);
}
| linux-master | samples/pidfd/pidfd-metadata.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Benjamin Tissoires
*/
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "hid_bpf_attach.h"
#include "hid_bpf_helpers.h"
SEC("syscall")
int attach_prog(struct attach_prog_args *ctx)
{
ctx->retval = hid_bpf_attach_prog(ctx->hid,
ctx->prog_fd,
0);
return 0;
}
| linux-master | samples/hid/hid_bpf_attach.bpf.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Benjamin Tissoires
*/
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "hid_bpf_helpers.h"
#define HID_UP_BUTTON 0x0009
#define HID_GD_WHEEL 0x0038
SEC("fmod_ret/hid_bpf_device_event")
int BPF_PROG(hid_event, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);
if (!data)
return 0; /* EPERM check */
/* Touch */
data[1] &= 0xfd;
/* X */
data[4] = 0;
data[5] = 0;
/* Y */
data[6] = 0;
data[7] = 0;
return 0;
}
/* 72 == 360 / 5 -> 1 report every 5 degrees */
int resolution = 72;
int physical = 5;
struct haptic_syscall_args {
unsigned int hid;
int retval;
};
static __u8 haptic_data[8];
SEC("syscall")
int set_haptic(struct haptic_syscall_args *args)
{
struct hid_bpf_ctx *ctx;
const size_t size = sizeof(haptic_data);
u16 *res;
int ret;
if (size > sizeof(haptic_data))
return -7; /* -E2BIG */
ctx = hid_bpf_allocate_context(args->hid);
if (!ctx)
return -1; /* EPERM check */
haptic_data[0] = 1; /* report ID */
ret = hid_bpf_hw_request(ctx, haptic_data, size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
bpf_printk("probed/remove event ret value: %d", ret);
bpf_printk("buf: %02x %02x %02x",
haptic_data[0],
haptic_data[1],
haptic_data[2]);
bpf_printk(" %02x %02x %02x",
haptic_data[3],
haptic_data[4],
haptic_data[5]);
bpf_printk(" %02x %02x",
haptic_data[6],
haptic_data[7]);
	/* if the resolution multiplier is not 3600, the fixed report descriptor is in use */
res = (u16 *)&haptic_data[1];
if (*res != 3600) {
// haptic_data[1] = 72; /* resolution multiplier */
// haptic_data[2] = 0; /* resolution multiplier */
// haptic_data[3] = 0; /* Repeat Count */
haptic_data[4] = 3; /* haptic Auto Trigger */
// haptic_data[5] = 5; /* Waveform Cutoff Time */
// haptic_data[6] = 80; /* Retrigger Period */
// haptic_data[7] = 0; /* Retrigger Period */
} else {
haptic_data[4] = 0;
}
ret = hid_bpf_hw_request(ctx, haptic_data, size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
bpf_printk("set haptic ret value: %d -> %d", ret, haptic_data[4]);
args->retval = ret;
hid_bpf_release_context(ctx);
return 0;
}
/* Convert REL_DIAL into REL_WHEEL */
SEC("fmod_ret/hid_bpf_rdesc_fixup")
int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
__u16 *res, *phys;
if (!data)
return 0; /* EPERM check */
/* Convert TOUCH into a button */
data[31] = HID_UP_BUTTON;
data[33] = 2;
/* Convert REL_DIAL into REL_WHEEL */
data[45] = HID_GD_WHEEL;
/* Change Resolution Multiplier */
phys = (__u16 *)&data[61];
*phys = physical;
res = (__u16 *)&data[66];
*res = resolution;
/* Convert X,Y from Abs to Rel */
data[88] = 0x06;
data[98] = 0x06;
return 0;
}
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = 1;
| linux-master | samples/hid/hid_surface_dial.bpf.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "hid_bpf_helpers.h"
SEC("fmod_ret/hid_bpf_device_event")
int BPF_PROG(hid_y_event, struct hid_bpf_ctx *hctx)
{
s16 y;
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);
if (!data)
return 0; /* EPERM check */
bpf_printk("event: size: %d", hctx->size);
bpf_printk("incoming event: %02x %02x %02x",
data[0],
data[1],
data[2]);
bpf_printk(" %02x %02x %02x",
data[3],
data[4],
data[5]);
bpf_printk(" %02x %02x %02x",
data[6],
data[7],
data[8]);
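	/* Added note: bytes 3-4 carry the Y delta as a little-endian s16;
	 * reassemble it, negate it, and store it back in the same layout.
	 */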
y = data[3] | (data[4] << 8);
y = -y;
data[3] = y & 0xFF;
data[4] = (y >> 8) & 0xFF;
bpf_printk("modified event: %02x %02x %02x",
data[0],
data[1],
data[2]);
bpf_printk(" %02x %02x %02x",
data[3],
data[4],
data[5]);
bpf_printk(" %02x %02x %02x",
data[6],
data[7],
data[8]);
return 0;
}
SEC("fmod_ret/hid_bpf_device_event")
int BPF_PROG(hid_x_event, struct hid_bpf_ctx *hctx)
{
s16 x;
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);
if (!data)
return 0; /* EPERM check */
x = data[1] | (data[2] << 8);
x = -x;
data[1] = x & 0xFF;
data[2] = (x >> 8) & 0xFF;
return 0;
}
SEC("fmod_ret/hid_bpf_rdesc_fixup")
int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx)
{
__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
if (!data)
return 0; /* EPERM check */
bpf_printk("rdesc: %02x %02x %02x",
data[0],
data[1],
data[2]);
bpf_printk(" %02x %02x %02x",
data[3],
data[4],
data[5]);
bpf_printk(" %02x %02x %02x ...",
data[6],
data[7],
data[8]);
/*
* The original report descriptor contains:
*
* 0x05, 0x01, // Usage Page (Generic Desktop) 30
* 0x16, 0x01, 0x80, // Logical Minimum (-32767) 32
* 0x26, 0xff, 0x7f, // Logical Maximum (32767) 35
* 0x09, 0x30, // Usage (X) 38
* 0x09, 0x31, // Usage (Y) 40
*
* So byte 39 contains Usage X and byte 41 Usage Y.
*
* We simply swap the axes here.
*/
data[39] = 0x31;
data[41] = 0x30;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/hid/hid_mouse.bpf.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Benjamin Tissoires
*
* This program will morph the Microsoft Surface Dial into a mouse,
 * and, depending on the chosen resolution, enable haptic feedback or not:
* - a resolution (-r) of 3600 will report 3600 "ticks" in one full rotation
* without haptic feedback
* - any other resolution will report N "ticks" in a full rotation with haptic
* feedback
*
 * A good default for low-resolution haptic scrolling is 72 (1 "tick" every 5
 * degrees); set it to 3600 for smooth scrolling.
*/
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <linux/errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "hid_surface_dial.skel.h"
#include "hid_bpf_attach.h"
static bool running = true;
struct haptic_syscall_args {
unsigned int hid;
int retval;
};
static void int_exit(int sig)
{
running = false;
exit(0);
}
static void usage(const char *prog)
{
fprintf(stderr,
"%s: %s [OPTIONS] /sys/bus/hid/devices/0BUS:0VID:0PID:00ID\n\n"
" OPTIONS:\n"
" -r N\t set the given resolution to the device (number of ticks per 360°)\n\n",
__func__, prog);
fprintf(stderr,
"This program will morph the Microsoft Surface Dial into a mouse,\n"
"and depending on the chosen resolution enable or not the haptic feedback:\n"
"- a resolution (-r) of 3600 will report 3600 'ticks' in one full rotation\n"
" without haptic feedback\n"
"- any other resolution will report N 'ticks' in a full rotation with haptic\n"
" feedback\n"
"\n"
"A good default for low resolution haptic scrolling is 72 (1 'tick' every 5\n"
"degrees), and set to 3600 for smooth scrolling.\n");
}
static int get_hid_id(const char *path)
{
const char *str_id, *dir;
char uevent[1024];
int fd;
memset(uevent, 0, sizeof(uevent));
snprintf(uevent, sizeof(uevent) - 1, "%s/uevent", path);
fd = open(uevent, O_RDONLY | O_NONBLOCK);
if (fd < 0)
return -ENOENT;
close(fd);
dir = basename((char *)path);
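	/*
	 * Added note: 'dir' looks like "0003:0001:0A37.0004". sizeof() of the
	 * prefix literal includes its NUL terminator, so this also skips the
	 * first character of the 4-digit id; the hex parse still yields the
	 * right value as long as that leading digit is '0'.
	 */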
str_id = dir + sizeof("0003:0001:0A37.");
return (int)strtol(str_id, NULL, 16);
}
static int attach_prog(struct hid_surface_dial *skel, struct bpf_program *prog, int hid_id)
{
struct attach_prog_args args = {
.hid = hid_id,
.retval = -1,
};
int attach_fd, err;
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, tattr,
.ctx_in = &args,
.ctx_size_in = sizeof(args),
);
attach_fd = bpf_program__fd(skel->progs.attach_prog);
if (attach_fd < 0) {
fprintf(stderr, "can't locate attach prog: %m\n");
return 1;
}
args.prog_fd = bpf_program__fd(prog);
err = bpf_prog_test_run_opts(attach_fd, &tattr);
if (err) {
fprintf(stderr, "can't attach prog to hid device %d: %m (err: %d)\n",
hid_id, err);
return 1;
}
return 0;
}
static int set_haptic(struct hid_surface_dial *skel, int hid_id)
{
struct haptic_syscall_args args = {
.hid = hid_id,
.retval = -1,
};
int haptic_fd, err;
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, tattr,
.ctx_in = &args,
.ctx_size_in = sizeof(args),
);
haptic_fd = bpf_program__fd(skel->progs.set_haptic);
if (haptic_fd < 0) {
fprintf(stderr, "can't locate haptic prog: %m\n");
return 1;
}
err = bpf_prog_test_run_opts(haptic_fd, &tattr);
if (err) {
fprintf(stderr, "can't set haptic configuration to hid device %d: %m (err: %d)\n",
hid_id, err);
return 1;
}
return 0;
}
int main(int argc, char **argv)
{
struct hid_surface_dial *skel;
struct bpf_program *prog;
const char *optstr = "r:";
const char *sysfs_path;
int opt, hid_id, resolution = 72;
while ((opt = getopt(argc, argv, optstr)) != -1) {
switch (opt) {
case 'r':
{
char *endp = NULL;
long l = -1;
if (optarg) {
l = strtol(optarg, &endp, 10);
if (endp && *endp)
l = -1;
}
if (l < 0) {
fprintf(stderr,
"invalid r option %s - expecting a number\n",
optarg ? optarg : "");
exit(EXIT_FAILURE);
			}
resolution = (int) l;
break;
}
default:
usage(basename(argv[0]));
return 1;
}
}
if (optind == argc) {
usage(basename(argv[0]));
return 1;
}
sysfs_path = argv[optind];
if (!sysfs_path) {
perror("sysfs");
return 1;
}
skel = hid_surface_dial__open_and_load();
if (!skel) {
fprintf(stderr, "%s %s:%d", __func__, __FILE__, __LINE__);
return -1;
}
hid_id = get_hid_id(sysfs_path);
if (hid_id < 0) {
fprintf(stderr, "can not open HID device: %m\n");
return 1;
}
skel->data->resolution = resolution;
skel->data->physical = (int)(resolution / 72);
bpf_object__for_each_program(prog, *skel->skeleton->obj) {
/* ignore syscalls */
if (bpf_program__get_type(prog) != BPF_PROG_TYPE_TRACING)
continue;
attach_prog(skel, prog, hid_id);
}
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
set_haptic(skel, hid_id);
while (running)
sleep(1);
hid_surface_dial__destroy(skel);
return 0;
}
| linux-master | samples/hid/hid_surface_dial.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Benjamin Tissoires
*
* This is a pure HID-BPF example, and should be considered as such:
* on the Etekcity Scroll 6E, the X and Y axes will be swapped and
* inverted. On any other device... Not sure what this will do.
*
 * This C main file is generic though. To adapt the code and test, users
 * only need to amend the .bpf.c file: this program will upload and attach
 * any eBPF program it finds there.
*/
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <linux/errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "hid_mouse.skel.h"
#include "hid_bpf_attach.h"
static bool running = true;
static void int_exit(int sig)
{
running = false;
exit(0);
}
static void usage(const char *prog)
{
fprintf(stderr,
"%s: %s /sys/bus/hid/devices/0BUS:0VID:0PID:00ID\n\n",
__func__, prog);
fprintf(stderr,
"This program will upload and attach a HID-BPF program to the given device.\n"
"On the Etekcity Scroll 6E, the X and Y axis will be inverted, but on any other\n"
"device, chances are high that the device will not be working anymore\n\n"
"consider this as a demo and adapt the eBPF program to your needs\n"
"Hit Ctrl-C to unbind the program and reset the device\n");
}
static int get_hid_id(const char *path)
{
const char *str_id, *dir;
char uevent[1024];
int fd;
memset(uevent, 0, sizeof(uevent));
snprintf(uevent, sizeof(uevent) - 1, "%s/uevent", path);
fd = open(uevent, O_RDONLY | O_NONBLOCK);
if (fd < 0)
return -ENOENT;
close(fd);
dir = basename((char *)path);
str_id = dir + sizeof("0003:0001:0A37.");
return (int)strtol(str_id, NULL, 16);
}
int main(int argc, char **argv)
{
struct hid_mouse *skel;
struct bpf_program *prog;
int err;
const char *optstr = "";
const char *sysfs_path;
int opt, hid_id, attach_fd;
struct attach_prog_args args = {
.retval = -1,
};
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, tattr,
.ctx_in = &args,
.ctx_size_in = sizeof(args),
);
while ((opt = getopt(argc, argv, optstr)) != -1) {
switch (opt) {
default:
usage(basename(argv[0]));
return 1;
}
}
if (optind == argc) {
usage(basename(argv[0]));
return 1;
}
sysfs_path = argv[optind];
if (!sysfs_path) {
perror("sysfs");
return 1;
}
skel = hid_mouse__open_and_load();
if (!skel) {
fprintf(stderr, "%s %s:%d", __func__, __FILE__, __LINE__);
return -1;
}
hid_id = get_hid_id(sysfs_path);
if (hid_id < 0) {
fprintf(stderr, "can not open HID device: %m\n");
return 1;
}
args.hid = hid_id;
attach_fd = bpf_program__fd(skel->progs.attach_prog);
if (attach_fd < 0) {
fprintf(stderr, "can't locate attach prog: %m\n");
return 1;
}
bpf_object__for_each_program(prog, *skel->skeleton->obj) {
/* ignore syscalls */
if (bpf_program__get_type(prog) != BPF_PROG_TYPE_TRACING)
continue;
args.retval = -1;
args.prog_fd = bpf_program__fd(prog);
err = bpf_prog_test_run_opts(attach_fd, &tattr);
if (err) {
fprintf(stderr, "can't attach prog to hid device %d: %m (err: %d)\n",
hid_id, err);
return 1;
}
}
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
while (running)
sleep(1);
hid_mouse__destroy(skel);
return 0;
}
| linux-master | samples/hid/hid_mouse.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Filename: cfag12864b-example.c
* Version: 0.1.0
* Description: cfag12864b LCD userspace example program
*
* Author: Copyright (C) Miguel Ojeda <[email protected]>
* Date: 2006-10-31
*/
/*
* ------------------------
* start of cfag12864b code
* ------------------------
*/
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#define CFAG12864B_WIDTH (128)
#define CFAG12864B_HEIGHT (64)
#define CFAG12864B_SIZE (128 * 64 / 8)
#define CFAG12864B_BPB (8)
#define CFAG12864B_ADDRESS(x, y) ((y) * CFAG12864B_WIDTH / \
CFAG12864B_BPB + (x) / CFAG12864B_BPB)
#define CFAG12864B_BIT(n) (((unsigned char) 1) << (n))
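/*
 * Added worked example: the framebuffer packs 8 horizontal pixels per byte,
 * row by row. For pixel (x = 10, y = 3):
 *   CFAG12864B_ADDRESS(10, 3) = 3 * 128 / 8 + 10 / 8 = 48 + 1 = 49
 *   CFAG12864B_BIT(10 % 8)    = 1 << 2
 * i.e. the pixel lives in bit 2 of byte 49 of cfag12864b_buffer.
 */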
#undef CFAG12864B_DOCHECK
#ifdef CFAG12864B_DOCHECK
#define CFAG12864B_CHECK(x, y) ((x) < CFAG12864B_WIDTH && \
(y) < CFAG12864B_HEIGHT)
#else
#define CFAG12864B_CHECK(x, y) (1)
#endif
int cfag12864b_fd;
unsigned char *cfag12864b_mem;
unsigned char cfag12864b_buffer[CFAG12864B_SIZE];
/*
* init a cfag12864b framebuffer device
*
* No error: return = 0
* Unable to open: return = -1
* Unable to mmap: return = -2
*/
static int cfag12864b_init(char *path)
{
cfag12864b_fd = open(path, O_RDWR);
if (cfag12864b_fd == -1)
return -1;
cfag12864b_mem = mmap(0, CFAG12864B_SIZE, PROT_READ | PROT_WRITE,
MAP_SHARED, cfag12864b_fd, 0);
if (cfag12864b_mem == MAP_FAILED) {
close(cfag12864b_fd);
return -2;
}
return 0;
}
/*
* exit a cfag12864b framebuffer device
*/
static void cfag12864b_exit(void)
{
munmap(cfag12864b_mem, CFAG12864B_SIZE);
close(cfag12864b_fd);
}
/*
* set (x, y) pixel
*/
static void cfag12864b_set(unsigned char x, unsigned char y)
{
if (CFAG12864B_CHECK(x, y))
cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] |=
CFAG12864B_BIT(x % CFAG12864B_BPB);
}
/*
* unset (x, y) pixel
*/
static void cfag12864b_unset(unsigned char x, unsigned char y)
{
if (CFAG12864B_CHECK(x, y))
cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] &=
~CFAG12864B_BIT(x % CFAG12864B_BPB);
}
/*
* is set (x, y) pixel?
*
* Pixel off: return = 0
* Pixel on: return = 1
*/
static unsigned char cfag12864b_isset(unsigned char x, unsigned char y)
{
if (CFAG12864B_CHECK(x, y))
if (cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] &
CFAG12864B_BIT(x % CFAG12864B_BPB))
return 1;
return 0;
}
/*
* not (x, y) pixel
*/
static void cfag12864b_not(unsigned char x, unsigned char y)
{
if (cfag12864b_isset(x, y))
cfag12864b_unset(x, y);
else
cfag12864b_set(x, y);
}
/*
* fill (set all pixels)
*/
static void cfag12864b_fill(void)
{
unsigned short i;
for (i = 0; i < CFAG12864B_SIZE; i++)
cfag12864b_buffer[i] = 0xFF;
}
/*
* clear (unset all pixels)
*/
static void cfag12864b_clear(void)
{
unsigned short i;
for (i = 0; i < CFAG12864B_SIZE; i++)
cfag12864b_buffer[i] = 0;
}
/*
* format a [128*64] matrix
*
* Pixel off: src[i] = 0
* Pixel on: src[i] > 0
*/
static void cfag12864b_format(unsigned char *matrix)
{
unsigned char i, j, n;
for (i = 0; i < CFAG12864B_HEIGHT; i++)
for (j = 0; j < CFAG12864B_WIDTH / CFAG12864B_BPB; j++) {
cfag12864b_buffer[i * CFAG12864B_WIDTH / CFAG12864B_BPB +
j] = 0;
for (n = 0; n < CFAG12864B_BPB; n++)
if (matrix[i * CFAG12864B_WIDTH +
j * CFAG12864B_BPB + n])
cfag12864b_buffer[i * CFAG12864B_WIDTH /
CFAG12864B_BPB + j] |=
CFAG12864B_BIT(n);
}
}
/*
* blit buffer to lcd
*/
static void cfag12864b_blit(void)
{
memcpy(cfag12864b_mem, cfag12864b_buffer, CFAG12864B_SIZE);
}
/*
* ----------------------
* end of cfag12864b code
* ----------------------
*/
#include <stdio.h>
#define EXAMPLES 6
static void example(unsigned char n)
{
unsigned short i, j;
unsigned char matrix[CFAG12864B_WIDTH * CFAG12864B_HEIGHT];
if (n > EXAMPLES)
return;
printf("Example %i/%i - ", n, EXAMPLES);
switch (n) {
case 1:
printf("Draw points setting bits");
cfag12864b_clear();
for (i = 0; i < CFAG12864B_WIDTH; i += 2)
for (j = 0; j < CFAG12864B_HEIGHT; j += 2)
cfag12864b_set(i, j);
break;
case 2:
printf("Clear the LCD");
cfag12864b_clear();
break;
case 3:
printf("Draw rows formatting a [128*64] matrix");
memset(matrix, 0, CFAG12864B_WIDTH * CFAG12864B_HEIGHT);
for (i = 0; i < CFAG12864B_WIDTH; i++)
for (j = 0; j < CFAG12864B_HEIGHT; j += 2)
matrix[j * CFAG12864B_WIDTH + i] = 1;
cfag12864b_format(matrix);
break;
case 4:
printf("Fill the lcd");
cfag12864b_fill();
break;
case 5:
printf("Draw columns unsetting bits");
for (i = 0; i < CFAG12864B_WIDTH; i += 2)
for (j = 0; j < CFAG12864B_HEIGHT; j++)
cfag12864b_unset(i, j);
break;
case 6:
printf("Do negative not-ing all bits");
for (i = 0; i < CFAG12864B_WIDTH; i++)
for (j = 0; j < CFAG12864B_HEIGHT; j ++)
cfag12864b_not(i, j);
break;
}
puts(" - [Press Enter]");
}
int main(int argc, char *argv[])
{
unsigned char n;
if (argc != 2) {
printf(
"Syntax: %s fbdev\n"
"Usually: /dev/fb0, /dev/fb1...\n", argv[0]);
return -1;
}
if (cfag12864b_init(argv[1])) {
printf("Can't init %s fbdev\n", argv[1]);
return -2;
}
for (n = 1; n <= EXAMPLES; n++) {
example(n);
cfag12864b_blit();
while (getchar() != '\n');
}
cfag12864b_exit();
return 0;
}
| linux-master | samples/auxdisplay/cfag12864b-example.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/kthread.h>
/*
 * Any file that uses trace points must include the header.
 * But only one file must include the header by defining
 * CREATE_TRACE_POINTS first. This generates the C code that
 * creates the handles for the trace points.
*/
#define CREATE_TRACE_POINTS
#include "trace-events-sample.h"
static const char *random_strings[] = {
"Mother Goose",
"Snoopy",
"Gandalf",
"Frodo",
"One ring to rule them all"
};
static void do_simple_thread_func(int cnt, const char *fmt, ...)
{
unsigned long bitmask[1] = {0xdeadbeefUL};
va_list va;
int array[6];
int len = cnt % 5;
int i;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ);
for (i = 0; i < len; i++)
array[i] = i + 1;
array[i] = 0;
va_start(va, fmt);
/* Silly tracepoints */
trace_foo_bar("hello", cnt, array, random_strings[len],
current->cpus_ptr, fmt, &va);
va_end(va);
trace_foo_with_template_simple("HELLO", cnt);
trace_foo_bar_with_cond("Some times print", cnt);
trace_foo_with_template_cond("prints other times", cnt);
trace_foo_with_template_print("I have to be different", cnt);
trace_foo_rel_loc("Hello __rel_loc", cnt, bitmask, current->cpus_ptr);
}
static void simple_thread_func(int cnt)
{
do_simple_thread_func(cnt, "iter=%d", cnt);
}
static int simple_thread(void *arg)
{
int cnt = 0;
while (!kthread_should_stop())
simple_thread_func(cnt++);
return 0;
}
static struct task_struct *simple_tsk;
static struct task_struct *simple_tsk_fn;
static void simple_thread_func_fn(int cnt)
{
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ);
/* More silly tracepoints */
trace_foo_bar_with_fn("Look at me", cnt);
trace_foo_with_template_fn("Look at me too", cnt);
}
static int simple_thread_fn(void *arg)
{
int cnt = 0;
while (!kthread_should_stop())
simple_thread_func_fn(cnt++);
return 0;
}
static DEFINE_MUTEX(thread_mutex);
static int simple_thread_cnt;
int foo_bar_reg(void)
{
mutex_lock(&thread_mutex);
if (simple_thread_cnt++)
goto out;
pr_info("Starting thread for foo_bar_fn\n");
/*
* We shouldn't be able to start a trace when the module is
	 * unloading (there are other locks to prevent that). But
	 * for consistency's sake, we still take the thread_mutex.
*/
simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
out:
mutex_unlock(&thread_mutex);
return 0;
}
void foo_bar_unreg(void)
{
mutex_lock(&thread_mutex);
if (--simple_thread_cnt)
goto out;
pr_info("Killing thread for foo_bar_fn\n");
if (simple_tsk_fn)
kthread_stop(simple_tsk_fn);
simple_tsk_fn = NULL;
out:
mutex_unlock(&thread_mutex);
}
static int __init trace_event_init(void)
{
simple_tsk = kthread_run(simple_thread, NULL, "event-sample");
if (IS_ERR(simple_tsk))
return -1;
return 0;
}
static void __exit trace_event_exit(void)
{
kthread_stop(simple_tsk);
mutex_lock(&thread_mutex);
if (simple_tsk_fn)
kthread_stop(simple_tsk_fn);
simple_tsk_fn = NULL;
mutex_unlock(&thread_mutex);
}
module_init(trace_event_init);
module_exit(trace_event_exit);
MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("trace-events-sample");
MODULE_LICENSE("GPL");
| linux-master | samples/trace_events/trace-events-sample.c |
// SPDX-License-Identifier: GPL-2.0
/*
* event tracer
*
* Copyright (C) 2022 Google Inc, Steven Rostedt <[email protected]>
*/
#define pr_fmt(fmt) fmt
#include <linux/trace_events.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/sched.h>
/*
* Must include the event header that the custom event will attach to,
* from the C file, and not in the custom header file.
*/
#include <trace/events/sched.h>
/* Declare CREATE_CUSTOM_TRACE_EVENTS before including custom header */
#define CREATE_CUSTOM_TRACE_EVENTS
#include "trace_custom_sched.h"
/*
* As the trace events are not exported to modules, the use of
* for_each_kernel_tracepoint() is needed to find the trace event
 * to attach to. The fct() function below is a callback that
 * will be called for every kernel tracepoint.
*
 * Helper functions created by the TRACE_CUSTOM_EVENT() macro
* update the event. Those are of the form:
*
* trace_custom_event_<event>_update()
*
* Where <event> is the event to attach.
*/
static void fct(struct tracepoint *tp, void *priv)
{
trace_custom_event_sched_switch_update(tp);
trace_custom_event_sched_waking_update(tp);
}
static int __init trace_sched_init(void)
{
for_each_kernel_tracepoint(fct, NULL);
return 0;
}
static void __exit trace_sched_exit(void)
{
}
module_init(trace_sched_init);
module_exit(trace_sched_exit);
MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("Custom scheduling events");
MODULE_LICENSE("GPL");
| linux-master | samples/trace_events/trace_custom_sched.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* configfs_example_macros.c - This file is a demonstration module
* containing a number of configfs subsystems. It uses the helper
* macros defined by configfs.h
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/configfs.h>
/*
* 01-childless
*
* This first example is a childless subsystem. It cannot create
* any config_items. It just has attributes.
*
* Note that we are enclosing the configfs_subsystem inside a container.
* This is not necessary if a subsystem has no attributes directly
* on the subsystem. See the next example, 02-simple-children, for
* such a subsystem.
*/
struct childless {
struct configfs_subsystem subsys;
int showme;
int storeme;
};
static inline struct childless *to_childless(struct config_item *item)
{
return container_of(to_configfs_subsystem(to_config_group(item)),
struct childless, subsys);
}
static ssize_t childless_showme_show(struct config_item *item, char *page)
{
struct childless *childless = to_childless(item);
ssize_t pos;
pos = sprintf(page, "%d\n", childless->showme);
childless->showme++;
return pos;
}
static ssize_t childless_storeme_show(struct config_item *item, char *page)
{
return sprintf(page, "%d\n", to_childless(item)->storeme);
}
static ssize_t childless_storeme_store(struct config_item *item,
const char *page, size_t count)
{
struct childless *childless = to_childless(item);
int ret;
ret = kstrtoint(page, 10, &childless->storeme);
if (ret)
return ret;
return count;
}
static ssize_t childless_description_show(struct config_item *item, char *page)
{
return sprintf(page,
"[01-childless]\n"
"\n"
"The childless subsystem is the simplest possible subsystem in\n"
"configfs. It does not support the creation of child config_items.\n"
"It only has a few attributes. In fact, it isn't much different\n"
"than a directory in /proc.\n");
}
CONFIGFS_ATTR_RO(childless_, showme);
CONFIGFS_ATTR(childless_, storeme);
CONFIGFS_ATTR_RO(childless_, description);
static struct configfs_attribute *childless_attrs[] = {
&childless_attr_showme,
&childless_attr_storeme,
&childless_attr_description,
NULL,
};
static const struct config_item_type childless_type = {
.ct_attrs = childless_attrs,
.ct_owner = THIS_MODULE,
};
static struct childless childless_subsys = {
.subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "01-childless",
.ci_type = &childless_type,
},
},
},
};
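/*
 * Illustrative userspace view of the childless subsystem (assuming configfs
 * is mounted at /sys/kernel/config):
 *
 *   # cat /sys/kernel/config/01-childless/showme     (increments per read)
 *   # echo 42 > /sys/kernel/config/01-childless/storeme
 *   # cat /sys/kernel/config/01-childless/storeme    -> 42
 */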
/* ----------------------------------------------------------------- */
/*
* 02-simple-children
*
* This example merely has a simple one-attribute child. Note that
* there is no extra attribute structure, as the child's attribute is
* known from the get-go. Also, there is no container for the
* subsystem, as it has no attributes of its own.
*/
struct simple_child {
struct config_item item;
int storeme;
};
static inline struct simple_child *to_simple_child(struct config_item *item)
{
return container_of(item, struct simple_child, item);
}
static ssize_t simple_child_storeme_show(struct config_item *item, char *page)
{
return sprintf(page, "%d\n", to_simple_child(item)->storeme);
}
static ssize_t simple_child_storeme_store(struct config_item *item,
const char *page, size_t count)
{
struct simple_child *simple_child = to_simple_child(item);
int ret;
ret = kstrtoint(page, 10, &simple_child->storeme);
if (ret)
return ret;
return count;
}
CONFIGFS_ATTR(simple_child_, storeme);
static struct configfs_attribute *simple_child_attrs[] = {
&simple_child_attr_storeme,
NULL,
};
static void simple_child_release(struct config_item *item)
{
kfree(to_simple_child(item));
}
static struct configfs_item_operations simple_child_item_ops = {
.release = simple_child_release,
};
static const struct config_item_type simple_child_type = {
.ct_item_ops = &simple_child_item_ops,
.ct_attrs = simple_child_attrs,
.ct_owner = THIS_MODULE,
};
struct simple_children {
struct config_group group;
};
static inline struct simple_children *to_simple_children(struct config_item *item)
{
return container_of(to_config_group(item),
struct simple_children, group);
}
static struct config_item *simple_children_make_item(struct config_group *group,
const char *name)
{
struct simple_child *simple_child;
simple_child = kzalloc(sizeof(struct simple_child), GFP_KERNEL);
if (!simple_child)
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&simple_child->item, name,
&simple_child_type);
return &simple_child->item;
}
static ssize_t simple_children_description_show(struct config_item *item,
char *page)
{
return sprintf(page,
"[02-simple-children]\n"
"\n"
"This subsystem allows the creation of child config_items. These\n"
"items have only one attribute that is readable and writeable.\n");
}
CONFIGFS_ATTR_RO(simple_children_, description);
static struct configfs_attribute *simple_children_attrs[] = {
&simple_children_attr_description,
NULL,
};
static void simple_children_release(struct config_item *item)
{
kfree(to_simple_children(item));
}
static struct configfs_item_operations simple_children_item_ops = {
.release = simple_children_release,
};
/*
* Note that, since no extra work is required on ->drop_item(),
* no ->drop_item() is provided.
*/
static struct configfs_group_operations simple_children_group_ops = {
.make_item = simple_children_make_item,
};
static const struct config_item_type simple_children_type = {
.ct_item_ops = &simple_children_item_ops,
.ct_group_ops = &simple_children_group_ops,
.ct_attrs = simple_children_attrs,
.ct_owner = THIS_MODULE,
};
static struct configfs_subsystem simple_children_subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "02-simple-children",
.ci_type = &simple_children_type,
},
},
};
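/*
 * Illustrative userspace view: items are created with mkdir(2), which ends
 * up in simple_children_make_item(), and destroyed with rmdir(2), which
 * drops the last reference and triggers simple_child_release():
 *
 *   # mkdir /sys/kernel/config/02-simple-children/foo
 *   # echo 7 > /sys/kernel/config/02-simple-children/foo/storeme
 *   # rmdir /sys/kernel/config/02-simple-children/foo
 */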
/* ----------------------------------------------------------------- */
/*
* 03-group-children
*
* This example reuses the simple_children group from above. However,
* the simple_children group is not the subsystem itself, it is a
* child of the subsystem. Creation of a group in the subsystem creates
* a new simple_children group. That group can then have simple_child
* children of its own.
*/
static struct config_group *group_children_make_group(
struct config_group *group, const char *name)
{
struct simple_children *simple_children;
simple_children = kzalloc(sizeof(struct simple_children),
GFP_KERNEL);
if (!simple_children)
return ERR_PTR(-ENOMEM);
config_group_init_type_name(&simple_children->group, name,
&simple_children_type);
return &simple_children->group;
}
static ssize_t group_children_description_show(struct config_item *item,
char *page)
{
return sprintf(page,
"[03-group-children]\n"
"\n"
"This subsystem allows the creation of child config_groups. These\n"
"groups are like the subsystem simple-children.\n");
}
CONFIGFS_ATTR_RO(group_children_, description);
static struct configfs_attribute *group_children_attrs[] = {
&group_children_attr_description,
NULL,
};
/*
* Note that, since no extra work is required on ->drop_item(),
* no ->drop_item() is provided.
*/
static struct configfs_group_operations group_children_group_ops = {
.make_group = group_children_make_group,
};
static const struct config_item_type group_children_type = {
.ct_group_ops = &group_children_group_ops,
.ct_attrs = group_children_attrs,
.ct_owner = THIS_MODULE,
};
static struct configfs_subsystem group_children_subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "03-group-children",
.ci_type = &group_children_type,
},
},
};
/* ----------------------------------------------------------------- */
/*
* We're now done with our subsystem definitions.
* For convenience in this module, here's a list of them all. It
* allows the init function to easily register them. Most modules
* will only have one subsystem, and will only call register_subsystem
* on it directly.
*/
static struct configfs_subsystem *example_subsys[] = {
&childless_subsys.subsys,
&simple_children_subsys,
&group_children_subsys,
NULL,
};
static int __init configfs_example_init(void)
{
struct configfs_subsystem *subsys;
int ret, i;
for (i = 0; example_subsys[i]; i++) {
subsys = example_subsys[i];
config_group_init(&subsys->su_group);
mutex_init(&subsys->su_mutex);
ret = configfs_register_subsystem(subsys);
if (ret) {
pr_err("Error %d while registering subsystem %s\n",
ret, subsys->su_group.cg_item.ci_namebuf);
goto out_unregister;
}
}
return 0;
out_unregister:
for (i--; i >= 0; i--)
configfs_unregister_subsystem(example_subsys[i]);
return ret;
}
static void __exit configfs_example_exit(void)
{
int i;
for (i = 0; example_subsys[i]; i++)
configfs_unregister_subsystem(example_subsys[i]);
}
module_init(configfs_example_init);
module_exit(configfs_example_exit);
MODULE_LICENSE("GPL");
| linux-master | samples/configfs/configfs_sample.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hidraw Userspace Example
*
* Copyright (c) 2010 Alan Ott <[email protected]>
* Copyright (c) 2010 Signal 11 Software
*
* The code may be used by anyone for any purpose,
* and can serve as a starting point for developing
* applications using hidraw.
*/
/* Linux */
#include <linux/types.h>
#include <linux/input.h>
#include <linux/hidraw.h>
/*
* Ugly hack to work around failing compilation on systems that don't
* yet populate new version of hidraw.h to userspace.
*/
#ifndef HIDIOCSFEATURE
#warning Please have your distro update the userspace kernel headers
#define HIDIOCSFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len)
#define HIDIOCGFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len)
#endif
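/*
 * Added note: _IOC() encodes the buffer length in the size field of the
 * ioctl number, which is why HIDIOCSFEATURE()/HIDIOCGFEATURE() are macros
 * taking a length rather than plain constants. By hidraw convention buf[0]
 * carries the report number (0 if the device uses no numbered reports).
 */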
/* Unix */
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
/* C */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
const char *bus_str(int bus);
int main(int argc, char **argv)
{
int fd;
int i, res, desc_size = 0;
char buf[256];
struct hidraw_report_descriptor rpt_desc;
struct hidraw_devinfo info;
char *device = "/dev/hidraw0";
if (argc > 1)
device = argv[1];
/* Open the Device with non-blocking reads. In real life,
don't use a hard coded path; use libudev instead. */
fd = open(device, O_RDWR|O_NONBLOCK);
if (fd < 0) {
perror("Unable to open device");
return 1;
}
memset(&rpt_desc, 0x0, sizeof(rpt_desc));
memset(&info, 0x0, sizeof(info));
memset(buf, 0x0, sizeof(buf));
/* Get Report Descriptor Size */
res = ioctl(fd, HIDIOCGRDESCSIZE, &desc_size);
if (res < 0)
perror("HIDIOCGRDESCSIZE");
else
printf("Report Descriptor Size: %d\n", desc_size);
/* Get Report Descriptor */
rpt_desc.size = desc_size;
res = ioctl(fd, HIDIOCGRDESC, &rpt_desc);
if (res < 0) {
perror("HIDIOCGRDESC");
} else {
printf("Report Descriptor:\n");
for (i = 0; i < rpt_desc.size; i++)
printf("%hhx ", rpt_desc.value[i]);
puts("\n");
}
/* Get Raw Name */
res = ioctl(fd, HIDIOCGRAWNAME(256), buf);
if (res < 0)
perror("HIDIOCGRAWNAME");
else
printf("Raw Name: %s\n", buf);
/* Get Physical Location */
res = ioctl(fd, HIDIOCGRAWPHYS(256), buf);
if (res < 0)
perror("HIDIOCGRAWPHYS");
else
printf("Raw Phys: %s\n", buf);
/* Get Raw Info */
res = ioctl(fd, HIDIOCGRAWINFO, &info);
if (res < 0) {
perror("HIDIOCGRAWINFO");
} else {
printf("Raw Info:\n");
printf("\tbustype: %d (%s)\n",
info.bustype, bus_str(info.bustype));
printf("\tvendor: 0x%04hx\n", info.vendor);
printf("\tproduct: 0x%04hx\n", info.product);
}
/* Set Feature */
buf[0] = 0x9; /* Report Number */
buf[1] = 0xff;
buf[2] = 0xff;
buf[3] = 0xff;
res = ioctl(fd, HIDIOCSFEATURE(4), buf);
if (res < 0)
perror("HIDIOCSFEATURE");
else
printf("ioctl HIDIOCSFEATURE returned: %d\n", res);
/* Get Feature */
buf[0] = 0x9; /* Report Number */
res = ioctl(fd, HIDIOCGFEATURE(256), buf);
if (res < 0) {
perror("HIDIOCGFEATURE");
} else {
printf("ioctl HIDIOCGFEATURE returned: %d\n", res);
printf("Report data:\n\t");
for (i = 0; i < res; i++)
printf("%hhx ", buf[i]);
puts("\n");
}
/* Send a Report to the Device */
buf[0] = 0x1; /* Report Number */
buf[1] = 0x77;
res = write(fd, buf, 2);
if (res < 0) {
printf("Error: %d\n", errno);
perror("write");
} else {
printf("write() wrote %d bytes\n", res);
}
/* Get a report from the device */
res = read(fd, buf, 16);
if (res < 0) {
perror("read");
} else {
printf("read() read %d bytes:\n\t", res);
for (i = 0; i < res; i++)
printf("%hhx ", buf[i]);
puts("\n");
}
close(fd);
return 0;
}
const char *
bus_str(int bus)
{
	switch (bus) {
	case BUS_USB:
		return "USB";
	case BUS_HIL:
		return "HIL";
	case BUS_BLUETOOTH:
		return "Bluetooth";
	case BUS_VIRTUAL:
		return "Virtual";
	default:
		return "Other";
	}
}
| linux-master | samples/hidraw/hid-example.c |
/*
* Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#define KBUILD_MODNAME "foo"
#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#define MAX_NR_PORTS 65536
#define EINVAL 22
#define ENOENT 2
/* map #0 */
struct inner_a {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, u32);
__type(value, int);
__uint(max_entries, MAX_NR_PORTS);
} port_a SEC(".maps");
/* map #1 */
struct inner_h {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u32);
__type(value, int);
__uint(max_entries, 1);
} port_h SEC(".maps");
/* map #2 */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u32);
__type(value, int);
__uint(max_entries, 1);
} reg_result_h SEC(".maps");
/* map #3 */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u32);
__type(value, int);
__uint(max_entries, 1);
} inline_result_h SEC(".maps");
/* map #4 */ /* Test case #0 */
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, MAX_NR_PORTS);
__uint(key_size, sizeof(u32));
__array(values, struct inner_a); /* use inner_a as inner map */
} a_of_port_a SEC(".maps");
/* map #5 */ /* Test case #1 */
struct {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 1);
__uint(key_size, sizeof(u32));
__array(values, struct inner_a); /* use inner_a as inner map */
} h_of_port_a SEC(".maps");
/* map #6 */ /* Test case #2 */
struct {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 1);
__uint(key_size, sizeof(u32));
__array(values, struct inner_h); /* use inner_h as inner map */
} h_of_port_h SEC(".maps");
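/* Added note: for the *_OF_MAPS types, userspace stores an inner map *fd*
 * in the outer map, while a bpf_map_lookup_elem() on the outer map from
 * program context returns a pointer to the inner map itself, usable
 * directly in a second bpf_map_lookup_elem(). The __array(values, ...)
 * member only fixes the inner maps' type/key/value layout at load time.
 */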
static __always_inline int do_reg_lookup(void *inner_map, u32 port)
{
int *result;
result = bpf_map_lookup_elem(inner_map, &port);
return result ? *result : -ENOENT;
}
static __always_inline int do_inline_array_lookup(void *inner_map, u32 port)
{
int *result;
if (inner_map != &port_a)
return -EINVAL;
result = bpf_map_lookup_elem(&port_a, &port);
return result ? *result : -ENOENT;
}
static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port)
{
int *result;
if (inner_map != &port_h)
return -EINVAL;
result = bpf_map_lookup_elem(&port_h, &port);
return result ? *result : -ENOENT;
}
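/* Added note: the user side (test_map_in_map_user.c) encodes everything in
 * the connect() destination: sin6_addr begins with 0xdead 0xbeef as a magic
 * marker, its last 16-bit word selects the test case, and sin6_port is the
 * lookup key.
 */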
SEC("ksyscall/connect")
int BPF_KSYSCALL(trace_sys_connect, unsigned int fd, struct sockaddr_in6 *in6, int addrlen)
{
u16 test_case, port, dst6[8];
int ret, inline_ret, ret_key = 0;
u32 port_key;
void *outer_map, *inner_map;
bool inline_hash = false;
if (addrlen != sizeof(*in6))
return 0;
ret = bpf_probe_read_user(dst6, sizeof(dst6), &in6->sin6_addr);
if (ret) {
inline_ret = ret;
goto done;
}
if (dst6[0] != 0xdead || dst6[1] != 0xbeef)
return 0;
test_case = dst6[7];
ret = bpf_probe_read_user(&port, sizeof(port), &in6->sin6_port);
if (ret) {
inline_ret = ret;
goto done;
}
port_key = port;
ret = -ENOENT;
if (test_case == 0) {
outer_map = &a_of_port_a;
} else if (test_case == 1) {
outer_map = &h_of_port_a;
} else if (test_case == 2) {
outer_map = &h_of_port_h;
} else {
ret = __LINE__;
inline_ret = ret;
goto done;
}
inner_map = bpf_map_lookup_elem(outer_map, &port_key);
if (!inner_map) {
ret = __LINE__;
inline_ret = ret;
goto done;
}
ret = do_reg_lookup(inner_map, port_key);
if (test_case == 0 || test_case == 1)
inline_ret = do_inline_array_lookup(inner_map, port_key);
else
inline_ret = do_inline_hash_lookup(inner_map, port_key);
done:
bpf_map_update_elem(®_result_h, &ret_key, &ret, BPF_ANY);
bpf_map_update_elem(&inline_result_h, &ret_key, &inline_ret, BPF_ANY);
return 0;
}
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
| linux-master | samples/bpf/test_map_in_map.bpf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017 Facebook
*/
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
static int map_fd[7];
#define PORT_A (map_fd[0])
#define PORT_H (map_fd[1])
#define REG_RESULT_H (map_fd[2])
#define INLINE_RESULT_H (map_fd[3])
#define A_OF_PORT_A (map_fd[4]) /* Test case #0 */
#define H_OF_PORT_A (map_fd[5]) /* Test case #1 */
#define H_OF_PORT_H (map_fd[6]) /* Test case #2 */
static const char * const test_names[] = {
"Array of Array",
"Hash of Array",
"Hash of Hash",
};
#define NR_TESTS ARRAY_SIZE(test_names)
static void check_map_id(int inner_map_fd, int map_in_map_fd, uint32_t key)
{
struct bpf_map_info info = {};
uint32_t info_len = sizeof(info);
int ret, id;
ret = bpf_map_get_info_by_fd(inner_map_fd, &info, &info_len);
assert(!ret);
ret = bpf_map_lookup_elem(map_in_map_fd, &key, &id);
assert(!ret);
assert(id == info.id);
}
static void populate_map(uint32_t port_key, int magic_result)
{
int ret;
ret = bpf_map_update_elem(PORT_A, &port_key, &magic_result, BPF_ANY);
assert(!ret);
ret = bpf_map_update_elem(PORT_H, &port_key, &magic_result,
BPF_NOEXIST);
assert(!ret);
ret = bpf_map_update_elem(A_OF_PORT_A, &port_key, &PORT_A, BPF_ANY);
assert(!ret);
check_map_id(PORT_A, A_OF_PORT_A, port_key);
ret = bpf_map_update_elem(H_OF_PORT_A, &port_key, &PORT_A, BPF_NOEXIST);
assert(!ret);
check_map_id(PORT_A, H_OF_PORT_A, port_key);
ret = bpf_map_update_elem(H_OF_PORT_H, &port_key, &PORT_H, BPF_NOEXIST);
assert(!ret);
check_map_id(PORT_H, H_OF_PORT_H, port_key);
}
static void test_map_in_map(void)
{
struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 };
uint32_t result_key = 0, port_key;
int result, inline_result;
int magic_result = 0xfaceb00c;
int ret;
int i;
port_key = rand() & 0x00FF;
populate_map(port_key, magic_result);
in6.sin6_addr.s6_addr16[0] = 0xdead;
in6.sin6_addr.s6_addr16[1] = 0xbeef;
in6.sin6_port = port_key;
for (i = 0; i < NR_TESTS; i++) {
printf("%s: ", test_names[i]);
in6.sin6_addr.s6_addr16[7] = i;
ret = connect(-1, (struct sockaddr *)&in6, sizeof(in6));
assert(ret == -1 && errno == EBADF);
ret = bpf_map_lookup_elem(REG_RESULT_H, &result_key, &result);
assert(!ret);
ret = bpf_map_lookup_elem(INLINE_RESULT_H, &result_key,
&inline_result);
assert(!ret);
if (result != magic_result || inline_result != magic_result) {
printf("Error. result:%d inline_result:%d\n",
result, inline_result);
exit(1);
}
bpf_map_delete_elem(REG_RESULT_H, &result_key);
bpf_map_delete_elem(INLINE_RESULT_H, &result_key);
printf("Pass\n");
}
}
int main(int argc, char **argv)
{
struct bpf_link *link = NULL;
struct bpf_program *prog;
struct bpf_object *obj;
char filename[256];
snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
return 0;
}
prog = bpf_object__find_program_by_name(obj, "trace_sys_connect");
if (!prog) {
printf("finding a prog in obj file failed\n");
goto cleanup;
}
/* load BPF program */
if (bpf_object__load(obj)) {
fprintf(stderr, "ERROR: loading BPF object file failed\n");
goto cleanup;
}
map_fd[0] = bpf_object__find_map_fd_by_name(obj, "port_a");
map_fd[1] = bpf_object__find_map_fd_by_name(obj, "port_h");
map_fd[2] = bpf_object__find_map_fd_by_name(obj, "reg_result_h");
map_fd[3] = bpf_object__find_map_fd_by_name(obj, "inline_result_h");
map_fd[4] = bpf_object__find_map_fd_by_name(obj, "a_of_port_a");
map_fd[5] = bpf_object__find_map_fd_by_name(obj, "h_of_port_a");
map_fd[6] = bpf_object__find_map_fd_by_name(obj, "h_of_port_h");
if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0 ||
map_fd[3] < 0 || map_fd[4] < 0 || map_fd[5] < 0 || map_fd[6] < 0) {
fprintf(stderr, "ERROR: finding a map in obj file failed\n");
goto cleanup;
}
link = bpf_program__attach(prog);
if (libbpf_get_error(link)) {
fprintf(stderr, "ERROR: bpf_program__attach failed\n");
link = NULL;
goto cleanup;
}
test_map_in_map();
cleanup:
bpf_link__destroy(link);
bpf_object__close(obj);
return 0;
}
| linux-master | samples/bpf/test_map_in_map_user.c |
// SPDX-License-Identifier: GPL-2.0
/* eBPF example program:
*
* - Loads eBPF program
*
* The eBPF program loads a filter from file and attaches the
* program to a cgroup using BPF_PROG_ATTACH
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <net/if.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_insn.h"
static int usage(const char *argv0)
{
printf("Usage: %s cg-path filter-path [filter-id]\n", argv0);
return EXIT_FAILURE;
}
int main(int argc, char **argv)
{
int cg_fd, err, ret = EXIT_FAILURE, filter_id = 0, prog_cnt = 0;
const char *link_pin_path = "/sys/fs/bpf/test_cgrp2_sock2";
struct bpf_link *link = NULL;
struct bpf_program *progs[2];
struct bpf_program *prog;
struct bpf_object *obj;
if (argc < 3)
return usage(argv[0]);
if (argc > 3)
filter_id = atoi(argv[3]);
cg_fd = open(argv[1], O_DIRECTORY | O_RDONLY);
if (cg_fd < 0) {
printf("Failed to open cgroup path: '%s'\n", strerror(errno));
return ret;
}
obj = bpf_object__open_file(argv[2], NULL);
if (libbpf_get_error(obj)) {
printf("ERROR: opening BPF object file failed\n");
return ret;
}
	bpf_object__for_each_program(prog, obj) {
		/* don't overflow the fixed-size progs[] array */
		if (prog_cnt == sizeof(progs) / sizeof(progs[0]))
			break;
		progs[prog_cnt] = prog;
		prog_cnt++;
	}
if (filter_id >= prog_cnt) {
printf("Invalid program id; program not found in file\n");
goto cleanup;
}
/* load BPF program */
if (bpf_object__load(obj)) {
printf("ERROR: loading BPF object file failed\n");
goto cleanup;
}
link = bpf_program__attach_cgroup(progs[filter_id], cg_fd);
if (libbpf_get_error(link)) {
printf("ERROR: bpf_program__attach failed\n");
link = NULL;
goto cleanup;
}
err = bpf_link__pin(link, link_pin_path);
if (err < 0) {
printf("ERROR: bpf_link__pin failed: %d\n", err);
goto cleanup;
}
ret = EXIT_SUCCESS;
cleanup:
bpf_link__destroy(link);
bpf_object__close(obj);
return ret;
}
| linux-master | samples/bpf/test_cgrp2_sock2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Facebook
*/
#define _GNU_SOURCE
#include <linux/types.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sched.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <time.h>
#include <bpf/bpf.h>
#include "bpf_util.h"
#define min(a, b) ((a) < (b) ? (a) : (b))
#ifndef offsetof
# define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
#endif
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
static int nr_cpus;
static unsigned long long *dist_keys;
static unsigned int dist_key_counts;
struct list_head {
struct list_head *next, *prev;
};
static inline void INIT_LIST_HEAD(struct list_head *list)
{
list->next = list;
list->prev = list;
}
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
next->prev = new;
new->next = next;
new->prev = prev;
prev->next = new;
}
static inline void list_add(struct list_head *new, struct list_head *head)
{
__list_add(new, head, head->next);
}
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
next->prev = prev;
prev->next = next;
}
static inline void __list_del_entry(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
}
static inline void list_move(struct list_head *list, struct list_head *head)
{
__list_del_entry(list);
list_add(list, head);
}
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
#define list_last_entry(ptr, type, member) \
list_entry((ptr)->prev, type, member)
struct pfect_lru_node {
struct list_head list;
unsigned long long key;
};
struct pfect_lru {
struct list_head list;
struct pfect_lru_node *free_nodes;
unsigned int cur_size;
unsigned int lru_size;
unsigned int nr_unique;
unsigned int nr_misses;
unsigned int total;
int map_fd;
};
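/* Added note: this "perfect LRU" is an exact userspace reference -- a
 * doubly-linked list kept in recency order plus a BPF hash map used purely
 * as a key -> node lookup table -- against which the behaviour of the
 * kernel's LRU map is compared.
 */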
static void pfect_lru_init(struct pfect_lru *lru, unsigned int lru_size,
unsigned int nr_possible_elems)
{
lru->map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
sizeof(unsigned long long),
sizeof(struct pfect_lru_node *),
nr_possible_elems, NULL);
assert(lru->map_fd != -1);
lru->free_nodes = malloc(lru_size * sizeof(struct pfect_lru_node));
assert(lru->free_nodes);
INIT_LIST_HEAD(&lru->list);
lru->cur_size = 0;
lru->lru_size = lru_size;
lru->nr_unique = lru->nr_misses = lru->total = 0;
}
static void pfect_lru_destroy(struct pfect_lru *lru)
{
close(lru->map_fd);
free(lru->free_nodes);
}
static int pfect_lru_lookup_or_insert(struct pfect_lru *lru,
unsigned long long key)
{
struct pfect_lru_node *node = NULL;
int seen = 0;
lru->total++;
if (!bpf_map_lookup_elem(lru->map_fd, &key, &node)) {
if (node) {
list_move(&node->list, &lru->list);
return 1;
}
seen = 1;
}
if (lru->cur_size < lru->lru_size) {
node = &lru->free_nodes[lru->cur_size++];
INIT_LIST_HEAD(&node->list);
} else {
struct pfect_lru_node *null_node = NULL;
node = list_last_entry(&lru->list,
struct pfect_lru_node,
list);
bpf_map_update_elem(lru->map_fd, &node->key, &null_node, BPF_EXIST);
}
node->key = key;
list_move(&node->list, &lru->list);
lru->nr_misses++;
if (seen) {
assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_EXIST));
} else {
lru->nr_unique++;
assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_NOEXIST));
}
return seen;
}
static unsigned int read_keys(const char *dist_file,
unsigned long long **keys)
{
struct stat fst;
unsigned long long *retkeys;
unsigned int counts = 0;
int dist_fd;
char *b, *l;
int i;
	dist_fd = open(dist_file, O_RDONLY);
assert(dist_fd != -1);
assert(fstat(dist_fd, &fst) == 0);
b = malloc(fst.st_size);
assert(b);
assert(read(dist_fd, b, fst.st_size) == fst.st_size);
close(dist_fd);
for (i = 0; i < fst.st_size; i++) {
if (b[i] == '\n')
counts++;
}
counts++; /* in case the last line has no \n */
retkeys = malloc(counts * sizeof(unsigned long long));
assert(retkeys);
counts = 0;
for (l = strtok(b, "\n"); l; l = strtok(NULL, "\n"))
retkeys[counts++] = strtoull(l, NULL, 10);
free(b);
*keys = retkeys;
return counts;
}
static int create_map(int map_type, int map_flags, unsigned int size)
{
LIBBPF_OPTS(bpf_map_create_opts, opts,
.map_flags = map_flags,
);
int map_fd;
map_fd = bpf_map_create(map_type, NULL, sizeof(unsigned long long),
sizeof(unsigned long long), size, &opts);
if (map_fd == -1)
perror("bpf_create_map");
return map_fd;
}
static int sched_next_online(int pid, int next_to_try)
{
cpu_set_t cpuset;
if (next_to_try == nr_cpus)
return -1;
while (next_to_try < nr_cpus) {
CPU_ZERO(&cpuset);
CPU_SET(next_to_try++, &cpuset);
if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset))
break;
}
return next_to_try;
}
static void run_parallel(unsigned int tasks, void (*fn)(int i, void *data),
void *data)
{
int next_sched_cpu = 0;
pid_t pid[tasks];
int i;
for (i = 0; i < tasks; i++) {
pid[i] = fork();
if (pid[i] == 0) {
next_sched_cpu = sched_next_online(0, next_sched_cpu);
fn(i, data);
exit(0);
} else if (pid[i] == -1) {
printf("couldn't spawn #%d process\n", i);
exit(1);
}
		/* It is mostly redundant and just allows the parent
		 * process to update next_sched_cpu for the next child
		 * process.
		 */
next_sched_cpu = sched_next_online(pid[i], next_sched_cpu);
}
for (i = 0; i < tasks; i++) {
int status;
assert(waitpid(pid[i], &status, 0) == pid[i]);
assert(status == 0);
}
}
static void do_test_lru_dist(int task, void *data)
{
unsigned int nr_misses = 0;
struct pfect_lru pfect_lru;
unsigned long long key, value = 1234;
unsigned int i;
unsigned int lru_map_fd = ((unsigned int *)data)[0];
unsigned int lru_size = ((unsigned int *)data)[1];
unsigned long long key_offset = task * dist_key_counts;
pfect_lru_init(&pfect_lru, lru_size, dist_key_counts);
for (i = 0; i < dist_key_counts; i++) {
key = dist_keys[i] + key_offset;
pfect_lru_lookup_or_insert(&pfect_lru, key);
if (!bpf_map_lookup_elem(lru_map_fd, &key, &value))
continue;
if (bpf_map_update_elem(lru_map_fd, &key, &value, BPF_NOEXIST)) {
printf("bpf_map_update_elem(lru_map_fd, %llu): errno:%d\n",
key, errno);
assert(0);
}
nr_misses++;
}
printf(" task:%d BPF LRU: nr_unique:%u(/%u) nr_misses:%u(/%u)\n",
task, pfect_lru.nr_unique, dist_key_counts, nr_misses,
dist_key_counts);
printf(" task:%d Perfect LRU: nr_unique:%u(/%u) nr_misses:%u(/%u)\n",
task, pfect_lru.nr_unique, pfect_lru.total,
pfect_lru.nr_misses, pfect_lru.total);
pfect_lru_destroy(&pfect_lru);
close(lru_map_fd);
}
static void test_parallel_lru_dist(int map_type, int map_flags,
int nr_tasks, unsigned int lru_size)
{
int child_data[2];
int lru_map_fd;
printf("%s (map_type:%d map_flags:0x%X):\n", __func__, map_type,
map_flags);
if (map_flags & BPF_F_NO_COMMON_LRU)
lru_map_fd = create_map(map_type, map_flags,
nr_cpus * lru_size);
else
lru_map_fd = create_map(map_type, map_flags,
nr_tasks * lru_size);
assert(lru_map_fd != -1);
child_data[0] = lru_map_fd;
child_data[1] = lru_size;
run_parallel(nr_tasks, do_test_lru_dist, child_data);
close(lru_map_fd);
}
static void test_lru_loss0(int map_type, int map_flags)
{
unsigned long long key, value[nr_cpus];
unsigned int old_unused_losses = 0;
unsigned int new_unused_losses = 0;
unsigned int used_losses = 0;
int map_fd;
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
assert(sched_next_online(0, 0) != -1);
if (map_flags & BPF_F_NO_COMMON_LRU)
map_fd = create_map(map_type, map_flags, 900 * nr_cpus);
else
map_fd = create_map(map_type, map_flags, 900);
assert(map_fd != -1);
value[0] = 1234;
for (key = 1; key <= 1000; key++) {
int start_key, end_key;
assert(bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST) == 0);
start_key = 101;
end_key = min(key, 900);
while (start_key <= end_key) {
bpf_map_lookup_elem(map_fd, &start_key, value);
start_key++;
}
}
for (key = 1; key <= 1000; key++) {
if (bpf_map_lookup_elem(map_fd, &key, value)) {
if (key <= 100)
old_unused_losses++;
else if (key <= 900)
used_losses++;
else
new_unused_losses++;
}
}
close(map_fd);
printf("older-elem-losses:%d(/100) active-elem-losses:%d(/800) "
"newer-elem-losses:%d(/100)\n",
old_unused_losses, used_losses, new_unused_losses);
}
static void test_lru_loss1(int map_type, int map_flags)
{
unsigned long long key, value[nr_cpus];
int map_fd;
unsigned int nr_losses = 0;
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
assert(sched_next_online(0, 0) != -1);
if (map_flags & BPF_F_NO_COMMON_LRU)
map_fd = create_map(map_type, map_flags, 1000 * nr_cpus);
else
map_fd = create_map(map_type, map_flags, 1000);
assert(map_fd != -1);
value[0] = 1234;
for (key = 1; key <= 1000; key++)
assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
for (key = 1; key <= 1000; key++) {
if (bpf_map_lookup_elem(map_fd, &key, value))
nr_losses++;
}
close(map_fd);
printf("nr_losses:%d(/1000)\n", nr_losses);
}
static void do_test_parallel_lru_loss(int task, void *data)
{
const unsigned int nr_stable_elems = 1000;
const unsigned int nr_repeats = 100000;
int map_fd = *(int *)data;
unsigned long long stable_base;
unsigned long long key, value[nr_cpus];
unsigned long long next_ins_key;
unsigned int nr_losses = 0;
unsigned int i;
stable_base = task * nr_repeats * 2 + 1;
next_ins_key = stable_base;
value[0] = 1234;
for (i = 0; i < nr_stable_elems; i++) {
assert(bpf_map_update_elem(map_fd, &next_ins_key, value,
BPF_NOEXIST) == 0);
next_ins_key++;
}
for (i = 0; i < nr_repeats; i++) {
int rn;
rn = rand();
if (rn % 10) {
key = rn % nr_stable_elems + stable_base;
bpf_map_lookup_elem(map_fd, &key, value);
} else {
bpf_map_update_elem(map_fd, &next_ins_key, value,
BPF_NOEXIST);
next_ins_key++;
}
}
key = stable_base;
for (i = 0; i < nr_stable_elems; i++) {
if (bpf_map_lookup_elem(map_fd, &key, value))
nr_losses++;
key++;
}
printf(" task:%d nr_losses:%u\n", task, nr_losses);
}
static void test_parallel_lru_loss(int map_type, int map_flags, int nr_tasks)
{
int map_fd;
printf("%s (map_type:%d map_flags:0x%X):\n", __func__, map_type,
map_flags);
/* Give 20% more than the active working set */
if (map_flags & BPF_F_NO_COMMON_LRU)
map_fd = create_map(map_type, map_flags,
nr_cpus * (1000 + 200));
else
map_fd = create_map(map_type, map_flags,
nr_tasks * (1000 + 200));
assert(map_fd != -1);
run_parallel(nr_tasks, do_test_parallel_lru_loss, &map_fd);
close(map_fd);
}
int main(int argc, char **argv)
{
int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
const char *dist_file;
int nr_tasks = 1;
int lru_size;
int f;
if (argc < 4) {
printf("Usage: %s <dist-file> <lru-size> <nr-tasks>\n",
argv[0]);
return -1;
}
dist_file = argv[1];
lru_size = atoi(argv[2]);
nr_tasks = atoi(argv[3]);
setbuf(stdout, NULL);
srand(time(NULL));
nr_cpus = bpf_num_possible_cpus();
assert(nr_cpus != -1);
printf("nr_cpus:%d\n\n", nr_cpus);
nr_tasks = min(nr_tasks, nr_cpus);
dist_key_counts = read_keys(dist_file, &dist_keys);
if (!dist_key_counts) {
printf("%s has no key\n", dist_file);
return -1;
}
for (f = 0; f < ARRAY_SIZE(map_flags); f++) {
test_lru_loss0(BPF_MAP_TYPE_LRU_HASH, map_flags[f]);
test_lru_loss1(BPF_MAP_TYPE_LRU_HASH, map_flags[f]);
test_parallel_lru_loss(BPF_MAP_TYPE_LRU_HASH, map_flags[f],
nr_tasks);
test_parallel_lru_dist(BPF_MAP_TYPE_LRU_HASH, map_flags[f],
nr_tasks, lru_size);
printf("\n");
}
free(dist_keys);
return 0;
}
| linux-master | samples/bpf/test_lru_dist.c |
/* Copyright (c) 2016 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/perf_event.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct key_t {
char comm[TASK_COMM_LEN];
u32 kernstack;
u32 userstack;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, struct key_t);
__type(value, u64);
__uint(max_entries, 10000);
} counts SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(key_size, sizeof(u32));
__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
__uint(max_entries, 10000);
} stackmap SEC(".maps");
#define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
#define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK)
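/* Added note: BPF_F_FAST_STACK_CMP makes bpf_get_stackid() deduplicate
 * stacks by hash alone (no full comparison on collision), and
 * BPF_F_USER_STACK requests the user-space stack instead of the kernel
 * one; the returned id indexes into 'stackmap' above.
 */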
SEC("perf_event")
int bpf_prog1(struct bpf_perf_event_data *ctx)
{
char time_fmt1[] = "Time Enabled: %llu, Time Running: %llu";
char time_fmt2[] = "Get Time Failed, ErrCode: %d";
char addr_fmt[] = "Address recorded on event: %llx";
char fmt[] = "CPU-%d period %lld ip %llx";
u32 cpu = bpf_get_smp_processor_id();
struct bpf_perf_event_value value_buf;
struct key_t key;
u64 *val, one = 1;
int ret;
if (ctx->sample_period < 10000)
/* ignore warmup */
return 0;
bpf_get_current_comm(&key.comm, sizeof(key.comm));
key.kernstack = bpf_get_stackid(ctx, &stackmap, KERN_STACKID_FLAGS);
key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS);
if ((int)key.kernstack < 0 && (int)key.userstack < 0) {
bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period,
PT_REGS_IP(&ctx->regs));
return 0;
}
ret = bpf_perf_prog_read_value(ctx, (void *)&value_buf, sizeof(struct bpf_perf_event_value));
if (!ret)
bpf_trace_printk(time_fmt1, sizeof(time_fmt1), value_buf.enabled, value_buf.running);
else
bpf_trace_printk(time_fmt2, sizeof(time_fmt2), ret);
if (ctx->addr != 0)
bpf_trace_printk(addr_fmt, sizeof(addr_fmt), ctx->addr);
val = bpf_map_lookup_elem(&counts, &key);
if (val)
(*val)++;
else
bpf_map_update_elem(&counts, &key, &one, BPF_NOEXIST);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/trace_event_kern.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
#define MAX_INDEX 64
#define MAX_STARS 38
/* my_map, my_hist_map */
static int map_fd[2];
static void stars(char *str, long val, long max, int width)
{
int i;
for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++)
str[i] = '*';
if (val > max)
str[i - 1] = '+';
str[i] = '\0';
}
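/*
 * Added example: with max = 100 and width = MAX_STARS (38), val = 50 yields
 * (38 * 50 / 100) - 1 = 18 stars; when val exceeds max, the last star is
 * replaced by '+' to flag saturation.
 */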
struct task {
char comm[16];
__u64 pid_tgid;
__u64 uid_gid;
};
struct hist_key {
struct task t;
__u32 index;
};
#define SIZE sizeof(struct task)
static void print_hist_for_pid(int fd, void *task)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
struct hist_key key = {}, next_key;
long values[nr_cpus];
char starstr[MAX_STARS];
long value;
long data[MAX_INDEX] = {};
int max_ind = -1;
long max_value = 0;
int i, ind;
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
if (memcmp(&next_key, task, SIZE)) {
key = next_key;
continue;
}
bpf_map_lookup_elem(fd, &next_key, values);
value = 0;
for (i = 0; i < nr_cpus; i++)
value += values[i];
ind = next_key.index;
data[ind] = value;
if (value && ind > max_ind)
max_ind = ind;
if (value > max_value)
max_value = value;
key = next_key;
}
printf(" syscall write() stats\n");
printf(" byte_size : count distribution\n");
for (i = 1; i <= max_ind + 1; i++) {
stars(starstr, data[i - 1], max_value, MAX_STARS);
printf("%8ld -> %-8ld : %-8ld |%-*s|\n",
(1l << i) >> 1, (1l << i) - 1, data[i - 1],
MAX_STARS, starstr);
}
}
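/* Small sketch (assumption, not part of the original sample; mirrors the
 * row arithmetic above): histogram row i covers write sizes in
 * [2^(i-1), 2^i - 1] bytes, so e.g. row 3 prints "4 -> 7".
 */
static inline void bucket_bounds(int i, long *lo, long *hi)
{
	*lo = (1l << i) >> 1;
	*hi = (1l << i) - 1;
}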
static void print_hist(int fd)
{
struct hist_key key = {}, next_key;
static struct task tasks[1024];
int task_cnt = 0;
int i;
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
int found = 0;
for (i = 0; i < task_cnt; i++)
if (memcmp(&tasks[i], &next_key, SIZE) == 0)
found = 1;
		if (!found && task_cnt < 1024) /* don't overflow tasks[] */
			memcpy(&tasks[task_cnt++], &next_key, SIZE);
key = next_key;
}
for (i = 0; i < task_cnt; i++) {
printf("\npid %d cmd %s uid %d\n",
(__u32) tasks[i].pid_tgid,
tasks[i].comm,
(__u32) tasks[i].uid_gid);
print_hist_for_pid(fd, &tasks[i]);
}
}
static void int_exit(int sig)
{
print_hist(map_fd[1]);
exit(0);
}
int main(int ac, char **argv)
{
long key, next_key, value;
struct bpf_link *links[2];
struct bpf_program *prog;
struct bpf_object *obj;
char filename[256];
int i, j = 0;
FILE *f;
snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
return 0;
}
/* load BPF program */
if (bpf_object__load(obj)) {
fprintf(stderr, "ERROR: loading BPF object file failed\n");
goto cleanup;
}
map_fd[0] = bpf_object__find_map_fd_by_name(obj, "my_map");
map_fd[1] = bpf_object__find_map_fd_by_name(obj, "my_hist_map");
if (map_fd[0] < 0 || map_fd[1] < 0) {
fprintf(stderr, "ERROR: finding a map in obj file failed\n");
goto cleanup;
}
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
/* start 'ping' in the background to have some kfree_skb_reason
* events */
f = popen("ping -4 -c5 localhost", "r");
(void) f;
/* start 'dd' in the background to have plenty of 'write' syscalls */
f = popen("dd if=/dev/zero of=/dev/null count=5000000", "r");
(void) f;
bpf_object__for_each_program(prog, obj) {
links[j] = bpf_program__attach(prog);
if (libbpf_get_error(links[j])) {
fprintf(stderr, "ERROR: bpf_program__attach failed\n");
links[j] = NULL;
goto cleanup;
}
j++;
}
for (i = 0; i < 5; i++) {
key = 0;
while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
bpf_map_lookup_elem(map_fd[0], &next_key, &value);
printf("location 0x%lx count %ld\n", next_key, value);
key = next_key;
}
if (key)
printf("\n");
sleep(1);
}
print_hist(map_fd[1]);
cleanup:
for (j--; j >= 0; j--)
bpf_link__destroy(links[j]);
bpf_object__close(obj);
return 0;
}
| linux-master | samples/bpf/tracex2_user.c |
#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/in.h>
#include <uapi/linux/tcp.h>
#include <uapi/linux/filter.h>
#include <uapi/linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
/* compiler workaround */
#define _htonl __builtin_bswap32
static inline void set_dst_mac(struct __sk_buff *skb, char *mac)
{
bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1);
}
#define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check))
#define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos))
static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
{
__u8 old_tos = load_byte(skb, TOS_OFF);
bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
}
#define TCP_CSUM_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, check))
#define IP_SRC_OFF (ETH_HLEN + offsetof(struct iphdr, saddr))
#define IS_PSEUDO 0x10
static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
{
__u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF));
bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0);
}
#define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest))
static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
{
__u16 old_port = htons(load_half(skb, TCP_DPORT_OFF));
bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port));
bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0);
}
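/* A mirrored sketch for the destination address (assumption, not in the
 * original sample; follows the same L4/L3 checksum-fixup sequence as
 * set_tcp_ip_src() above):
 */
#define IP_DST_OFF (ETH_HLEN + offsetof(struct iphdr, daddr))
static inline void set_tcp_ip_dst(struct __sk_buff *skb, __u32 new_ip)
{
	__u32 old_ip = _htonl(load_word(skb, IP_DST_OFF));

	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
	bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
	bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, sizeof(new_ip), 0);
}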
SEC("classifier")
int bpf_prog1(struct __sk_buff *skb)
{
__u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
long *value;
if (proto == IPPROTO_TCP) {
set_ip_tos(skb, 8);
set_tcp_ip_src(skb, 0xA010101);
set_tcp_dest_port(skb, 5001);
}
return 0;
}
SEC("redirect_xmit")
int _redirect_xmit(struct __sk_buff *skb)
{
return bpf_redirect(skb->ifindex + 1, 0);
}
SEC("redirect_recv")
int _redirect_recv(struct __sk_buff *skb)
{
return bpf_redirect(skb->ifindex + 1, 1);
}
SEC("clone_redirect_xmit")
int _clone_redirect_xmit(struct __sk_buff *skb)
{
bpf_clone_redirect(skb, skb->ifindex + 1, 0);
return TC_ACT_SHOT;
}
SEC("clone_redirect_recv")
int _clone_redirect_recv(struct __sk_buff *skb)
{
bpf_clone_redirect(skb, skb->ifindex + 1, 1);
return TC_ACT_SHOT;
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/tcbpf1_kern.c |
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* ibumad BPF sample user side
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* Copyright(c) 2018 Ira Weiny, Intel Corporation
*/
#include <linux/bpf.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <limits.h>
#include <getopt.h>
#include <net/if.h>
#include <bpf/bpf.h>
#include "bpf_util.h"
#include <bpf/libbpf.h>
static struct bpf_link *tp_links[3];
static struct bpf_object *obj;
static int map_fd[2];
static int tp_cnt;
static void dump_counts(int fd)
{
__u32 key;
__u64 value;
for (key = 0; key < 256; key++) {
if (bpf_map_lookup_elem(fd, &key, &value)) {
printf("failed to read key %u\n", key);
continue;
}
if (value)
printf("0x%02x : %llu\n", key, value);
}
}
static void dump_all_counts(void)
{
printf("Read 'Class : count'\n");
dump_counts(map_fd[0]);
printf("Write 'Class : count'\n");
dump_counts(map_fd[1]);
}
static void dump_exit(int sig)
{
dump_all_counts();
/* Detach tracepoints */
while (tp_cnt)
bpf_link__destroy(tp_links[--tp_cnt]);
bpf_object__close(obj);
exit(0);
}
static const struct option long_options[] = {
	{"help", no_argument, NULL, 'h'},
	{"delay", required_argument, NULL, 'd'},
	{NULL, 0, NULL, 0} /* getopt_long() requires a zeroed terminator */
};
static void usage(char *cmd)
{
printf("eBPF test program to count packets from various IP addresses\n"
"Usage: %s <options>\n"
" --help, -h this menu\n"
" --delay, -d <delay> wait <delay> sec between prints [1 - 1000000]\n"
, cmd
);
}
int main(int argc, char **argv)
{
struct bpf_program *prog;
unsigned long delay = 5;
char filename[256];
int longindex = 0;
int opt, err = -1;
	while ((opt = getopt_long(argc, argv, "hd:",
				  long_options, &longindex)) != -1) {
switch (opt) {
case 'd':
delay = strtoul(optarg, NULL, 0);
			if (delay == ULONG_MAX || delay < 1 ||
			    delay > 1000000) {
fprintf(stderr, "ERROR: invalid delay : %s\n",
optarg);
usage(argv[0]);
return 1;
}
break;
default:
case 'h':
usage(argv[0]);
return 1;
}
}
/* Do one final dump when exiting */
signal(SIGINT, dump_exit);
signal(SIGTERM, dump_exit);
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
return err;
}
/* load BPF program */
if (bpf_object__load(obj)) {
fprintf(stderr, "ERROR: loading BPF object file failed\n");
goto cleanup;
}
map_fd[0] = bpf_object__find_map_fd_by_name(obj, "read_count");
map_fd[1] = bpf_object__find_map_fd_by_name(obj, "write_count");
if (map_fd[0] < 0 || map_fd[1] < 0) {
fprintf(stderr, "ERROR: finding a map in obj file failed\n");
goto cleanup;
}
bpf_object__for_each_program(prog, obj) {
tp_links[tp_cnt] = bpf_program__attach(prog);
if (libbpf_get_error(tp_links[tp_cnt])) {
fprintf(stderr, "ERROR: bpf_program__attach failed\n");
tp_links[tp_cnt] = NULL;
goto cleanup;
}
tp_cnt++;
}
while (1) {
sleep(delay);
dump_all_counts();
}
err = 0;
cleanup:
/* Detach tracepoints */
while (tp_cnt)
bpf_link__destroy(tp_links[--tp_cnt]);
bpf_object__close(obj);
return err;
}
| linux-master | samples/bpf/ibumad_user.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-18 David Ahern <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/limits.h>
#include <net/if.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <libgen.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static int do_attach(int idx, int prog_fd, int map_fd, const char *name)
{
int err;
err = bpf_xdp_attach(idx, prog_fd, xdp_flags, NULL);
if (err < 0) {
printf("ERROR: failed to attach program to %s\n", name);
return err;
}
/* Adding ifindex as a possible egress TX port */
err = bpf_map_update_elem(map_fd, &idx, &idx, 0);
if (err)
printf("ERROR: failed using device %s as TX-port\n", name);
return err;
}
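/* A minimal cleanup sketch (assumption, not part of the original sample;
 * mirrors the TODO note in do_detach() below): drop an ifindex from the
 * devmap when it should no longer be a valid egress TX-port.
 */
static inline int do_unset_tx_port(int map_fd, int idx)
{
	return bpf_map_delete_elem(map_fd, &idx);
}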
static int do_detach(int ifindex, const char *ifname, const char *app_name)
{
LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
struct bpf_prog_info prog_info = {};
char prog_name[BPF_OBJ_NAME_LEN];
__u32 info_len, curr_prog_id;
int prog_fd;
int err = 1;
if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) {
printf("ERROR: bpf_xdp_query_id failed (%s)\n",
strerror(errno));
return err;
}
if (!curr_prog_id) {
printf("ERROR: flags(0x%x) xdp prog is not attached to %s\n",
xdp_flags, ifname);
return err;
}
info_len = sizeof(prog_info);
prog_fd = bpf_prog_get_fd_by_id(curr_prog_id);
if (prog_fd < 0) {
printf("ERROR: bpf_prog_get_fd_by_id failed (%s)\n",
strerror(errno));
return prog_fd;
}
err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
if (err) {
printf("ERROR: bpf_prog_get_info_by_fd failed (%s)\n",
strerror(errno));
goto close_out;
}
snprintf(prog_name, sizeof(prog_name), "%s_prog", app_name);
prog_name[BPF_OBJ_NAME_LEN - 1] = '\0';
if (strcmp(prog_info.name, prog_name)) {
printf("ERROR: %s isn't attached to %s\n", app_name, ifname);
err = 1;
goto close_out;
}
opts.old_prog_fd = prog_fd;
err = bpf_xdp_detach(ifindex, xdp_flags, &opts);
if (err < 0)
printf("ERROR: failed to detach program from %s (%s)\n",
ifname, strerror(errno));
	/* TODO: Remember to cleanup map, when adding use of shared map
	 * bpf_map_delete_elem(map_fd, &idx);
	 */
close_out:
close(prog_fd);
return err;
}
static void usage(const char *prog)
{
fprintf(stderr,
"usage: %s [OPTS] interface-list\n"
"\nOPTS:\n"
" -d detach program\n"
" -S use skb-mode\n"
" -F force loading prog\n"
" -D direct table lookups (skip fib rules)\n",
prog);
}
int main(int argc, char **argv)
{
const char *prog_name = "xdp_fwd";
struct bpf_program *prog = NULL;
struct bpf_program *pos;
const char *sec_name;
int prog_fd = -1, map_fd = -1;
char filename[PATH_MAX];
struct bpf_object *obj;
int opt, i, idx, err;
int attach = 1;
int ret = 0;
while ((opt = getopt(argc, argv, ":dDSF")) != -1) {
switch (opt) {
case 'd':
attach = 0;
break;
case 'S':
xdp_flags |= XDP_FLAGS_SKB_MODE;
break;
case 'F':
xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
break;
case 'D':
prog_name = "xdp_fwd_direct";
break;
default:
usage(basename(argv[0]));
return 1;
}
}
if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
xdp_flags |= XDP_FLAGS_DRV_MODE;
if (optind == argc) {
usage(basename(argv[0]));
return 1;
}
if (attach) {
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
		if (access(filename, R_OK) < 0) {
printf("error accessing file %s: %s\n",
filename, strerror(errno));
return 1;
}
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj))
return 1;
prog = bpf_object__next_program(obj, NULL);
bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
err = bpf_object__load(obj);
if (err) {
printf("Does kernel support devmap lookup?\n");
/* If not, the error message will be:
* "cannot pass map_type 14 into func bpf_map_lookup_elem#1"
*/
return 1;
}
bpf_object__for_each_program(pos, obj) {
sec_name = bpf_program__section_name(pos);
if (sec_name && !strcmp(sec_name, prog_name)) {
prog = pos;
break;
}
}
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
printf("program not found: %s\n", strerror(prog_fd));
return 1;
}
map_fd = bpf_map__fd(bpf_object__find_map_by_name(obj,
"xdp_tx_ports"));
if (map_fd < 0) {
printf("map not found: %s\n", strerror(map_fd));
return 1;
}
}
for (i = optind; i < argc; ++i) {
idx = if_nametoindex(argv[i]);
if (!idx)
idx = strtoul(argv[i], NULL, 0);
if (!idx) {
fprintf(stderr, "Invalid arg\n");
return 1;
}
if (!attach) {
err = do_detach(idx, argv[i], prog_name);
if (err)
ret = err;
} else {
err = do_attach(idx, prog_fd, map_fd, argv[i]);
if (err)
ret = err;
}
}
return ret;
}
| linux-master | samples/bpf/xdp_fwd_user.c |
/* Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* BPF program to set SYN and SYN-ACK RTOs to 10ms when using IPv6 addresses
* and the first 5.5 bytes of the IPv6 addresses are the same (in this example
* that means both hosts are in the same datacenter).
*
* Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include <linux/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define DEBUG 1
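/* Minimal sketch (assumption, not part of the original sample) factoring
 * the "same datacenter" test used in bpf_synrto() below: a /44 prefix
 * match, i.e. ip6[0] covers bytes 0-3 and the top 12 bits of ip6[1]
 * cover byte 4 plus the high nibble of byte 5 ("5.5 bytes").
 */
static inline int same_dc_prefix44(struct bpf_sock_ops *skops)
{
	return skops->local_ip6[0] == skops->remote_ip6[0] &&
	       (bpf_ntohl(skops->local_ip6[1]) & 0xfff00000) ==
	       (bpf_ntohl(skops->remote_ip6[1]) & 0xfff00000);
}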
SEC("sockops")
int bpf_synrto(struct bpf_sock_ops *skops)
{
int rv = -1;
int op;
	/* For testing purposes, only execute the rest of the BPF program
	 * if one of the port numbers is 55601
	 */
if (bpf_ntohl(skops->remote_port) != 55601 &&
skops->local_port != 55601) {
skops->reply = -1;
return 1;
}
op = (int) skops->op;
#ifdef DEBUG
bpf_printk("BPF command: %d\n", op);
#endif
/* Check for TIMEOUT_INIT operation and IPv6 addresses */
if (op == BPF_SOCK_OPS_TIMEOUT_INIT &&
skops->family == AF_INET6) {
/* If the first 5.5 bytes of the IPv6 address are the same
* then both hosts are in the same datacenter
* so use an RTO of 10ms
*/
if (skops->local_ip6[0] == skops->remote_ip6[0] &&
(bpf_ntohl(skops->local_ip6[1]) & 0xfff00000) ==
(bpf_ntohl(skops->remote_ip6[1]) & 0xfff00000))
rv = 10;
}
#ifdef DEBUG
bpf_printk("Returning %d\n", rv);
#endif
skops->reply = rv;
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/tcp_synrto_kern.c |
/* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2018 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <netinet/ether.h>
#include <unistd.h>
#include <time.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#define STATS_INTERVAL_S 2U
#define MAX_PCKT_SIZE 600
static int ifindex = -1;
static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static __u32 prog_id;
static void int_exit(int sig)
{
__u32 curr_prog_id = 0;
if (ifindex > -1) {
if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) {
printf("bpf_xdp_query_id failed\n");
exit(1);
}
if (prog_id == curr_prog_id)
bpf_xdp_detach(ifindex, xdp_flags, NULL);
else if (!curr_prog_id)
printf("couldn't find a prog id on a given iface\n");
else
printf("program on interface changed, not removing\n");
}
exit(0);
}
/* simple "icmp packet too big sent" counter
*/
static void poll_stats(unsigned int map_fd, unsigned int kill_after_s)
{
time_t started_at = time(NULL);
__u64 value = 0;
int key = 0;
while (!kill_after_s || time(NULL) - started_at <= kill_after_s) {
sleep(STATS_INTERVAL_S);
assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
printf("icmp \"packet too big\" sent: %10llu pkts\n", value);
}
}
static void usage(const char *cmd)
{
printf("Start a XDP prog which send ICMP \"packet too big\" \n"
"messages if ingress packet is bigger then MAX_SIZE bytes\n");
printf("Usage: %s [...]\n", cmd);
printf(" -i <ifname|ifindex> Interface\n");
printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n");
printf(" -P <MAX_PCKT_SIZE> Default: %u\n", MAX_PCKT_SIZE);
printf(" -S use skb-mode\n");
printf(" -N enforce native mode\n");
printf(" -F force loading prog\n");
printf(" -h Display this help\n");
}
int main(int argc, char **argv)
{
unsigned char opt_flags[256] = {};
const char *optstr = "i:T:P:SNFh";
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
unsigned int kill_after_s = 0;
int i, prog_fd, map_fd, opt;
struct bpf_program *prog;
struct bpf_object *obj;
__u32 max_pckt_size = 0;
__u32 key = 0;
char filename[256];
int err;
for (i = 0; i < strlen(optstr); i++)
if (optstr[i] != 'h' && 'a' <= optstr[i] && optstr[i] <= 'z')
opt_flags[(unsigned char)optstr[i]] = 1;
while ((opt = getopt(argc, argv, optstr)) != -1) {
switch (opt) {
case 'i':
ifindex = if_nametoindex(optarg);
if (!ifindex)
ifindex = atoi(optarg);
break;
case 'T':
kill_after_s = atoi(optarg);
break;
case 'P':
max_pckt_size = atoi(optarg);
break;
case 'S':
xdp_flags |= XDP_FLAGS_SKB_MODE;
break;
case 'N':
/* default, set below */
break;
case 'F':
xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
break;
default:
usage(argv[0]);
return 1;
}
opt_flags[opt] = 0;
}
if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
xdp_flags |= XDP_FLAGS_DRV_MODE;
for (i = 0; i < strlen(optstr); i++) {
if (opt_flags[(unsigned int)optstr[i]]) {
fprintf(stderr, "Missing argument -%c\n", optstr[i]);
usage(argv[0]);
return 1;
}
}
if (!ifindex) {
fprintf(stderr, "Invalid ifname\n");
return 1;
}
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj))
return 1;
prog = bpf_object__next_program(obj, NULL);
bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
err = bpf_object__load(obj);
if (err)
return 1;
prog_fd = bpf_program__fd(prog);
/* static global var 'max_pcktsz' is accessible from .data section */
if (max_pckt_size) {
map_fd = bpf_object__find_map_fd_by_name(obj, "xdp_adju.data");
if (map_fd < 0) {
printf("finding a max_pcktsz map in obj file failed\n");
return 1;
}
bpf_map_update_elem(map_fd, &key, &max_pckt_size, BPF_ANY);
}
/* fetch icmpcnt map */
map_fd = bpf_object__find_map_fd_by_name(obj, "icmpcnt");
if (map_fd < 0) {
printf("finding a icmpcnt map in obj file failed\n");
return 1;
}
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
if (bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL) < 0) {
printf("link set xdp fd failed\n");
return 1;
}
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (err) {
printf("can't get prog info - %s\n", strerror(errno));
return 1;
}
prog_id = info.id;
poll_stats(map_fd, kill_after_s);
int_exit(0);
return 0;
}
| linux-master | samples/bpf/xdp_adjust_tail_user.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <linux/perf_event.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
#include "perf-sys.h"
#include "trace_helpers.h"
static struct bpf_program *progs[2];
static struct bpf_link *links[2];
#define CHECK_PERROR_RET(condition) ({ \
int __ret = !!(condition); \
if (__ret) { \
printf("FAIL: %s:\n", __func__); \
perror(" "); \
return -1; \
} \
})
#define CHECK_AND_RET(condition) ({ \
int __ret = !!(condition); \
if (__ret) \
return -1; \
})
static __u64 ptr_to_u64(void *ptr)
{
return (__u64) (unsigned long) ptr;
}
#define PMU_TYPE_FILE "/sys/bus/event_source/devices/%s/type"
static int bpf_find_probe_type(const char *event_type)
{
char buf[256];
int fd, ret;
ret = snprintf(buf, sizeof(buf), PMU_TYPE_FILE, event_type);
CHECK_PERROR_RET(ret < 0 || ret >= sizeof(buf));
fd = open(buf, O_RDONLY);
CHECK_PERROR_RET(fd < 0);
	ret = read(fd, buf, sizeof(buf));
	close(fd);
	CHECK_PERROR_RET(ret < 0 || ret >= sizeof(buf));
	buf[ret] = '\0'; /* read() does not NUL-terminate for strtol() */
errno = 0;
ret = (int)strtol(buf, NULL, 10);
CHECK_PERROR_RET(errno);
return ret;
}
#define PMU_RETPROBE_FILE "/sys/bus/event_source/devices/%s/format/retprobe"
static int bpf_get_retprobe_bit(const char *event_type)
{
char buf[256];
int fd, ret;
ret = snprintf(buf, sizeof(buf), PMU_RETPROBE_FILE, event_type);
CHECK_PERROR_RET(ret < 0 || ret >= sizeof(buf));
fd = open(buf, O_RDONLY);
CHECK_PERROR_RET(fd < 0);
	ret = read(fd, buf, sizeof(buf));
	close(fd);
	CHECK_PERROR_RET(ret < 0 || ret >= sizeof(buf));
	buf[ret] = '\0'; /* read() does not NUL-terminate */
	CHECK_PERROR_RET(strlen(buf) < strlen("config:"));
errno = 0;
ret = (int)strtol(buf + strlen("config:"), NULL, 10);
CHECK_PERROR_RET(errno);
return ret;
}
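/* A minimal sketch (assumption, not part of the original sample) tying the
 * two helpers above together: fill a perf_event_attr for a function-entry
 * kprobe on `func` via the dynamic PMU type discovered from sysfs.
 */
static inline int make_kprobe_attr(struct perf_event_attr *attr,
				   const char *func)
{
	int type = bpf_find_probe_type("kprobe");

	if (type < 0)
		return -1;
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = type;
	attr->config1 = ptr_to_u64((void *)func); /* symbol name */
	attr->config2 = 0;			  /* offset within the symbol */
	return 0;
}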
static int test_debug_fs_kprobe(int link_idx, const char *fn_name,
__u32 expected_fd_type)
{
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
int err, event_fd;
char buf[256];
len = sizeof(buf);
event_fd = bpf_link__fd(links[link_idx]);
err = bpf_task_fd_query(getpid(), event_fd, 0, buf, &len,
&prog_id, &fd_type, &probe_offset,
&probe_addr);
if (err < 0) {
printf("FAIL: %s, for event_fd idx %d, fn_name %s\n",
__func__, link_idx, fn_name);
perror(" :");
return -1;
}
if (strcmp(buf, fn_name) != 0 ||
fd_type != expected_fd_type ||
probe_offset != 0x0 || probe_addr != 0x0) {
printf("FAIL: bpf_trace_event_query(event_fd[%d]):\n",
link_idx);
printf("buf: %s, fd_type: %u, probe_offset: 0x%llx,"
" probe_addr: 0x%llx\n",
buf, fd_type, probe_offset, probe_addr);
return -1;
}
return 0;
}
static int test_nondebug_fs_kuprobe_common(const char *event_type,
const char *name, __u64 offset, __u64 addr, bool is_return,
char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
__u64 *probe_offset, __u64 *probe_addr)
{
int is_return_bit = bpf_get_retprobe_bit(event_type);
int type = bpf_find_probe_type(event_type);
struct perf_event_attr attr = {};
struct bpf_link *link;
int fd, err = -1;
if (type < 0 || is_return_bit < 0) {
printf("FAIL: %s incorrect type (%d) or is_return_bit (%d)\n",
__func__, type, is_return_bit);
return err;
}
attr.sample_period = 1;
attr.wakeup_events = 1;
if (is_return)
attr.config |= 1 << is_return_bit;
if (name) {
attr.config1 = ptr_to_u64((void *)name);
attr.config2 = offset;
} else {
attr.config1 = 0;
attr.config2 = addr;
}
attr.size = sizeof(attr);
attr.type = type;
	fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
	CHECK_PERROR_RET(fd < 0);
link = bpf_program__attach_perf_event(progs[0], fd);
if (libbpf_get_error(link)) {
printf("ERROR: bpf_program__attach_perf_event failed\n");
link = NULL;
close(fd);
goto cleanup;
}
CHECK_PERROR_RET(bpf_task_fd_query(getpid(), fd, 0, buf, buf_len,
prog_id, fd_type, probe_offset, probe_addr) < 0);
err = 0;
cleanup:
bpf_link__destroy(link);
return err;
}
static int test_nondebug_fs_probe(const char *event_type, const char *name,
__u64 offset, __u64 addr, bool is_return,
__u32 expected_fd_type,
__u32 expected_ret_fd_type,
char *buf, __u32 buf_len)
{
__u64 probe_offset, probe_addr;
__u32 prog_id, fd_type;
int err;
err = test_nondebug_fs_kuprobe_common(event_type, name,
offset, addr, is_return,
buf, &buf_len, &prog_id,
&fd_type, &probe_offset,
&probe_addr);
if (err < 0) {
printf("FAIL: %s, "
"for name %s, offset 0x%llx, addr 0x%llx, is_return %d\n",
__func__, name ? name : "", offset, addr, is_return);
perror(" :");
return -1;
}
if ((is_return && fd_type != expected_ret_fd_type) ||
(!is_return && fd_type != expected_fd_type)) {
printf("FAIL: %s, incorrect fd_type %u\n",
__func__, fd_type);
return -1;
}
if (name) {
if (strcmp(name, buf) != 0) {
printf("FAIL: %s, incorrect buf %s\n", __func__, buf);
return -1;
}
if (probe_offset != offset) {
printf("FAIL: %s, incorrect probe_offset 0x%llx\n",
__func__, probe_offset);
return -1;
}
} else {
if (buf_len != 0) {
printf("FAIL: %s, incorrect buf %p\n",
__func__, buf);
return -1;
}
if (probe_addr != addr) {
printf("FAIL: %s, incorrect probe_addr 0x%llx\n",
__func__, probe_addr);
return -1;
}
}
return 0;
}
static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return)
{
char buf[256], event_alias[sizeof("test_1234567890")];
const char *event_type = "uprobe";
struct perf_event_attr attr = {};
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
int err = -1, res, kfd, efd;
struct bpf_link *link;
ssize_t bytes;
snprintf(buf, sizeof(buf), "/sys/kernel/tracing/%s_events",
event_type);
kfd = open(buf, O_WRONLY | O_TRUNC, 0);
CHECK_PERROR_RET(kfd < 0);
res = snprintf(event_alias, sizeof(event_alias), "test_%d", getpid());
CHECK_PERROR_RET(res < 0 || res >= sizeof(event_alias));
res = snprintf(buf, sizeof(buf), "%c:%ss/%s %s:0x%lx",
is_return ? 'r' : 'p', event_type, event_alias,
binary_path, offset);
CHECK_PERROR_RET(res < 0 || res >= sizeof(buf));
CHECK_PERROR_RET(write(kfd, buf, strlen(buf)) < 0);
close(kfd);
kfd = -1;
snprintf(buf, sizeof(buf), "/sys/kernel/tracing/events/%ss/%s/id",
event_type, event_alias);
efd = open(buf, O_RDONLY, 0);
CHECK_PERROR_RET(efd < 0);
bytes = read(efd, buf, sizeof(buf));
CHECK_PERROR_RET(bytes <= 0 || bytes >= sizeof(buf));
close(efd);
buf[bytes] = '\0';
attr.config = strtol(buf, NULL, 0);
attr.type = PERF_TYPE_TRACEPOINT;
attr.sample_period = 1;
attr.wakeup_events = 1;
	kfd = sys_perf_event_open(&attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	CHECK_PERROR_RET(kfd < 0);
link = bpf_program__attach_perf_event(progs[0], kfd);
if (libbpf_get_error(link)) {
printf("ERROR: bpf_program__attach_perf_event failed\n");
link = NULL;
close(kfd);
goto cleanup;
}
len = sizeof(buf);
err = bpf_task_fd_query(getpid(), kfd, 0, buf, &len,
&prog_id, &fd_type, &probe_offset,
&probe_addr);
if (err < 0) {
printf("FAIL: %s, binary_path %s\n", __func__, binary_path);
perror(" :");
return -1;
}
if ((is_return && fd_type != BPF_FD_TYPE_URETPROBE) ||
(!is_return && fd_type != BPF_FD_TYPE_UPROBE)) {
printf("FAIL: %s, incorrect fd_type %u\n", __func__,
fd_type);
return -1;
}
if (strcmp(binary_path, buf) != 0) {
printf("FAIL: %s, incorrect buf %s\n", __func__, buf);
return -1;
}
if (probe_offset != offset) {
printf("FAIL: %s, incorrect probe_offset 0x%llx\n", __func__,
probe_offset);
return -1;
}
err = 0;
cleanup:
bpf_link__destroy(link);
return err;
}
int main(int argc, char **argv)
{
extern char __executable_start;
char filename[256], buf[256];
__u64 uprobe_file_offset;
struct bpf_program *prog;
struct bpf_object *obj;
int i = 0, err = -1;
if (load_kallsyms()) {
printf("failed to process /proc/kallsyms\n");
return err;
}
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
return err;
}
/* load BPF program */
if (bpf_object__load(obj)) {
fprintf(stderr, "ERROR: loading BPF object file failed\n");
goto cleanup;
}
bpf_object__for_each_program(prog, obj) {
progs[i] = prog;
links[i] = bpf_program__attach(progs[i]);
if (libbpf_get_error(links[i])) {
fprintf(stderr, "ERROR: bpf_program__attach failed\n");
links[i] = NULL;
goto cleanup;
}
i++;
}
/* test two functions in the corresponding *_kern.c file */
CHECK_AND_RET(test_debug_fs_kprobe(0, "blk_mq_start_request",
BPF_FD_TYPE_KPROBE));
CHECK_AND_RET(test_debug_fs_kprobe(1, "__blk_account_io_done",
BPF_FD_TYPE_KRETPROBE));
/* test nondebug fs kprobe */
CHECK_AND_RET(test_nondebug_fs_probe("kprobe", "bpf_check", 0x0, 0x0,
false, BPF_FD_TYPE_KPROBE,
BPF_FD_TYPE_KRETPROBE,
buf, sizeof(buf)));
#ifdef __x86_64__
/* set a kprobe on "bpf_check + 0x5", which is x64 specific */
CHECK_AND_RET(test_nondebug_fs_probe("kprobe", "bpf_check", 0x5, 0x0,
false, BPF_FD_TYPE_KPROBE,
BPF_FD_TYPE_KRETPROBE,
buf, sizeof(buf)));
#endif
CHECK_AND_RET(test_nondebug_fs_probe("kprobe", "bpf_check", 0x0, 0x0,
true, BPF_FD_TYPE_KPROBE,
BPF_FD_TYPE_KRETPROBE,
buf, sizeof(buf)));
CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
ksym_get_addr("bpf_check"), false,
BPF_FD_TYPE_KPROBE,
BPF_FD_TYPE_KRETPROBE,
buf, sizeof(buf)));
CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
ksym_get_addr("bpf_check"), false,
BPF_FD_TYPE_KPROBE,
BPF_FD_TYPE_KRETPROBE,
NULL, 0));
CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
ksym_get_addr("bpf_check"), true,
BPF_FD_TYPE_KPROBE,
BPF_FD_TYPE_KRETPROBE,
buf, sizeof(buf)));
CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
ksym_get_addr("bpf_check"), true,
BPF_FD_TYPE_KPROBE,
BPF_FD_TYPE_KRETPROBE,
0, 0));
/* test nondebug fs uprobe */
/* the calculation of uprobe file offset is based on gcc 7.3.1 on x64
* and the default linker script, which defines __executable_start as
* the start of the .text section. The calculation could be different
* on different systems with different compilers. The right way is
* to parse the ELF file. We took a shortcut here.
*/
uprobe_file_offset = (unsigned long)main - (unsigned long)&__executable_start;
CHECK_AND_RET(test_nondebug_fs_probe("uprobe", (char *)argv[0],
uprobe_file_offset, 0x0, false,
BPF_FD_TYPE_UPROBE,
BPF_FD_TYPE_URETPROBE,
buf, sizeof(buf)));
CHECK_AND_RET(test_nondebug_fs_probe("uprobe", (char *)argv[0],
uprobe_file_offset, 0x0, true,
BPF_FD_TYPE_UPROBE,
BPF_FD_TYPE_URETPROBE,
buf, sizeof(buf)));
/* test debug fs uprobe */
CHECK_AND_RET(test_debug_fs_uprobe((char *)argv[0], uprobe_file_offset,
false));
CHECK_AND_RET(test_debug_fs_uprobe((char *)argv[0], uprobe_file_offset,
true));
err = 0;
cleanup:
for (i--; i >= 0; i--)
bpf_link__destroy(links[i]);
bpf_object__close(obj);
return err;
}
| linux-master | samples/bpf/task_fd_query_user.c |
/* Copyright (c) 2016 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#define KBUILD_MODNAME "foo"
#include "vmlinux.h"
#include "net_shared.h"
#include <bpf/bpf_helpers.h>
/* copy of 'struct ethhdr' without __packed */
struct eth_hdr {
unsigned char h_dest[ETH_ALEN];
unsigned char h_source[ETH_ALEN];
unsigned short h_proto;
};
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
__type(key, u32);
__type(value, u32);
__uint(pinning, LIBBPF_PIN_BY_NAME);
__uint(max_entries, 1);
} test_cgrp2_array_pin SEC(".maps");
SEC("filter")
int handle_egress(struct __sk_buff *skb)
{
void *data = (void *)(long)skb->data;
struct eth_hdr *eth = data;
struct ipv6hdr *ip6h = data + sizeof(*eth);
void *data_end = (void *)(long)skb->data_end;
char dont_care_msg[] = "dont care %04x %d\n";
char pass_msg[] = "pass\n";
char reject_msg[] = "reject\n";
/* single length check */
if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
return TC_ACT_OK;
if (eth->h_proto != bpf_htons(ETH_P_IPV6) ||
ip6h->nexthdr != IPPROTO_ICMPV6) {
bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg),
eth->h_proto, ip6h->nexthdr);
return TC_ACT_OK;
} else if (bpf_skb_under_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) {
bpf_trace_printk(pass_msg, sizeof(pass_msg));
return TC_ACT_OK;
} else {
bpf_trace_printk(reject_msg, sizeof(reject_msg));
return TC_ACT_SHOT;
}
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/test_cgrp2_tc.bpf.c |
#include <linux/unistd.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_insn.h"
#include "sock_example.h"
#include "bpf_util.h"
#define BPF_F_PIN (1 << 0)
#define BPF_F_GET (1 << 1)
#define BPF_F_PIN_GET (BPF_F_PIN | BPF_F_GET)
#define BPF_F_KEY (1 << 2)
#define BPF_F_VAL (1 << 3)
#define BPF_F_KEY_VAL (BPF_F_KEY | BPF_F_VAL)
#define BPF_M_UNSPEC 0
#define BPF_M_MAP 1
#define BPF_M_PROG 2
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static void usage(void)
{
printf("Usage: fds_example [...]\n");
printf(" -F <file> File to pin/get object\n");
printf(" -P |- pin object\n");
printf(" -G `- get object\n");
printf(" -m eBPF map mode\n");
printf(" -k <key> |- map key\n");
printf(" -v <value> `- map value\n");
printf(" -p eBPF prog mode\n");
printf(" -o <object> `- object file\n");
printf(" -h Display this help.\n");
}
static int bpf_prog_create(const char *object)
{
static struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
};
size_t insns_cnt = ARRAY_SIZE(insns);
struct bpf_object *obj;
int err;
if (object) {
obj = bpf_object__open_file(object, NULL);
assert(!libbpf_get_error(obj));
err = bpf_object__load(obj);
assert(!err);
return bpf_program__fd(bpf_object__next_program(obj, NULL));
} else {
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.log_buf = bpf_log_buf,
.log_size = BPF_LOG_BUF_SIZE,
);
return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
insns, insns_cnt, &opts);
}
}
static int bpf_do_map(const char *file, uint32_t flags, uint32_t key,
uint32_t value)
{
int fd, ret;
if (flags & BPF_F_PIN) {
fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(uint32_t),
sizeof(uint32_t), 1024, NULL);
printf("bpf: map fd:%d (%s)\n", fd, strerror(errno));
assert(fd > 0);
ret = bpf_obj_pin(fd, file);
printf("bpf: pin ret:(%d,%s)\n", ret, strerror(errno));
assert(ret == 0);
} else {
fd = bpf_obj_get(file);
printf("bpf: get fd:%d (%s)\n", fd, strerror(errno));
assert(fd > 0);
}
if ((flags & BPF_F_KEY_VAL) == BPF_F_KEY_VAL) {
ret = bpf_map_update_elem(fd, &key, &value, 0);
printf("bpf: fd:%d u->(%u:%u) ret:(%d,%s)\n", fd, key, value,
ret, strerror(errno));
assert(ret == 0);
} else if (flags & BPF_F_KEY) {
ret = bpf_map_lookup_elem(fd, &key, &value);
printf("bpf: fd:%d l->(%u):%u ret:(%d,%s)\n", fd, key, value,
ret, strerror(errno));
assert(ret == 0);
}
return 0;
}
static int bpf_do_prog(const char *file, uint32_t flags, const char *object)
{
int fd, sock, ret;
if (flags & BPF_F_PIN) {
fd = bpf_prog_create(object);
printf("bpf: prog fd:%d (%s)\n", fd, strerror(errno));
assert(fd > 0);
ret = bpf_obj_pin(fd, file);
printf("bpf: pin ret:(%d,%s)\n", ret, strerror(errno));
assert(ret == 0);
} else {
fd = bpf_obj_get(file);
printf("bpf: get fd:%d (%s)\n", fd, strerror(errno));
assert(fd > 0);
}
sock = open_raw_sock("lo");
assert(sock > 0);
ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &fd, sizeof(fd));
printf("bpf: sock:%d <- fd:%d attached ret:(%d,%s)\n", sock, fd,
ret, strerror(errno));
assert(ret == 0);
return 0;
}
int main(int argc, char **argv)
{
const char *file = NULL, *object = NULL;
uint32_t key = 0, value = 0, flags = 0;
int opt, mode = BPF_M_UNSPEC;
while ((opt = getopt(argc, argv, "F:PGmk:v:po:")) != -1) {
switch (opt) {
/* General args */
case 'F':
file = optarg;
break;
case 'P':
flags |= BPF_F_PIN;
break;
case 'G':
flags |= BPF_F_GET;
break;
/* Map-related args */
case 'm':
mode = BPF_M_MAP;
break;
case 'k':
key = strtoul(optarg, NULL, 0);
flags |= BPF_F_KEY;
break;
case 'v':
value = strtoul(optarg, NULL, 0);
flags |= BPF_F_VAL;
break;
/* Prog-related args */
case 'p':
mode = BPF_M_PROG;
break;
case 'o':
object = optarg;
break;
default:
goto out;
}
}
if (!(flags & BPF_F_PIN_GET) || !file)
goto out;
switch (mode) {
case BPF_M_MAP:
return bpf_do_map(file, flags, key, value);
case BPF_M_PROG:
return bpf_do_prog(file, flags, object);
}
out:
usage();
return -1;
}
| linux-master | samples/bpf/fds_example.c |
/* Copyright (c) 2016 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif
#define MINBLOCK_US 1
#define MAX_ENTRIES 10000
struct key_t {
char waker[TASK_COMM_LEN];
char target[TASK_COMM_LEN];
u32 wret;
u32 tret;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, struct key_t);
__type(value, u64);
__uint(max_entries, MAX_ENTRIES);
} counts SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u32);
__type(value, u64);
__uint(max_entries, MAX_ENTRIES);
} start SEC(".maps");
struct wokeby_t {
char name[TASK_COMM_LEN];
u32 ret;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u32);
__type(value, struct wokeby_t);
__uint(max_entries, MAX_ENTRIES);
} wokeby SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(key_size, sizeof(u32));
__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
__uint(max_entries, MAX_ENTRIES);
} stackmap SEC(".maps");
#define STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
SEC("kprobe/try_to_wake_up")
int waker(struct pt_regs *ctx)
{
struct task_struct *p = (void *)PT_REGS_PARM1_CORE(ctx);
u32 pid = BPF_CORE_READ(p, pid);
struct wokeby_t woke;
bpf_get_current_comm(&woke.name, sizeof(woke.name));
woke.ret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);
bpf_map_update_elem(&wokeby, &pid, &woke, BPF_ANY);
return 0;
}
static inline int update_counts(void *ctx, u32 pid, u64 delta)
{
struct wokeby_t *woke;
u64 zero = 0, *val;
struct key_t key;
__builtin_memset(&key.waker, 0, sizeof(key.waker));
bpf_get_current_comm(&key.target, sizeof(key.target));
key.tret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);
key.wret = 0;
woke = bpf_map_lookup_elem(&wokeby, &pid);
if (woke) {
key.wret = woke->ret;
__builtin_memcpy(&key.waker, woke->name, sizeof(key.waker));
bpf_map_delete_elem(&wokeby, &pid);
}
val = bpf_map_lookup_elem(&counts, &key);
if (!val) {
bpf_map_update_elem(&counts, &key, &zero, BPF_NOEXIST);
val = bpf_map_lookup_elem(&counts, &key);
if (!val)
return 0;
}
(*val) += delta;
return 0;
}
#if 1
/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */
SEC("tracepoint/sched/sched_switch")
int oncpu(struct trace_event_raw_sched_switch *ctx)
{
/* record previous thread sleep time */
u32 pid = ctx->prev_pid;
#else
SEC("kprobe.multi/finish_task_switch*")
int oncpu(struct pt_regs *ctx)
{
struct task_struct *p = (void *)PT_REGS_PARM1_CORE(ctx);
/* record previous thread sleep time */
u32 pid = BPF_CORE_READ(p, pid);
#endif
u64 delta, ts, *tsp;
ts = bpf_ktime_get_ns();
bpf_map_update_elem(&start, &pid, &ts, BPF_ANY);
/* calculate current thread's delta time */
pid = bpf_get_current_pid_tgid();
tsp = bpf_map_lookup_elem(&start, &pid);
if (!tsp)
/* missed start or filtered */
return 0;
delta = bpf_ktime_get_ns() - *tsp;
bpf_map_delete_elem(&start, &pid);
delta = delta / 1000;
if (delta < MINBLOCK_US)
return 0;
return update_counts(ctx, pid, delta);
}
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
| linux-master | samples/bpf/offwaketime.bpf.c |
#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(u32));
__uint(max_entries, 2);
} my_map SEC(".maps");
SEC("ksyscall/write")
int bpf_prog1(struct pt_regs *ctx)
{
struct S {
u64 pid;
u64 cookie;
} data;
data.pid = bpf_get_current_pid_tgid();
data.cookie = 0x12345678;
bpf_perf_event_output(ctx, &my_map, 0, &data, sizeof(data));
return 0;
}
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
| linux-master | samples/bpf/trace_output.bpf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-18 David Ahern <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <bpf/bpf_helpers.h>
#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(int));
__uint(max_entries, 64);
} xdp_tx_ports SEC(".maps");
/* from include/net/ip.h */
static __always_inline int ip_decrease_ttl(struct iphdr *iph)
{
u32 check = (__force u32)iph->check;
check += (__force u32)htons(0x0100);
iph->check = (__force __sum16)(check + (check >= 0xFFFF));
return --iph->ttl;
}
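/* For IPv6 there is no header checksum to fix up, so the hop limit can be
 * decremented directly; a minimal helper sketch (assumption, mirrors the
 * open-coded decrement in xdp_fwd_flags() below).
 */
static __always_inline int ip6_decrease_hop_limit(struct ipv6hdr *ip6h)
{
	return --ip6h->hop_limit;
}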
static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct bpf_fib_lookup fib_params;
struct ethhdr *eth = data;
struct ipv6hdr *ip6h;
struct iphdr *iph;
u16 h_proto;
u64 nh_off;
int rc;
nh_off = sizeof(*eth);
if (data + nh_off > data_end)
return XDP_DROP;
__builtin_memset(&fib_params, 0, sizeof(fib_params));
h_proto = eth->h_proto;
if (h_proto == htons(ETH_P_IP)) {
iph = data + nh_off;
if (iph + 1 > data_end)
return XDP_DROP;
if (iph->ttl <= 1)
return XDP_PASS;
fib_params.family = AF_INET;
fib_params.tos = iph->tos;
fib_params.l4_protocol = iph->protocol;
fib_params.sport = 0;
fib_params.dport = 0;
fib_params.tot_len = ntohs(iph->tot_len);
fib_params.ipv4_src = iph->saddr;
fib_params.ipv4_dst = iph->daddr;
} else if (h_proto == htons(ETH_P_IPV6)) {
struct in6_addr *src = (struct in6_addr *) fib_params.ipv6_src;
struct in6_addr *dst = (struct in6_addr *) fib_params.ipv6_dst;
ip6h = data + nh_off;
if (ip6h + 1 > data_end)
return XDP_DROP;
if (ip6h->hop_limit <= 1)
return XDP_PASS;
fib_params.family = AF_INET6;
fib_params.flowinfo = *(__be32 *)ip6h & IPV6_FLOWINFO_MASK;
fib_params.l4_protocol = ip6h->nexthdr;
fib_params.sport = 0;
fib_params.dport = 0;
fib_params.tot_len = ntohs(ip6h->payload_len);
*src = ip6h->saddr;
*dst = ip6h->daddr;
} else {
return XDP_PASS;
}
fib_params.ifindex = ctx->ingress_ifindex;
rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
	/*
	 * Some rc (return codes) from bpf_fib_lookup() are important,
	 * to understand how this XDP prog interacts with the network stack.
	 *
	 * BPF_FIB_LKUP_RET_NO_NEIGH:
	 * Even if the route lookup was a success, the MAC addresses are
	 * also needed. They come from the arp/neighbour table, but if that
	 * table is (still) empty then BPF_FIB_LKUP_RET_NO_NEIGH is
	 * returned. To avoid doing the ARP lookup directly from XDP, send
	 * the packet to the normal network stack via XDP_PASS and expect
	 * it to do the ARP resolution.
	 *
	 * BPF_FIB_LKUP_RET_FWD_DISABLED:
	 * bpf_fib_lookup() respects the sysctl net.ipv{4,6}.conf.all.forwarding
	 * setting, and returns BPF_FIB_LKUP_RET_FWD_DISABLED if forwarding
	 * is not enabled on the ingress device.
	 */
if (rc == BPF_FIB_LKUP_RET_SUCCESS) {
/* Verify egress index has been configured as TX-port.
* (Note: User can still have inserted an egress ifindex that
* doesn't support XDP xmit, which will result in packet drops).
*
* Note: lookup in devmap supported since 0cdbb4b09a0.
* If not supported will fail with:
* cannot pass map_type 14 into func bpf_map_lookup_elem#1:
*/
if (!bpf_map_lookup_elem(&xdp_tx_ports, &fib_params.ifindex))
return XDP_PASS;
if (h_proto == htons(ETH_P_IP))
ip_decrease_ttl(iph);
else if (h_proto == htons(ETH_P_IPV6))
ip6h->hop_limit--;
memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
return bpf_redirect_map(&xdp_tx_ports, fib_params.ifindex, 0);
}
return XDP_PASS;
}
SEC("xdp_fwd")
int xdp_fwd_prog(struct xdp_md *ctx)
{
return xdp_fwd_flags(ctx, 0);
}
SEC("xdp_fwd_direct")
int xdp_fwd_direct_prog(struct xdp_md *ctx)
{
return xdp_fwd_flags(ctx, BPF_FIB_LOOKUP_DIRECT);
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/xdp_fwd_kern.c |
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* ibumad BPF sample kernel side
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* Copyright(c) 2018 Ira Weiny, Intel Corporation
*/
#define KBUILD_MODNAME "ibumad_count_pkts_by_class"
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, u32); /* class; u32 required */
__type(value, u64); /* count of mads read */
__uint(max_entries, 256); /* Room for all Classes */
} read_count SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, u32); /* class; u32 required */
__type(value, u64); /* count of mads written */
__uint(max_entries, 256); /* Room for all Classes */
} write_count SEC(".maps");
#undef DEBUG
#ifndef DEBUG
#undef bpf_printk
#define bpf_printk(fmt, ...)
#endif
/* Taken from the current format defined in
* include/trace/events/ib_umad.h
* and
* /sys/kernel/tracing/events/ib_umad/ib_umad_read/format
* /sys/kernel/tracing/events/ib_umad/ib_umad_write/format
*/
struct ib_umad_rw_args {
u64 pad;
u8 port_num;
u8 sl;
u8 path_bits;
u8 grh_present;
u32 id;
u32 status;
u32 timeout_ms;
	u32 retries;
u32 length;
u32 qpn;
u32 qkey;
u8 gid_index;
u8 hop_limit;
u16 lid;
u16 attr_id;
u16 pkey_index;
u8 base_version;
u8 mgmt_class;
u8 class_version;
u8 method;
u32 flow_label;
u16 mad_status;
u16 class_specific;
u32 attr_mod;
u64 tid;
u8 gid[16];
u32 dev_index;
u8 traffic_class;
};
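/* The three tracepoint handlers below repeat the same lookup-or-init-
 * then-increment sequence; a factored sketch (assumption, not part of
 * the original sample):
 */
static __always_inline void count_class(void *counters, u32 class)
{
	u64 zero = 0, *val;

	val = bpf_map_lookup_elem(counters, &class);
	if (!val) {
		bpf_map_update_elem(counters, &class, &zero, BPF_NOEXIST);
		val = bpf_map_lookup_elem(counters, &class);
		if (!val)
			return;
	}
	(*val) += 1;
}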
SEC("tracepoint/ib_umad/ib_umad_read_recv")
int on_ib_umad_read_recv(struct ib_umad_rw_args *ctx)
{
u64 zero = 0, *val;
	u32 class = ctx->mgmt_class; /* key must match the u32 map key */
bpf_printk("ib_umad read recv : class 0x%x\n", class);
val = bpf_map_lookup_elem(&read_count, &class);
if (!val) {
bpf_map_update_elem(&read_count, &class, &zero, BPF_NOEXIST);
val = bpf_map_lookup_elem(&read_count, &class);
if (!val)
return 0;
}
(*val) += 1;
return 0;
}
SEC("tracepoint/ib_umad/ib_umad_read_send")
int on_ib_umad_read_send(struct ib_umad_rw_args *ctx)
{
u64 zero = 0, *val;
	u32 class = ctx->mgmt_class;
bpf_printk("ib_umad read send : class 0x%x\n", class);
val = bpf_map_lookup_elem(&read_count, &class);
if (!val) {
bpf_map_update_elem(&read_count, &class, &zero, BPF_NOEXIST);
val = bpf_map_lookup_elem(&read_count, &class);
if (!val)
return 0;
}
(*val) += 1;
return 0;
}
SEC("tracepoint/ib_umad/ib_umad_write")
int on_ib_umad_write(struct ib_umad_rw_args *ctx)
{
u64 zero = 0, *val;
	u32 class = ctx->mgmt_class;
bpf_printk("ib_umad write : class 0x%x\n", class);
val = bpf_map_lookup_elem(&write_count, &class);
if (!val) {
bpf_map_update_elem(&write_count, &class, &zero, BPF_NOEXIST);
val = bpf_map_lookup_elem(&write_count, &class);
if (!val)
return 0;
}
(*val) += 1;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/ibumad_kern.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/unistd.h>
#include <linux/bpf.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <arpa/inet.h>
#include <bpf/bpf.h>
#include "bpf_util.h"
#define MAX_INDEX 64
#define MAX_STARS 38
static void stars(char *str, long val, long max, int width)
{
int i;
for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++)
str[i] = '*';
if (val > max)
str[i - 1] = '+';
str[i] = '\0';
}
int main(int argc, char **argv)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
const char *map_filename = "/sys/fs/bpf/tc/globals/lwt_len_hist_map";
uint64_t values[nr_cpus], sum, max_value = 0, data[MAX_INDEX] = {};
uint64_t key = 0, next_key, max_key = 0;
char starstr[MAX_STARS];
int i, map_fd;
map_fd = bpf_obj_get(map_filename);
if (map_fd < 0) {
fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n",
map_filename, strerror(errno), errno);
return -1;
}
while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
if (next_key >= MAX_INDEX) {
fprintf(stderr, "Key %lu out of bounds\n", next_key);
continue;
}
bpf_map_lookup_elem(map_fd, &next_key, values);
sum = 0;
for (i = 0; i < nr_cpus; i++)
sum += values[i];
data[next_key] = sum;
if (sum && next_key > max_key)
max_key = next_key;
if (sum > max_value)
max_value = sum;
key = next_key;
}
for (i = 1; i <= max_key + 1; i++) {
stars(starstr, data[i - 1], max_value, MAX_STARS);
printf("%8ld -> %-8ld : %-8ld |%-*s|\n",
(1l << i) >> 1, (1l << i) - 1, data[i - 1],
MAX_STARS, starstr);
}
close(map_fd);
return 0;
}
| linux-master | samples/bpf/lwt_len_hist_user.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <linux/perf_event.h>
#include <linux/bpf.h>
#include <signal.h>
#include <errno.h>
#include <sys/resource.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "perf-sys.h"
#include "trace_helpers.h"
#define SAMPLE_FREQ 50
static int pid;
/* counts, stackmap */
static int map_fd[2];
struct bpf_program *prog;
static bool sys_read_seen, sys_write_seen;
static void print_ksym(__u64 addr)
{
struct ksym *sym;
if (!addr)
return;
sym = ksym_search(addr);
if (!sym) {
printf("ksym not found. Is kallsyms loaded?\n");
return;
}
printf("%s;", sym->name);
if (!strstr(sym->name, "sys_read"))
sys_read_seen = true;
else if (!strstr(sym->name, "sys_write"))
sys_write_seen = true;
}
static void print_addr(__u64 addr)
{
if (!addr)
return;
printf("%llx;", addr);
}
#define TASK_COMM_LEN 16
struct key_t {
char comm[TASK_COMM_LEN];
__u32 kernstack;
__u32 userstack;
};
static void print_stack(struct key_t *key, __u64 count)
{
__u64 ip[PERF_MAX_STACK_DEPTH] = {};
static bool warned;
int i;
printf("%3lld %s;", count, key->comm);
if (bpf_map_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
printf("---;");
} else {
for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
print_ksym(ip[i]);
}
printf("-;");
if (bpf_map_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
printf("---;");
} else {
for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
print_addr(ip[i]);
}
if (count < 6)
printf("\r");
else
printf("\n");
if (key->kernstack == -EEXIST && !warned) {
printf("stackmap collisions seen. Consider increasing size\n");
warned = true;
} else if ((int)key->kernstack < 0 && (int)key->userstack < 0) {
printf("err stackid %d %d\n", key->kernstack, key->userstack);
}
}
static void err_exit(int err)
{
kill(pid, SIGKILL);
exit(err);
}
static void print_stacks(void)
{
struct key_t key = {}, next_key;
__u64 value;
__u32 stackid = 0, next_id;
int error = 1, fd = map_fd[0], stack_map = map_fd[1];
sys_read_seen = sys_write_seen = false;
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
bpf_map_lookup_elem(fd, &next_key, &value);
print_stack(&next_key, value);
bpf_map_delete_elem(fd, &next_key);
key = next_key;
}
printf("\n");
if (!sys_read_seen || !sys_write_seen) {
printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
err_exit(error);
}
/* clear stack map */
while (bpf_map_get_next_key(stack_map, &stackid, &next_id) == 0) {
bpf_map_delete_elem(stack_map, &next_id);
stackid = next_id;
}
}
static inline int generate_load(void)
{
if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
printf("failed to generate some load with dd: %s\n", strerror(errno));
return -1;
}
return 0;
}
static void test_perf_event_all_cpu(struct perf_event_attr *attr)
{
int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
struct bpf_link **links = calloc(nr_cpus, sizeof(struct bpf_link *));
int i, pmu_fd, error = 1;
if (!links) {
printf("malloc of links failed\n");
goto err;
}
/* system wide perf event, no need to inherit */
attr->inherit = 0;
/* open perf_event on all cpus */
for (i = 0; i < nr_cpus; i++) {
pmu_fd = sys_perf_event_open(attr, -1, i, -1, 0);
if (pmu_fd < 0) {
printf("sys_perf_event_open failed\n");
goto all_cpu_err;
}
links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
if (libbpf_get_error(links[i])) {
printf("bpf_program__attach_perf_event failed\n");
links[i] = NULL;
close(pmu_fd);
goto all_cpu_err;
}
}
if (generate_load() < 0)
goto all_cpu_err;
print_stacks();
error = 0;
all_cpu_err:
for (i--; i >= 0; i--)
bpf_link__destroy(links[i]);
err:
free(links);
if (error)
err_exit(error);
}
static void test_perf_event_task(struct perf_event_attr *attr)
{
struct bpf_link *link = NULL;
int pmu_fd, error = 1;
/* per task perf event, enable inherit so the "dd ..." command can be traced properly.
* Enabling inherit will cause bpf_perf_prog_read_time helper failure.
*/
attr->inherit = 1;
/* open task bound event */
pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
if (pmu_fd < 0) {
printf("sys_perf_event_open failed\n");
goto err;
}
link = bpf_program__attach_perf_event(prog, pmu_fd);
if (libbpf_get_error(link)) {
printf("bpf_program__attach_perf_event failed\n");
link = NULL;
close(pmu_fd);
goto err;
}
if (generate_load() < 0)
goto err;
print_stacks();
error = 0;
err:
bpf_link__destroy(link);
if (error)
err_exit(error);
}
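/* Convenience sketch (assumption, not part of the original sample): each
 * attr in test_bpf_perf_event() below is exercised in both system-wide
 * and task-bound modes; this wrapper mirrors that sequence.
 */
static inline void test_perf_event_both(const char *name,
					struct perf_event_attr *attr)
{
	printf("Test %s\n", name);
	test_perf_event_all_cpu(attr);
	test_perf_event_task(attr);
}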
static void test_bpf_perf_event(void)
{
struct perf_event_attr attr_type_hw = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
struct perf_event_attr attr_type_sw = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
};
struct perf_event_attr attr_hw_cache_l1d = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1D |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
};
struct perf_event_attr attr_hw_cache_branch_miss = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_BPU |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
};
struct perf_event_attr attr_type_raw = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_RAW,
/* Intel Instruction Retired */
.config = 0xc0,
};
struct perf_event_attr attr_type_raw_lock_load = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_RAW,
/* Intel MEM_UOPS_RETIRED.LOCK_LOADS */
.config = 0x21d0,
/* Request to record lock address from PEBS */
.sample_type = PERF_SAMPLE_ADDR,
/* Record address value requires precise event */
.precise_ip = 2,
};
printf("Test HW_CPU_CYCLES\n");
test_perf_event_all_cpu(&attr_type_hw);
test_perf_event_task(&attr_type_hw);
printf("Test SW_CPU_CLOCK\n");
test_perf_event_all_cpu(&attr_type_sw);
test_perf_event_task(&attr_type_sw);
printf("Test HW_CACHE_L1D\n");
test_perf_event_all_cpu(&attr_hw_cache_l1d);
test_perf_event_task(&attr_hw_cache_l1d);
printf("Test HW_CACHE_BPU\n");
test_perf_event_all_cpu(&attr_hw_cache_branch_miss);
test_perf_event_task(&attr_hw_cache_branch_miss);
printf("Test Instruction Retired\n");
test_perf_event_all_cpu(&attr_type_raw);
test_perf_event_task(&attr_type_raw);
printf("Test Lock Load\n");
test_perf_event_all_cpu(&attr_type_raw_lock_load);
test_perf_event_task(&attr_type_raw_lock_load);
printf("*** PASS ***\n");
}
int main(int argc, char **argv)
{
struct bpf_object *obj = NULL;
char filename[256];
int error = 1;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
signal(SIGINT, err_exit);
signal(SIGTERM, err_exit);
if (load_kallsyms()) {
printf("failed to process /proc/kallsyms\n");
goto cleanup;
}
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
printf("opening BPF object file failed\n");
obj = NULL;
goto cleanup;
}
prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
if (!prog) {
printf("finding a prog in obj file failed\n");
goto cleanup;
}
/* load BPF program */
if (bpf_object__load(obj)) {
printf("loading BPF object file failed\n");
goto cleanup;
}
map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counts");
map_fd[1] = bpf_object__find_map_fd_by_name(obj, "stackmap");
if (map_fd[0] < 0 || map_fd[1] < 0) {
printf("finding a counts/stackmap map in obj file failed\n");
goto cleanup;
}
pid = fork();
if (pid == 0) {
read_trace_pipe();
return 0;
} else if (pid == -1) {
printf("couldn't spawn process\n");
goto cleanup;
}
test_bpf_perf_event();
error = 0;
cleanup:
bpf_object__close(obj);
err_exit(error);
}
| linux-master | samples/bpf/trace_event_user.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/version.h>
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
SEC("kprobe/blk_mq_start_request")
int bpf_prog1(struct pt_regs *ctx)
{
return 0;
}
SEC("kretprobe/__blk_account_io_done")
int bpf_prog2(struct pt_regs *ctx)
{
return 0;
}
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
| linux-master | samples/bpf/task_fd_query_kern.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
SEC("raw_tracepoint/task_rename")
int prog(struct bpf_raw_tracepoint_args *ctx)
{
return 0;
}
SEC("raw_tracepoint/fib_table_lookup")
int prog2(struct bpf_raw_tracepoint_args *ctx)
{
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/test_overhead_raw_tp.bpf.c |
/* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2018 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
 * This program shows how to use bpf_xdp_adjust_tail() by generating an
 * ICMPv4 "packet too big" reply (more precisely for v4: destination
 * unreachable with the "fragmentation needed and DF set" code) when
 * receiving packets bigger than 600 bytes.
*/
#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <bpf/bpf_helpers.h>
#define DEFAULT_TTL 64
#define MAX_PCKT_SIZE 600
#define ICMP_TOOBIG_SIZE 98
#define ICMP_TOOBIG_PAYLOAD_SIZE 92
/* volatile to prevent compiler optimizations */
static volatile __u32 max_pcktsz = MAX_PCKT_SIZE;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, __u64);
__uint(max_entries, 1);
} icmpcnt SEC(".maps");
static __always_inline void count_icmp(void)
{
u64 key = 0;
u64 *icmp_count;
icmp_count = bpf_map_lookup_elem(&icmpcnt, &key);
if (icmp_count)
*icmp_count += 1;
}
static __always_inline void swap_mac(void *data, struct ethhdr *orig_eth)
{
struct ethhdr *eth;
eth = data;
memcpy(eth->h_source, orig_eth->h_dest, ETH_ALEN);
memcpy(eth->h_dest, orig_eth->h_source, ETH_ALEN);
eth->h_proto = orig_eth->h_proto;
}
static __always_inline __u16 csum_fold_helper(__u32 csum)
{
return ~((csum & 0xffff) + (csum >> 16));
}
static __always_inline void ipv4_csum(void *data_start, int data_size,
__u32 *csum)
{
*csum = bpf_csum_diff(0, 0, data_start, data_size, *csum);
*csum = csum_fold_helper(*csum);
}
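/* Illustrative note (not in the original sample): bpf_csum_diff() yields a
 * 32-bit running sum, and csum_fold_helper() folds the carry back into the
 * low 16 bits before complementing, e.g. 0x12345 folds to ~(0x2345 + 0x1).
 */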
static __always_inline int send_icmp4_too_big(struct xdp_md *xdp)
{
int headroom = (int)sizeof(struct iphdr) + (int)sizeof(struct icmphdr);
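	/* Illustrative note (not in the original sample): headroom works out to
	 * sizeof(struct iphdr) + sizeof(struct icmphdr) = 20 + 8 = 28 bytes,
	 * prepended so the start of the original packet can serve as the ICMP
	 * payload.
	 */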
if (bpf_xdp_adjust_head(xdp, 0 - headroom))
return XDP_DROP;
void *data = (void *)(long)xdp->data;
void *data_end = (void *)(long)xdp->data_end;
if (data + (ICMP_TOOBIG_SIZE + headroom) > data_end)
return XDP_DROP;
struct iphdr *iph, *orig_iph;
struct icmphdr *icmp_hdr;
struct ethhdr *orig_eth;
__u32 csum = 0;
__u64 off = 0;
orig_eth = data + headroom;
swap_mac(data, orig_eth);
off += sizeof(struct ethhdr);
iph = data + off;
off += sizeof(struct iphdr);
icmp_hdr = data + off;
off += sizeof(struct icmphdr);
orig_iph = data + off;
icmp_hdr->type = ICMP_DEST_UNREACH;
icmp_hdr->code = ICMP_FRAG_NEEDED;
icmp_hdr->un.frag.mtu = htons(max_pcktsz - sizeof(struct ethhdr));
icmp_hdr->checksum = 0;
ipv4_csum(icmp_hdr, ICMP_TOOBIG_PAYLOAD_SIZE, &csum);
icmp_hdr->checksum = csum;
iph->ttl = DEFAULT_TTL;
iph->daddr = orig_iph->saddr;
iph->saddr = orig_iph->daddr;
iph->version = 4;
iph->ihl = 5;
iph->protocol = IPPROTO_ICMP;
iph->tos = 0;
iph->tot_len = htons(
ICMP_TOOBIG_SIZE + headroom - sizeof(struct ethhdr));
iph->check = 0;
csum = 0;
ipv4_csum(iph, sizeof(struct iphdr), &csum);
iph->check = csum;
count_icmp();
return XDP_TX;
}
static __always_inline int handle_ipv4(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
int pckt_size = data_end - data;
int offset;
if (pckt_size > max(max_pcktsz, ICMP_TOOBIG_SIZE)) {
offset = pckt_size - ICMP_TOOBIG_SIZE;
if (bpf_xdp_adjust_tail(xdp, 0 - offset))
return XDP_PASS;
return send_icmp4_too_big(xdp);
}
return XDP_PASS;
}
SEC("xdp_icmp")
int _xdp_icmp(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct ethhdr *eth = data;
__u16 h_proto;
if (eth + 1 > data_end)
return XDP_DROP;
h_proto = eth->h_proto;
if (h_proto == htons(ETH_P_IP))
return handle_ipv4(xdp);
else
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/xdp_adjust_tail_kern.c |
/* eBPF example program:
*
 * - Creates an array map in the kernel with 4-byte keys and 8-byte values
*
* - Loads eBPF program
*
* The eBPF program accesses the map passed in to store two pieces of
* information. The number of invocations of the program, which maps
* to the number of packets received, is stored to key 0. Key 1 is
* incremented on each iteration by the number of bytes stored in
* the skb.
*
* - Attaches the new program to a cgroup using BPF_PROG_ATTACH
*
* - Every second, reads map[0] and map[1] to see how many bytes and
* packets were seen on any socket of tasks in the given cgroup.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include "bpf_insn.h"
#include "bpf_util.h"
enum {
MAP_KEY_PACKETS,
MAP_KEY_BYTES,
};
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(int map_fd, int verdict)
{
struct bpf_insn prog[] = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), /* save r6 so it's not clobbered by BPF_CALL */
/* Count packets */
BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_PACKETS), /* r0 = 0 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
BPF_LD_MAP_FD(BPF_REG_1, map_fd), /* load map fd to r1 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
/* Count bytes */
BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
BPF_LD_MAP_FD(BPF_REG_1, map_fd),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = ARRAY_SIZE(prog);
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.log_buf = bpf_log_buf,
.log_size = BPF_LOG_BUF_SIZE,
);
return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB, NULL, "GPL",
prog, insns_cnt, &opts);
}
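/* A rough C equivalent of the hand-assembled program above (an illustrative
 * sketch, not part of the original sample; "map" and "verdict" stand in for
 * the map fd and verdict immediate patched into the instructions):
 *
 *	int filter(struct __sk_buff *skb)
 *	{
 *		u32 key = MAP_KEY_PACKETS;
 *		u64 *val = bpf_map_lookup_elem(&map, &key);
 *		if (val)
 *			__sync_fetch_and_add(val, 1);
 *		key = MAP_KEY_BYTES;
 *		val = bpf_map_lookup_elem(&map, &key);
 *		if (val)
 *			__sync_fetch_and_add(val, skb->len);
 *		return verdict;
 *	}
 */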
static int usage(const char *argv0)
{
printf("Usage: %s [-d] [-D] <cg-path> <egress|ingress>\n", argv0);
printf(" -d Drop Traffic\n");
printf(" -D Detach filter, and exit\n");
return EXIT_FAILURE;
}
static int attach_filter(int cg_fd, int type, int verdict)
{
int prog_fd, map_fd, ret, key;
long long pkt_cnt, byte_cnt;
map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL,
sizeof(key), sizeof(byte_cnt),
256, NULL);
if (map_fd < 0) {
printf("Failed to create map: '%s'\n", strerror(errno));
return EXIT_FAILURE;
}
prog_fd = prog_load(map_fd, verdict);
printf("Output from kernel verifier:\n%s\n-------\n", bpf_log_buf);
if (prog_fd < 0) {
printf("Failed to load prog: '%s'\n", strerror(errno));
return EXIT_FAILURE;
}
ret = bpf_prog_attach(prog_fd, cg_fd, type, 0);
if (ret < 0) {
printf("Failed to attach prog to cgroup: '%s'\n",
strerror(errno));
return EXIT_FAILURE;
}
while (1) {
key = MAP_KEY_PACKETS;
assert(bpf_map_lookup_elem(map_fd, &key, &pkt_cnt) == 0);
key = MAP_KEY_BYTES;
assert(bpf_map_lookup_elem(map_fd, &key, &byte_cnt) == 0);
printf("cgroup received %lld packets, %lld bytes\n",
pkt_cnt, byte_cnt);
sleep(1);
}
return EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
int detach_only = 0, verdict = 1;
enum bpf_attach_type type;
int opt, cg_fd, ret;
while ((opt = getopt(argc, argv, "Dd")) != -1) {
switch (opt) {
case 'd':
verdict = 0;
break;
case 'D':
detach_only = 1;
break;
default:
return usage(argv[0]);
}
}
if (argc - optind < 2)
return usage(argv[0]);
if (strcmp(argv[optind + 1], "ingress") == 0)
type = BPF_CGROUP_INET_INGRESS;
else if (strcmp(argv[optind + 1], "egress") == 0)
type = BPF_CGROUP_INET_EGRESS;
else
return usage(argv[0]);
cg_fd = open(argv[optind], O_DIRECTORY | O_RDONLY);
if (cg_fd < 0) {
printf("Failed to open cgroup path: '%s'\n", strerror(errno));
return EXIT_FAILURE;
}
if (detach_only) {
ret = bpf_prog_detach(cg_fd, type);
printf("bpf_prog_detach() returned '%s' (%d)\n",
strerror(errno), errno);
} else
ret = attach_filter(cg_fd, type, verdict);
return ret;
}
| linux-master | samples/bpf/test_cgrp2_attach.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
*/
#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <asm/unistd.h>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <sys/wait.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdlib.h>
#include <signal.h>
#include <linux/bpf.h>
#include <string.h>
#include <time.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#define MAX_CNT 1000000
#define DUMMY_IP "127.0.0.1"
#define DUMMY_PORT 80
static struct bpf_link *links[2];
static struct bpf_object *obj;
static int cnt;
static __u64 time_get_ns(void)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_sec * 1000000000ull + ts.tv_nsec;
}
static void test_task_rename(int cpu)
{
char buf[] = "test\n";
__u64 start_time;
int i, fd;
fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
if (fd < 0) {
printf("couldn't open /proc\n");
exit(1);
}
start_time = time_get_ns();
for (i = 0; i < MAX_CNT; i++) {
if (write(fd, buf, sizeof(buf)) < 0) {
printf("task rename failed: %s\n", strerror(errno));
close(fd);
return;
}
}
printf("task_rename:%d: %lld events per sec\n",
cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
close(fd);
}
static void test_fib_table_lookup(int cpu)
{
struct sockaddr_in addr;
char buf[] = "test\n";
__u64 start_time;
int i, fd;
fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
if (fd < 0) {
printf("couldn't open socket\n");
exit(1);
}
memset((char *)&addr, 0, sizeof(addr));
addr.sin_addr.s_addr = inet_addr(DUMMY_IP);
addr.sin_port = htons(DUMMY_PORT);
addr.sin_family = AF_INET;
start_time = time_get_ns();
for (i = 0; i < MAX_CNT; i++) {
if (sendto(fd, buf, strlen(buf), 0,
(struct sockaddr *)&addr, sizeof(addr)) < 0) {
printf("failed to start ping: %s\n", strerror(errno));
close(fd);
return;
}
}
printf("fib_table_lookup:%d: %lld events per sec\n",
cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
close(fd);
}
static void loop(int cpu, int flags)
{
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
sched_setaffinity(0, sizeof(cpuset), &cpuset);
if (flags & 1)
test_task_rename(cpu);
if (flags & 2)
test_fib_table_lookup(cpu);
}
static void run_perf_test(int tasks, int flags)
{
pid_t pid[tasks];
int i;
for (i = 0; i < tasks; i++) {
pid[i] = fork();
if (pid[i] == 0) {
loop(i, flags);
exit(0);
} else if (pid[i] == -1) {
printf("couldn't spawn #%d process\n", i);
exit(1);
}
}
for (i = 0; i < tasks; i++) {
int status;
assert(waitpid(pid[i], &status, 0) == pid[i]);
assert(status == 0);
}
}
static int load_progs(char *filename)
{
struct bpf_program *prog;
int err = 0;
obj = bpf_object__open_file(filename, NULL);
err = libbpf_get_error(obj);
if (err < 0) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
return err;
}
/* load BPF program */
err = bpf_object__load(obj);
if (err < 0) {
fprintf(stderr, "ERROR: loading BPF object file failed\n");
return err;
}
bpf_object__for_each_program(prog, obj) {
links[cnt] = bpf_program__attach(prog);
err = libbpf_get_error(links[cnt]);
if (err < 0) {
fprintf(stderr, "ERROR: bpf_program__attach failed\n");
links[cnt] = NULL;
return err;
}
cnt++;
}
return err;
}
static void unload_progs(void)
{
while (cnt)
bpf_link__destroy(links[--cnt]);
bpf_object__close(obj);
}
int main(int argc, char **argv)
{
int num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
int test_flags = ~0;
char filename[256];
int err = 0;
if (argc > 1)
test_flags = atoi(argv[1]) ? : test_flags;
if (argc > 2)
num_cpu = atoi(argv[2]) ? : num_cpu;
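	/* Illustrative note (not in the original source): test_flags packs two
	 * bits per variant (bits 0-1: task_rename/fib_table_lookup for the
	 * BASE run, bits 2-3: kprobe, bits 4-5: tracepoint, bits 6-7: raw
	 * tracepoint); e.g. running with argv[1] == "12" (0xC) exercises both
	 * workloads under kprobes only.
	 */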
if (test_flags & 0x3) {
printf("BASE\n");
run_perf_test(num_cpu, test_flags);
}
if (test_flags & 0xC) {
snprintf(filename, sizeof(filename),
"%s_kprobe.bpf.o", argv[0]);
printf("w/KPROBE\n");
err = load_progs(filename);
if (!err)
run_perf_test(num_cpu, test_flags >> 2);
unload_progs();
}
if (test_flags & 0x30) {
snprintf(filename, sizeof(filename),
"%s_tp.bpf.o", argv[0]);
printf("w/TRACEPOINT\n");
err = load_progs(filename);
if (!err)
run_perf_test(num_cpu, test_flags >> 4);
unload_progs();
}
if (test_flags & 0xC0) {
snprintf(filename, sizeof(filename),
"%s_raw_tp.bpf.o", argv[0]);
printf("w/RAW_TRACEPOINT\n");
err = load_progs(filename);
if (!err)
run_perf_test(num_cpu, test_flags >> 6);
unload_progs();
}
return err;
}
| linux-master | samples/bpf/test_overhead_user.c |
/* Copyright (c) 2016, Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, long);
__type(value, long);
__uint(max_entries, 1024);
} my_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(key_size, sizeof(long));
__uint(value_size, sizeof(long));
__uint(max_entries, 1024);
} my_map2 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(key_size, sizeof(u32));
__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
__uint(max_entries, 10000);
} stackmap SEC(".maps");
#define PROG(foo) \
int foo(struct pt_regs *ctx) \
{ \
long v = PT_REGS_IP(ctx), *val; \
\
val = bpf_map_lookup_elem(&my_map, &v); \
bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \
bpf_map_update_elem(&my_map2, &v, &v, BPF_ANY); \
bpf_map_delete_elem(&my_map2, &v); \
bpf_get_stackid(ctx, &stackmap, BPF_F_REUSE_STACKID); \
return 0; \
}
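/* For instance (illustrative note, not in the original source), the first
 * line below expands to "int spin_lock(struct pt_regs *ctx) { ... }" and,
 * via the kprobe.multi section, attaches it to every kernel symbol matching
 * spin_*lock*.
 */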
/* add kprobes to all possible *spin* functions */
SEC("kprobe.multi/spin_*lock*")PROG(spin_lock)
SEC("kprobe.multi/*_spin_on_owner")PROG(spin_on_owner)
SEC("kprobe.multi/_raw_spin_*lock*")PROG(raw_spin_lock)
/* and to inner bpf helpers */
SEC("kprobe/htab_map_update_elem")PROG(p15)
SEC("kprobe/__htab_percpu_map_update_elem")PROG(p16)
SEC("kprobe/htab_map_alloc")PROG(p17)
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
| linux-master | samples/bpf/spintest.bpf.c |
// SPDX-License-Identifier: GPL-2.0
/* GPLv2, Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc. */
#include "xdp_sample.bpf.h"
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
array_map rx_cnt SEC(".maps");
array_map redir_err_cnt SEC(".maps");
array_map cpumap_enqueue_cnt SEC(".maps");
array_map cpumap_kthread_cnt SEC(".maps");
array_map exception_cnt SEC(".maps");
array_map devmap_xmit_cnt SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, 32 * 32);
__type(key, u64);
__type(value, struct datarec);
} devmap_xmit_cnt_multi SEC(".maps");
const volatile int nr_cpus = 0;
/* These can be set before loading so that redundant comparisons can be DCE'd by
 * the verifier, and only actual matches are tried after loading the tp_btf
 * program. This allows the sample to filter tracepoint stats based on
 * net_device.
*/
const volatile int from_match[32] = {};
const volatile int to_match[32] = {};
int cpumap_map_id = 0;
/* Find if b is part of set a, but if a is an empty set then evaluate to true */
#define IN_SET(a, b) \
({ \
bool __res = !(a)[0]; \
for (int i = 0; i < ARRAY_SIZE(a) && (a)[i]; i++) { \
__res = (a)[i] == (b); \
if (__res) \
break; \
} \
__res; \
})
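/* Worked example (illustrative, not in the original source): with
 * a = {3, 7, 0, ...}, IN_SET(a, 7) is true and IN_SET(a, 5) is false; with
 * a = {0, ...} it is true for any b, which is what makes an unset filter
 * match every device.
 */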
static __always_inline __u32 xdp_get_err_key(int err)
{
switch (err) {
case 0:
return 0;
case -EINVAL:
return 2;
case -ENETDOWN:
return 3;
case -EMSGSIZE:
return 4;
case -EOPNOTSUPP:
return 5;
case -ENOSPC:
return 6;
default:
return 1;
}
}
static __always_inline int xdp_redirect_collect_stat(int from, int err)
{
u32 cpu = bpf_get_smp_processor_id();
u32 key = XDP_REDIRECT_ERROR;
struct datarec *rec;
u32 idx;
if (!IN_SET(from_match, from))
return 0;
key = xdp_get_err_key(err);
idx = key * nr_cpus + cpu;
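	/* Illustrative note (not in the original source): the counters form a
	 * flattened 2-D array indexed by (error key, cpu); e.g. key 2 on CPU 1
	 * of an 8-CPU machine lands at index 2 * 8 + 1 = 17.
	 */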
rec = bpf_map_lookup_elem(&redir_err_cnt, &idx);
if (!rec)
return 0;
if (key)
NO_TEAR_INC(rec->dropped);
else
NO_TEAR_INC(rec->processed);
	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it doesn't work well
	 * in practice as stopping perf-record also unloads this
	 * bpf_prog. Plus, there is additional overhead of doing so.
	 */
}
SEC("tp_btf/xdp_redirect_err")
int BPF_PROG(tp_xdp_redirect_err, const struct net_device *dev,
const struct bpf_prog *xdp, const void *tgt, int err,
const struct bpf_map *map, u32 index)
{
return xdp_redirect_collect_stat(dev->ifindex, err);
}
SEC("tp_btf/xdp_redirect_map_err")
int BPF_PROG(tp_xdp_redirect_map_err, const struct net_device *dev,
const struct bpf_prog *xdp, const void *tgt, int err,
const struct bpf_map *map, u32 index)
{
return xdp_redirect_collect_stat(dev->ifindex, err);
}
SEC("tp_btf/xdp_redirect")
int BPF_PROG(tp_xdp_redirect, const struct net_device *dev,
const struct bpf_prog *xdp, const void *tgt, int err,
const struct bpf_map *map, u32 index)
{
return xdp_redirect_collect_stat(dev->ifindex, err);
}
SEC("tp_btf/xdp_redirect_map")
int BPF_PROG(tp_xdp_redirect_map, const struct net_device *dev,
const struct bpf_prog *xdp, const void *tgt, int err,
const struct bpf_map *map, u32 index)
{
return xdp_redirect_collect_stat(dev->ifindex, err);
}
SEC("tp_btf/xdp_cpumap_enqueue")
int BPF_PROG(tp_xdp_cpumap_enqueue, int map_id, unsigned int processed,
unsigned int drops, int to_cpu)
{
u32 cpu = bpf_get_smp_processor_id();
struct datarec *rec;
u32 idx;
if (cpumap_map_id && cpumap_map_id != map_id)
return 0;
idx = to_cpu * nr_cpus + cpu;
rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &idx);
if (!rec)
return 0;
NO_TEAR_ADD(rec->processed, processed);
NO_TEAR_ADD(rec->dropped, drops);
/* Record bulk events, then userspace can calc average bulk size */
if (processed > 0)
NO_TEAR_INC(rec->issue);
	/* Inception: It's possible to detect overload situations via
* this tracepoint. This can be used for creating a feedback
* loop to XDP, which can take appropriate actions to mitigate
* this overload situation.
*/
return 0;
}
SEC("tp_btf/xdp_cpumap_kthread")
int BPF_PROG(tp_xdp_cpumap_kthread, int map_id, unsigned int processed,
unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats)
{
struct datarec *rec;
u32 cpu;
if (cpumap_map_id && cpumap_map_id != map_id)
return 0;
cpu = bpf_get_smp_processor_id();
rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &cpu);
if (!rec)
return 0;
NO_TEAR_ADD(rec->processed, processed);
NO_TEAR_ADD(rec->dropped, drops);
NO_TEAR_ADD(rec->xdp_pass, xdp_stats->pass);
NO_TEAR_ADD(rec->xdp_drop, xdp_stats->drop);
NO_TEAR_ADD(rec->xdp_redirect, xdp_stats->redirect);
/* Count times kthread yielded CPU via schedule call */
if (sched)
NO_TEAR_INC(rec->issue);
return 0;
}
SEC("tp_btf/xdp_exception")
int BPF_PROG(tp_xdp_exception, const struct net_device *dev,
const struct bpf_prog *xdp, u32 act)
{
u32 cpu = bpf_get_smp_processor_id();
struct datarec *rec;
u32 key = act, idx;
if (!IN_SET(from_match, dev->ifindex))
return 0;
if (!IN_SET(to_match, dev->ifindex))
return 0;
if (key > XDP_REDIRECT)
key = XDP_REDIRECT + 1;
idx = key * nr_cpus + cpu;
rec = bpf_map_lookup_elem(&exception_cnt, &idx);
if (!rec)
return 0;
NO_TEAR_INC(rec->dropped);
return 0;
}
SEC("tp_btf/xdp_devmap_xmit")
int BPF_PROG(tp_xdp_devmap_xmit, const struct net_device *from_dev,
const struct net_device *to_dev, int sent, int drops, int err)
{
struct datarec *rec;
int idx_in, idx_out;
u32 cpu;
idx_in = from_dev->ifindex;
idx_out = to_dev->ifindex;
if (!IN_SET(from_match, idx_in))
return 0;
if (!IN_SET(to_match, idx_out))
return 0;
cpu = bpf_get_smp_processor_id();
rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &cpu);
if (!rec)
return 0;
NO_TEAR_ADD(rec->processed, sent);
NO_TEAR_ADD(rec->dropped, drops);
/* Record bulk events, then userspace can calc average bulk size */
NO_TEAR_INC(rec->info);
	/* Record error cases, where no frames were sent */
	/* Catch the API error where drv ndo_xdp_xmit sent more than count */
if (err || drops < 0)
NO_TEAR_INC(rec->issue);
return 0;
}
SEC("tp_btf/xdp_devmap_xmit")
int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device *from_dev,
const struct net_device *to_dev, int sent, int drops, int err)
{
struct datarec empty = {};
struct datarec *rec;
int idx_in, idx_out;
u64 idx;
idx_in = from_dev->ifindex;
idx_out = to_dev->ifindex;
idx = idx_in;
idx = idx << 32 | idx_out;
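	/* Illustrative note (not in the original source): both ifindexes are
	 * packed into one 64-bit key; e.g. from ifindex 2 to ifindex 3 yields
	 * idx == 0x0000000200000003.
	 */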
if (!IN_SET(from_match, idx_in))
return 0;
if (!IN_SET(to_match, idx_out))
return 0;
bpf_map_update_elem(&devmap_xmit_cnt_multi, &idx, &empty, BPF_NOEXIST);
rec = bpf_map_lookup_elem(&devmap_xmit_cnt_multi, &idx);
if (!rec)
return 0;
NO_TEAR_ADD(rec->processed, sent);
NO_TEAR_ADD(rec->dropped, drops);
NO_TEAR_INC(rec->info);
if (err || drops < 0)
NO_TEAR_INC(rec->issue);
return 0;
}
| linux-master | samples/bpf/xdp_sample.bpf.c |
/* Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* BPF program to set initial congestion window and initial receive
* window to 40 packets and send and receive buffers to 1.5MB. This
* would usually be done after doing appropriate checks that indicate
* the hosts are far enough away (i.e. large RTT).
*
* Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include <linux/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define DEBUG 1
SEC("sockops")
int bpf_iw(struct bpf_sock_ops *skops)
{
int bufsize = 1500000;
int rwnd_init = 40;
int iw = 40;
int rv = 0;
int op;
	/* For testing purposes, only execute the rest of the BPF program
	 * if neither port number is 55601
	 */
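	/* Note (illustrative, not in the original sample): in struct
	 * bpf_sock_ops, remote_port is carried in network byte order while
	 * local_port is host order, hence bpf_ntohl() on only one of the two.
	 */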
if (bpf_ntohl(skops->remote_port) != 55601 &&
skops->local_port != 55601) {
skops->reply = -1;
return 1;
}
op = (int) skops->op;
#ifdef DEBUG
bpf_printk("BPF command: %d\n", op);
#endif
	/* Usually there would be a check to ensure the hosts are far
	 * from each other so it makes sense to increase buffer sizes
	 */
switch (op) {
case BPF_SOCK_OPS_RWND_INIT:
rv = rwnd_init;
break;
case BPF_SOCK_OPS_TCP_CONNECT_CB:
/* Set sndbuf and rcvbuf of active connections */
rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF, &bufsize,
sizeof(bufsize));
rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
&bufsize, sizeof(bufsize));
break;
case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
rv = bpf_setsockopt(skops, SOL_TCP, TCP_BPF_IW, &iw,
sizeof(iw));
break;
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
/* Set sndbuf and rcvbuf of passive connections */
rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF, &bufsize,
sizeof(bufsize));
rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
&bufsize, sizeof(bufsize));
break;
default:
rv = -1;
}
#ifdef DEBUG
bpf_printk("Returning %d\n", rv);
#endif
skops->reply = rv;
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/tcp_iw_kern.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <time.h>
#include <signal.h>
#include <bpf/libbpf.h>
static __u64 time_get_ns(void)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_sec * 1000000000ull + ts.tv_nsec;
}
static __u64 start_time;
static __u64 cnt;
#define MAX_CNT 100000ll
static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
{
struct {
__u64 pid;
__u64 cookie;
} *e = data;
if (e->cookie != 0x12345678) {
printf("BUG pid %llx cookie %llx sized %d\n",
e->pid, e->cookie, size);
return;
}
cnt++;
if (cnt == MAX_CNT) {
printf("recv %lld events per sec\n",
MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
return;
}
}
int main(int argc, char **argv)
{
struct bpf_link *link = NULL;
struct bpf_program *prog;
struct perf_buffer *pb;
struct bpf_object *obj;
int map_fd, ret = 0;
char filename[256];
FILE *f;
snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
return 0;
}
/* load BPF program */
if (bpf_object__load(obj)) {
fprintf(stderr, "ERROR: loading BPF object file failed\n");
goto cleanup;
}
map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
if (map_fd < 0) {
fprintf(stderr, "ERROR: finding a map in obj file failed\n");
goto cleanup;
}
prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
if (libbpf_get_error(prog)) {
fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
goto cleanup;
}
link = bpf_program__attach(prog);
if (libbpf_get_error(link)) {
fprintf(stderr, "ERROR: bpf_program__attach failed\n");
link = NULL;
goto cleanup;
}
pb = perf_buffer__new(map_fd, 8, print_bpf_output, NULL, NULL, NULL);
ret = libbpf_get_error(pb);
if (ret) {
printf("failed to setup perf_buffer: %d\n", ret);
return 1;
}
f = popen("taskset 1 dd if=/dev/zero of=/dev/null", "r");
(void) f;
start_time = time_get_ns();
while ((ret = perf_buffer__poll(pb, 1000)) >= 0 && cnt < MAX_CNT) {
}
kill(0, SIGINT);
cleanup:
bpf_link__destroy(link);
bpf_object__close(obj);
return ret;
}
| linux-master | samples/bpf/trace_output_user.c |
/* Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* BPF program to set initial receive window to 40 packets and send
* and receive buffers to 1.5MB. This would usually be done after
* doing appropriate checks that indicate the hosts are far enough
* away (i.e. large RTT).
*
* Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include <linux/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define DEBUG 1
SEC("sockops")
int bpf_bufs(struct bpf_sock_ops *skops)
{
int bufsize = 1500000;
int rwnd_init = 40;
int rv = 0;
int op;
	/* For testing purposes, only execute the rest of the BPF program
	 * if neither port number is 55601
	 */
if (bpf_ntohl(skops->remote_port) != 55601 &&
skops->local_port != 55601) {
skops->reply = -1;
return 1;
}
op = (int) skops->op;
#ifdef DEBUG
	bpf_printk("BPF command: %d\n", op);
#endif
	/* Usually there would be a check to ensure the hosts are far
	 * from each other so it makes sense to increase buffer sizes
	 */
switch (op) {
case BPF_SOCK_OPS_RWND_INIT:
rv = rwnd_init;
break;
case BPF_SOCK_OPS_TCP_CONNECT_CB:
/* Set sndbuf and rcvbuf of active connections */
rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF, &bufsize,
sizeof(bufsize));
rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
&bufsize, sizeof(bufsize));
break;
case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
/* Nothing to do */
break;
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
/* Set sndbuf and rcvbuf of passive connections */
rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF, &bufsize,
sizeof(bufsize));
rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
&bufsize, sizeof(bufsize));
break;
default:
rv = -1;
}
#ifdef DEBUG
bpf_printk("Returning %d\n", rv);
#endif
skops->reply = rv;
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/tcp_bufs_kern.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
struct pair {
long long val;
__u64 ip;
};
static __u64 time_get_ns(void)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_sec * 1000000000ull + ts.tv_nsec;
}
static void print_old_objects(int fd)
{
long long val = time_get_ns();
__u64 key, next_key;
struct pair v;
key = write(1, "\e[1;1H\e[2J", 11); /* clear screen */
key = -1;
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
bpf_map_lookup_elem(fd, &next_key, &v);
key = next_key;
if (val - v.val < 1000000000ll)
			/* skip objects allocated less than 1 sec ago */
continue;
printf("obj 0x%llx is %2lldsec old was allocated at ip %llx\n",
next_key, (val - v.val) / 1000000000ll, v.ip);
}
}
int main(int ac, char **argv)
{
struct bpf_link *links[2];
struct bpf_program *prog;
struct bpf_object *obj;
char filename[256];
int map_fd, j = 0;
snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
return 0;
}
/* load BPF program */
if (bpf_object__load(obj)) {
fprintf(stderr, "ERROR: loading BPF object file failed\n");
goto cleanup;
}
map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
if (map_fd < 0) {
fprintf(stderr, "ERROR: finding a map in obj file failed\n");
goto cleanup;
}
bpf_object__for_each_program(prog, obj) {
links[j] = bpf_program__attach(prog);
if (libbpf_get_error(links[j])) {
fprintf(stderr, "ERROR: bpf_program__attach failed\n");
links[j] = NULL;
goto cleanup;
}
j++;
}
while (1) {
print_old_objects(map_fd);
sleep(1);
}
cleanup:
for (j--; j >= 0; j--)
bpf_link__destroy(links[j]);
bpf_object__close(obj);
return 0;
}
| linux-master | samples/bpf/tracex4_user.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sampleip: sample instruction pointer and frequency count in a BPF map.
*
* Copyright 2016 Netflix, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "perf-sys.h"
#include "trace_helpers.h"
#define DEFAULT_FREQ 99
#define DEFAULT_SECS 5
#define MAX_IPS 8192
static int map_fd;
static int nr_cpus;
static long _text_addr;
static void usage(void)
{
printf("USAGE: sampleip [-F freq] [duration]\n");
printf(" -F freq # sample frequency (Hertz), default 99\n");
printf(" duration # sampling duration (seconds), default 5\n");
}
static int sampling_start(int freq, struct bpf_program *prog,
struct bpf_link *links[])
{
int i, pmu_fd;
struct perf_event_attr pe_sample_attr = {
.type = PERF_TYPE_SOFTWARE,
.freq = 1,
.sample_period = freq,
.config = PERF_COUNT_SW_CPU_CLOCK,
.inherit = 1,
};
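	/* Note (illustrative, not in the original source): with .freq = 1 the
	 * kernel interprets this value as a rate in Hertz rather than a
	 * period; sample_period and sample_freq share a union in
	 * perf_event_attr, so assigning the frequency through .sample_period
	 * works, if obliquely.
	 */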
for (i = 0; i < nr_cpus; i++) {
pmu_fd = sys_perf_event_open(&pe_sample_attr, -1 /* pid */, i,
-1 /* group_fd */, 0 /* flags */);
if (pmu_fd < 0) {
fprintf(stderr, "ERROR: Initializing perf sampling\n");
return 1;
}
links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
if (libbpf_get_error(links[i])) {
fprintf(stderr, "ERROR: Attach perf event\n");
links[i] = NULL;
close(pmu_fd);
return 1;
}
}
return 0;
}
static void sampling_end(struct bpf_link *links[])
{
int i;
for (i = 0; i < nr_cpus; i++)
bpf_link__destroy(links[i]);
}
struct ipcount {
__u64 ip;
__u32 count;
};
/* used for sorting */
struct ipcount counts[MAX_IPS];
static int count_cmp(const void *p1, const void *p2)
{
return ((struct ipcount *)p1)->count - ((struct ipcount *)p2)->count;
}
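/* Note (illustrative, not in the original source): subtracting the counts
 * sorts ascending, so the qsort() in print_ip_map() leaves the hottest
 * addresses at the bottom of the listing.
 */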
static void print_ip_map(int fd)
{
struct ksym *sym;
__u64 key, next_key;
__u32 value;
int i, max;
printf("%-19s %-32s %s\n", "ADDR", "KSYM", "COUNT");
/* fetch IPs and counts */
key = 0, i = 0;
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
bpf_map_lookup_elem(fd, &next_key, &value);
counts[i].ip = next_key;
counts[i++].count = value;
key = next_key;
}
max = i;
/* sort and print */
qsort(counts, max, sizeof(struct ipcount), count_cmp);
for (i = 0; i < max; i++) {
if (counts[i].ip > _text_addr) {
sym = ksym_search(counts[i].ip);
if (!sym) {
printf("ksym not found. Is kallsyms loaded?\n");
continue;
}
printf("0x%-17llx %-32s %u\n", counts[i].ip, sym->name,
counts[i].count);
} else {
printf("0x%-17llx %-32s %u\n", counts[i].ip, "(user)",
counts[i].count);
}
}
if (max == MAX_IPS) {
printf("WARNING: IP hash was full (max %d entries); ", max);
printf("may have dropped samples\n");
}
}
static void int_exit(int sig)
{
printf("\n");
print_ip_map(map_fd);
exit(0);
}
int main(int argc, char **argv)
{
int opt, freq = DEFAULT_FREQ, secs = DEFAULT_SECS, error = 1;
struct bpf_object *obj = NULL;
struct bpf_program *prog;
struct bpf_link **links;
char filename[256];
/* process arguments */
while ((opt = getopt(argc, argv, "F:h")) != -1) {
switch (opt) {
case 'F':
freq = atoi(optarg);
break;
case 'h':
default:
usage();
return 0;
}
}
if (argc - optind == 1)
secs = atoi(argv[optind]);
if (freq == 0 || secs == 0) {
usage();
return 1;
}
/* initialize kernel symbol translation */
if (load_kallsyms()) {
fprintf(stderr, "ERROR: loading /proc/kallsyms\n");
return 2;
}
/* used to determine whether the address is kernel space */
_text_addr = ksym_get_addr("_text");
if (!_text_addr) {
fprintf(stderr, "ERROR: no '_text' in /proc/kallsyms\n");
return 3;
}
/* create perf FDs for each CPU */
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
links = calloc(nr_cpus, sizeof(struct bpf_link *));
if (!links) {
fprintf(stderr, "ERROR: malloc of links\n");
goto cleanup;
}
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
obj = NULL;
goto cleanup;
}
prog = bpf_object__find_program_by_name(obj, "do_sample");
if (!prog) {
fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
goto cleanup;
}
/* load BPF program */
if (bpf_object__load(obj)) {
fprintf(stderr, "ERROR: loading BPF object file failed\n");
goto cleanup;
}
map_fd = bpf_object__find_map_fd_by_name(obj, "ip_map");
if (map_fd < 0) {
fprintf(stderr, "ERROR: finding a map in obj file failed\n");
goto cleanup;
}
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
/* do sampling */
printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n",
freq, secs);
if (sampling_start(freq, prog, links) != 0)
goto cleanup;
sleep(secs);
error = 0;
cleanup:
sampling_end(links);
/* output sample counts */
if (!error)
print_ip_map(map_fd);
free(links);
bpf_object__close(obj);
return error;
}
| linux-master | samples/bpf/sampleip_user.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 Facebook
*
 * BPF program to automatically reflect the TOS field from a received SYN packet
*
* Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
#include <uapi/linux/tcp.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/ipv6.h>
#include <uapi/linux/in.h>
#include <linux/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define DEBUG 1
SEC("sockops")
int bpf_basertt(struct bpf_sock_ops *skops)
{
char header[sizeof(struct ipv6hdr)];
struct ipv6hdr *hdr6;
struct iphdr *hdr;
int hdr_size = 0;
int save_syn = 1;
int tos = 0;
int rv = 0;
int op;
op = (int) skops->op;
#ifdef DEBUG
bpf_printk("BPF command: %d\n", op);
#endif
switch (op) {
case BPF_SOCK_OPS_TCP_LISTEN_CB:
rv = bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
&save_syn, sizeof(save_syn));
break;
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
if (skops->family == AF_INET)
hdr_size = sizeof(struct iphdr);
else
hdr_size = sizeof(struct ipv6hdr);
rv = bpf_getsockopt(skops, SOL_TCP, TCP_SAVED_SYN,
header, hdr_size);
if (!rv) {
if (skops->family == AF_INET) {
hdr = (struct iphdr *) header;
tos = hdr->tos;
if (tos != 0)
bpf_setsockopt(skops, SOL_IP, IP_TOS,
&tos, sizeof(tos));
} else {
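				/* Illustrative note (not in the original
				 * source): the 8-bit IPv6 traffic class is
				 * split across hdr6->priority (its high
				 * nibble) and the top nibble of flow_lbl[0],
				 * so the shift-and-or below reassembles it.
				 */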
hdr6 = (struct ipv6hdr *) header;
tos = ((hdr6->priority) << 4 |
(hdr6->flow_lbl[0]) >> 4);
if (tos)
bpf_setsockopt(skops, SOL_IPV6,
IPV6_TCLASS,
&tos, sizeof(tos));
}
rv = 0;
}
break;
default:
rv = -1;
}
#ifdef DEBUG
bpf_printk("Returning %d\n", rv);
#endif
skops->reply = rv;
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/tcp_tos_reflect_kern.c |
/* Copyright (c) 2016 Thomas Graf <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__type(key, u64);
__type(value, u64);
__uint(pinning, LIBBPF_PIN_BY_NAME);
__uint(max_entries, 1024);
} lwt_len_hist_map SEC(".maps");
static unsigned int log2(unsigned int v)
{
unsigned int r;
unsigned int shift;
r = (v > 0xFFFF) << 4; v >>= r;
shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
shift = (v > 0xF) << 2; v >>= shift; r |= shift;
shift = (v > 0x3) << 1; v >>= shift; r |= shift;
r |= (v >> 1);
return r;
}
static unsigned int log2l(unsigned long v)
{
unsigned int hi = v >> 32;
if (hi)
return log2(hi) + 32;
else
return log2(v);
}
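/* Worked example (illustrative, not in the original source): log2l(1500)
 * returns 10, since 1024 <= 1500 < 2048, so a 1500-byte packet is counted
 * in histogram bucket 10.
 */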
SEC("len_hist")
int do_len_hist(struct __sk_buff *skb)
{
__u64 *value, key, init_val = 1;
key = log2l(skb->len);
value = bpf_map_lookup_elem(&lwt_len_hist_map, &key);
if (value)
__sync_fetch_and_add(value, 1);
else
bpf_map_update_elem(&lwt_len_hist_map, &key, &init_val, BPF_ANY);
return BPF_OK;
}
char _license[] SEC("license") = "GPL";
| linux-master | samples/bpf/lwt_len_hist.bpf.c |
/* Copyright (c) 2016 Sargun Dhillon <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
__uint(key_size, sizeof(u32));
__uint(value_size, sizeof(u32));
__uint(max_entries, 1);
} cgroup_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, u32);
__type(value, u64);
__uint(max_entries, 1);
} perf_map SEC(".maps");
/* Writes the last PID that called sync to a map at index 0 */
SEC("ksyscall/sync")
int BPF_KSYSCALL(bpf_prog1)
{
u64 pid = bpf_get_current_pid_tgid();
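	/* Note (illustrative, not in the original source): the helper returns
	 * (tgid << 32) | pid, and the sample stores the whole 64-bit value,
	 * so a reader of perf_map[0] would mask the low 32 bits to recover
	 * the thread's pid.
	 */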
int idx = 0;
if (!bpf_current_task_under_cgroup(&cgroup_map, 0))
return 0;
bpf_map_update_elem(&perf_map, &idx, &pid, BPF_ANY);
return 0;
}
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
| linux-master | samples/bpf/test_current_task_under_cgroup.bpf.c |