// SPDX-License-Identifier: GPL-2.0
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/resume_user_mode.h>
#include <linux/highmem.h>
#include <linux/jump_label.h>
#include <linux/kmsan.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>
#include "common.h"
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/* See comment for enter_from_user_mode() in entry-common.h */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
arch_enter_from_user_mode(regs);
lockdep_hardirqs_off(CALLER_ADDR0);
CT_WARN_ON(__ct_state() != CONTEXT_USER);
user_exit_irqoff();
instrumentation_begin();
kmsan_unpoison_entry_regs(regs);
trace_hardirqs_off_finish();
instrumentation_end();
}
void noinstr enter_from_user_mode(struct pt_regs *regs)
{
__enter_from_user_mode(regs);
}
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
if (unlikely(audit_context())) {
unsigned long args[6];
syscall_get_arguments(current, regs, args);
audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
}
}
static long syscall_trace_enter(struct pt_regs *regs, long syscall,
unsigned long work)
{
long ret = 0;
/*
* Handle Syscall User Dispatch. This must come first, since
* the ABI here can be something that doesn't make sense for
* other syscall_work features.
*/
if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
if (syscall_user_dispatch(regs))
return -1L;
}
/* Handle ptrace */
if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
ret = ptrace_report_syscall_entry(regs);
if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
return -1L;
}
/* Do seccomp after ptrace, to catch any tracer changes. */
if (work & SYSCALL_WORK_SECCOMP) {
ret = __secure_computing(NULL);
if (ret == -1L)
return ret;
}
/* Either of the above might have changed the syscall number */
syscall = syscall_get_nr(current, regs);
if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall);
syscall_enter_audit(regs, syscall);
return ret ? : syscall;
}
static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
if (work & SYSCALL_WORK_ENTER)
syscall = syscall_trace_enter(regs, syscall, work);
return syscall;
}
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
return __syscall_enter_from_user_work(regs, syscall);
}
noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
long ret;
__enter_from_user_mode(regs);
instrumentation_begin();
local_irq_enable();
ret = __syscall_enter_from_user_work(regs, syscall);
instrumentation_end();
return ret;
}
noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
__enter_from_user_mode(regs);
instrumentation_begin();
local_irq_enable();
instrumentation_end();
}
/* See comment for exit_to_user_mode() in entry-common.h */
static __always_inline void __exit_to_user_mode(void)
{
instrumentation_begin();
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare();
instrumentation_end();
user_enter_irqoff();
arch_exit_to_user_mode();
lockdep_hardirqs_on(CALLER_ADDR0);
}
void noinstr exit_to_user_mode(void)
{
__exit_to_user_mode();
}
/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
unsigned long ti_work)
{
/*
* Before returning to user space ensure that all pending work
* items have been completed.
*/
while (ti_work & EXIT_TO_USER_MODE_WORK) {
local_irq_enable_exit_to_user(ti_work);
if (ti_work & _TIF_NEED_RESCHED)
schedule();
if (ti_work & _TIF_UPROBE)
uprobe_notify_resume(regs);
if (ti_work & _TIF_PATCH_PENDING)
klp_update_patch_state(current);
if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
arch_do_signal_or_restart(regs);
if (ti_work & _TIF_NOTIFY_RESUME)
resume_user_mode_work(regs);
/* Architecture specific TIF work */
arch_exit_to_user_mode_work(regs, ti_work);
/*
* Disable interrupts and reevaluate the work flags as they
* might have changed while interrupts and preemption were
* enabled above.
*/
local_irq_disable_exit_to_user();
/* Check if any of the above work has queued a deferred wakeup */
tick_nohz_user_enter_prepare();
ti_work = read_thread_flags();
}
/* Return the latest work state for arch_exit_to_user_mode() */
return ti_work;
}
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
unsigned long ti_work;
lockdep_assert_irqs_disabled();
/* Flush pending rcuog wakeup before the last need_resched() check */
tick_nohz_user_enter_prepare();
ti_work = read_thread_flags();
if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
ti_work = exit_to_user_mode_loop(regs, ti_work);
arch_exit_to_user_mode_prepare(regs, ti_work);
/* Ensure that kernel state is sane for a return to userspace */
kmap_assert_nomap();
lockdep_assert_irqs_disabled();
lockdep_sys_exit();
}
/*
* If SYSCALL_EMU is set, then the only reason to report is when
* SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
* instruction has already been reported in syscall_enter_from_user_mode().
*/
static inline bool report_single_step(unsigned long work)
{
if (work & SYSCALL_WORK_SYSCALL_EMU)
return false;
return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}
static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
bool step;
/*
* If the syscall was rolled back due to syscall user dispatching,
* then the tracers below are not invoked for the same reason as
* the entry side was not invoked in syscall_trace_enter(): The ABI
* of these syscalls is unknown.
*/
if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
if (unlikely(current->syscall_dispatch.on_dispatch)) {
current->syscall_dispatch.on_dispatch = false;
return;
}
}
audit_syscall_exit(regs);
if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
trace_sys_exit(regs, syscall_get_return_value(current, regs));
step = report_single_step(work);
if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
ptrace_report_syscall_exit(regs, step);
}
/*
* Syscall specific exit to user mode preparation. Runs with interrupts
* enabled.
*/
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
unsigned long nr = syscall_get_nr(current, regs);
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
local_irq_enable();
}
rseq_syscall(regs);
/*
* Do one-time syscall specific work. If these work items are
* enabled, we want to run them exactly once per syscall exit with
* interrupts enabled.
*/
if (unlikely(work & SYSCALL_WORK_EXIT))
syscall_exit_work(regs, work);
}
static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
syscall_exit_to_user_mode_prepare(regs);
local_irq_disable_exit_to_user();
exit_to_user_mode_prepare(regs);
}
void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
__syscall_exit_to_user_mode_work(regs);
}
__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
instrumentation_begin();
__syscall_exit_to_user_mode_work(regs);
instrumentation_end();
__exit_to_user_mode();
}
noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
__enter_from_user_mode(regs);
}
noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
instrumentation_begin();
exit_to_user_mode_prepare(regs);
instrumentation_end();
__exit_to_user_mode();
}
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
irqentry_state_t ret = {
.exit_rcu = false,
};
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
return ret;
}
/*
* If this entry hit the idle task invoke ct_irq_enter() whether
* RCU is watching or not.
*
* Interrupts can nest when the first interrupt invokes softirq
* processing on return which enables interrupts.
*
* Scheduler ticks in the idle task can mark quiescent state and
* terminate a grace period, if and only if the timer interrupt is
* not nested into another interrupt.
*
* Checking for rcu_is_watching() here would prevent the nesting
* interrupt to invoke ct_irq_enter(). If that nested interrupt is
* the tick then rcu_flavor_sched_clock_irq() would wrongfully
* assume that it is the first interrupt and eventually claim
* quiescent state and end grace periods prematurely.
*
* Unconditionally invoke ct_irq_enter() so RCU state stays
* consistent.
*
* TINY_RCU does not support EQS, so let the compiler eliminate
* this part when enabled.
*/
if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
/*
* If RCU is not watching then the same careful
* sequence vs. lockdep and tracing is required
* as in irqentry_enter_from_user_mode().
*/
lockdep_hardirqs_off(CALLER_ADDR0);
ct_irq_enter();
instrumentation_begin();
kmsan_unpoison_entry_regs(regs);
trace_hardirqs_off_finish();
instrumentation_end();
ret.exit_rcu = true;
return ret;
}
/*
* If RCU is watching then RCU only wants to check whether it needs
* to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
* already contains a warning when RCU is not watching, so no point
* in having another one here.
*/
lockdep_hardirqs_off(CALLER_ADDR0);
instrumentation_begin();
kmsan_unpoison_entry_regs(regs);
rcu_irq_enter_check_tick();
trace_hardirqs_off_finish();
instrumentation_end();
return ret;
}
void raw_irqentry_exit_cond_resched(void)
{
if (!preempt_count()) {
/* Sanity check RCU and thread stack */
rcu_irq_exit_check_preempt();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
WARN_ON_ONCE(!on_thread_stack());
if (need_resched())
preempt_schedule_irq();
}
}
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void)
{
if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
return;
raw_irqentry_exit_cond_resched();
}
#endif
#endif
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
lockdep_assert_irqs_disabled();
/* Check whether this returns to user mode */
if (user_mode(regs)) {
irqentry_exit_to_user_mode(regs);
} else if (!regs_irqs_disabled(regs)) {
/*
* If RCU was not watching on entry this needs to be done
* carefully and needs the same ordering of lockdep/tracing
* and RCU as the return to user mode path.
*/
if (state.exit_rcu) {
instrumentation_begin();
/* Tell the tracer that IRET will enable interrupts */
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare();
instrumentation_end();
ct_irq_exit();
lockdep_hardirqs_on(CALLER_ADDR0);
return;
}
instrumentation_begin();
if (IS_ENABLED(CONFIG_PREEMPTION))
irqentry_exit_cond_resched();
/* Covers both tracing and lockdep */
trace_hardirqs_on();
instrumentation_end();
} else {
/*
* IRQ flags state is correct already. Just tell RCU if it
* was not watching on entry.
*/
if (state.exit_rcu)
ct_irq_exit();
}
}
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
irqentry_state_t irq_state;
irq_state.lockdep = lockdep_hardirqs_enabled();
__nmi_enter();
lockdep_hardirqs_off(CALLER_ADDR0);
lockdep_hardirq_enter();
ct_nmi_enter();
instrumentation_begin();
kmsan_unpoison_entry_regs(regs);
trace_hardirqs_off_finish();
ftrace_nmi_enter();
instrumentation_end();
return irq_state;
}
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
instrumentation_begin();
ftrace_nmi_exit();
if (irq_state.lockdep) {
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare();
}
instrumentation_end();
ct_nmi_exit();
lockdep_hardirq_exit();
if (irq_state.lockdep)
lockdep_hardirqs_on(CALLER_ADDR0);
__nmi_exit();
}
| linux-master | kernel/entry/common.c |
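To place the helpers above in context, here is a minimal sketch of the architecture glue expected to call them; arch_syscall_entry(), arch_dispatch_syscall() and arch_handle_irq() are hypothetical names, and only the entry/exit helpers themselves come from common.c.

/* Hypothetical arch glue; a sketch of the intended call ordering only. */
static void arch_syscall_entry(struct pt_regs *regs, long nr)
{
	/* Context tracking, lockdep/IRQ bookkeeping, then entry work. */
	nr = syscall_enter_from_user_mode(regs, nr);

	if (nr >= 0)
		arch_dispatch_syscall(regs, nr);	/* hypothetical dispatcher */

	/* Exit work (audit, tracepoints, ptrace), then return to user mode. */
	syscall_exit_to_user_mode(regs);
}

/* Interrupt entry pairs irqentry_enter() with irqentry_exit(). */
static void arch_handle_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	/* ... invoke the actual interrupt handler here ... */
	instrumentation_end();

	irqentry_exit(regs, state);
}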
// SPDX-License-Identifier: GPL-2.0
#include <linux/entry-kvm.h>
#include <linux/kvm_host.h>
static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
{
do {
int ret;
if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
kvm_handle_signal_exit(vcpu);
return -EINTR;
}
if (ti_work & _TIF_NEED_RESCHED)
schedule();
if (ti_work & _TIF_NOTIFY_RESUME)
resume_user_mode_work(NULL);
ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work);
if (ret)
return ret;
ti_work = read_thread_flags();
} while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched());
return 0;
}
int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
{
unsigned long ti_work;
/*
* This is invoked from the outer guest loop with interrupts and
* preemption enabled.
*
* KVM invokes xfer_to_guest_mode_work_pending() with interrupts
* disabled in the inner loop before going into guest mode. No need
* to disable interrupts here.
*/
ti_work = read_thread_flags();
if (!(ti_work & XFER_TO_GUEST_MODE_WORK))
return 0;
return xfer_to_guest_mode_work(vcpu, ti_work);
}
EXPORT_SYMBOL_GPL(xfer_to_guest_mode_handle_work);
| linux-master | kernel/entry/kvm.c |
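A hedged sketch of the outer vCPU run loop that kvm.c above is written against; arch_enter_guest_mode() is a hypothetical stand-in for the architecture's guest-entry path.

static int vcpu_run_sketch(struct kvm_vcpu *vcpu)
{
	int r;

	for (;;) {
		/* Called with IRQs and preemption enabled, as the comment above requires. */
		r = xfer_to_guest_mode_handle_work(vcpu);
		if (r)
			return r;	/* e.g. -EINTR after kvm_handle_signal_exit() */

		/* Arch code disables IRQs, re-checks pending work, enters the guest. */
		r = arch_enter_guest_mode(vcpu);	/* hypothetical */
		if (r)
			return r;
	}
}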
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Collabora Ltd.
*/
#include <linux/sched.h>
#include <linux/prctl.h>
#include <linux/ptrace.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/uaccess.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <asm/syscall.h>
#include "common.h"
static void trigger_sigsys(struct pt_regs *regs)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = SIGSYS;
info.si_code = SYS_USER_DISPATCH;
info.si_call_addr = (void __user *)KSTK_EIP(current);
info.si_errno = 0;
info.si_arch = syscall_get_arch(current);
info.si_syscall = syscall_get_nr(current, regs);
force_sig_info(&info);
}
bool syscall_user_dispatch(struct pt_regs *regs)
{
struct syscall_user_dispatch *sd = &current->syscall_dispatch;
char state;
if (likely(instruction_pointer(regs) - sd->offset < sd->len))
return false;
if (unlikely(arch_syscall_is_vdso_sigreturn(regs)))
return false;
if (likely(sd->selector)) {
/*
* access_ok() is performed once, at prctl time, when
* the selector is loaded by userspace.
*/
if (unlikely(__get_user(state, sd->selector))) {
force_exit_sig(SIGSEGV);
return true;
}
if (likely(state == SYSCALL_DISPATCH_FILTER_ALLOW))
return false;
if (state != SYSCALL_DISPATCH_FILTER_BLOCK) {
force_exit_sig(SIGSYS);
return true;
}
}
sd->on_dispatch = true;
syscall_rollback(current, regs);
trigger_sigsys(regs);
return true;
}
static int task_set_syscall_user_dispatch(struct task_struct *task, unsigned long mode,
unsigned long offset, unsigned long len,
char __user *selector)
{
switch (mode) {
case PR_SYS_DISPATCH_OFF:
if (offset || len || selector)
return -EINVAL;
break;
case PR_SYS_DISPATCH_ON:
/*
* Validate the direct dispatcher region just for basic
* sanity against overflow and a 0-sized dispatcher
* region. If the user is able to submit a syscall from
* an address, that address is obviously valid.
*/
if (offset && offset + len <= offset)
return -EINVAL;
/*
* access_ok() will clear memory tags for tagged addresses
* if current has memory tagging enabled.
* To enable a tracer to set a tracee's selector the
* selector address must be untagged for access_ok(),
* otherwise an untagged tracer will always fail to set a
* tagged tracee's selector.
*/
if (selector && !access_ok(untagged_addr(selector), sizeof(*selector)))
return -EFAULT;
break;
default:
return -EINVAL;
}
task->syscall_dispatch.selector = selector;
task->syscall_dispatch.offset = offset;
task->syscall_dispatch.len = len;
task->syscall_dispatch.on_dispatch = false;
if (mode == PR_SYS_DISPATCH_ON)
set_task_syscall_work(task, SYSCALL_USER_DISPATCH);
else
clear_task_syscall_work(task, SYSCALL_USER_DISPATCH);
return 0;
}
int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
unsigned long len, char __user *selector)
{
return task_set_syscall_user_dispatch(current, mode, offset, len, selector);
}
int syscall_user_dispatch_get_config(struct task_struct *task, unsigned long size,
void __user *data)
{
struct syscall_user_dispatch *sd = &task->syscall_dispatch;
struct ptrace_sud_config cfg;
if (size != sizeof(cfg))
return -EINVAL;
if (test_task_syscall_work(task, SYSCALL_USER_DISPATCH))
cfg.mode = PR_SYS_DISPATCH_ON;
else
cfg.mode = PR_SYS_DISPATCH_OFF;
cfg.offset = sd->offset;
cfg.len = sd->len;
cfg.selector = (__u64)(uintptr_t)sd->selector;
if (copy_to_user(data, &cfg, sizeof(cfg)))
return -EFAULT;
return 0;
}
int syscall_user_dispatch_set_config(struct task_struct *task, unsigned long size,
void __user *data)
{
struct ptrace_sud_config cfg;
if (size != sizeof(cfg))
return -EINVAL;
if (copy_from_user(&cfg, data, sizeof(cfg)))
return -EFAULT;
return task_set_syscall_user_dispatch(task, cfg.mode, cfg.offset, cfg.len,
(char __user *)(uintptr_t)cfg.selector);
}
| linux-master | kernel/entry/syscall_user_dispatch.c |
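From the other side of the ABI, a minimal userspace sketch of enabling Syscall User Dispatch through the prctl() implemented above; the trampoline region and error handling are elided, and SYSCALL_DISPATCH_FILTER_ALLOW/BLOCK come from <linux/prctl.h>.

#include <signal.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

static volatile char selector = SYSCALL_DISPATCH_FILTER_ALLOW;

static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
{
	/* info->si_syscall carries the trapped syscall number, see trigger_sigsys(). */
}

static int enable_sud(void *trampoline, unsigned long tramp_len)
{
	struct sigaction act = { .sa_sigaction = sigsys_handler,
				 .sa_flags = SA_SIGINFO };

	sigaction(SIGSYS, &act, NULL);

	/*
	 * Syscalls issued outside [trampoline, trampoline + tramp_len) raise
	 * SIGSYS whenever selector == SYSCALL_DISPATCH_FILTER_BLOCK.
	 */
	return prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
		     (unsigned long)trampoline, tramp_len, &selector);
}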
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Modules tree lookup
*
* Copyright (C) 2015 Peter Zijlstra
* Copyright (C) 2015 Rusty Russell
*/
#include <linux/module.h>
#include <linux/rbtree_latch.h>
#include "internal.h"
/*
* Use a latched RB-tree for __module_address(); this allows us to use
* RCU-sched lookups of the address from any context.
*
* This is conditional on PERF_EVENTS || TRACING because those can really hit
* __module_address() hard by doing a lot of stack unwinding; potentially from
* NMI context.
*/
static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node);
return (unsigned long)mod_mem->base;
}
static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node);
return (unsigned long)mod_mem->size;
}
static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
return __mod_tree_val(a) < __mod_tree_val(b);
}
static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
unsigned long val = (unsigned long)key;
unsigned long start, end;
start = __mod_tree_val(n);
if (val < start)
return -1;
end = start + __mod_tree_size(n);
if (val >= end)
return 1;
return 0;
}
static const struct latch_tree_ops mod_tree_ops = {
.less = mod_tree_less,
.comp = mod_tree_comp,
};
static noinline void __mod_tree_insert(struct mod_tree_node *node, struct mod_tree_root *tree)
{
latch_tree_insert(&node->node, &tree->root, &mod_tree_ops);
}
static void __mod_tree_remove(struct mod_tree_node *node, struct mod_tree_root *tree)
{
latch_tree_erase(&node->node, &tree->root, &mod_tree_ops);
}
/*
* These modifications: insert, remove_init and remove; are serialized by the
* module_mutex.
*/
void mod_tree_insert(struct module *mod)
{
for_each_mod_mem_type(type) {
mod->mem[type].mtn.mod = mod;
if (mod->mem[type].size)
__mod_tree_insert(&mod->mem[type].mtn, &mod_tree);
}
}
void mod_tree_remove_init(struct module *mod)
{
for_class_mod_mem_type(type, init) {
if (mod->mem[type].size)
__mod_tree_remove(&mod->mem[type].mtn, &mod_tree);
}
}
void mod_tree_remove(struct module *mod)
{
for_each_mod_mem_type(type) {
if (mod->mem[type].size)
__mod_tree_remove(&mod->mem[type].mtn, &mod_tree);
}
}
struct module *mod_find(unsigned long addr, struct mod_tree_root *tree)
{
struct latch_tree_node *ltn;
ltn = latch_tree_find((void *)addr, &tree->root, &mod_tree_ops);
if (!ltn)
return NULL;
return container_of(ltn, struct mod_tree_node, node)->mod;
}
| linux-master | kernel/module/tree_lookup.c |
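A usage sketch in the style of __module_address(): the latch tree makes the lookup safe from any context as long as the RCU-sched read side is held (preemption disabled). mod_tree is the global root declared in internal.h; lookup_module_sketch() itself is illustrative.

static struct module *lookup_module_sketch(unsigned long addr)
{
	struct module *mod;

	preempt_disable();
	mod = mod_find(addr, &mod_tree);
	preempt_enable();

	/* The result is only a hint once the read-side critical section ends. */
	return mod;
}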
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Module sysfs support
*
* Copyright (C) 2008 Rusty Russell
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include "internal.h"
/*
* /sys/module/foo/sections stuff
* J. Corbet <[email protected]>
*/
#ifdef CONFIG_KALLSYMS
struct module_sect_attr {
struct bin_attribute battr;
unsigned long address;
};
struct module_sect_attrs {
struct attribute_group grp;
unsigned int nsections;
struct module_sect_attr attrs[];
};
#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
struct bin_attribute *battr,
char *buf, loff_t pos, size_t count)
{
struct module_sect_attr *sattr =
container_of(battr, struct module_sect_attr, battr);
char bounce[MODULE_SECT_READ_SIZE + 1];
size_t wrote;
if (pos != 0)
return -EINVAL;
/*
* Since we're a binary read handler, we must account for the
* trailing NUL byte that sprintf will write: if "buf" is
* too small to hold the NUL, or the NUL is exactly the last
* byte, the read will look like it got truncated by one byte.
* Since there is no way to ask sprintf nicely to not write
* the NUL, we have to use a bounce buffer.
*/
wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n",
kallsyms_show_value(file->f_cred)
? (void *)sattr->address : NULL);
count = min(count, wrote);
memcpy(buf, bounce, count);
return count;
}
static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
unsigned int section;
for (section = 0; section < sect_attrs->nsections; section++)
kfree(sect_attrs->attrs[section].battr.attr.name);
kfree(sect_attrs);
}
static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
unsigned int nloaded = 0, i, size[2];
struct module_sect_attrs *sect_attrs;
struct module_sect_attr *sattr;
struct bin_attribute **gattr;
/* Count loaded sections and allocate structures */
for (i = 0; i < info->hdr->e_shnum; i++)
if (!sect_empty(&info->sechdrs[i]))
nloaded++;
size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
sizeof(sect_attrs->grp.bin_attrs[0]));
size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
if (!sect_attrs)
return;
/* Setup section attributes. */
sect_attrs->grp.name = "sections";
sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
sect_attrs->nsections = 0;
sattr = &sect_attrs->attrs[0];
gattr = &sect_attrs->grp.bin_attrs[0];
for (i = 0; i < info->hdr->e_shnum; i++) {
Elf_Shdr *sec = &info->sechdrs[i];
if (sect_empty(sec))
continue;
sysfs_bin_attr_init(&sattr->battr);
sattr->address = sec->sh_addr;
sattr->battr.attr.name =
kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
if (!sattr->battr.attr.name)
goto out;
sect_attrs->nsections++;
sattr->battr.read = module_sect_read;
sattr->battr.size = MODULE_SECT_READ_SIZE;
sattr->battr.attr.mode = 0400;
*(gattr++) = &(sattr++)->battr;
}
*gattr = NULL;
if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
goto out;
mod->sect_attrs = sect_attrs;
return;
out:
free_sect_attrs(sect_attrs);
}
static void remove_sect_attrs(struct module *mod)
{
if (mod->sect_attrs) {
sysfs_remove_group(&mod->mkobj.kobj,
&mod->sect_attrs->grp);
/*
* We are positive that no one is using any sect attrs
* at this point. Deallocate immediately.
*/
free_sect_attrs(mod->sect_attrs);
mod->sect_attrs = NULL;
}
}
/*
* /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
*/
struct module_notes_attrs {
struct kobject *dir;
unsigned int notes;
struct bin_attribute attrs[];
};
static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
/*
* The caller checked the pos and count against our size.
*/
memcpy(buf, bin_attr->private + pos, count);
return count;
}
static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
unsigned int i)
{
if (notes_attrs->dir) {
while (i-- > 0)
sysfs_remove_bin_file(notes_attrs->dir,
&notes_attrs->attrs[i]);
kobject_put(notes_attrs->dir);
}
kfree(notes_attrs);
}
static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
unsigned int notes, loaded, i;
struct module_notes_attrs *notes_attrs;
struct bin_attribute *nattr;
/* failed to create section attributes, so can't create notes */
if (!mod->sect_attrs)
return;
/* Count notes sections and allocate structures. */
notes = 0;
for (i = 0; i < info->hdr->e_shnum; i++)
if (!sect_empty(&info->sechdrs[i]) &&
info->sechdrs[i].sh_type == SHT_NOTE)
++notes;
if (notes == 0)
return;
notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
GFP_KERNEL);
if (!notes_attrs)
return;
notes_attrs->notes = notes;
nattr = &notes_attrs->attrs[0];
for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
if (sect_empty(&info->sechdrs[i]))
continue;
if (info->sechdrs[i].sh_type == SHT_NOTE) {
sysfs_bin_attr_init(nattr);
nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
nattr->attr.mode = 0444;
nattr->size = info->sechdrs[i].sh_size;
nattr->private = (void *)info->sechdrs[i].sh_addr;
nattr->read = module_notes_read;
++nattr;
}
++loaded;
}
notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
if (!notes_attrs->dir)
goto out;
for (i = 0; i < notes; ++i)
if (sysfs_create_bin_file(notes_attrs->dir,
&notes_attrs->attrs[i]))
goto out;
mod->notes_attrs = notes_attrs;
return;
out:
free_notes_attrs(notes_attrs, i);
}
static void remove_notes_attrs(struct module *mod)
{
if (mod->notes_attrs)
free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}
#else /* !CONFIG_KALLSYMS */
static inline void add_sect_attrs(struct module *mod, const struct load_info *info) { }
static inline void remove_sect_attrs(struct module *mod) { }
static inline void add_notes_attrs(struct module *mod, const struct load_info *info) { }
static inline void remove_notes_attrs(struct module *mod) { }
#endif /* CONFIG_KALLSYMS */
static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
struct module_use *use;
mutex_lock(&module_mutex);
list_for_each_entry(use, &mod->target_list, target_list)
sysfs_remove_link(use->target->holders_dir, mod->name);
mutex_unlock(&module_mutex);
#endif
}
static int add_usage_links(struct module *mod)
{
int ret = 0;
#ifdef CONFIG_MODULE_UNLOAD
struct module_use *use;
mutex_lock(&module_mutex);
list_for_each_entry(use, &mod->target_list, target_list) {
ret = sysfs_create_link(use->target->holders_dir,
&mod->mkobj.kobj, mod->name);
if (ret)
break;
}
mutex_unlock(&module_mutex);
if (ret)
del_usage_links(mod);
#endif
return ret;
}
static void module_remove_modinfo_attrs(struct module *mod, int end)
{
struct module_attribute *attr;
int i;
for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
if (end >= 0 && i > end)
break;
/* pick a field to test for end of list */
if (!attr->attr.name)
break;
sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
if (attr->free)
attr->free(mod);
}
kfree(mod->modinfo_attrs);
}
static int module_add_modinfo_attrs(struct module *mod)
{
struct module_attribute *attr;
struct module_attribute *temp_attr;
int error = 0;
int i;
mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
(modinfo_attrs_count + 1)),
GFP_KERNEL);
if (!mod->modinfo_attrs)
return -ENOMEM;
temp_attr = mod->modinfo_attrs;
for (i = 0; (attr = modinfo_attrs[i]); i++) {
if (!attr->test || attr->test(mod)) {
memcpy(temp_attr, attr, sizeof(*temp_attr));
sysfs_attr_init(&temp_attr->attr);
error = sysfs_create_file(&mod->mkobj.kobj,
&temp_attr->attr);
if (error)
goto error_out;
++temp_attr;
}
}
return 0;
error_out:
if (i > 0)
module_remove_modinfo_attrs(mod, --i);
else
kfree(mod->modinfo_attrs);
return error;
}
static void mod_kobject_put(struct module *mod)
{
DECLARE_COMPLETION_ONSTACK(c);
mod->mkobj.kobj_completion = &c;
kobject_put(&mod->mkobj.kobj);
wait_for_completion(&c);
}
static int mod_sysfs_init(struct module *mod)
{
int err;
struct kobject *kobj;
if (!module_kset) {
pr_err("%s: module sysfs not initialized\n", mod->name);
err = -EINVAL;
goto out;
}
kobj = kset_find_obj(module_kset, mod->name);
if (kobj) {
pr_err("%s: module is already loaded\n", mod->name);
kobject_put(kobj);
err = -EINVAL;
goto out;
}
mod->mkobj.mod = mod;
memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
mod->mkobj.kobj.kset = module_kset;
err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
"%s", mod->name);
if (err)
mod_kobject_put(mod);
out:
return err;
}
int mod_sysfs_setup(struct module *mod,
const struct load_info *info,
struct kernel_param *kparam,
unsigned int num_params)
{
int err;
err = mod_sysfs_init(mod);
if (err)
goto out;
mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
if (!mod->holders_dir) {
err = -ENOMEM;
goto out_unreg;
}
err = module_param_sysfs_setup(mod, kparam, num_params);
if (err)
goto out_unreg_holders;
err = module_add_modinfo_attrs(mod);
if (err)
goto out_unreg_param;
err = add_usage_links(mod);
if (err)
goto out_unreg_modinfo_attrs;
add_sect_attrs(mod, info);
add_notes_attrs(mod, info);
return 0;
out_unreg_modinfo_attrs:
module_remove_modinfo_attrs(mod, -1);
out_unreg_param:
module_param_sysfs_remove(mod);
out_unreg_holders:
kobject_put(mod->holders_dir);
out_unreg:
mod_kobject_put(mod);
out:
return err;
}
static void mod_sysfs_fini(struct module *mod)
{
remove_notes_attrs(mod);
remove_sect_attrs(mod);
mod_kobject_put(mod);
}
void mod_sysfs_teardown(struct module *mod)
{
del_usage_links(mod);
module_remove_modinfo_attrs(mod, -1);
module_param_sysfs_remove(mod);
kobject_put(mod->mkobj.drivers_dir);
kobject_put(mod->holders_dir);
mod_sysfs_fini(mod);
}
void init_param_lock(struct module *mod)
{
mutex_init(&mod->param_lock);
}
| linux-master | kernel/module/sysfs.c |
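For illustration, a small userspace sketch that consumes what module_sect_read() above exports; read_section_addr() is a hypothetical helper, and seeing a real (non-NULL) address requires passing the kallsyms_show_value() check (typically CAP_SYSLOG).

#include <stdio.h>

static int read_section_addr(const char *mod, const char *sect)
{
	char path[256];
	unsigned long addr = 0;
	FILE *f;

	/* Section files are created with mode 0400, so this usually needs root. */
	snprintf(path, sizeof(path), "/sys/module/%s/sections/%s", mod, sect);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "0x%lx", &addr) != 1)
		addr = 0;
	fclose(f);

	printf("%s %s @ 0x%lx\n", mod, sect, addr);
	return 0;
}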
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2021 Google LLC.
*/
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kobject.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/vmalloc.h>
#include "internal.h"
static int module_extend_max_pages(struct load_info *info, unsigned int extent)
{
struct page **new_pages;
new_pages = kvmalloc_array(info->max_pages + extent,
sizeof(info->pages), GFP_KERNEL);
if (!new_pages)
return -ENOMEM;
memcpy(new_pages, info->pages, info->max_pages * sizeof(info->pages));
kvfree(info->pages);
info->pages = new_pages;
info->max_pages += extent;
return 0;
}
static struct page *module_get_next_page(struct load_info *info)
{
struct page *page;
int error;
if (info->max_pages == info->used_pages) {
error = module_extend_max_pages(info, info->used_pages);
if (error)
return ERR_PTR(error);
}
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
if (!page)
return ERR_PTR(-ENOMEM);
info->pages[info->used_pages++] = page;
return page;
}
#if defined(CONFIG_MODULE_COMPRESS_GZIP)
#include <linux/zlib.h>
#define MODULE_COMPRESSION gzip
#define MODULE_DECOMPRESS_FN module_gzip_decompress
/*
* Calculate length of the header which consists of signature, header
* flags, time stamp and operating system ID (10 bytes total), plus
* an optional filename.
*/
static size_t module_gzip_header_len(const u8 *buf, size_t size)
{
const u8 signature[] = { 0x1f, 0x8b, 0x08 };
size_t len = 10;
if (size < len || memcmp(buf, signature, sizeof(signature)))
return 0;
if (buf[3] & 0x08) {
do {
/*
* If we can't find the end of the file name we must
* be dealing with a corrupted file.
*/
if (len == size)
return 0;
} while (buf[len++] != '\0');
}
return len;
}
static ssize_t module_gzip_decompress(struct load_info *info,
const void *buf, size_t size)
{
struct z_stream_s s = { 0 };
size_t new_size = 0;
size_t gzip_hdr_len;
ssize_t retval;
int rc;
gzip_hdr_len = module_gzip_header_len(buf, size);
if (!gzip_hdr_len) {
pr_err("not a gzip compressed module\n");
return -EINVAL;
}
s.next_in = buf + gzip_hdr_len;
s.avail_in = size - gzip_hdr_len;
s.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
if (!s.workspace)
return -ENOMEM;
rc = zlib_inflateInit2(&s, -MAX_WBITS);
if (rc != Z_OK) {
pr_err("failed to initialize decompressor: %d\n", rc);
retval = -EINVAL;
goto out;
}
do {
struct page *page = module_get_next_page(info);
if (IS_ERR(page)) {
retval = PTR_ERR(page);
goto out_inflate_end;
}
s.next_out = kmap_local_page(page);
s.avail_out = PAGE_SIZE;
rc = zlib_inflate(&s, 0);
kunmap_local(s.next_out);
new_size += PAGE_SIZE - s.avail_out;
} while (rc == Z_OK);
if (rc != Z_STREAM_END) {
pr_err("decompression failed with status %d\n", rc);
retval = -EINVAL;
goto out_inflate_end;
}
retval = new_size;
out_inflate_end:
zlib_inflateEnd(&s);
out:
kfree(s.workspace);
return retval;
}
#elif defined(CONFIG_MODULE_COMPRESS_XZ)
#include <linux/xz.h>
#define MODULE_COMPRESSION xz
#define MODULE_DECOMPRESS_FN module_xz_decompress
static ssize_t module_xz_decompress(struct load_info *info,
const void *buf, size_t size)
{
static const u8 signature[] = { 0xfd, '7', 'z', 'X', 'Z', 0 };
struct xz_dec *xz_dec;
struct xz_buf xz_buf;
enum xz_ret xz_ret;
size_t new_size = 0;
ssize_t retval;
if (size < sizeof(signature) ||
memcmp(buf, signature, sizeof(signature))) {
pr_err("not an xz compressed module\n");
return -EINVAL;
}
xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
if (!xz_dec)
return -ENOMEM;
xz_buf.in_size = size;
xz_buf.in = buf;
xz_buf.in_pos = 0;
do {
struct page *page = module_get_next_page(info);
if (IS_ERR(page)) {
retval = PTR_ERR(page);
goto out;
}
xz_buf.out = kmap_local_page(page);
xz_buf.out_pos = 0;
xz_buf.out_size = PAGE_SIZE;
xz_ret = xz_dec_run(xz_dec, &xz_buf);
kunmap_local(xz_buf.out);
new_size += xz_buf.out_pos;
} while (xz_buf.out_pos == PAGE_SIZE && xz_ret == XZ_OK);
if (xz_ret != XZ_STREAM_END) {
pr_err("decompression failed with status %d\n", xz_ret);
retval = -EINVAL;
goto out;
}
retval = new_size;
out:
xz_dec_end(xz_dec);
return retval;
}
#elif defined(CONFIG_MODULE_COMPRESS_ZSTD)
#include <linux/zstd.h>
#define MODULE_COMPRESSION zstd
#define MODULE_DECOMPRESS_FN module_zstd_decompress
static ssize_t module_zstd_decompress(struct load_info *info,
const void *buf, size_t size)
{
static const u8 signature[] = { 0x28, 0xb5, 0x2f, 0xfd };
ZSTD_outBuffer zstd_dec;
ZSTD_inBuffer zstd_buf;
zstd_frame_header header;
size_t wksp_size;
void *wksp = NULL;
ZSTD_DStream *dstream;
size_t ret;
size_t new_size = 0;
int retval;
if (size < sizeof(signature) ||
memcmp(buf, signature, sizeof(signature))) {
pr_err("not a zstd compressed module\n");
return -EINVAL;
}
zstd_buf.src = buf;
zstd_buf.pos = 0;
zstd_buf.size = size;
ret = zstd_get_frame_header(&header, zstd_buf.src, zstd_buf.size);
if (ret != 0) {
pr_err("ZSTD-compressed data has an incomplete frame header\n");
retval = -EINVAL;
goto out;
}
if (header.windowSize > (1 << ZSTD_WINDOWLOG_MAX)) {
pr_err("ZSTD-compressed data has too large a window size\n");
retval = -EINVAL;
goto out;
}
wksp_size = zstd_dstream_workspace_bound(header.windowSize);
wksp = vmalloc(wksp_size);
if (!wksp) {
retval = -ENOMEM;
goto out;
}
dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);
if (!dstream) {
pr_err("Can't initialize ZSTD stream\n");
retval = -ENOMEM;
goto out;
}
do {
struct page *page = module_get_next_page(info);
if (IS_ERR(page)) {
retval = PTR_ERR(page);
goto out;
}
zstd_dec.dst = kmap_local_page(page);
zstd_dec.pos = 0;
zstd_dec.size = PAGE_SIZE;
ret = zstd_decompress_stream(dstream, &zstd_dec, &zstd_buf);
kunmap_local(zstd_dec.dst);
retval = zstd_get_error_code(ret);
if (retval)
break;
new_size += zstd_dec.pos;
} while (zstd_dec.pos == PAGE_SIZE && ret != 0);
if (retval) {
pr_err("ZSTD-decompression failed with status %d\n", retval);
retval = -EINVAL;
goto out;
}
retval = new_size;
out:
vfree(wksp);
return retval;
}
#else
#error "Unexpected configuration for CONFIG_MODULE_DECOMPRESS"
#endif
int module_decompress(struct load_info *info, const void *buf, size_t size)
{
unsigned int n_pages;
ssize_t data_size;
int error;
#if defined(CONFIG_MODULE_STATS)
info->compressed_len = size;
#endif
/*
* Start with number of pages twice as big as needed for
* compressed data.
*/
n_pages = DIV_ROUND_UP(size, PAGE_SIZE) * 2;
error = module_extend_max_pages(info, n_pages);
data_size = MODULE_DECOMPRESS_FN(info, buf, size);
if (data_size < 0) {
error = data_size;
goto err;
}
info->hdr = vmap(info->pages, info->used_pages, VM_MAP, PAGE_KERNEL);
if (!info->hdr) {
error = -ENOMEM;
goto err;
}
info->len = data_size;
return 0;
err:
module_decompress_cleanup(info);
return error;
}
void module_decompress_cleanup(struct load_info *info)
{
int i;
if (info->hdr)
vunmap(info->hdr);
for (i = 0; i < info->used_pages; i++)
__free_page(info->pages[i]);
kvfree(info->pages);
info->pages = NULL;
info->max_pages = info->used_pages = 0;
}
#ifdef CONFIG_SYSFS
static ssize_t compression_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, __stringify(MODULE_COMPRESSION) "\n");
}
static struct kobj_attribute module_compression_attr = __ATTR_RO(compression);
static int __init module_decompress_sysfs_init(void)
{
int error;
error = sysfs_create_file(&module_kset->kobj,
&module_compression_attr.attr);
if (error)
pr_warn("Failed to create 'compression' attribute");
return 0;
}
late_initcall(module_decompress_sysfs_init);
#endif
| linux-master | kernel/module/decompress.c |
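The decompression path above is reached from userspace via finit_module() with MODULE_INIT_COMPRESSED_FILE; a minimal sketch follows, with error handling mostly elided.

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/module.h>

static int load_compressed(const char *path)
{
	long ret;
	int fd = open(path, O_RDONLY | O_CLOEXEC);

	if (fd < 0)
		return -1;
	/* The kernel picks gzip/xz/zstd from the magic bytes checked above. */
	ret = syscall(SYS_finit_module, fd, "", MODULE_INIT_COMPRESSED_FILE);
	close(fd);
	return ret;
}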
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Module taint unload tracking support
*
* Copyright (C) 2022 Aaron Tomlin
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/rculist.h>
#include "internal.h"
static LIST_HEAD(unloaded_tainted_modules);
extern struct dentry *mod_debugfs_root;
int try_add_tainted_module(struct module *mod)
{
struct mod_unload_taint *mod_taint;
module_assert_mutex_or_preempt();
if (!mod->taints)
goto out;
list_for_each_entry_rcu(mod_taint, &unloaded_tainted_modules, list,
lockdep_is_held(&module_mutex)) {
if (!strcmp(mod_taint->name, mod->name) &&
mod_taint->taints & mod->taints) {
mod_taint->count++;
goto out;
}
}
mod_taint = kmalloc(sizeof(*mod_taint), GFP_KERNEL);
if (unlikely(!mod_taint))
return -ENOMEM;
strscpy(mod_taint->name, mod->name, MODULE_NAME_LEN);
mod_taint->taints = mod->taints;
list_add_rcu(&mod_taint->list, &unloaded_tainted_modules);
mod_taint->count = 1;
out:
return 0;
}
void print_unloaded_tainted_modules(void)
{
struct mod_unload_taint *mod_taint;
char buf[MODULE_FLAGS_BUF_SIZE];
if (!list_empty(&unloaded_tainted_modules)) {
printk(KERN_DEFAULT "Unloaded tainted modules:");
list_for_each_entry_rcu(mod_taint, &unloaded_tainted_modules,
list) {
size_t l;
l = module_flags_taint(mod_taint->taints, buf);
buf[l++] = '\0';
pr_cont(" %s(%s):%llu", mod_taint->name, buf,
mod_taint->count);
}
}
}
#ifdef CONFIG_DEBUG_FS
static void *unloaded_tainted_modules_seq_start(struct seq_file *m, loff_t *pos)
__acquires(rcu)
{
rcu_read_lock();
return seq_list_start_rcu(&unloaded_tainted_modules, *pos);
}
static void *unloaded_tainted_modules_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next_rcu(p, &unloaded_tainted_modules, pos);
}
static void unloaded_tainted_modules_seq_stop(struct seq_file *m, void *p)
__releases(rcu)
{
rcu_read_unlock();
}
static int unloaded_tainted_modules_seq_show(struct seq_file *m, void *p)
{
struct mod_unload_taint *mod_taint;
char buf[MODULE_FLAGS_BUF_SIZE];
size_t l;
mod_taint = list_entry(p, struct mod_unload_taint, list);
l = module_flags_taint(mod_taint->taints, buf);
buf[l++] = '\0';
seq_printf(m, "%s (%s) %llu", mod_taint->name, buf, mod_taint->count);
seq_puts(m, "\n");
return 0;
}
static const struct seq_operations unloaded_tainted_modules_seq_ops = {
.start = unloaded_tainted_modules_seq_start,
.next = unloaded_tainted_modules_seq_next,
.stop = unloaded_tainted_modules_seq_stop,
.show = unloaded_tainted_modules_seq_show,
};
static int unloaded_tainted_modules_open(struct inode *inode, struct file *file)
{
return seq_open(file, &unloaded_tainted_modules_seq_ops);
}
static const struct file_operations unloaded_tainted_modules_fops = {
.open = unloaded_tainted_modules_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int __init unloaded_tainted_modules_init(void)
{
debugfs_create_file("unloaded_tainted", 0444, mod_debugfs_root, NULL,
&unloaded_tainted_modules_fops);
return 0;
}
module_init(unloaded_tainted_modules_init);
#endif /* CONFIG_DEBUG_FS */
| linux-master | kernel/module/tracking.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Module strict rwx
*
* Copyright (C) 2015 Rusty Russell
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include "internal.h"
static void module_set_memory(const struct module *mod, enum mod_mem_type type,
int (*set_memory)(unsigned long start, int num_pages))
{
const struct module_memory *mod_mem = &mod->mem[type];
set_vm_flush_reset_perms(mod_mem->base);
set_memory((unsigned long)mod_mem->base, mod_mem->size >> PAGE_SHIFT);
}
/*
* Since some arches are moving towards PAGE_KERNEL module allocations instead
* of PAGE_KERNEL_EXEC, keep module_enable_x() independent of
* CONFIG_STRICT_MODULE_RWX because they are needed regardless of whether we
* are strict.
*/
void module_enable_x(const struct module *mod)
{
for_class_mod_mem_type(type, text)
module_set_memory(mod, type, set_memory_x);
}
void module_enable_ro(const struct module *mod, bool after_init)
{
if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
return;
#ifdef CONFIG_STRICT_MODULE_RWX
if (!rodata_enabled)
return;
#endif
module_set_memory(mod, MOD_TEXT, set_memory_ro);
module_set_memory(mod, MOD_INIT_TEXT, set_memory_ro);
module_set_memory(mod, MOD_RODATA, set_memory_ro);
module_set_memory(mod, MOD_INIT_RODATA, set_memory_ro);
if (after_init)
module_set_memory(mod, MOD_RO_AFTER_INIT, set_memory_ro);
}
void module_enable_nx(const struct module *mod)
{
if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
return;
for_class_mod_mem_type(type, data)
module_set_memory(mod, type, set_memory_nx);
}
int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
const unsigned long shf_wx = SHF_WRITE | SHF_EXECINSTR;
int i;
if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
return 0;
for (i = 0; i < hdr->e_shnum; i++) {
if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) {
pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n",
mod->name, secstrings + sechdrs[i].sh_name, i);
return -ENOEXEC;
}
}
return 0;
}
| linux-master | kernel/module/strict_rwx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Module signature checker
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/module_signature.h>
#include <linux/string.h>
#include <linux/verification.h>
#include <linux/security.h>
#include <crypto/public_key.h>
#include <uapi/linux/module.h>
#include "internal.h"
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "module."
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);
/*
* Export the sig_enforce kernel cmdline parameter to allow other subsystems
* to rely on it instead of directly on the CONFIG_MODULE_SIG_FORCE config.
*/
bool is_module_sig_enforced(void)
{
return sig_enforce;
}
EXPORT_SYMBOL(is_module_sig_enforced);
void set_module_sig_enforced(void)
{
sig_enforce = true;
}
/*
* Verify the signature on a module.
*/
int mod_verify_sig(const void *mod, struct load_info *info)
{
struct module_signature ms;
size_t sig_len, modlen = info->len;
int ret;
pr_devel("==>%s(,%zu)\n", __func__, modlen);
if (modlen <= sizeof(ms))
return -EBADMSG;
memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms));
ret = mod_check_sig(&ms, modlen, "module");
if (ret)
return ret;
sig_len = be32_to_cpu(ms.sig_len);
modlen -= sig_len + sizeof(ms);
info->len = modlen;
return verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len,
VERIFY_USE_SECONDARY_KEYRING,
VERIFYING_MODULE_SIGNATURE,
NULL, NULL);
}
int module_sig_check(struct load_info *info, int flags)
{
int err = -ENODATA;
const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
const char *reason;
const void *mod = info->hdr;
bool mangled_module = flags & (MODULE_INIT_IGNORE_MODVERSIONS |
MODULE_INIT_IGNORE_VERMAGIC);
/*
* Do not allow mangled modules as a module with version information
* removed is no longer the module that was signed.
*/
if (!mangled_module &&
info->len > markerlen &&
memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
/* We truncate the module to discard the signature */
info->len -= markerlen;
err = mod_verify_sig(mod, info);
if (!err) {
info->sig_ok = true;
return 0;
}
}
/*
* We don't permit modules to be loaded into trusted kernels
* without a valid signature on them, but if we're not enforcing,
* certain errors are non-fatal.
*/
switch (err) {
case -ENODATA:
reason = "unsigned module";
break;
case -ENOPKG:
reason = "module with unsupported crypto";
break;
case -ENOKEY:
reason = "module with unavailable key";
break;
default:
/*
* All other errors are fatal, including lack of memory,
* unparseable signatures, and signature check failures --
* even if signatures aren't required.
*/
return err;
}
if (is_module_sig_enforced()) {
pr_notice("Loading of %s is rejected\n", reason);
return -EKEYREJECTED;
}
return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
}
| linux-master | kernel/module/signing.c |
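A sketch of the appended-signature layout that mod_verify_sig() above walks backwards through; module_payload_len_sketch() is an illustrative helper, not part of the original file.

/*
 * Layout, from the start of the file:
 *
 *   [ module data | PKCS#7 blob | struct module_signature | MODULE_SIG_STRING ]
 */
static size_t module_payload_len_sketch(const void *mod, size_t len)
{
	const size_t markerlen = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature ms;
	size_t sig_len;

	if (len < markerlen + sizeof(ms) ||
	    memcmp(mod + len - markerlen, MODULE_SIG_STRING, markerlen))
		return len;	/* unsigned module: the whole file is payload */

	len -= markerlen;
	memcpy(&ms, mod + len - sizeof(ms), sizeof(ms));
	sig_len = be32_to_cpu(ms.sig_len);
	if (sig_len + sizeof(ms) > len)
		return 0;	/* malformed; mod_check_sig() rejects this case */

	return len - sizeof(ms) - sig_len;	/* PKCS#7 blob starts here */
}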
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Debugging module statistics.
*
* Copyright (C) 2023 Luis Chamberlain <[email protected]>
*/
#include <linux/module.h>
#include <uapi/linux/module.h>
#include <linux/string.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/rculist.h>
#include <linux/math.h>
#include "internal.h"
/**
* DOC: module debugging statistics overview
*
* Enabling CONFIG_MODULE_STATS enables module debugging statistics which
* are useful to monitor and root cause memory pressure issues with module
* loading. These statistics are useful to allow us to improve production
* workloads.
*
* The module debugging statistics currently supported help keep track of module
* loading failures to enable improvements either for kernel module auto-loading
* usage (request_module()) or interactions with userspace. Statistics are
* provided to track all possible failures in the finit_module() path and memory
* wasted in the process. Each of the failure counters is associated
* with a type of module loading failure which is known to incur a certain amount
* of memory allocation loss. In the worst case loading a module will fail after
* a 3 step memory allocation process:
*
* a) memory allocated with kernel_read_file_from_fd()
* b) module decompression processes the file read from
* kernel_read_file_from_fd(), and vmap() is used to map
* the decompressed module to a new local buffer which represents
* a copy of the decompressed module passed from userspace. The buffer
* from kernel_read_file_from_fd() is freed right away.
* c) layout_and_allocate() allocates space for the final resting
* place where we would keep the module if it were to be processed
* successfully.
*
* If a failure occurs after these three different allocations only one
* counter will be incremented with the summation of the allocated bytes freed
* incurred during this failure. Likewise, if module loading failed only after
* step b) a separate counter is used and incremented for the bytes freed and
* not used during both of those allocations.
*
* Virtual memory space can be limited, for example on x86 virtual memory size
* defaults to 128 MiB. We should strive to limit and avoid wasting virtual
* memory allocations when possible. These module debugging statistics help
* to evaluate how much memory is being wasted on bootup due to module loading
* failures.
*
* All counters are designed to be incremental. Atomic counters are used to
* remain simple and to avoid delays and deadlocks.
*/
/**
* DOC: dup_failed_modules - tracks duplicate failed modules
*
* Linked list of modules which failed to be loaded because an already existing
* module with the same name was already being processed or already loaded.
* The finit_module() system call incurs heavy virtual memory allocations. In
* the worst case an finit_module() system call can end up allocating virtual
* memory 3 times:
*
* 1) kernel_read_file_from_fd() call uses vmalloc()
* 2) optional module decompression uses vmap()
* 3) layout_and_allocate() can use vzalloc() or an arch-specific variation of
* vmalloc to deal with ELF sections requiring special permissions
*
* In practice on a typical boot today most finit_module() calls fail due to
* the module with the same name already being loaded or about to be processed.
* All virtual memory allocated to these failed modules will be freed with
* no functional use.
*
* To help with this the dup_failed_modules allows us to track modules which
* failed to load due to the fact that a module was already loaded or being
* processed. There are only two points at which we can fail such calls,
* we list them below along with the number of virtual memory allocation
* calls:
*
* a) FAIL_DUP_MOD_BECOMING: at the end of early_mod_check() before
* layout_and_allocate().
* - with module decompression: 2 virtual memory allocation calls
* - without module decompression: 1 virtual memory allocation calls
* b) FAIL_DUP_MOD_LOAD: after layout_and_allocate() on add_unformed_module()
* - with module decompression 3 virtual memory allocation calls
* - without module decompression 2 virtual memory allocation calls
*
* We should strive to get this list to be as small as possible. If this list
* is not empty it is a reflection of work or optimizations possible
* either in-kernel or in userspace.
*/
static LIST_HEAD(dup_failed_modules);
/**
* DOC: module statistics debugfs counters
*
* The total amount of wasted virtual memory allocation space during module
* loading can be computed by adding the total from the summation:
*
* * @invalid_kread_bytes +
* @invalid_decompress_bytes +
* @invalid_becoming_bytes +
* @invalid_mod_bytes
*
* The following debugfs counters are available to inspect module loading
* failures:
*
* * total_mod_size: total bytes ever used by all modules we've dealt with on
* this system
* * total_text_size: total bytes of the .text and .init.text ELF section
* sizes we've dealt with on this system
* * invalid_kread_bytes: bytes allocated and then freed on failures which
* happen due to the initial kernel_read_file_from_fd(). kernel_read_file_from_fd()
* uses vmalloc(). These should typically not happen unless your system is
* under memory pressure.
* * invalid_decompress_bytes: number of bytes allocated and freed due to
* memory allocations in the module decompression path that use vmap().
* These typically should not happen unless your system is under memory
* pressure.
* invalid_becoming_bytes: total number of bytes allocated and freed,
* used to read the kernel module userspace wants us to read before we
* promote it to be processed and added to our @modules linked list. These
* failures can happen if we had a check in between a successful kernel_read_file_from_fd()
* call and right before we allocate our private memory for the module
* which would be kept if the module is successfully loaded. The most common
* reason for this failure is when userspace is racing to load a module
* which it does not yet see loaded. The first module to succeed in
* add_unformed_module() will add a module to our &modules list and
* subsequent loads of modules with the same name will error out at the
* end of early_mod_check(). The check for module_patient_check_exists()
* at the end of early_mod_check() prevents duplicate allocations
* on layout_and_allocate() for modules already being processed. These
* duplicate failed modules are non-fatal, however they typically are
* indicative of userspace not seeing a module in userspace loaded yet and
* unnecessarily trying to load a module before the kernel even has a chance
* to begin to process prior requests. Although duplicate failures can be
* non-fatal, we should try to reduce vmalloc() pressure proactively, so
* ideally after boot this will be as close to 0 as possible. If module
* decompression was used we also add to this counter the cost of the
* initial kernel_read_file_from_fd() of the compressed module. If module
* decompression was not used the value represents the total allocated and
* freed bytes in kernel_read_file_from_fd() calls for these type of
* failures. These failures can occur because:
*
* * module_sig_check() - module signature checks
* * elf_validity_cache_copy() - some ELF validation issue
* * early_mod_check():
*
* * blacklisting
* * failed to rewrite section headers
* * version magic
* * live patch requirements didn't check out
* * the module was detected as being already present
*
* * invalid_mod_bytes: these are the total number of bytes allocated and
* freed due to failures after we did all the sanity checks of the module
* which userspace passed to us and after our first check that the module
* is unique. A module can still fail to load if we detect the module is
* loaded after we allocate space for it with layout_and_allocate(), we do
* this check right before processing the module as live and run its
* initialization routines. Note that if you have a failure of this type it
* also means the respective kernel_read_file_from_fd() memory space was
* also freed and not used, and so we increment this counter with twice
* the size of the module. Additionally if you used module decompression
* the size of the compressed module is also added to this counter.
*
* * modcount: how many modules we've loaded in our kernel life time
* * failed_kreads: how many modules failed due to failed kernel_read_file_from_fd()
* * failed_decompress: how many failed module decompression attempts we've had.
* These really should not happen unless your compression / decompression
* might be broken.
* * failed_becoming: how many modules failed after we kernel_read_file_from_fd()
* it and before we allocate memory for it with layout_and_allocate(). This
* counter is never incremented if you manage to validate the module and
* call layout_and_allocate() for it.
* * failed_load_modules: how many modules failed once we've allocated our
* private space for our module using layout_and_allocate(). These failures
* should hopefully mostly be dealt with already. Races in theory could
* still exist here, but it would just mean the kernel had started processing
* two threads concurrently up to early_mod_check() and one thread won.
* These failures are good signs the kernel or userspace is doing something
* seriously stupid or that could be improved. We should strive to fix these,
* but it is perhaps not easy to fix them. A recent example are the modules
* requests incurred for frequency modules, a separate module request was
* being issued for each CPU on a system.
*/
atomic_long_t total_mod_size;
atomic_long_t total_text_size;
atomic_long_t invalid_kread_bytes;
atomic_long_t invalid_decompress_bytes;
static atomic_long_t invalid_becoming_bytes;
static atomic_long_t invalid_mod_bytes;
atomic_t modcount;
atomic_t failed_kreads;
atomic_t failed_decompress;
static atomic_t failed_becoming;
static atomic_t failed_load_modules;
static const char *mod_fail_to_str(struct mod_fail_load *mod_fail)
{
if (test_bit(FAIL_DUP_MOD_BECOMING, &mod_fail->dup_fail_mask) &&
test_bit(FAIL_DUP_MOD_LOAD, &mod_fail->dup_fail_mask))
return "Becoming & Load";
if (test_bit(FAIL_DUP_MOD_BECOMING, &mod_fail->dup_fail_mask))
return "Becoming";
if (test_bit(FAIL_DUP_MOD_LOAD, &mod_fail->dup_fail_mask))
return "Load";
return "Bug-on-stats";
}
void mod_stat_bump_invalid(struct load_info *info, int flags)
{
atomic_long_add(info->len * 2, &invalid_mod_bytes);
atomic_inc(&failed_load_modules);
#if defined(CONFIG_MODULE_DECOMPRESS)
if (flags & MODULE_INIT_COMPRESSED_FILE)
atomic_long_add(info->compressed_len, &invalid_mod_bytes);
#endif
}
void mod_stat_bump_becoming(struct load_info *info, int flags)
{
atomic_inc(&failed_becoming);
atomic_long_add(info->len, &invalid_becoming_bytes);
#if defined(CONFIG_MODULE_DECOMPRESS)
if (flags & MODULE_INIT_COMPRESSED_FILE)
atomic_long_add(info->compressed_len, &invalid_becoming_bytes);
#endif
}
int try_add_failed_module(const char *name, enum fail_dup_mod_reason reason)
{
struct mod_fail_load *mod_fail;
list_for_each_entry_rcu(mod_fail, &dup_failed_modules, list,
lockdep_is_held(&module_mutex)) {
if (!strcmp(mod_fail->name, name)) {
atomic_long_inc(&mod_fail->count);
__set_bit(reason, &mod_fail->dup_fail_mask);
goto out;
}
}
mod_fail = kzalloc(sizeof(*mod_fail), GFP_KERNEL);
if (!mod_fail)
return -ENOMEM;
memcpy(mod_fail->name, name, strlen(name));
__set_bit(reason, &mod_fail->dup_fail_mask);
atomic_long_inc(&mod_fail->count);
list_add_rcu(&mod_fail->list, &dup_failed_modules);
out:
return 0;
}
/*
* At 64 bytes per module and assuming a 1024-byte preamble we can fit
* 112 module prints within 8k.
*
* 1024 + (64*112) = 8k
*/
#define MAX_PREAMBLE 1024
#define MAX_FAILED_MOD_PRINT 112
#define MAX_BYTES_PER_MOD 64
static ssize_t read_file_mod_stats(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct mod_fail_load *mod_fail;
unsigned int len, size, count_failed = 0;
char *buf;
int ret;
u32 live_mod_count, fkreads, fdecompress, fbecoming, floads;
unsigned long total_size, text_size, ikread_bytes, ibecoming_bytes,
idecompress_bytes, imod_bytes, total_virtual_lost;
live_mod_count = atomic_read(&modcount);
fkreads = atomic_read(&failed_kreads);
fdecompress = atomic_read(&failed_decompress);
fbecoming = atomic_read(&failed_becoming);
floads = atomic_read(&failed_load_modules);
total_size = atomic_long_read(&total_mod_size);
text_size = atomic_long_read(&total_text_size);
ikread_bytes = atomic_long_read(&invalid_kread_bytes);
idecompress_bytes = atomic_long_read(&invalid_decompress_bytes);
ibecoming_bytes = atomic_long_read(&invalid_becoming_bytes);
imod_bytes = atomic_long_read(&invalid_mod_bytes);
total_virtual_lost = ikread_bytes + idecompress_bytes + ibecoming_bytes + imod_bytes;
size = MAX_PREAMBLE + min((unsigned int)(floads + fbecoming),
(unsigned int)MAX_FAILED_MOD_PRINT) * MAX_BYTES_PER_MOD;
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
/* The beginning of our debug preamble */
len = scnprintf(buf, size, "%25s\t%u\n", "Mods ever loaded", live_mod_count);
len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on kread", fkreads);
len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on decompress",
fdecompress);
len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on becoming", fbecoming);
len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on load", floads);
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Total module size", total_size);
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Total mod text size", text_size);
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed kread bytes", ikread_bytes);
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed decompress bytes",
idecompress_bytes);
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed becoming bytes", ibecoming_bytes);
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Failed kmod bytes", imod_bytes);
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Virtual mem wasted bytes", total_virtual_lost);
if (live_mod_count && total_size) {
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Average mod size",
DIV_ROUND_UP(total_size, live_mod_count));
}
if (live_mod_count && text_size) {
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Average mod text size",
DIV_ROUND_UP(text_size, live_mod_count));
}
/*
* We use WARN_ON_ONCE() for the counters to ensure we always have parity
* between each type of failure and its corresponding byte counter.
* The imod_bytes counter does not increase for fkreads failures,
* for example, and so on.
*/
WARN_ON_ONCE(ikread_bytes && !fkreads);
if (fkreads && ikread_bytes) {
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Avg fail kread bytes",
DIV_ROUND_UP(ikread_bytes, fkreads));
}
WARN_ON_ONCE(ibecoming_bytes && !fbecoming);
if (fbecoming && ibecoming_bytes) {
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Avg fail becoming bytes",
DIV_ROUND_UP(ibecoming_bytes, fbecoming));
}
WARN_ON_ONCE(idecompress_bytes && !fdecompress);
if (fdecompress && idecompress_bytes) {
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Avg fail decomp bytes",
DIV_ROUND_UP(idecompress_bytes, fdecompress));
}
WARN_ON_ONCE(imod_bytes && !floads);
if (floads && imod_bytes) {
len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Average fail load bytes",
DIV_ROUND_UP(imod_bytes, floads));
}
/* End of our debug preamble header. */
/* Catch when we've gone beyond our expected preamble */
WARN_ON_ONCE(len >= MAX_PREAMBLE);
if (list_empty(&dup_failed_modules))
goto out;
len += scnprintf(buf + len, size - len, "Duplicate failed modules:\n");
len += scnprintf(buf + len, size - len, "%25s\t%15s\t%25s\n",
"Module-name", "How-many-times", "Reason");
mutex_lock(&module_mutex);
list_for_each_entry_rcu(mod_fail, &dup_failed_modules, list) {
if (WARN_ON_ONCE(++count_failed >= MAX_FAILED_MOD_PRINT))
goto out_unlock;
len += scnprintf(buf + len, size - len, "%25s\t%15lu\t%25s\n", mod_fail->name,
atomic_long_read(&mod_fail->count), mod_fail_to_str(mod_fail));
}
out_unlock:
mutex_unlock(&module_mutex);
out:
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret;
}
#undef MAX_PREAMBLE
#undef MAX_FAILED_MOD_PRINT
#undef MAX_BYTES_PER_MOD
static const struct file_operations fops_mod_stats = {
.read = read_file_mod_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
#define mod_debug_add_ulong(name) debugfs_create_ulong(#name, 0400, mod_debugfs_root, (unsigned long *) &name.counter)
#define mod_debug_add_atomic(name) debugfs_create_atomic_t(#name, 0400, mod_debugfs_root, &name)
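/*
 * All of these counters land in debugfs. A quick way to inspect them from
 * userspace (a sketch, assuming debugfs is mounted at the conventional
 * /sys/kernel/debug location and mod_debugfs_root is the "modules"
 * directory there):
 *
 *   # cat /sys/kernel/debug/modules/stats
 *   # cat /sys/kernel/debug/modules/total_mod_size
 */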
static int __init module_stats_init(void)
{
mod_debug_add_ulong(total_mod_size);
mod_debug_add_ulong(total_text_size);
mod_debug_add_ulong(invalid_kread_bytes);
mod_debug_add_ulong(invalid_decompress_bytes);
mod_debug_add_ulong(invalid_becoming_bytes);
mod_debug_add_ulong(invalid_mod_bytes);
mod_debug_add_atomic(modcount);
mod_debug_add_atomic(failed_kreads);
mod_debug_add_atomic(failed_decompress);
mod_debug_add_atomic(failed_becoming);
mod_debug_add_atomic(failed_load_modules);
debugfs_create_file("stats", 0400, mod_debugfs_root, mod_debugfs_root, &fops_mod_stats);
return 0;
}
#undef mod_debug_add_ulong
#undef mod_debug_add_atomic
module_init(module_stats_init);
| linux-master | kernel/module/stats.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Module kdb support
*
* Copyright (C) 2010 Jason Wessel
*/
#include <linux/module.h>
#include <linux/kdb.h>
#include "internal.h"
/*
* kdb_lsmod - This function implements the 'lsmod' command. Lists
* currently loaded kernel modules.
* Mostly taken from userland lsmod.
*/
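/*
 * Illustrative output, built from the kdb_printf() formats below with
 * hypothetical values:
 *
 *   Module                  Size  modstruct     Used by
 *   ext4                  913408/  323584/    4096/  270336 0x...   2  (Live) 0x.../0x.../0x.../0x... [ mbcache jbd2 ]
 */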
int kdb_lsmod(int argc, const char **argv)
{
struct module *mod;
if (argc != 0)
return KDB_ARGCOUNT;
kdb_printf("Module Size modstruct Used by\n");
list_for_each_entry(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
kdb_printf("%-20s%8u", mod->name, mod->mem[MOD_TEXT].size);
kdb_printf("/%8u", mod->mem[MOD_RODATA].size);
kdb_printf("/%8u", mod->mem[MOD_RO_AFTER_INIT].size);
kdb_printf("/%8u", mod->mem[MOD_DATA].size);
kdb_printf(" 0x%px ", (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
kdb_printf("%4d ", module_refcount(mod));
#endif
if (mod->state == MODULE_STATE_GOING)
kdb_printf(" (Unloading)");
else if (mod->state == MODULE_STATE_COMING)
kdb_printf(" (Loading)");
else
kdb_printf(" (Live)");
kdb_printf(" 0x%px", mod->mem[MOD_TEXT].base);
kdb_printf("/0x%px", mod->mem[MOD_RODATA].base);
kdb_printf("/0x%px", mod->mem[MOD_RO_AFTER_INIT].base);
kdb_printf("/0x%px", mod->mem[MOD_DATA].base);
#ifdef CONFIG_MODULE_UNLOAD
{
struct module_use *use;
kdb_printf(" [ ");
list_for_each_entry(use, &mod->source_list,
source_list)
kdb_printf("%s ", use->target->name);
kdb_printf("]\n");
}
#endif
}
return 0;
}
| linux-master | kernel/module/kdb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Module proc support
*
* Copyright (C) 2008 Alexey Dobriyan
*/
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include "internal.h"
#ifdef CONFIG_MODULE_UNLOAD
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
struct module_use *use;
int printed_something = 0;
seq_printf(m, " %i ", module_refcount(mod));
/*
* Always include a trailing comma so userspace can differentiate
* between this and the old multi-field proc format.
*/
list_for_each_entry(use, &mod->source_list, source_list) {
printed_something = 1;
seq_printf(m, "%s,", use->source->name);
}
if (mod->init && !mod->exit) {
printed_something = 1;
seq_puts(m, "[permanent],");
}
if (!printed_something)
seq_puts(m, "-");
}
#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
/* We don't know the usage count, or what modules are using. */
seq_puts(m, " - -");
}
#endif /* CONFIG_MODULE_UNLOAD */
/* Called by the /proc file system to return a list of modules. */
static void *m_start(struct seq_file *m, loff_t *pos)
{
mutex_lock(&module_mutex);
return seq_list_start(&modules, *pos);
}
static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next(p, &modules, pos);
}
static void m_stop(struct seq_file *m, void *p)
{
mutex_unlock(&module_mutex);
}
static unsigned int module_total_size(struct module *mod)
{
int size = 0;
for_each_mod_mem_type(type)
size += mod->mem[type].size;
return size;
}
static int m_show(struct seq_file *m, void *p)
{
struct module *mod = list_entry(p, struct module, list);
char buf[MODULE_FLAGS_BUF_SIZE];
void *value;
unsigned int size;
/* We always ignore unformed modules. */
if (mod->state == MODULE_STATE_UNFORMED)
return 0;
size = module_total_size(mod);
seq_printf(m, "%s %u", mod->name, size);
print_unload_info(m, mod);
/* Informative for users. */
seq_printf(m, " %s",
mod->state == MODULE_STATE_GOING ? "Unloading" :
mod->state == MODULE_STATE_COMING ? "Loading" :
"Live");
/* Used by oprofile and other similar tools. */
value = m->private ? NULL : mod->mem[MOD_TEXT].base;
seq_printf(m, " 0x%px", value);
/* Taints info */
if (mod->taints)
seq_printf(m, " %s", module_flags(mod, buf, true));
seq_puts(m, "\n");
return 0;
}
/*
* Format: modulename size refcount deps address
*
* Where refcount is a number or -, and deps is a comma-separated list
* of depends or -.
*/
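/*
 * Example line (hypothetical values; the address is printed as zeros when
 * kernel pointers are hidden, see modules_open() below):
 *
 *   ext4 913408 2 mbcache,jbd2, Live 0x0000000000000000
 */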
static const struct seq_operations modules_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = m_show
};
/*
* This also sets the "private" pointer to non-NULL if the
* kernel pointers should be hidden (so you can just test
* "m->private" to see if you should keep the values private).
*
* We use the same logic as for /proc/kallsyms.
*/
static int modules_open(struct inode *inode, struct file *file)
{
int err = seq_open(file, &modules_op);
if (!err) {
struct seq_file *m = file->private_data;
m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
}
return err;
}
static const struct proc_ops modules_proc_ops = {
.proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = modules_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = seq_release,
};
static int __init proc_modules_init(void)
{
proc_create("modules", 0, NULL, &modules_proc_ops);
return 0;
}
module_init(proc_modules_init);
| linux-master | kernel/module/procfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2002 Richard Henderson
* Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
* Copyright (C) 2023 Luis Chamberlain <[email protected]>
*/
#define INCLUDE_VERMAGIC
#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/module_signature.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/buildid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <linux/cfi.h>
#include <linux/debugfs.h>
#include <uapi/linux/module.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
/*
* Mutex protects:
* 1) List of modules (also safely readable with preempt_disable),
* 2) module_use links,
* 3) mod_tree.addr_min/mod_tree.addr_max.
* (delete and add uses RCU list operations).
*/
DEFINE_MUTEX(module_mutex);
LIST_HEAD(modules);
/* Work queue for freeing init sections in success case */
static void do_free_init(struct work_struct *w);
static DECLARE_WORK(init_free_wq, do_free_init);
static LLIST_HEAD(init_free_list);
struct mod_tree_root mod_tree __cacheline_aligned = {
.addr_min = -1UL,
};
struct symsearch {
const struct kernel_symbol *start, *stop;
const s32 *crcs;
enum mod_license license;
};
/*
* Bounds of module memory, for speeding up __module_address.
* Protected by module_mutex.
*/
static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base,
unsigned int size, struct mod_tree_root *tree)
{
unsigned long min = (unsigned long)base;
unsigned long max = min + size;
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
if (mod_mem_type_is_core_data(type)) {
if (min < tree->data_addr_min)
tree->data_addr_min = min;
if (max > tree->data_addr_max)
tree->data_addr_max = max;
return;
}
#endif
if (min < tree->addr_min)
tree->addr_min = min;
if (max > tree->addr_max)
tree->addr_max = max;
}
static void mod_update_bounds(struct module *mod)
{
for_each_mod_mem_type(type) {
struct module_memory *mod_mem = &mod->mem[type];
if (mod_mem->size)
__mod_update_bounds(type, mod_mem->base, mod_mem->size, &mod_tree);
}
}
/* Block module loading/unloading? */
int modules_disabled;
core_param(nomodule, modules_disabled, bint, 0);
/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);
static BLOCKING_NOTIFIER_HEAD(module_notify_list);
int register_module_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);
int unregister_module_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
/*
* We require a truly strong try_module_get(): 0 means success.
* Otherwise an error is returned due to ongoing or failed
* initialization etc.
*/
static inline int strong_try_module_get(struct module *mod)
{
BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
if (mod && mod->state == MODULE_STATE_COMING)
return -EBUSY;
if (try_module_get(mod))
return 0;
else
return -ENOENT;
}
static inline void add_taint_module(struct module *mod, unsigned flag,
enum lockdep_ok lockdep_ok)
{
add_taint(flag, lockdep_ok);
set_bit(flag, &mod->taints);
}
/*
* A thread that wants to hold a reference to a module only while it
* is running can call this to safely exit.
*/
void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
{
module_put(mod);
kthread_exit(code);
}
EXPORT_SYMBOL(__module_put_and_kthread_exit);
/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
unsigned int i;
for (i = 1; i < info->hdr->e_shnum; i++) {
Elf_Shdr *shdr = &info->sechdrs[i];
/* Alloc bit cleared means "ignore it." */
if ((shdr->sh_flags & SHF_ALLOC)
&& strcmp(info->secstrings + shdr->sh_name, name) == 0)
return i;
}
return 0;
}
/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
/* Section 0 has sh_addr 0. */
return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}
/* Find a module section, or NULL. Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
const char *name,
size_t object_size,
unsigned int *num)
{
unsigned int sec = find_sec(info, name);
/* Section 0 has sh_addr 0 and sh_size 0. */
*num = info->sechdrs[sec].sh_size / object_size;
return (void *)info->sechdrs[sec].sh_addr;
}
/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */
static unsigned int find_any_sec(const struct load_info *info, const char *name)
{
unsigned int i;
for (i = 1; i < info->hdr->e_shnum; i++) {
Elf_Shdr *shdr = &info->sechdrs[i];
if (strcmp(info->secstrings + shdr->sh_name, name) == 0)
return i;
}
return 0;
}
/*
* Find a module section, or NULL. Fill in number of "objects" in section.
* Ignores SHF_ALLOC flag.
*/
static __maybe_unused void *any_section_objs(const struct load_info *info,
const char *name,
size_t object_size,
unsigned int *num)
{
unsigned int sec = find_any_sec(info, name);
/* Section 0 has sh_addr 0 and sh_size 0. */
*num = info->sechdrs[sec].sh_size / object_size;
return (void *)info->sechdrs[sec].sh_addr;
}
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
static const char *kernel_symbol_name(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
return offset_to_ptr(&sym->name_offset);
#else
return sym->name;
#endif
}
static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
if (!sym->namespace_offset)
return NULL;
return offset_to_ptr(&sym->namespace_offset);
#else
return sym->namespace;
#endif
}
int cmp_name(const void *name, const void *sym)
{
return strcmp(name, kernel_symbol_name(sym));
}
static bool find_exported_symbol_in_section(const struct symsearch *syms,
struct module *owner,
struct find_symbol_arg *fsa)
{
struct kernel_symbol *sym;
if (!fsa->gplok && syms->license == GPL_ONLY)
return false;
sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
sizeof(struct kernel_symbol), cmp_name);
if (!sym)
return false;
fsa->owner = owner;
fsa->crc = symversion(syms->crcs, sym - syms->start);
fsa->sym = sym;
fsa->license = syms->license;
return true;
}
/*
* Find an exported symbol and return it, along with (optional) crc and
* (optional) module which owns it. Needs preempt disabled or module_mutex.
*/
bool find_symbol(struct find_symbol_arg *fsa)
{
static const struct symsearch arr[] = {
{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
NOT_GPL_ONLY },
{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
__start___kcrctab_gpl,
GPL_ONLY },
};
struct module *mod;
unsigned int i;
module_assert_mutex_or_preempt();
for (i = 0; i < ARRAY_SIZE(arr); i++)
if (find_exported_symbol_in_section(&arr[i], NULL, fsa))
return true;
list_for_each_entry_rcu(mod, &modules, list,
lockdep_is_held(&module_mutex)) {
struct symsearch arr[] = {
{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
NOT_GPL_ONLY },
{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
mod->gpl_crcs,
GPL_ONLY },
};
if (mod->state == MODULE_STATE_UNFORMED)
continue;
for (i = 0; i < ARRAY_SIZE(arr); i++)
if (find_exported_symbol_in_section(&arr[i], mod, fsa))
return true;
}
pr_debug("Failed to find symbol %s\n", fsa->name);
return false;
}
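/*
 * Typical find_symbol() usage (a sketch; __symbol_get() further below is a
 * real caller). The symbol name here is hypothetical:
 *
 *	struct find_symbol_arg fsa = {
 *		.name = "some_exported_symbol",
 *		.gplok = true,
 *	};
 *
 *	preempt_disable();
 *	if (find_symbol(&fsa))
 *		pr_info("owned by %s\n", module_name(fsa.owner));
 *	preempt_enable();
 */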
/*
* Search for module by name: must hold module_mutex (or preempt disabled
* for read-only access).
*/
struct module *find_module_all(const char *name, size_t len,
bool even_unformed)
{
struct module *mod;
module_assert_mutex_or_preempt();
list_for_each_entry_rcu(mod, &modules, list,
lockdep_is_held(&module_mutex)) {
if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
continue;
if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
return mod;
}
return NULL;
}
struct module *find_module(const char *name)
{
return find_module_all(name, strlen(name), false);
}
#ifdef CONFIG_SMP
static inline void __percpu *mod_percpu(struct module *mod)
{
return mod->percpu;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
unsigned long align = pcpusec->sh_addralign;
if (!pcpusec->sh_size)
return 0;
if (align > PAGE_SIZE) {
pr_warn("%s: per-cpu alignment %li > %li\n",
mod->name, align, PAGE_SIZE);
align = PAGE_SIZE;
}
mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
if (!mod->percpu) {
pr_warn("%s: Could not allocate %lu bytes percpu data\n",
mod->name, (unsigned long)pcpusec->sh_size);
return -ENOMEM;
}
mod->percpu_size = pcpusec->sh_size;
return 0;
}
static void percpu_modfree(struct module *mod)
{
free_percpu(mod->percpu);
}
static unsigned int find_pcpusec(struct load_info *info)
{
return find_sec(info, ".data..percpu");
}
static void percpu_modcopy(struct module *mod,
const void *from, unsigned long size)
{
int cpu;
for_each_possible_cpu(cpu)
memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}
bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
struct module *mod;
unsigned int cpu;
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
if (!mod->percpu_size)
continue;
for_each_possible_cpu(cpu) {
void *start = per_cpu_ptr(mod->percpu, cpu);
void *va = (void *)addr;
if (va >= start && va < start + mod->percpu_size) {
if (can_addr) {
*can_addr = (unsigned long) (va - start);
*can_addr += (unsigned long)
per_cpu_ptr(mod->percpu,
get_boot_cpu_id());
}
preempt_enable();
return true;
}
}
}
preempt_enable();
return false;
}
/**
* is_module_percpu_address() - test whether address is from module static percpu
* @addr: address to test
*
* Test whether @addr belongs to module static percpu area.
*
* Return: %true if @addr is from module static percpu area
*/
bool is_module_percpu_address(unsigned long addr)
{
return __is_module_percpu_address(addr, NULL);
}
#else /* ... !CONFIG_SMP */
static inline void __percpu *mod_percpu(struct module *mod)
{
return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
/* UP modules shouldn't have this section: ENOMEM isn't quite right */
if (info->sechdrs[info->index.pcpu].sh_size != 0)
return -ENOMEM;
return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
return 0;
}
static inline void percpu_modcopy(struct module *mod,
const void *from, unsigned long size)
{
/* pcpusec should be 0, and size of that section should be 0. */
BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
return false;
}
bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
return false;
}
#endif /* CONFIG_SMP */
#define MODINFO_ATTR(field) \
static void setup_modinfo_##field(struct module *mod, const char *s) \
{ \
mod->field = kstrdup(s, GFP_KERNEL); \
} \
static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
struct module_kobject *mk, char *buffer) \
{ \
return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \
} \
static int modinfo_##field##_exists(struct module *mod) \
{ \
return mod->field != NULL; \
} \
static void free_modinfo_##field(struct module *mod) \
{ \
kfree(mod->field); \
mod->field = NULL; \
} \
static struct module_attribute modinfo_##field = { \
.attr = { .name = __stringify(field), .mode = 0444 }, \
.show = show_modinfo_##field, \
.setup = setup_modinfo_##field, \
.test = modinfo_##field##_exists, \
.free = free_modinfo_##field, \
};
MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
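/*
 * Each MODINFO_ATTR(field) above expands to setup_modinfo_##field(),
 * show_modinfo_##field(), modinfo_##field##_exists() and
 * free_modinfo_##field(), tied together by a struct module_attribute;
 * these back the version and srcversion files in /sys/module/<name>/.
 */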
static struct {
char name[MODULE_NAME_LEN + 1];
char taints[MODULE_FLAGS_BUF_SIZE];
} last_unloaded_module;
#ifdef CONFIG_MODULE_UNLOAD
EXPORT_TRACEPOINT_SYMBOL(module_get);
/* MODULE_REF_BASE is the base reference count by kmodule loader. */
#define MODULE_REF_BASE 1
/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
/*
* Initialize reference counter to MODULE_REF_BASE.
* refcnt == 0 means module is going.
*/
atomic_set(&mod->refcnt, MODULE_REF_BASE);
INIT_LIST_HEAD(&mod->source_list);
INIT_LIST_HEAD(&mod->target_list);
/* Hold reference count during initialization. */
atomic_inc(&mod->refcnt);
return 0;
}
/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
struct module_use *use;
list_for_each_entry(use, &b->source_list, source_list) {
if (use->source == a)
return 1;
}
pr_debug("%s does not use %s!\n", a->name, b->name);
return 0;
}
/*
* Module a uses b
* - we add 'a' as a "source", 'b' as a "target" of module use
* - the module_use is added to the list of 'b' sources (so
* 'b' can walk the list to see who sourced them), and of 'a'
* targets (so 'a' can see what modules it targets).
*/
static int add_module_usage(struct module *a, struct module *b)
{
struct module_use *use;
pr_debug("Allocating new usage for %s.\n", a->name);
use = kmalloc(sizeof(*use), GFP_ATOMIC);
if (!use)
return -ENOMEM;
use->source = a;
use->target = b;
list_add(&use->source_list, &b->source_list);
list_add(&use->target_list, &a->target_list);
return 0;
}
/* Module a uses b: caller needs module_mutex() */
static int ref_module(struct module *a, struct module *b)
{
int err;
if (b == NULL || already_uses(a, b))
return 0;
/* If module isn't available, we fail. */
err = strong_try_module_get(b);
if (err)
return err;
err = add_module_usage(a, b);
if (err) {
module_put(b);
return err;
}
return 0;
}
/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
struct module_use *use, *tmp;
mutex_lock(&module_mutex);
list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
struct module *i = use->target;
pr_debug("%s unusing %s\n", mod->name, i->name);
module_put(i);
list_del(&use->source_list);
list_del(&use->target_list);
kfree(use);
}
mutex_unlock(&module_mutex);
}
#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
int ret = (flags & O_TRUNC);
if (ret)
add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
int ret;
/* Try to decrement refcnt which we set at loading */
ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
BUG_ON(ret < 0);
if (ret)
/* Someone can put this right now, recover with checking */
ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
return ret;
}
static int try_stop_module(struct module *mod, int flags, int *forced)
{
/* If it's not unused, quit unless we're forcing. */
if (try_release_module_ref(mod) != 0) {
*forced = try_force_unload(flags);
if (!(*forced))
return -EWOULDBLOCK;
}
/* Mark it as dying. */
mod->state = MODULE_STATE_GOING;
return 0;
}
/**
* module_refcount() - return the refcount or -1 if unloading
* @mod: the module we're checking
*
* Return:
* -1 if the module is in the process of unloading
* otherwise the number of references in the kernel to the module
*/
int module_refcount(struct module *mod)
{
return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
/* This exists whether we can unload or not */
static void free_module(struct module *mod);
SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
unsigned int, flags)
{
struct module *mod;
char name[MODULE_NAME_LEN];
char buf[MODULE_FLAGS_BUF_SIZE];
int ret, forced = 0;
if (!capable(CAP_SYS_MODULE) || modules_disabled)
return -EPERM;
if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
return -EFAULT;
name[MODULE_NAME_LEN-1] = '\0';
audit_log_kern_module(name);
if (mutex_lock_interruptible(&module_mutex) != 0)
return -EINTR;
mod = find_module(name);
if (!mod) {
ret = -ENOENT;
goto out;
}
if (!list_empty(&mod->source_list)) {
/* Other modules depend on us: get rid of them first. */
ret = -EWOULDBLOCK;
goto out;
}
/* Doing init or already dying? */
if (mod->state != MODULE_STATE_LIVE) {
/* FIXME: if (force), slam module count damn the torpedoes */
pr_debug("%s already dying\n", mod->name);
ret = -EBUSY;
goto out;
}
/* If it has an init func, it must have an exit func to unload */
if (mod->init && !mod->exit) {
forced = try_force_unload(flags);
if (!forced) {
/* This module can't be removed */
ret = -EBUSY;
goto out;
}
}
ret = try_stop_module(mod, flags, &forced);
if (ret != 0)
goto out;
mutex_unlock(&module_mutex);
/* Final destruction, now that no one is using it. */
if (mod->exit != NULL)
mod->exit();
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_GOING, mod);
klp_module_going(mod);
ftrace_release_mod(mod);
async_synchronize_full();
/* Store the name and taints of the last unloaded module for diagnostic purposes */
strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name));
strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints));
free_module(mod);
/* someone could wait for the module in add_unformed_module() */
wake_up_all(&module_wq);
return 0;
out:
mutex_unlock(&module_mutex);
return ret;
}
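/*
 * Userspace reaches the above via the delete_module(2) syscall; rmmod
 * typically passes O_NONBLOCK, and a forced unload ("rmmod -f") adds
 * O_TRUNC, which try_force_unload() above checks for.
 */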
void __symbol_put(const char *symbol)
{
struct find_symbol_arg fsa = {
.name = symbol,
.gplok = true,
};
preempt_disable();
BUG_ON(!find_symbol(&fsa));
module_put(fsa.owner);
preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);
/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
struct module *modaddr;
unsigned long a = (unsigned long)dereference_function_descriptor(addr);
if (core_kernel_text(a))
return;
/*
* Even though we hold a reference on the module, we still need to
* disable preemption in order to safely traverse the data structure.
*/
preempt_disable();
modaddr = __module_text_address(a);
BUG_ON(!modaddr);
module_put(modaddr);
preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
static ssize_t show_refcnt(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}
static struct module_attribute modinfo_refcnt =
__ATTR(refcnt, 0444, show_refcnt, NULL);
void __module_get(struct module *module)
{
if (module) {
atomic_inc(&module->refcnt);
trace_module_get(module, _RET_IP_);
}
}
EXPORT_SYMBOL(__module_get);
bool try_module_get(struct module *module)
{
bool ret = true;
if (module) {
/* Note: here, we can fail to get a reference */
if (likely(module_is_live(module) &&
atomic_inc_not_zero(&module->refcnt) != 0))
trace_module_get(module, _RET_IP_);
else
ret = false;
}
return ret;
}
EXPORT_SYMBOL(try_module_get);
void module_put(struct module *module)
{
int ret;
if (module) {
ret = atomic_dec_if_positive(&module->refcnt);
WARN_ON(ret < 0); /* Failed to put refcount */
trace_module_put(module, _RET_IP_);
}
}
EXPORT_SYMBOL(module_put);
#else /* !CONFIG_MODULE_UNLOAD */
static inline void module_unload_free(struct module *mod)
{
}
static int ref_module(struct module *a, struct module *b)
{
return strong_try_module_get(b);
}
static inline int module_unload_init(struct module *mod)
{
return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */
size_t module_flags_taint(unsigned long taints, char *buf)
{
size_t l = 0;
int i;
for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
if (taint_flags[i].module && test_bit(i, &taints))
buf[l++] = taint_flags[i].c_true;
}
return l;
}
static ssize_t show_initstate(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
const char *state = "unknown";
switch (mk->mod->state) {
case MODULE_STATE_LIVE:
state = "live";
break;
case MODULE_STATE_COMING:
state = "coming";
break;
case MODULE_STATE_GOING:
state = "going";
break;
default:
BUG();
}
return sprintf(buffer, "%s\n", state);
}
static struct module_attribute modinfo_initstate =
__ATTR(initstate, 0444, show_initstate, NULL);
static ssize_t store_uevent(struct module_attribute *mattr,
struct module_kobject *mk,
const char *buffer, size_t count)
{
int rc;
rc = kobject_synth_uevent(&mk->kobj, buffer, count);
return rc ? rc : count;
}
struct module_attribute module_uevent =
__ATTR(uevent, 0200, NULL, store_uevent);
static ssize_t show_coresize(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
unsigned int size = mk->mod->mem[MOD_TEXT].size;
if (!IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC)) {
for_class_mod_mem_type(type, core_data)
size += mk->mod->mem[type].size;
}
return sprintf(buffer, "%u\n", size);
}
static struct module_attribute modinfo_coresize =
__ATTR(coresize, 0444, show_coresize, NULL);
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
static ssize_t show_datasize(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
unsigned int size = 0;
for_class_mod_mem_type(type, core_data)
size += mk->mod->mem[type].size;
return sprintf(buffer, "%u\n", size);
}
static struct module_attribute modinfo_datasize =
__ATTR(datasize, 0444, show_datasize, NULL);
#endif
static ssize_t show_initsize(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
unsigned int size = 0;
for_class_mod_mem_type(type, init)
size += mk->mod->mem[type].size;
return sprintf(buffer, "%u\n", size);
}
static struct module_attribute modinfo_initsize =
__ATTR(initsize, 0444, show_initsize, NULL);
static ssize_t show_taint(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
size_t l;
l = module_flags_taint(mk->mod->taints, buffer);
buffer[l++] = '\n';
return l;
}
static struct module_attribute modinfo_taint =
__ATTR(taint, 0444, show_taint, NULL);
struct module_attribute *modinfo_attrs[] = {
&module_uevent,
&modinfo_version,
&modinfo_srcversion,
&modinfo_initstate,
&modinfo_coresize,
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
&modinfo_datasize,
#endif
&modinfo_initsize,
&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
&modinfo_refcnt,
#endif
NULL,
};
size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs);
static const char vermagic[] = VERMAGIC_STRING;
int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
if (!test_taint(TAINT_FORCED_MODULE))
pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
return 0;
#else
return -ENOEXEC;
#endif
}
/* Parse tag=value strings from .modinfo section */
char *module_next_tag_pair(char *string, unsigned long *secsize)
{
/* Skip non-zero chars */
while (string[0]) {
string++;
if ((*secsize)-- <= 1)
return NULL;
}
/* Skip any zero padding. */
while (!string[0]) {
string++;
if ((*secsize)-- <= 1)
return NULL;
}
return string;
}
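/*
 * The .modinfo section is a sequence of NUL-terminated "tag=value"
 * strings, possibly with zero padding in between, e.g. (illustrative):
 *
 *   "license=GPL\0author=Jane Doe\0vermagic=...\0"
 *
 * module_next_tag_pair() above steps from the current pair to the next.
 */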
static char *get_next_modinfo(const struct load_info *info, const char *tag,
char *prev)
{
char *p;
unsigned int taglen = strlen(tag);
Elf_Shdr *infosec = &info->sechdrs[info->index.info];
unsigned long size = infosec->sh_size;
/*
* get_modinfo() calls made before rewrite_section_headers()
* must use sh_offset, as sh_addr isn't set!
*/
char *modinfo = (char *)info->hdr + infosec->sh_offset;
if (prev) {
size -= prev - modinfo;
modinfo = module_next_tag_pair(prev, &size);
}
for (p = modinfo; p; p = module_next_tag_pair(p, &size)) {
if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
return p + taglen + 1;
}
return NULL;
}
static char *get_modinfo(const struct load_info *info, const char *tag)
{
return get_next_modinfo(info, tag, NULL);
}
static int verify_namespace_is_imported(const struct load_info *info,
const struct kernel_symbol *sym,
struct module *mod)
{
const char *namespace;
char *imported_namespace;
namespace = kernel_symbol_namespace(sym);
if (namespace && namespace[0]) {
for_each_modinfo_entry(imported_namespace, info, "import_ns") {
if (strcmp(namespace, imported_namespace) == 0)
return 0;
}
#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
pr_warn(
#else
pr_err(
#endif
"%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
mod->name, kernel_symbol_name(sym), namespace);
#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
return -EINVAL;
#endif
}
return 0;
}
static bool inherit_taint(struct module *mod, struct module *owner, const char *name)
{
if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
return true;
if (mod->using_gplonly_symbols) {
pr_err("%s: module using GPL-only symbols uses symbols %s from proprietary module %s.\n",
mod->name, name, owner->name);
return false;
}
if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
pr_warn("%s: module uses symbols %s from proprietary module %s, inheriting taint.\n",
mod->name, name, owner->name);
set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
}
return true;
}
/* Resolve a symbol for this module. I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
const struct load_info *info,
const char *name,
char ownername[])
{
struct find_symbol_arg fsa = {
.name = name,
.gplok = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)),
.warn = true,
};
int err;
/*
* The module_mutex should not be a heavily contended lock;
* if we get the occasional sleep here, we'll go an extra iteration
* in the wait_event_interruptible(), which is harmless.
*/
sched_annotate_sleep();
mutex_lock(&module_mutex);
if (!find_symbol(&fsa))
goto unlock;
if (fsa.license == GPL_ONLY)
mod->using_gplonly_symbols = true;
if (!inherit_taint(mod, fsa.owner, name)) {
fsa.sym = NULL;
goto getname;
}
if (!check_version(info, name, mod, fsa.crc)) {
fsa.sym = ERR_PTR(-EINVAL);
goto getname;
}
err = verify_namespace_is_imported(info, fsa.sym, mod);
if (err) {
fsa.sym = ERR_PTR(err);
goto getname;
}
err = ref_module(mod, fsa.owner);
if (err) {
fsa.sym = ERR_PTR(err);
goto getname;
}
getname:
/* We must make a copy under the lock if we failed to get a ref. */
strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
unlock:
mutex_unlock(&module_mutex);
return fsa.sym;
}
static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
const struct load_info *info,
const char *name)
{
const struct kernel_symbol *ksym;
char owner[MODULE_NAME_LEN];
if (wait_event_interruptible_timeout(module_wq,
!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
|| PTR_ERR(ksym) != -EBUSY,
30 * HZ) <= 0) {
pr_warn("%s: gave up waiting for init of module %s.\n",
mod->name, owner);
}
return ksym;
}
void __weak module_memfree(void *module_region)
{
/*
* This memory may be RO, and freeing RO memory in an interrupt is not
* supported by vmalloc.
*/
WARN_ON(in_interrupt());
vfree(module_region);
}
void __weak module_arch_cleanup(struct module *mod)
{
}
void __weak module_arch_freeing_init(struct module *mod)
{
}
static bool mod_mem_use_vmalloc(enum mod_mem_type type)
{
return IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC) &&
mod_mem_type_is_core_data(type);
}
static void *module_memory_alloc(unsigned int size, enum mod_mem_type type)
{
if (mod_mem_use_vmalloc(type))
return vzalloc(size);
return module_alloc(size);
}
static void module_memory_free(void *ptr, enum mod_mem_type type)
{
if (mod_mem_use_vmalloc(type))
vfree(ptr);
else
module_memfree(ptr);
}
static void free_mod_mem(struct module *mod)
{
for_each_mod_mem_type(type) {
struct module_memory *mod_mem = &mod->mem[type];
if (type == MOD_DATA)
continue;
/* Free lock-classes; relies on the preceding sync_rcu(). */
lockdep_free_key_range(mod_mem->base, mod_mem->size);
if (mod_mem->size)
module_memory_free(mod_mem->base, type);
}
/* MOD_DATA hosts mod, so free it at last */
lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
module_memory_free(mod->mem[MOD_DATA].base, MOD_DATA);
}
/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
trace_module_free(mod);
mod_sysfs_teardown(mod);
/*
* We leave it in the list to prevent duplicate loads, but make sure
* that no one uses it while it's being deconstructed.
*/
mutex_lock(&module_mutex);
mod->state = MODULE_STATE_UNFORMED;
mutex_unlock(&module_mutex);
/* Arch-specific cleanup. */
module_arch_cleanup(mod);
/* Module unload stuff */
module_unload_free(mod);
/* Free any allocated parameters. */
destroy_params(mod->kp, mod->num_kp);
if (is_livepatch_module(mod))
free_module_elf(mod);
/* Now we can delete it from the lists */
mutex_lock(&module_mutex);
/* Unlink carefully: kallsyms could be walking list. */
list_del_rcu(&mod->list);
mod_tree_remove(mod);
/* Remove this module from bug list, this uses list_del_rcu */
module_bug_cleanup(mod);
/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
synchronize_rcu();
if (try_add_tainted_module(mod))
pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n",
mod->name);
mutex_unlock(&module_mutex);
/* This may be empty, but that's OK */
module_arch_freeing_init(mod);
kfree(mod->args);
percpu_modfree(mod);
free_mod_mem(mod);
}
void *__symbol_get(const char *symbol)
{
struct find_symbol_arg fsa = {
.name = symbol,
.gplok = true,
.warn = true,
};
preempt_disable();
if (!find_symbol(&fsa))
goto fail;
if (fsa.license != GPL_ONLY) {
pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
symbol);
goto fail;
}
if (strong_try_module_get(fsa.owner))
goto fail;
preempt_enable();
return (void *)kernel_symbol_value(fsa.sym);
fail:
preempt_enable();
return NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
/*
* Ensure that an exported symbol [global namespace] does not already exist
* in the kernel or in some other module's exported symbol table.
*
* You must hold the module_mutex.
*/
static int verify_exported_symbols(struct module *mod)
{
unsigned int i;
const struct kernel_symbol *s;
struct {
const struct kernel_symbol *sym;
unsigned int num;
} arr[] = {
{ mod->syms, mod->num_syms },
{ mod->gpl_syms, mod->num_gpl_syms },
};
for (i = 0; i < ARRAY_SIZE(arr); i++) {
for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
struct find_symbol_arg fsa = {
.name = kernel_symbol_name(s),
.gplok = true,
};
if (find_symbol(&fsa)) {
pr_err("%s: exports duplicate symbol %s"
" (owned by %s)\n",
mod->name, kernel_symbol_name(s),
module_name(fsa.owner));
return -ENOEXEC;
}
}
}
return 0;
}
static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
{
/*
* On x86, PIC code and Clang non-PIC code may contain call foo@PLT. GNU as
* before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
* i386 has a similar problem but may not deserve a fix.
*
* If we ever have to ignore many symbols, consider refactoring the code to
* only warn if referenced by a relocation.
*/
if (emachine == EM_386 || emachine == EM_X86_64)
return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");
return false;
}
/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
Elf_Sym *sym = (void *)symsec->sh_addr;
unsigned long secbase;
unsigned int i;
int ret = 0;
const struct kernel_symbol *ksym;
for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
const char *name = info->strtab + sym[i].st_name;
switch (sym[i].st_shndx) {
case SHN_COMMON:
/* Ignore common symbols */
if (!strncmp(name, "__gnu_lto", 9))
break;
/*
* We compiled with -fno-common. These are not
* supposed to happen.
*/
pr_debug("Common symbol: %s\n", name);
pr_warn("%s: please compile with -fno-common\n",
mod->name);
ret = -ENOEXEC;
break;
case SHN_ABS:
/* Don't need to do anything */
pr_debug("Absolute symbol: 0x%08lx %s\n",
(long)sym[i].st_value, name);
break;
case SHN_LIVEPATCH:
/* Livepatch symbols are resolved by livepatch */
break;
case SHN_UNDEF:
ksym = resolve_symbol_wait(mod, info, name);
/* Ok if resolved. */
if (ksym && !IS_ERR(ksym)) {
sym[i].st_value = kernel_symbol_value(ksym);
break;
}
/* Ok if weak or ignored. */
if (!ksym &&
(ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
ignore_undef_symbol(info->hdr->e_machine, name)))
break;
ret = PTR_ERR(ksym) ?: -ENOENT;
pr_warn("%s: Unknown symbol %s (err %d)\n",
mod->name, name, ret);
break;
default:
/* Divert to percpu allocation if a percpu var. */
if (sym[i].st_shndx == info->index.pcpu)
secbase = (unsigned long)mod_percpu(mod);
else
secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
sym[i].st_value += secbase;
break;
}
}
return ret;
}
static int apply_relocations(struct module *mod, const struct load_info *info)
{
unsigned int i;
int err = 0;
/* Now do relocations. */
for (i = 1; i < info->hdr->e_shnum; i++) {
unsigned int infosec = info->sechdrs[i].sh_info;
/* Not a valid relocation section? */
if (infosec >= info->hdr->e_shnum)
continue;
/* Don't bother with non-allocated sections */
if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
continue;
if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
err = klp_apply_section_relocs(mod, info->sechdrs,
info->secstrings,
info->strtab,
info->index.sym, i,
NULL);
else if (info->sechdrs[i].sh_type == SHT_REL)
err = apply_relocate(info->sechdrs, info->strtab,
info->index.sym, i, mod);
else if (info->sechdrs[i].sh_type == SHT_RELA)
err = apply_relocate_add(info->sechdrs, info->strtab,
info->index.sym, i, mod);
if (err < 0)
break;
}
return err;
}
/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
unsigned int section)
{
/* default implementation just returns zero */
return 0;
}
long module_get_offset_and_type(struct module *mod, enum mod_mem_type type,
Elf_Shdr *sechdr, unsigned int section)
{
long offset;
long mask = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT;
mod->mem[type].size += arch_mod_section_prepend(mod, section);
offset = ALIGN(mod->mem[type].size, sechdr->sh_addralign ?: 1);
mod->mem[type].size = offset + sechdr->sh_size;
WARN_ON_ONCE(offset & mask);
return offset | mask;
}
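/*
 * The returned value packs the mod_mem_type into the high bits (via
 * SH_ENTSIZE_TYPE_MASK/SH_ENTSIZE_TYPE_SHIFT) and the byte offset within
 * that type's allocation into the low bits; layout_sections() stores it
 * in sh_entsize, and both halves are decoded later when the sections are
 * copied into their final allocations.
 */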
bool module_init_layout_section(const char *sname)
{
#ifndef CONFIG_MODULE_UNLOAD
if (module_exit_section(sname))
return true;
#endif
return module_init_section(sname);
}
static void __layout_sections(struct module *mod, struct load_info *info, bool is_init)
{
unsigned int m, i;
static const unsigned long masks[][2] = {
/*
* NOTE: all executable code must be the first section
* in this array; otherwise modify the text_size
* finder in the two loops below
*/
{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
};
static const int core_m_to_mem_type[] = {
MOD_TEXT,
MOD_RODATA,
MOD_RO_AFTER_INIT,
MOD_DATA,
MOD_DATA,
};
static const int init_m_to_mem_type[] = {
MOD_INIT_TEXT,
MOD_INIT_RODATA,
MOD_INVALID,
MOD_INIT_DATA,
MOD_INIT_DATA,
};
for (m = 0; m < ARRAY_SIZE(masks); ++m) {
enum mod_mem_type type = is_init ? init_m_to_mem_type[m] : core_m_to_mem_type[m];
for (i = 0; i < info->hdr->e_shnum; ++i) {
Elf_Shdr *s = &info->sechdrs[i];
const char *sname = info->secstrings + s->sh_name;
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
|| is_init != module_init_layout_section(sname))
continue;
if (WARN_ON_ONCE(type == MOD_INVALID))
continue;
s->sh_entsize = module_get_offset_and_type(mod, type, s, i);
pr_debug("\t%s\n", sname);
}
}
}
/*
* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
* might -- code, read-only data, read-write data, small data. Tally
* sizes, and place the offsets into sh_entsize fields: high bit means it
* belongs in init.
*/
static void layout_sections(struct module *mod, struct load_info *info)
{
unsigned int i;
for (i = 0; i < info->hdr->e_shnum; i++)
info->sechdrs[i].sh_entsize = ~0UL;
pr_debug("Core section allocation order for %s:\n", mod->name);
__layout_sections(mod, info, false);
pr_debug("Init section allocation order for %s:\n", mod->name);
__layout_sections(mod, info, true);
}
static void module_license_taint_check(struct module *mod, const char *license)
{
if (!license)
license = "unspecified";
if (!license_is_gpl_compatible(license)) {
if (!test_taint(TAINT_PROPRIETARY_MODULE))
pr_warn("%s: module license '%s' taints kernel.\n",
mod->name, license);
add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
LOCKDEP_NOW_UNRELIABLE);
}
}
static void setup_modinfo(struct module *mod, struct load_info *info)
{
struct module_attribute *attr;
int i;
for (i = 0; (attr = modinfo_attrs[i]); i++) {
if (attr->setup)
attr->setup(mod, get_modinfo(info, attr->attr.name));
}
}
static void free_modinfo(struct module *mod)
{
struct module_attribute *attr;
int i;
for (i = 0; (attr = modinfo_attrs[i]); i++) {
if (attr->free)
attr->free(mod);
}
}
void * __weak module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
NUMA_NO_NODE, __builtin_return_address(0));
}
bool __weak module_init_section(const char *name)
{
return strstarts(name, ".init");
}
bool __weak module_exit_section(const char *name)
{
return strstarts(name, ".exit");
}
static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
{
#if defined(CONFIG_64BIT)
unsigned long long secend;
#else
unsigned long secend;
#endif
/*
* Check for both overflow and offset/size being
* too large.
*/
secend = shdr->sh_offset + shdr->sh_size;
if (secend < shdr->sh_offset || secend > info->len)
return -ENOEXEC;
return 0;
}
/*
* Check the userspace-passed ELF module against our expectations, and cache
* useful variables for further processing as we go.
*
* This does basic validity checks against section offsets and sizes, the
* section name string table, and the indices used for it (sh_name).
*
* Along the way, since we're already checking the ELF sections, we cache
* useful variables which will be used later for our convenience:
*
* o pointers to section headers
* o cache the modinfo symbol section
* o cache the string symbol section
* o cache the module section
*
* As a last step we set info->mod to the temporary copy of the module in
* info->hdr. The final one will be allocated in move_module(). Any
* modifications we make to our copy of the module will be carried over
* to the final minted module.
*/
static int elf_validity_cache_copy(struct load_info *info, int flags)
{
unsigned int i;
Elf_Shdr *shdr, *strhdr;
int err;
unsigned int num_mod_secs = 0, mod_idx;
unsigned int num_info_secs = 0, info_idx;
unsigned int num_sym_secs = 0, sym_idx;
if (info->len < sizeof(*(info->hdr))) {
pr_err("Invalid ELF header len %lu\n", info->len);
goto no_exec;
}
if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) {
pr_err("Invalid ELF header magic: != %s\n", ELFMAG);
goto no_exec;
}
if (info->hdr->e_type != ET_REL) {
pr_err("Invalid ELF header type: %u != %u\n",
info->hdr->e_type, ET_REL);
goto no_exec;
}
if (!elf_check_arch(info->hdr)) {
pr_err("Invalid architecture in ELF header: %u\n",
info->hdr->e_machine);
goto no_exec;
}
if (!module_elf_check_arch(info->hdr)) {
pr_err("Invalid module architecture in ELF header: %u\n",
info->hdr->e_machine);
goto no_exec;
}
if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) {
pr_err("Invalid ELF section header size\n");
goto no_exec;
}
/*
* e_shnum is 16 bits, and sizeof(Elf_Shdr) is
* known and small. So e_shnum * sizeof(Elf_Shdr)
* will not overflow unsigned long on any platform.
*/
if (info->hdr->e_shoff >= info->len
|| (info->hdr->e_shnum * sizeof(Elf_Shdr) >
info->len - info->hdr->e_shoff)) {
pr_err("Invalid ELF section header overflow\n");
goto no_exec;
}
info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
/*
* Verify if the section name table index is valid.
*/
if (info->hdr->e_shstrndx == SHN_UNDEF
|| info->hdr->e_shstrndx >= info->hdr->e_shnum) {
pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n",
info->hdr->e_shstrndx, info->hdr->e_shstrndx,
info->hdr->e_shnum);
goto no_exec;
}
strhdr = &info->sechdrs[info->hdr->e_shstrndx];
err = validate_section_offset(info, strhdr);
if (err < 0) {
pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type);
return err;
}
/*
* The section name table must be NUL-terminated, as required
* by the spec. This makes strcmp and pr_* calls that access
* strings in the section safe.
*/
info->secstrings = (void *)info->hdr + strhdr->sh_offset;
if (strhdr->sh_size == 0) {
pr_err("empty section name table\n");
goto no_exec;
}
if (info->secstrings[strhdr->sh_size - 1] != '\0') {
pr_err("ELF Spec violation: section name table isn't null terminated\n");
goto no_exec;
}
/*
* The code assumes that section 0 has a length of zero and
* an addr of zero, so check for it.
*/
if (info->sechdrs[0].sh_type != SHT_NULL
|| info->sechdrs[0].sh_size != 0
|| info->sechdrs[0].sh_addr != 0) {
pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n",
info->sechdrs[0].sh_type);
goto no_exec;
}
for (i = 1; i < info->hdr->e_shnum; i++) {
shdr = &info->sechdrs[i];
switch (shdr->sh_type) {
case SHT_NULL:
case SHT_NOBITS:
continue;
case SHT_SYMTAB:
if (shdr->sh_link == SHN_UNDEF
|| shdr->sh_link >= info->hdr->e_shnum) {
pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n",
shdr->sh_link, shdr->sh_link,
info->hdr->e_shnum);
goto no_exec;
}
num_sym_secs++;
sym_idx = i;
fallthrough;
default:
err = validate_section_offset(info, shdr);
if (err < 0) {
pr_err("Invalid ELF section in module (section %u type %u)\n",
i, shdr->sh_type);
return err;
}
if (strcmp(info->secstrings + shdr->sh_name,
".gnu.linkonce.this_module") == 0) {
num_mod_secs++;
mod_idx = i;
} else if (strcmp(info->secstrings + shdr->sh_name,
".modinfo") == 0) {
num_info_secs++;
info_idx = i;
}
if (shdr->sh_flags & SHF_ALLOC) {
if (shdr->sh_name >= strhdr->sh_size) {
pr_err("Invalid ELF section name in module (section %u type %u)\n",
i, shdr->sh_type);
return -ENOEXEC;
}
}
break;
}
}
if (num_info_secs > 1) {
pr_err("Only one .modinfo section must exist.\n");
goto no_exec;
} else if (num_info_secs == 1) {
/* Try to find a name early so we can log errors with a module name */
info->index.info = info_idx;
info->name = get_modinfo(info, "name");
}
if (num_sym_secs != 1) {
pr_warn("%s: module has no symbols (stripped?)\n",
info->name ?: "(missing .modinfo section or name field)");
goto no_exec;
}
/* Sets internal symbols and strings. */
info->index.sym = sym_idx;
shdr = &info->sechdrs[sym_idx];
info->index.str = shdr->sh_link;
info->strtab = (char *)info->hdr + info->sechdrs[info->index.str].sh_offset;
/*
* The ".gnu.linkonce.this_module" ELF section is special. It is
* what modpost uses to refer to __this_module and lets us rely
* on THIS_MODULE to point to &__this_module properly. The kernel's
* modpost declares it in each module's *.mod.c file. If the struct
* module of the kernel changes, a full kernel rebuild is required.
*
* We have a few expectations for this special section; the following
* code validates all this for us:
*
* o Only one section must exist
* o We expect the kernel to always have to allocate it: SHF_ALLOC
* o The section size must match the kernel's run time's struct module
* size
*/
if (num_mod_secs != 1) {
pr_err("module %s: Only one .gnu.linkonce.this_module section must exist.\n",
info->name ?: "(missing .modinfo section or name field)");
goto no_exec;
}
shdr = &info->sechdrs[mod_idx];
/*
* This is already implied by the switch above, however let's be
* pedantic about it.
*/
if (shdr->sh_type == SHT_NOBITS) {
pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n",
info->name ?: "(missing .modinfo section or name field)");
goto no_exec;
}
if (!(shdr->sh_flags & SHF_ALLOC)) {
pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n",
info->name ?: "(missing .modinfo section or name field)");
goto no_exec;
}
if (shdr->sh_size != sizeof(struct module)) {
pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n",
info->name ?: "(missing .modinfo section or name field)");
goto no_exec;
}
info->index.mod = mod_idx;
/* This is temporary: point mod into copy of data. */
info->mod = (void *)info->hdr + shdr->sh_offset;
/*
* If we didn't load the .modinfo 'name' field earlier, fall back to
* on-disk struct mod 'name' field.
*/
if (!info->name)
info->name = info->mod->name;
if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
info->index.vers = 0; /* Pretend no __versions section! */
else
info->index.vers = find_sec(info, "__versions");
info->index.pcpu = find_pcpusec(info);
return 0;
no_exec:
return -ENOEXEC;
}
#define COPY_CHUNK_SIZE (16*PAGE_SIZE)
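/*
 * Copy the user buffer in COPY_CHUNK_SIZE pieces with a cond_resched()
 * between chunks, so that pulling in a large module image does not hog
 * the CPU in one long uninterrupted copy_from_user().
 */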
static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
{
do {
unsigned long n = min(len, COPY_CHUNK_SIZE);
if (copy_from_user(dst, usrc, n) != 0)
return -EFAULT;
cond_resched();
dst += n;
usrc += n;
len -= n;
} while (len);
return 0;
}
static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
{
if (!get_modinfo(info, "livepatch"))
/* Nothing more to do */
return 0;
if (set_livepatch_module(mod))
return 0;
pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
mod->name);
return -ENOEXEC;
}
static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
{
if (retpoline_module_ok(get_modinfo(info, "retpoline")))
return;
pr_warn("%s: loading module not compiled with retpoline compiler.\n",
mod->name);
}
/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
struct load_info *info)
{
int err;
info->len = len;
if (info->len < sizeof(*(info->hdr)))
return -ENOEXEC;
err = security_kernel_load_data(LOADING_MODULE, true);
if (err)
return err;
/* Suck in entire file: we'll want most of it. */
info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);
if (!info->hdr)
return -ENOMEM;
if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
err = -EFAULT;
goto out;
}
err = security_kernel_post_load_data((char *)info->hdr, info->len,
LOADING_MODULE, "init_module");
out:
if (err)
vfree(info->hdr);
return err;
}
static void free_copy(struct load_info *info, int flags)
{
if (flags & MODULE_INIT_COMPRESSED_FILE)
module_decompress_cleanup(info);
else
vfree(info->hdr);
}
static int rewrite_section_headers(struct load_info *info, int flags)
{
unsigned int i;
/* This should always be true, but let's be sure. */
info->sechdrs[0].sh_addr = 0;
for (i = 1; i < info->hdr->e_shnum; i++) {
Elf_Shdr *shdr = &info->sechdrs[i];
/*
* Mark all sections sh_addr with their address in the
* temporary image.
*/
shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
}
/* Track but don't keep modinfo and version sections. */
info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
return 0;
}
/*
 * These calls taint the kernel depending on certain module circumstances.
 */
static void module_augment_kernel_taints(struct module *mod, struct load_info *info)
{
int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
if (!get_modinfo(info, "intree")) {
if (!test_taint(TAINT_OOT_MODULE))
pr_warn("%s: loading out-of-tree module taints kernel.\n",
mod->name);
add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
}
check_modinfo_retpoline(mod, info);
if (get_modinfo(info, "staging")) {
add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
pr_warn("%s: module is from the staging directory, the quality "
"is unknown, you have been warned.\n", mod->name);
}
if (is_livepatch_module(mod)) {
add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
mod->name);
}
module_license_taint_check(mod, get_modinfo(info, "license"));
if (get_modinfo(info, "test")) {
if (!test_taint(TAINT_TEST))
pr_warn("%s: loading test module taints kernel.\n",
mod->name);
add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK);
}
#ifdef CONFIG_MODULE_SIG
mod->sig_ok = info->sig_ok;
if (!mod->sig_ok) {
pr_notice_once("%s: module verification failed: signature "
"and/or required key missing - tainting "
"kernel\n", mod->name);
add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
}
#endif
/*
* ndiswrapper is under GPL by itself, but loads proprietary modules.
* Don't use add_taint_module(), as it would prevent ndiswrapper from
* using GPL-only symbols it needs.
*/
if (strcmp(mod->name, "ndiswrapper") == 0)
add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
/* driverloader was caught wrongly pretending to be under GPL */
if (strcmp(mod->name, "driverloader") == 0)
add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
LOCKDEP_NOW_UNRELIABLE);
/* lve claims to be GPL but upstream won't provide source */
if (strcmp(mod->name, "lve") == 0)
add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
LOCKDEP_NOW_UNRELIABLE);
if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
pr_warn("%s: module license taints kernel.\n", mod->name);
}
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
const char *modmagic = get_modinfo(info, "vermagic");
int err;
if (flags & MODULE_INIT_IGNORE_VERMAGIC)
modmagic = NULL;
/* This is allowed: modprobe --force will invalidate it. */
if (!modmagic) {
err = try_to_force_load(mod, "bad vermagic");
if (err)
return err;
} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
pr_err("%s: version magic '%s' should be '%s'\n",
info->name, modmagic, vermagic);
return -ENOEXEC;
}
err = check_modinfo_livepatch(mod, info);
if (err)
return err;
return 0;
}
static int find_module_sections(struct module *mod, struct load_info *info)
{
mod->kp = section_objs(info, "__param",
sizeof(*mod->kp), &mod->num_kp);
mod->syms = section_objs(info, "__ksymtab",
sizeof(*mod->syms), &mod->num_syms);
mod->crcs = section_addr(info, "__kcrctab");
mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
sizeof(*mod->gpl_syms),
&mod->num_gpl_syms);
mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
#ifdef CONFIG_CONSTRUCTORS
mod->ctors = section_objs(info, ".ctors",
sizeof(*mod->ctors), &mod->num_ctors);
if (!mod->ctors)
mod->ctors = section_objs(info, ".init_array",
sizeof(*mod->ctors), &mod->num_ctors);
else if (find_sec(info, ".init_array")) {
/*
 * This shouldn't happen with the same compiler and binutils
 * building all parts of the module.
 */
pr_warn("%s: has both .ctors and .init_array.\n",
mod->name);
return -EINVAL;
}
#endif
mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1,
&mod->noinstr_text_size);
#ifdef CONFIG_TRACEPOINTS
mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
sizeof(*mod->tracepoints_ptrs),
&mod->num_tracepoints);
#endif
#ifdef CONFIG_TREE_SRCU
mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs",
sizeof(*mod->srcu_struct_ptrs),
&mod->num_srcu_structs);
#endif
#ifdef CONFIG_BPF_EVENTS
mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
sizeof(*mod->bpf_raw_events),
&mod->num_bpf_raw_events);
#endif
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size);
#endif
#ifdef CONFIG_JUMP_LABEL
mod->jump_entries = section_objs(info, "__jump_table",
sizeof(*mod->jump_entries),
&mod->num_jump_entries);
#endif
#ifdef CONFIG_EVENT_TRACING
mod->trace_events = section_objs(info, "_ftrace_events",
sizeof(*mod->trace_events),
&mod->num_trace_events);
mod->trace_evals = section_objs(info, "_ftrace_eval_map",
sizeof(*mod->trace_evals),
&mod->num_trace_evals);
#endif
#ifdef CONFIG_TRACING
mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
sizeof(*mod->trace_bprintk_fmt_start),
&mod->num_trace_bprintk_fmt);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/* sechdrs[0].sh_size is always zero */
mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION,
sizeof(*mod->ftrace_callsites),
&mod->num_ftrace_callsites);
#endif
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
sizeof(*mod->ei_funcs),
&mod->num_ei_funcs);
#endif
#ifdef CONFIG_KPROBES
mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1,
&mod->kprobes_text_size);
mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist",
sizeof(unsigned long),
&mod->num_kprobe_blacklist);
#endif
#ifdef CONFIG_PRINTK_INDEX
mod->printk_index_start = section_objs(info, ".printk_index",
sizeof(*mod->printk_index_start),
&mod->printk_index_size);
#endif
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
mod->static_call_sites = section_objs(info, ".static_call_sites",
sizeof(*mod->static_call_sites),
&mod->num_static_call_sites);
#endif
#if IS_ENABLED(CONFIG_KUNIT)
mod->kunit_suites = section_objs(info, ".kunit_test_suites",
sizeof(*mod->kunit_suites),
&mod->num_kunit_suites);
#endif
mod->extable = section_objs(info, "__ex_table",
sizeof(*mod->extable), &mod->num_exentries);
if (section_addr(info, "__obsparm"))
pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
#ifdef CONFIG_DYNAMIC_DEBUG_CORE
mod->dyndbg_info.descs = section_objs(info, "__dyndbg",
sizeof(*mod->dyndbg_info.descs),
&mod->dyndbg_info.num_descs);
mod->dyndbg_info.classes = section_objs(info, "__dyndbg_classes",
sizeof(*mod->dyndbg_info.classes),
&mod->dyndbg_info.num_classes);
#endif
return 0;
}
static int move_module(struct module *mod, struct load_info *info)
{
int i;
void *ptr;
enum mod_mem_type t = 0;
int ret = -ENOMEM;
for_each_mod_mem_type(type) {
if (!mod->mem[type].size) {
mod->mem[type].base = NULL;
continue;
}
mod->mem[type].size = PAGE_ALIGN(mod->mem[type].size);
ptr = module_memory_alloc(mod->mem[type].size, type);
/*
 * The pointers to these blocks of memory are stored on the module
 * structure and we keep them around for as long as the module is
 * around. We only free that memory when we unload the module. So
 * just mark them as not being a leak. The .init* ELF sections *do*
 * get freed after boot, so we *could* treat them slightly
 * differently with kmemleak_ignore() and only grey them out, as
 * they work like typical memory allocations which *do* eventually
 * get freed, but let's just keep things simple and avoid *any*
 * false positives.
 */
kmemleak_not_leak(ptr);
if (!ptr) {
t = type;
goto out_enomem;
}
memset(ptr, 0, mod->mem[type].size);
mod->mem[type].base = ptr;
}
/* Transfer each section which specifies SHF_ALLOC */
pr_debug("Final section addresses for %s:\n", mod->name);
for (i = 0; i < info->hdr->e_shnum; i++) {
void *dest;
Elf_Shdr *shdr = &info->sechdrs[i];
enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT;
if (!(shdr->sh_flags & SHF_ALLOC))
continue;
dest = mod->mem[type].base + (shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK);
if (shdr->sh_type != SHT_NOBITS) {
/*
* Our ELF checker already validated this, but let's
* be pedantic and make the goal clearer. We actually
* end up copying over all modifications made to the
* userspace copy of the entire struct module.
*/
if (i == info->index.mod &&
(WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) {
ret = -ENOEXEC;
goto out_enomem;
}
memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
}
/*
 * Update the userspace copy's ELF section address to point to
 * our newly allocated memory as a pure convenience, so that
 * users of info can keep using the newly minted official
 * memory area.
 */
shdr->sh_addr = (unsigned long)dest;
pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr,
(long)shdr->sh_size, info->secstrings + shdr->sh_name);
}
return 0;
out_enomem:
for (t--; t >= 0; t--)
module_memory_free(mod->mem[t].base, t);
return ret;
}
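/*
 * Worked example: layout_sections() encodes each section's
 * destination in sh_entsize, with the memory type in the bits above
 * SH_ENTSIZE_TYPE_SHIFT and the offset under SH_ENTSIZE_OFFSET_MASK.
 * A module's .rodata section thus resolves above as
 * dest = mod->mem[MOD_RODATA].base + offset, and its sh_addr is then
 * rewritten to that final location.
 */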
static int check_export_symbol_versions(struct module *mod)
{
#ifdef CONFIG_MODVERSIONS
if ((mod->num_syms && !mod->crcs) ||
(mod->num_gpl_syms && !mod->gpl_crcs)) {
return try_to_force_load(mod,
"no versions for exported symbols");
}
#endif
return 0;
}
static void flush_module_icache(const struct module *mod)
{
/*
* Flush the instruction cache, since we've played with text.
* Do it before processing of module parameters, so the module
* can provide parameter accessor functions of its own.
*/
for_each_mod_mem_type(type) {
const struct module_memory *mod_mem = &mod->mem[type];
if (mod_mem->size) {
flush_icache_range((unsigned long)mod_mem->base,
(unsigned long)mod_mem->base + mod_mem->size);
}
}
}
bool __weak module_elf_check_arch(Elf_Ehdr *hdr)
{
return true;
}
int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
char *secstrings,
struct module *mod)
{
return 0;
}
/* module_blacklist is a comma-separated list of module names */
static char *module_blacklist;
static bool blacklisted(const char *module_name)
{
const char *p;
size_t len;
if (!module_blacklist)
return false;
for (p = module_blacklist; *p; p += len) {
len = strcspn(p, ",");
if (strlen(module_name) == len && !memcmp(module_name, p, len))
return true;
if (p[len] == ',')
len++;
}
return false;
}
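/*
 * Worked example (a sketch, not built): booting with
 * "module_blacklist=nouveau,pcspkr" makes blacklisted("pcspkr") skip
 * past "nouveau" (strcspn() finds the comma at offset 7 and the
 * trailing ',' bumps len to 8) and then match "pcspkr" exactly by
 * length and content:
 */
#if 0 /* illustrative only, never compiled */
BUG_ON(!blacklisted("nouveau"));
BUG_ON(!blacklisted("pcspkr"));
BUG_ON(blacklisted("nouveau_drm")); /* a prefix alone never matches */
#endif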
core_param(module_blacklist, module_blacklist, charp, 0400);
static struct module *layout_and_allocate(struct load_info *info, int flags)
{
struct module *mod;
unsigned int ndx;
int err;
/* Allow arches to frob section contents and sizes. */
err = module_frob_arch_sections(info->hdr, info->sechdrs,
info->secstrings, info->mod);
if (err < 0)
return ERR_PTR(err);
err = module_enforce_rwx_sections(info->hdr, info->sechdrs,
info->secstrings, info->mod);
if (err < 0)
return ERR_PTR(err);
/* We will do a special allocation for per-cpu sections later. */
info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
/*
* Mark ro_after_init section with SHF_RO_AFTER_INIT so that
* layout_sections() can put it in the right place.
* Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
*/
ndx = find_sec(info, ".data..ro_after_init");
if (ndx)
info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
/*
* Mark the __jump_table section as ro_after_init as well: these data
* structures are never modified, with the exception of entries that
* refer to code in the __init section, which are annotated as such
* at module load time.
*/
ndx = find_sec(info, "__jump_table");
if (ndx)
info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
/*
* Determine total sizes, and put offsets in sh_entsize. For now
* this is done generically; there doesn't appear to be any
* special cases for the architectures.
*/
layout_sections(info->mod, info);
layout_symtab(info->mod, info);
/* Allocate and move to the final place */
err = move_module(info->mod, info);
if (err)
return ERR_PTR(err);
/* Module has been copied to its final place now: return it. */
mod = (void *)info->sechdrs[info->index.mod].sh_addr;
kmemleak_load_module(mod, info);
return mod;
}
/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
percpu_modfree(mod);
module_arch_freeing_init(mod);
free_mod_mem(mod);
}
int __weak module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
return 0;
}
static int post_relocation(struct module *mod, const struct load_info *info)
{
/* Sort exception table now relocations are done. */
sort_extable(mod->extable, mod->extable + mod->num_exentries);
/* Copy relocated percpu area over. */
percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
info->sechdrs[info->index.pcpu].sh_size);
/* Setup kallsyms-specific fields. */
add_kallsyms(mod, info);
/* Arch-specific module finalizing. */
return module_finalize(info->hdr, info->sechdrs, mod);
}
/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
unsigned long i;
for (i = 0; i < mod->num_ctors; i++)
mod->ctors[i]();
#endif
}
/* For freeing module_init on success, in case kallsyms is traversing it */
struct mod_initfree {
struct llist_node node;
void *init_text;
void *init_data;
void *init_rodata;
};
static void do_free_init(struct work_struct *w)
{
struct llist_node *pos, *n, *list;
struct mod_initfree *initfree;
list = llist_del_all(&init_free_list);
synchronize_rcu();
llist_for_each_safe(pos, n, list) {
initfree = container_of(pos, struct mod_initfree, node);
module_memfree(initfree->init_text);
module_memfree(initfree->init_data);
module_memfree(initfree->init_rodata);
kfree(initfree);
}
}
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "module."
/* Default value for module->async_probe_requested */
static bool async_probe;
module_param(async_probe, bool, 0644);
/*
* This is where the real work happens.
*
* Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
* helper command 'lx-symbols'.
*/
static noinline int do_init_module(struct module *mod)
{
int ret = 0;
struct mod_initfree *freeinit;
#if defined(CONFIG_MODULE_STATS)
unsigned int text_size = 0, total_size = 0;
for_each_mod_mem_type(type) {
const struct module_memory *mod_mem = &mod->mem[type];
if (mod_mem->size) {
total_size += mod_mem->size;
if (type == MOD_TEXT || type == MOD_INIT_TEXT)
text_size += mod_mem->size;
}
}
#endif
freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
if (!freeinit) {
ret = -ENOMEM;
goto fail;
}
freeinit->init_text = mod->mem[MOD_INIT_TEXT].base;
freeinit->init_data = mod->mem[MOD_INIT_DATA].base;
freeinit->init_rodata = mod->mem[MOD_INIT_RODATA].base;
do_mod_ctors(mod);
/* Start the module */
if (mod->init != NULL)
ret = do_one_initcall(mod->init);
if (ret < 0) {
goto fail_free_freeinit;
}
if (ret > 0) {
pr_warn("%s: '%s'->init suspiciously returned %d, it should "
"follow 0/-E convention\n"
"%s: loading module anyway...\n",
__func__, mod->name, ret, __func__);
dump_stack();
}
/* Now it's a first class citizen! */
mod->state = MODULE_STATE_LIVE;
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_LIVE, mod);
/* Delay uevent until module has finished its init routine */
kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
/*
* We need to finish all async code before the module init sequence
* is done. This has potential to deadlock if synchronous module
* loading is requested from async (which is not allowed!).
*
* See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
* request_module() from async workers") for more details.
*/
if (!mod->async_probe_requested)
async_synchronize_full();
ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base,
mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size);
mutex_lock(&module_mutex);
/* Drop initial reference. */
module_put(mod);
trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
/* Switch to core kallsyms now init is done: kallsyms may be walking! */
rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
module_enable_ro(mod, true);
mod_tree_remove_init(mod);
module_arch_freeing_init(mod);
for_class_mod_mem_type(type, init) {
mod->mem[type].base = NULL;
mod->mem[type].size = 0;
}
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
/* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */
mod->btf_data = NULL;
#endif
/*
* We want to free module_init, but be aware that kallsyms may be
* walking this with preempt disabled. In all the failure paths, we
* call synchronize_rcu(), but we don't want to slow down the success
* path. module_memfree() cannot be called in an interrupt, so do the
* work and call synchronize_rcu() in a work queue.
*
* Note that module_alloc() on most architectures creates W+X page
* mappings which won't be cleaned up until do_free_init() runs. Any
 * code such as mark_rodata_ro() which depends on those mappings
 * being cleaned up needs to sync with the queued work, i.e. via
 * rcu_barrier().
*/
if (llist_add(&freeinit->node, &init_free_list))
schedule_work(&init_free_wq);
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
mod_stat_add_long(text_size, &total_text_size);
mod_stat_add_long(total_size, &total_mod_size);
mod_stat_inc(&modcount);
return 0;
fail_free_freeinit:
kfree(freeinit);
fail:
/* Try to protect us from buggy refcounters. */
mod->state = MODULE_STATE_GOING;
synchronize_rcu();
module_put(mod);
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_GOING, mod);
klp_module_going(mod);
ftrace_release_mod(mod);
free_module(mod);
wake_up_all(&module_wq);
return ret;
}
static int may_init_module(void)
{
if (!capable(CAP_SYS_MODULE) || modules_disabled)
return -EPERM;
return 0;
}
/* Is this module of this name done loading? No locks held. */
static bool finished_loading(const char *name)
{
struct module *mod;
bool ret;
/*
* The module_mutex should not be a heavily contended lock;
* if we get the occasional sleep here, we'll go an extra iteration
* in the wait_event_interruptible(), which is harmless.
*/
sched_annotate_sleep();
mutex_lock(&module_mutex);
mod = find_module_all(name, strlen(name), true);
ret = !mod || mod->state == MODULE_STATE_LIVE
|| mod->state == MODULE_STATE_GOING;
mutex_unlock(&module_mutex);
return ret;
}
/* Must be called with module_mutex held */
static int module_patient_check_exists(const char *name,
enum fail_dup_mod_reason reason)
{
struct module *old;
int err = 0;
old = find_module_all(name, strlen(name), true);
if (old == NULL)
return 0;
if (old->state == MODULE_STATE_COMING ||
old->state == MODULE_STATE_UNFORMED) {
/* Wait in case it fails to load. */
mutex_unlock(&module_mutex);
err = wait_event_interruptible(module_wq,
finished_loading(name));
mutex_lock(&module_mutex);
if (err)
return err;
/* The module might have gone in the meantime. */
old = find_module_all(name, strlen(name), true);
}
if (try_add_failed_module(name, reason))
pr_warn("Could not add fail-tracking for module: %s\n", name);
/*
 * We only get here when the same module was being loaded. Do not
 * try to load it again right now. This prevents long delays caused
 * by serialized module load failures, which can happen when several
 * devices of the same type trigger loads of the same module.
 */
if (old && old->state == MODULE_STATE_LIVE)
return -EEXIST;
return -EBUSY;
}
/*
 * We try to place it in the list now to make sure it's unique before
 * we dedicate too many resources. In particular, this avoids
 * temporary percpu memory exhaustion.
 */
static int add_unformed_module(struct module *mod)
{
int err;
mod->state = MODULE_STATE_UNFORMED;
mutex_lock(&module_mutex);
err = module_patient_check_exists(mod->name, FAIL_DUP_MOD_LOAD);
if (err)
goto out;
mod_update_bounds(mod);
list_add_rcu(&mod->list, &modules);
mod_tree_insert(mod);
err = 0;
out:
mutex_unlock(&module_mutex);
return err;
}
static int complete_formation(struct module *mod, struct load_info *info)
{
int err;
mutex_lock(&module_mutex);
/* Find duplicate symbols (must be called under lock). */
err = verify_exported_symbols(mod);
if (err < 0)
goto out;
/* These rely on module_mutex for list integrity. */
module_bug_finalize(info->hdr, info->sechdrs, mod);
module_cfi_finalize(info->hdr, info->sechdrs, mod);
module_enable_ro(mod, false);
module_enable_nx(mod);
module_enable_x(mod);
/*
* Mark state as coming so strong_try_module_get() ignores us,
* but kallsyms etc. can see us.
*/
mod->state = MODULE_STATE_COMING;
mutex_unlock(&module_mutex);
return 0;
out:
mutex_unlock(&module_mutex);
return err;
}
static int prepare_coming_module(struct module *mod)
{
int err;
ftrace_module_enable(mod);
err = klp_module_coming(mod);
if (err)
return err;
err = blocking_notifier_call_chain_robust(&module_notify_list,
MODULE_STATE_COMING, MODULE_STATE_GOING, mod);
err = notifier_to_errno(err);
if (err)
klp_module_going(mod);
return err;
}
static int unknown_module_param_cb(char *param, char *val, const char *modname,
void *arg)
{
struct module *mod = arg;
int ret;
if (strcmp(param, "async_probe") == 0) {
if (kstrtobool(val, &mod->async_probe_requested))
mod->async_probe_requested = true;
return 0;
}
/* Check for magic 'dyndbg' arg */
ret = ddebug_dyndbg_module_param_cb(param, val, modname);
if (ret != 0)
pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
return 0;
}
/* The module is still within the temporary copy; this doesn't do any allocation */
static int early_mod_check(struct load_info *info, int flags)
{
int err;
/*
* Now that we know we have the correct module name, check
* if it's blacklisted.
*/
if (blacklisted(info->name)) {
pr_err("Module %s is blacklisted\n", info->name);
return -EPERM;
}
err = rewrite_section_headers(info, flags);
if (err)
return err;
/* Check module struct version now, before we try to use module. */
if (!check_modstruct_version(info, info->mod))
return -ENOEXEC;
err = check_modinfo(info->mod, info, flags);
if (err)
return err;
mutex_lock(&module_mutex);
err = module_patient_check_exists(info->mod->name, FAIL_DUP_MOD_BECOMING);
mutex_unlock(&module_mutex);
return err;
}
/*
* Allocate and load the module: note that size of section 0 is always
* zero, and we rely on this for optional sections.
*/
static int load_module(struct load_info *info, const char __user *uargs,
int flags)
{
struct module *mod;
bool module_allocated = false;
long err = 0;
char *after_dashes;
/*
 * Do the signature check (if any) first. All that the
 * signature check needs is info->len; it does not need
 * any of the section info, which can be set up later.
 * This minimizes the chances of a corrupt module causing
 * problems before we even get to the signature check.
 *
 * The check will also adjust info->len by stripping off
 * the signature length at the end of the module, making
 * subsequent checks against info->len more correct.
 */
err = module_sig_check(info, flags);
if (err)
goto free_copy;
/*
 * Do basic sanity checks against the ELF header and
 * sections. Cache useful sections and set info->mod
 * to the userspace-passed struct module.
 */
err = elf_validity_cache_copy(info, flags);
if (err)
goto free_copy;
err = early_mod_check(info, flags);
if (err)
goto free_copy;
/* Figure out module layout, and allocate all the memory. */
mod = layout_and_allocate(info, flags);
if (IS_ERR(mod)) {
err = PTR_ERR(mod);
goto free_copy;
}
module_allocated = true;
audit_log_kern_module(mod->name);
/* Reserve our place in the list. */
err = add_unformed_module(mod);
if (err)
goto free_module;
/*
* We are tainting your kernel if your module gets into
* the modules linked list somehow.
*/
module_augment_kernel_taints(mod, info);
/* To avoid stressing percpu allocator, do this once we're unique. */
err = percpu_modalloc(mod, info);
if (err)
goto unlink_mod;
/* Now module is in final location, initialize linked lists, etc. */
err = module_unload_init(mod);
if (err)
goto unlink_mod;
init_param_lock(mod);
/*
* Now we've got everything in the final locations, we can
* find optional sections.
*/
err = find_module_sections(mod, info);
if (err)
goto free_unload;
err = check_export_symbol_versions(mod);
if (err)
goto free_unload;
/* Set up MODINFO_ATTR fields */
setup_modinfo(mod, info);
/* Fix up syms, so that st_value is a pointer to location. */
err = simplify_symbols(mod, info);
if (err < 0)
goto free_modinfo;
err = apply_relocations(mod, info);
if (err < 0)
goto free_modinfo;
err = post_relocation(mod, info);
if (err < 0)
goto free_modinfo;
flush_module_icache(mod);
/* Now copy in args */
mod->args = strndup_user(uargs, ~0UL >> 1);
if (IS_ERR(mod->args)) {
err = PTR_ERR(mod->args);
goto free_arch_cleanup;
}
init_build_id(mod, info);
/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
ftrace_module_init(mod);
/* Finally it's fully formed, ready to start executing. */
err = complete_formation(mod, info);
if (err)
goto ddebug_cleanup;
err = prepare_coming_module(mod);
if (err)
goto bug_cleanup;
mod->async_probe_requested = async_probe;
/* Module is ready to execute: parsing args may do that. */
after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
-32768, 32767, mod,
unknown_module_param_cb);
if (IS_ERR(after_dashes)) {
err = PTR_ERR(after_dashes);
goto coming_cleanup;
} else if (after_dashes) {
pr_warn("%s: parameters '%s' after `--' ignored\n",
mod->name, after_dashes);
}
/* Link in to sysfs. */
err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
if (err < 0)
goto coming_cleanup;
if (is_livepatch_module(mod)) {
err = copy_module_elf(mod, info);
if (err < 0)
goto sysfs_cleanup;
}
/* Get rid of temporary copy. */
free_copy(info, flags);
/* Done! */
trace_module_load(mod);
return do_init_module(mod);
sysfs_cleanup:
mod_sysfs_teardown(mod);
coming_cleanup:
mod->state = MODULE_STATE_GOING;
destroy_params(mod->kp, mod->num_kp);
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_GOING, mod);
klp_module_going(mod);
bug_cleanup:
mod->state = MODULE_STATE_GOING;
/* module_bug_cleanup needs module_mutex protection */
mutex_lock(&module_mutex);
module_bug_cleanup(mod);
mutex_unlock(&module_mutex);
ddebug_cleanup:
ftrace_release_mod(mod);
synchronize_rcu();
kfree(mod->args);
free_arch_cleanup:
module_arch_cleanup(mod);
free_modinfo:
free_modinfo(mod);
free_unload:
module_unload_free(mod);
unlink_mod:
mutex_lock(&module_mutex);
/* Unlink carefully: kallsyms could be walking list. */
list_del_rcu(&mod->list);
mod_tree_remove(mod);
wake_up_all(&module_wq);
/* Wait for RCU-sched synchronization before releasing mod->list. */
synchronize_rcu();
mutex_unlock(&module_mutex);
free_module:
mod_stat_bump_invalid(info, flags);
/* Free lock-classes; relies on the preceding sync_rcu() */
for_class_mod_mem_type(type, core_data) {
lockdep_free_key_range(mod->mem[type].base,
mod->mem[type].size);
}
module_deallocate(mod, info);
free_copy:
/*
 * info->len is always set. We distinguish between failures that
 * happen once the proper module was allocated and those that
 * happen before that.
 */
if (!module_allocated)
mod_stat_bump_becoming(info, flags);
free_copy(info, flags);
return err;
}
SYSCALL_DEFINE3(init_module, void __user *, umod,
unsigned long, len, const char __user *, uargs)
{
int err;
struct load_info info = { };
err = may_init_module();
if (err)
return err;
pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
umod, len, uargs);
err = copy_module_from_user(umod, len, &info);
if (err) {
mod_stat_inc(&failed_kreads);
mod_stat_add_long(len, &invalid_kread_bytes);
return err;
}
return load_module(&info, uargs, 0);
}
struct idempotent {
const void *cookie;
struct hlist_node entry;
struct completion complete;
int ret;
};
#define IDEM_HASH_BITS 8
static struct hlist_head idem_hash[1 << IDEM_HASH_BITS];
static DEFINE_SPINLOCK(idem_lock);
static bool idempotent(struct idempotent *u, const void *cookie)
{
int hash = hash_ptr(cookie, IDEM_HASH_BITS);
struct hlist_head *head = idem_hash + hash;
struct idempotent *existing;
bool first;
u->ret = 0;
u->cookie = cookie;
init_completion(&u->complete);
spin_lock(&idem_lock);
first = true;
hlist_for_each_entry(existing, head, entry) {
if (existing->cookie != cookie)
continue;
first = false;
break;
}
hlist_add_head(&u->entry, idem_hash + hash);
spin_unlock(&idem_lock);
return !first;
}
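/*
 * Example flow: two concurrent finit_module() calls on the same file
 * use the same inode as 'cookie'. The first caller finds no match,
 * adds itself, and idempotent() returns false, so it performs the
 * load. The second finds the first entry on the same hash chain and
 * gets true back, so it just waits in idempotent_init_module() for
 * the shared return value.
 */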
/*
 * We were the first one with 'cookie' on the list, and we ended
 * up completing the operation. We now need to walk the list,
 * remove everybody - which includes ourselves - fill in the return
 * value, and then signal everyone's completion.
 */
static int idempotent_complete(struct idempotent *u, int ret)
{
const void *cookie = u->cookie;
int hash = hash_ptr(cookie, IDEM_HASH_BITS);
struct hlist_head *head = idem_hash + hash;
struct hlist_node *next;
struct idempotent *pos;
spin_lock(&idem_lock);
hlist_for_each_entry_safe(pos, next, head, entry) {
if (pos->cookie != cookie)
continue;
hlist_del(&pos->entry);
pos->ret = ret;
complete(&pos->complete);
}
spin_unlock(&idem_lock);
return ret;
}
static int init_module_from_file(struct file *f, const char __user * uargs, int flags)
{
struct load_info info = { };
void *buf = NULL;
int len;
len = kernel_read_file(f, 0, &buf, INT_MAX, NULL, READING_MODULE);
if (len < 0) {
mod_stat_inc(&failed_kreads);
return len;
}
if (flags & MODULE_INIT_COMPRESSED_FILE) {
int err = module_decompress(&info, buf, len);
vfree(buf); /* compressed data is no longer needed */
if (err) {
mod_stat_inc(&failed_decompress);
mod_stat_add_long(len, &invalid_decompress_bytes);
return err;
}
} else {
info.hdr = buf;
info.len = len;
}
return load_module(&info, uargs, flags);
}
static int idempotent_init_module(struct file *f, const char __user * uargs, int flags)
{
struct idempotent idem;
if (!f || !(f->f_mode & FMODE_READ))
return -EBADF;
/* See if somebody else is already doing the operation */
if (idempotent(&idem, file_inode(f))) {
wait_for_completion(&idem.complete);
return idem.ret;
}
/* Otherwise, we'll do it and complete others */
return idempotent_complete(&idem,
init_module_from_file(f, uargs, flags));
}
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
int err;
struct fd f;
err = may_init_module();
if (err)
return err;
pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
|MODULE_INIT_IGNORE_VERMAGIC
|MODULE_INIT_COMPRESSED_FILE))
return -EINVAL;
f = fdget(fd);
err = idempotent_init_module(f.file, uargs, flags);
fdput(f);
return err;
}
/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
char *module_flags(struct module *mod, char *buf, bool show_state)
{
int bx = 0;
BUG_ON(mod->state == MODULE_STATE_UNFORMED);
if (!mod->taints && !show_state)
goto out;
if (mod->taints ||
mod->state == MODULE_STATE_GOING ||
mod->state == MODULE_STATE_COMING) {
buf[bx++] = '(';
bx += module_flags_taint(mod->taints, buf + bx);
/* Show a - for module-is-being-unloaded */
if (mod->state == MODULE_STATE_GOING && show_state)
buf[bx++] = '-';
/* Show a + for module-is-being-loaded */
if (mod->state == MODULE_STATE_COMING && show_state)
buf[bx++] = '+';
buf[bx++] = ')';
}
out:
buf[bx] = '\0';
return buf;
}
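/*
 * Example: an out-of-tree proprietary module still in the middle of
 * loading renders as "(PO+)", the form shown after module names in
 * /proc/modules and in the oops "Modules linked in:" line.
 */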
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
const struct exception_table_entry *e = NULL;
struct module *mod;
preempt_disable();
mod = __module_address(addr);
if (!mod)
goto out;
if (!mod->num_exentries)
goto out;
e = search_extable(mod->extable,
mod->num_exentries,
addr);
out:
preempt_enable();
/*
 * If we found one, we are running inside it now, hence we cannot
 * unload the module, hence no refcnt is needed.
 */
return e;
}
/**
* is_module_address() - is this address inside a module?
* @addr: the address to check.
*
* See is_module_text_address() if you simply want to see if the address
* is code (not data).
*/
bool is_module_address(unsigned long addr)
{
bool ret;
preempt_disable();
ret = __module_address(addr) != NULL;
preempt_enable();
return ret;
}
/**
* __module_address() - get the module which contains an address.
* @addr: the address.
*
* Must be called with preempt disabled or module mutex held so that
* module doesn't get freed during this.
*/
struct module *__module_address(unsigned long addr)
{
struct module *mod;
if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max)
goto lookup;
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
if (addr >= mod_tree.data_addr_min && addr <= mod_tree.data_addr_max)
goto lookup;
#endif
return NULL;
lookup:
module_assert_mutex_or_preempt();
mod = mod_find(addr, &mod_tree);
if (mod) {
BUG_ON(!within_module(addr, mod));
if (mod->state == MODULE_STATE_UNFORMED)
mod = NULL;
}
return mod;
}
/**
* is_module_text_address() - is this address inside module code?
* @addr: the address to check.
*
* See is_module_address() if you simply want to see if the address is
* anywhere in a module. See kernel_text_address() for testing if an
* address corresponds to kernel or module code.
*/
bool is_module_text_address(unsigned long addr)
{
bool ret;
preempt_disable();
ret = __module_text_address(addr) != NULL;
preempt_enable();
return ret;
}
/**
* __module_text_address() - get the module whose code contains an address.
* @addr: the address.
*
* Must be called with preempt disabled or module mutex held so that
* module doesn't get freed during this.
*/
struct module *__module_text_address(unsigned long addr)
{
struct module *mod = __module_address(addr);
if (mod) {
/* Make sure it's within the text section. */
if (!within_module_mem_type(addr, mod, MOD_TEXT) &&
!within_module_mem_type(addr, mod, MOD_INIT_TEXT))
mod = NULL;
}
return mod;
}
/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
struct module *mod;
char buf[MODULE_FLAGS_BUF_SIZE];
printk(KERN_DEFAULT "Modules linked in:");
/* Most callers should already have preempt disabled, but make sure */
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
pr_cont(" %s%s", mod->name, module_flags(mod, buf, true));
}
print_unloaded_tainted_modules();
preempt_enable();
if (last_unloaded_module.name[0])
pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name,
last_unloaded_module.taints);
pr_cont("\n");
}
#ifdef CONFIG_MODULE_DEBUGFS
struct dentry *mod_debugfs_root;
static int module_debugfs_init(void)
{
mod_debugfs_root = debugfs_create_dir("modules", NULL);
return 0;
}
module_init(module_debugfs_init);
#endif
| linux-master | kernel/module/main.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Module version support
*
* Copyright (C) 2008 Rusty Russell
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/printk.h>
#include "internal.h"
int check_version(const struct load_info *info,
const char *symname,
struct module *mod,
const s32 *crc)
{
Elf_Shdr *sechdrs = info->sechdrs;
unsigned int versindex = info->index.vers;
unsigned int i, num_versions;
struct modversion_info *versions;
/* Exporting module didn't supply crcs? OK, we're already tainted. */
if (!crc)
return 1;
/* No versions at all? modprobe --force does this. */
if (versindex == 0)
return try_to_force_load(mod, symname) == 0;
versions = (void *)sechdrs[versindex].sh_addr;
num_versions = sechdrs[versindex].sh_size
/ sizeof(struct modversion_info);
for (i = 0; i < num_versions; i++) {
u32 crcval;
if (strcmp(versions[i].name, symname) != 0)
continue;
crcval = *crc;
if (versions[i].crc == crcval)
return 1;
pr_debug("Found checksum %X vs module %lX\n",
crcval, versions[i].crc);
goto bad_version;
}
/* Broken toolchain. Warn once, then let it go... */
pr_warn_once("%s: no symbol version for %s\n", info->name, symname);
return 1;
bad_version:
pr_warn("%s: disagrees about version of symbol %s\n", info->name, symname);
return 0;
}
int check_modstruct_version(const struct load_info *info,
struct module *mod)
{
struct find_symbol_arg fsa = {
.name = "module_layout",
.gplok = true,
};
/*
* Since this should be found in the kernel (which can't be removed), no
* locking is necessary -- use preempt_disable() to placate lockdep.
*/
preempt_disable();
if (!find_symbol(&fsa)) {
preempt_enable();
BUG();
}
preempt_enable();
return check_version(info, "module_layout", mod, fsa.crc);
}
/* First part is kernel version, which we ignore if module has crcs. */
int same_magic(const char *amagic, const char *bmagic,
bool has_crcs)
{
if (has_crcs) {
amagic += strcspn(amagic, " ");
bmagic += strcspn(bmagic, " ");
}
return strcmp(amagic, bmagic) == 0;
}
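/*
 * Example: vermagic strings look like "6.5.0 SMP preempt mod_unload ".
 * When the module carries CRCs, both strings are advanced past the
 * leading release token, so "6.5.0 SMP mod_unload " and
 * "6.5.1 SMP mod_unload " still compare equal; without CRCs the
 * kernel release itself must match too.
 */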
/*
* Generate the signature for all relevant module structures here.
* If these change, we don't want to try to parse the module.
*/
void module_layout(struct module *mod,
struct modversion_info *ver,
struct kernel_param *kp,
struct kernel_symbol *ks,
struct tracepoint * const *tp)
{
}
EXPORT_SYMBOL(module_layout);
| linux-master | kernel/module/version.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Module kmemleak support
*
* Copyright (C) 2009 Catalin Marinas
*/
#include <linux/module.h>
#include <linux/kmemleak.h>
#include "internal.h"
void kmemleak_load_module(const struct module *mod,
const struct load_info *info)
{
unsigned int i;
/* only scan the sections containing data */
kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
for (i = 1; i < info->hdr->e_shnum; i++) {
/* Scan all writable sections that are not executable */
if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
!(info->sechdrs[i].sh_flags & SHF_WRITE) ||
(info->sechdrs[i].sh_flags & SHF_EXECINSTR))
continue;
kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
info->sechdrs[i].sh_size, GFP_KERNEL);
}
}
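/*
 * Example: for a typical module this scans .data and .bss (writable,
 * allocated, non-executable) while skipping .text and .rodata, since
 * pointers to dynamically allocated memory can only legitimately
 * live in the writable sections.
 */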
| linux-master | kernel/module/debug_kmemleak.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Module livepatch support
*
* Copyright (C) 2016 Jessica Yu <[email protected]>
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "internal.h"
/*
* Persist ELF information about a module. Copy the ELF header,
* section header table, section string table, and symtab section
* index from info to mod->klp_info.
*/
int copy_module_elf(struct module *mod, struct load_info *info)
{
unsigned int size, symndx;
int ret;
size = sizeof(*mod->klp_info);
mod->klp_info = kmalloc(size, GFP_KERNEL);
if (!mod->klp_info)
return -ENOMEM;
/* ELF header */
size = sizeof(mod->klp_info->hdr);
memcpy(&mod->klp_info->hdr, info->hdr, size);
/* ELF section header table */
size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL);
if (!mod->klp_info->sechdrs) {
ret = -ENOMEM;
goto free_info;
}
/* ELF section name string table */
size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL);
if (!mod->klp_info->secstrings) {
ret = -ENOMEM;
goto free_sechdrs;
}
/* ELF symbol section index */
symndx = info->index.sym;
mod->klp_info->symndx = symndx;
/*
* For livepatch modules, core_kallsyms.symtab is a complete
* copy of the original symbol table. Adjust sh_addr to point
* to core_kallsyms.symtab since the copy of the symtab in module
* init memory is freed at the end of do_init_module().
*/
mod->klp_info->sechdrs[symndx].sh_addr = (unsigned long)mod->core_kallsyms.symtab;
return 0;
free_sechdrs:
kfree(mod->klp_info->sechdrs);
free_info:
kfree(mod->klp_info);
return ret;
}
void free_module_elf(struct module *mod)
{
kfree(mod->klp_info->sechdrs);
kfree(mod->klp_info->secstrings);
kfree(mod->klp_info);
}
| linux-master | kernel/module/livepatch.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Module kallsyms support
*
* Copyright (C) 2010 Rusty Russell
*/
#include <linux/module.h>
#include <linux/module_symbol.h>
#include <linux/kallsyms.h>
#include <linux/buildid.h>
#include <linux/bsearch.h>
#include "internal.h"
/* Lookup exported symbol in given range of kernel_symbols */
static const struct kernel_symbol *lookup_exported_symbol(const char *name,
const struct kernel_symbol *start,
const struct kernel_symbol *stop)
{
return bsearch(name, start, stop - start,
sizeof(struct kernel_symbol), cmp_name);
}
static int is_exported(const char *name, unsigned long value,
const struct module *mod)
{
const struct kernel_symbol *ks;
if (!mod)
ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab);
else
ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms);
return ks && kernel_symbol_value(ks) == value;
}
/* As per nm */
static char elf_type(const Elf_Sym *sym, const struct load_info *info)
{
const Elf_Shdr *sechdrs = info->sechdrs;
if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
return 'v';
else
return 'w';
}
if (sym->st_shndx == SHN_UNDEF)
return 'U';
if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)
return 'a';
if (sym->st_shndx >= SHN_LORESERVE)
return '?';
if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
return 't';
if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC &&
sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
return 'r';
else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
return 'g';
else
return 'd';
}
if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
return 's';
else
return 'b';
}
if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
".debug")) {
return 'n';
}
return '?';
}
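/*
 * Example mapping, as per nm(1): a symbol in .text yields 't', in
 * .rodata 'r', in .data 'd', and in .bss 'b'; these are the type
 * letters later reported for module symbols via kallsyms.
 */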
static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
unsigned int shnum, unsigned int pcpundx)
{
const Elf_Shdr *sec;
enum mod_mem_type type;
if (src->st_shndx == SHN_UNDEF ||
src->st_shndx >= shnum ||
!src->st_name)
return false;
#ifdef CONFIG_KALLSYMS_ALL
if (src->st_shndx == pcpundx)
return true;
#endif
sec = sechdrs + src->st_shndx;
type = sec->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT;
if (!(sec->sh_flags & SHF_ALLOC)
#ifndef CONFIG_KALLSYMS_ALL
|| !(sec->sh_flags & SHF_EXECINSTR)
#endif
|| mod_mem_type_is_init(type))
return false;
return true;
}
/*
* We only allocate and copy the strings needed by the parts of symtab
* we keep. This is simple, but has the effect of making multiple
* copies of duplicates. We could be more sophisticated, see
* linux-kernel thread starting with
* <73defb5e4bca04a6431392cc341112b1@localhost>.
*/
void layout_symtab(struct module *mod, struct load_info *info)
{
Elf_Shdr *symsect = info->sechdrs + info->index.sym;
Elf_Shdr *strsect = info->sechdrs + info->index.str;
const Elf_Sym *src;
unsigned int i, nsrc, ndst, strtab_size = 0;
struct module_memory *mod_mem_data = &mod->mem[MOD_DATA];
struct module_memory *mod_mem_init_data = &mod->mem[MOD_INIT_DATA];
/* Put symbol section at end of init part of module. */
symsect->sh_flags |= SHF_ALLOC;
symsect->sh_entsize = module_get_offset_and_type(mod, MOD_INIT_DATA,
symsect, info->index.sym);
pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
src = (void *)info->hdr + symsect->sh_offset;
nsrc = symsect->sh_size / sizeof(*src);
/* Compute total space required for the core symbols' strtab. */
for (ndst = i = 0; i < nsrc; i++) {
if (i == 0 || is_livepatch_module(mod) ||
is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum,
info->index.pcpu)) {
strtab_size += strlen(&info->strtab[src[i].st_name]) + 1;
ndst++;
}
}
/* Append room for core symbols at end of core part. */
info->symoffs = ALIGN(mod_mem_data->size, symsect->sh_addralign ?: 1);
info->stroffs = mod_mem_data->size = info->symoffs + ndst * sizeof(Elf_Sym);
mod_mem_data->size += strtab_size;
/* Note add_kallsyms() computes strtab_size as core_typeoffs - stroffs */
info->core_typeoffs = mod_mem_data->size;
mod_mem_data->size += ndst * sizeof(char);
/* Put string table section at end of init part of module. */
strsect->sh_flags |= SHF_ALLOC;
strsect->sh_entsize = module_get_offset_and_type(mod, MOD_INIT_DATA,
strsect, info->index.str);
pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
/* We'll tack temporary mod_kallsyms on the end. */
mod_mem_init_data->size = ALIGN(mod_mem_init_data->size,
__alignof__(struct mod_kallsyms));
info->mod_kallsyms_init_off = mod_mem_init_data->size;
mod_mem_init_data->size += sizeof(struct mod_kallsyms);
info->init_typeoffs = mod_mem_init_data->size;
mod_mem_init_data->size += nsrc * sizeof(char);
}
/*
* We use the full symtab and strtab which layout_symtab arranged to
* be appended to the init section. Later we switch to the cut-down
* core-only ones.
*/
void add_kallsyms(struct module *mod, const struct load_info *info)
{
unsigned int i, ndst;
const Elf_Sym *src;
Elf_Sym *dst;
char *s;
Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
unsigned long strtab_size;
void *data_base = mod->mem[MOD_DATA].base;
void *init_data_base = mod->mem[MOD_INIT_DATA].base;
/* Set up to point into init section. */
mod->kallsyms = (void __rcu *)init_data_base +
info->mod_kallsyms_init_off;
rcu_read_lock();
/* The following is safe since this pointer cannot change */
rcu_dereference(mod->kallsyms)->symtab = (void *)symsec->sh_addr;
rcu_dereference(mod->kallsyms)->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
/* Make sure we get permanent strtab: don't use info->strtab. */
rcu_dereference(mod->kallsyms)->strtab =
(void *)info->sechdrs[info->index.str].sh_addr;
rcu_dereference(mod->kallsyms)->typetab = init_data_base + info->init_typeoffs;
/*
* Now populate the cut down core kallsyms for after init
* and set types up while we still have access to sections.
*/
mod->core_kallsyms.symtab = dst = data_base + info->symoffs;
mod->core_kallsyms.strtab = s = data_base + info->stroffs;
mod->core_kallsyms.typetab = data_base + info->core_typeoffs;
strtab_size = info->core_typeoffs - info->stroffs;
src = rcu_dereference(mod->kallsyms)->symtab;
for (ndst = i = 0; i < rcu_dereference(mod->kallsyms)->num_symtab; i++) {
rcu_dereference(mod->kallsyms)->typetab[i] = elf_type(src + i, info);
if (i == 0 || is_livepatch_module(mod) ||
is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum,
info->index.pcpu)) {
ssize_t ret;
mod->core_kallsyms.typetab[ndst] =
rcu_dereference(mod->kallsyms)->typetab[i];
dst[ndst] = src[i];
dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
ret = strscpy(s,
&rcu_dereference(mod->kallsyms)->strtab[src[i].st_name],
strtab_size);
if (ret < 0)
break;
s += ret + 1;
strtab_size -= ret + 1;
}
}
rcu_read_unlock();
mod->core_kallsyms.num_symtab = ndst;
}
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
void init_build_id(struct module *mod, const struct load_info *info)
{
const Elf_Shdr *sechdr;
unsigned int i;
for (i = 0; i < info->hdr->e_shnum; i++) {
sechdr = &info->sechdrs[i];
if (!sect_empty(sechdr) && sechdr->sh_type == SHT_NOTE &&
!build_id_parse_buf((void *)sechdr->sh_addr, mod->build_id,
sechdr->sh_size))
break;
}
}
#else
void init_build_id(struct module *mod, const struct load_info *info)
{
}
#endif
static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum)
{
return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
}
/*
* Given a module and address, find the corresponding symbol and return its name
* while providing its size and offset if needed.
*/
static const char *find_kallsyms_symbol(struct module *mod,
unsigned long addr,
unsigned long *size,
unsigned long *offset)
{
unsigned int i, best = 0;
unsigned long nextval, bestval;
struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
struct module_memory *mod_mem;
/* At worst, the next value is at the end of the module */
if (within_module_init(addr, mod))
mod_mem = &mod->mem[MOD_INIT_TEXT];
else
mod_mem = &mod->mem[MOD_TEXT];
nextval = (unsigned long)mod_mem->base + mod_mem->size;
bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);
/*
* Scan for closest preceding symbol, and next symbol. (ELF
* starts real symbols at 1).
*/
for (i = 1; i < kallsyms->num_symtab; i++) {
const Elf_Sym *sym = &kallsyms->symtab[i];
unsigned long thisval = kallsyms_symbol_value(sym);
if (sym->st_shndx == SHN_UNDEF)
continue;
/*
* We ignore unnamed symbols: they're uninformative
* and inserted at a whim.
*/
if (*kallsyms_symbol_name(kallsyms, i) == '\0' ||
is_mapping_symbol(kallsyms_symbol_name(kallsyms, i)))
continue;
if (thisval <= addr && thisval > bestval) {
best = i;
bestval = thisval;
}
if (thisval > addr && thisval < nextval)
nextval = thisval;
}
if (!best)
return NULL;
if (size)
*size = nextval - bestval;
if (offset)
*offset = addr - bestval;
return kallsyms_symbol_name(kallsyms, best);
}
void * __weak dereference_module_function_descriptor(struct module *mod,
void *ptr)
{
return ptr;
}
/*
* For kallsyms to ask for address resolution. NULL means not found. Careful
* not to lock to avoid deadlock on oopses, simply disable preemption.
*/
const char *module_address_lookup(unsigned long addr,
unsigned long *size,
unsigned long *offset,
char **modname,
const unsigned char **modbuildid,
char *namebuf)
{
const char *ret = NULL;
struct module *mod;
preempt_disable();
mod = __module_address(addr);
if (mod) {
if (modname)
*modname = mod->name;
if (modbuildid) {
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
*modbuildid = mod->build_id;
#else
*modbuildid = NULL;
#endif
}
ret = find_kallsyms_symbol(mod, addr, size, offset);
}
/* Make a copy in here where it's safe */
if (ret) {
strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
ret = namebuf;
}
preempt_enable();
return ret;
}
int lookup_module_symbol_name(unsigned long addr, char *symname)
{
struct module *mod;
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
if (within_module(addr, mod)) {
const char *sym;
sym = find_kallsyms_symbol(mod, addr, NULL, NULL);
if (!sym)
goto out;
strscpy(symname, sym, KSYM_NAME_LEN);
preempt_enable();
return 0;
}
}
out:
preempt_enable();
return -ERANGE;
}
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *name, char *module_name, int *exported)
{
struct module *mod;
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list) {
struct mod_kallsyms *kallsyms;
if (mod->state == MODULE_STATE_UNFORMED)
continue;
kallsyms = rcu_dereference_sched(mod->kallsyms);
if (symnum < kallsyms->num_symtab) {
const Elf_Sym *sym = &kallsyms->symtab[symnum];
*value = kallsyms_symbol_value(sym);
*type = kallsyms->typetab[symnum];
strscpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN);
strscpy(module_name, mod->name, MODULE_NAME_LEN);
*exported = is_exported(name, *value, mod);
preempt_enable();
return 0;
}
symnum -= kallsyms->num_symtab;
}
preempt_enable();
return -ERANGE;
}
/* Given a module and name of symbol, find and return the symbol's value */
static unsigned long __find_kallsyms_symbol_value(struct module *mod, const char *name)
{
unsigned int i;
struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
for (i = 0; i < kallsyms->num_symtab; i++) {
const Elf_Sym *sym = &kallsyms->symtab[i];
if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 &&
sym->st_shndx != SHN_UNDEF)
return kallsyms_symbol_value(sym);
}
return 0;
}
static unsigned long __module_kallsyms_lookup_name(const char *name)
{
struct module *mod;
char *colon;
colon = strnchr(name, MODULE_NAME_LEN, ':');
if (colon) {
mod = find_module_all(name, colon - name, false);
if (mod)
return __find_kallsyms_symbol_value(mod, colon + 1);
return 0;
}
list_for_each_entry_rcu(mod, &modules, list) {
unsigned long ret;
if (mod->state == MODULE_STATE_UNFORMED)
continue;
ret = __find_kallsyms_symbol_value(mod, name);
if (ret)
return ret;
}
return 0;
}
/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name)
{
unsigned long ret;
/* Don't lock: we're in enough trouble already. */
preempt_disable();
ret = __module_kallsyms_lookup_name(name);
preempt_enable();
return ret;
}
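/*
 * Example (a sketch, not built): the "module:name" form restricts the
 * search to one module, while a bare name walks every live module:
 */
#if 0 /* illustrative only, never compiled */
unsigned long addr;

addr = module_kallsyms_lookup_name("usbcore:usb_submit_urb"); /* only usbcore */
addr = module_kallsyms_lookup_name("usb_submit_urb"); /* every live module */
#endif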
unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
{
unsigned long ret;
preempt_disable();
ret = __find_kallsyms_symbol_value(mod, name);
preempt_enable();
return ret;
}
int module_kallsyms_on_each_symbol(const char *modname,
int (*fn)(void *, const char *, unsigned long),
void *data)
{
struct module *mod;
unsigned int i;
int ret = 0;
mutex_lock(&module_mutex);
list_for_each_entry(mod, &modules, list) {
struct mod_kallsyms *kallsyms;
if (mod->state == MODULE_STATE_UNFORMED)
continue;
if (modname && strcmp(modname, mod->name))
continue;
/* Use rcu_dereference_sched() to remain compliant with the sparse tool */
preempt_disable();
kallsyms = rcu_dereference_sched(mod->kallsyms);
preempt_enable();
for (i = 0; i < kallsyms->num_symtab; i++) {
const Elf_Sym *sym = &kallsyms->symtab[i];
if (sym->st_shndx == SHN_UNDEF)
continue;
ret = fn(data, kallsyms_symbol_name(kallsyms, i),
kallsyms_symbol_value(sym));
if (ret != 0)
goto out;
}
/*
* The given module is found, the subsequent modules do not
* need to be compared.
*/
if (modname)
break;
}
out:
mutex_unlock(&module_mutex);
return ret;
}
| linux-master | kernel/module/kallsyms.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* kmod dups - the kernel module autoloader duplicate suppressor
*
* Copyright (C) 2023 Luis Chamberlain <[email protected]>
*/
#define pr_fmt(fmt) "module: " fmt
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>
#include "internal.h"
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "module."
static bool enable_dups_trace = IS_ENABLED(CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS_TRACE);
module_param(enable_dups_trace, bool_enable_only, 0644);
/*
 * Protects the dup_kmod_reqs list; additions/removals use RCU.
 */
static DEFINE_MUTEX(kmod_dup_mutex);
static LIST_HEAD(dup_kmod_reqs);
struct kmod_dup_req {
struct list_head list;
char name[MODULE_NAME_LEN];
struct completion first_req_done;
struct work_struct complete_work;
struct delayed_work delete_work;
int dup_ret;
};
static struct kmod_dup_req *kmod_dup_request_lookup(char *module_name)
{
struct kmod_dup_req *kmod_req;
list_for_each_entry_rcu(kmod_req, &dup_kmod_reqs, list,
lockdep_is_held(&kmod_dup_mutex)) {
if (strlen(kmod_req->name) == strlen(module_name) &&
!memcmp(kmod_req->name, module_name, strlen(module_name))) {
return kmod_req;
}
}
return NULL;
}
static void kmod_dup_request_delete(struct work_struct *work)
{
struct kmod_dup_req *kmod_req;
kmod_req = container_of(to_delayed_work(work), struct kmod_dup_req, delete_work);
/*
 * The typical situation is a module that loaded successfully. In
 * that case the module will already be visible to userspace, so if
 * new requests come in after that, userspace will know the module
 * is loaded and just return 0 right away. There is still a small
 * chance that new request_module() calls arrive right after we
 * delete this entry; that can happen. These heuristics exist to
 * protect against finit_module() abuse for auto-loading: if callers
 * still try to auto-load a module that is already loaded, that's on
 * them, and those inefficiencies should not be fixed by kmod. The
 * inefficiency there is merely a call to modprobe that returns 0.
 */
mutex_lock(&kmod_dup_mutex);
list_del_rcu(&kmod_req->list);
synchronize_rcu();
mutex_unlock(&kmod_dup_mutex);
kfree(kmod_req);
}
static void kmod_dup_request_complete(struct work_struct *work)
{
struct kmod_dup_req *kmod_req;
kmod_req = container_of(work, struct kmod_dup_req, complete_work);
/*
 * This will ensure that the kernel lets all the waiters know it's
 * time to check the return value. It's time to go home.
 */
complete_all(&kmod_req->first_req_done);
/*
* Now that we have allowed prior request_module() calls to go on
* with life, let's schedule deleting this entry. We don't have
* to do it right away, but we *eventually* want to do it so as not to
* let this linger forever as this is just a boot optimization for
* possible abuses of vmalloc() incurred by finit_module() thrashing.
*/
queue_delayed_work(system_wq, &kmod_req->delete_work, 60 * HZ);
}
bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret)
{
struct kmod_dup_req *kmod_req, *new_kmod_req;
int ret;
/*
* Pre-allocate the entry in case we have to use it later,
* so we avoid allocating while holding the mutex.
*/
new_kmod_req = kzalloc(sizeof(*new_kmod_req), GFP_KERNEL);
if (!new_kmod_req)
return false;
memcpy(new_kmod_req->name, module_name, strlen(module_name));
INIT_WORK(&new_kmod_req->complete_work, kmod_dup_request_complete);
INIT_DELAYED_WORK(&new_kmod_req->delete_work, kmod_dup_request_delete);
init_completion(&new_kmod_req->first_req_done);
mutex_lock(&kmod_dup_mutex);
kmod_req = kmod_dup_request_lookup(module_name);
if (!kmod_req) {
/*
* If the first request that came through for a module
* was made with request_module_nowait() we cannot wait for it
* and share its return value with other users which may
* have used request_module() and need a proper return value,
* so just skip using it as an anchor.
*
* If a prior request to this one came through with
* request_module() though, then a request_module_nowait()
* caller would still benefit from duplicate detection.
*/
if (!wait) {
kfree(new_kmod_req);
pr_debug("New request_module_nowait() for %s -- cannot track duplicates for this request\n", module_name);
mutex_unlock(&kmod_dup_mutex);
return false;
}
/*
* There was no duplicate, just add the request so we can
* keep tabs on duplicates later.
*/
pr_debug("New request_module() for %s\n", module_name);
list_add_rcu(&new_kmod_req->list, &dup_kmod_reqs);
mutex_unlock(&kmod_dup_mutex);
return false;
}
mutex_unlock(&kmod_dup_mutex);
/* We are dealing with a duplicate request now */
kfree(new_kmod_req);
/*
* To fix these try to use try_then_request_module() instead as that
* will check if the component you are looking for is present or not.
* You could also just queue a single request to load the module once,
* instead of having each and everything you need try to request for
* the module.
*
* Duplicate request_module() calls can cause quite a bit of wasted
* vmalloc() space when racing with userspace.
*/
if (enable_dups_trace)
WARN(1, "module-autoload: duplicate request for module %s\n", module_name);
else
pr_warn("module-autoload: duplicate request for module %s\n", module_name);
if (!wait) {
/*
* If request_module_nowait() was used then the caller just
* wanted to issue the request, and if another request for the
* same module was already on its way we don't care about
* the return value either. Let duplicate request_module_nowait()
* calls bail out right away.
*/
*dup_ret = 0;
return true;
}
/*
* If a duplicate request_module() was used they *may* care about
* the return value, so we have no other option but to wait for
* the first caller to complete. If the first caller used
* request_module_nowait(), subsequent callers will have to live
* with the compromise of getting a successful return value with
* this optimization enabled ...
*/
ret = wait_for_completion_state(&kmod_req->first_req_done,
TASK_UNINTERRUPTIBLE | TASK_KILLABLE);
if (ret) {
*dup_ret = ret;
return true;
}
/* Now the duplicate request has the same exact return value as the first request */
*dup_ret = kmod_req->dup_ret;
return true;
}
void kmod_dup_request_announce(char *module_name, int ret)
{
struct kmod_dup_req *kmod_req;
mutex_lock(&kmod_dup_mutex);
kmod_req = kmod_dup_request_lookup(module_name);
if (!kmod_req)
goto out;
kmod_req->dup_ret = ret;
/*
* If we complete() here we may allow duplicate threads
* to continue before the first one that submitted the
* request. We're also in no rush: given that each and
* every bounce back to userspace is slow, we avoid that
* with a slight delay here. So queue up the completion
* and let duplicates suffer; they just have to wait a tad
* bit longer. But we also don't want to hold the caller
* up forever or introduce any boot delays.
*/
queue_work(system_wq, &kmod_req->complete_work);
out:
mutex_unlock(&kmod_dup_mutex);
}
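/*
 * Hedged usage sketch (not part of this file): the intended pairing of
 * the two helpers above. The real user is __request_module() in
 * kernel/module/kmod.c further below; this only illustrates the contract
 * with a hypothetical load_module_helper().
 */
#if 0
static int example_load_once(char *name, bool wait)
{
	int ret, dup_ret;

	/* Duplicates either bail out or wait for the first request. */
	if (kmod_dup_request_exists_wait(name, wait, &dup_ret))
		return dup_ret;

	ret = load_module_helper(name);	/* hypothetical loader call */

	/* Publish the result and wake up any waiting duplicates. */
	kmod_dup_request_announce(name, ret);
	return ret;
}
#endif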
| linux-master | kernel/module/dups.c |
/*
* kmod - the kernel module loader
*
* Copyright (C) 2023 Luis Chamberlain <[email protected]>
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>
#include <trace/events/module.h>
#include "internal.h"
/*
* Assuming:
*
* threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
* (u64) THREAD_SIZE * 8UL);
*
* Needing fewer than 50 threads would mean we're dealing with systems
* smaller than 3200 pages. That assumes roughly ~13M of memory, and it
* would only be an upper limit, after which the OOM killer would take
* effect. Systems like these are very unlikely if modules are enabled.
*/
#define MAX_KMOD_CONCURRENT 50
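/*
 * Worked example for the heuristic above (illustrative assumptions, not
 * taken from this file): with PAGE_SIZE = 4 KiB and THREAD_SIZE = 32 KiB,
 * threads < 50 requires totalram_pages * 4096 < 50 * 32768 * 8 = 13107200,
 * i.e. totalram_pages < 3200, roughly 13M of RAM. THREAD_SIZE is
 * architecture and config dependent, so the exact cutoff varies.
 */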
static DEFINE_SEMAPHORE(kmod_concurrent_max, MAX_KMOD_CONCURRENT);
/*
* This is a restriction on having *all* MAX_KMOD_CONCURRENT threads
* running at the same time without returning. When this happens we
* believe you've somehow ended up with a recursive module dependency
* creating a loop.
*
* We have no option but to fail.
*
* Userspace should proactively try to detect and prevent these.
*/
#define MAX_KMOD_ALL_BUSY_TIMEOUT 5
/* modprobe_path is set via /proc/sys. */
char modprobe_path[KMOD_PATH_LEN] = CONFIG_MODPROBE_PATH;
static void free_modprobe_argv(struct subprocess_info *info)
{
kfree(info->argv[3]); /* check call_modprobe() */
kfree(info->argv);
}
static int call_modprobe(char *orig_module_name, int wait)
{
struct subprocess_info *info;
static char *envp[] = {
"HOME=/",
"TERM=linux",
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
NULL
};
char *module_name;
int ret;
char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
if (!argv)
goto out;
module_name = kstrdup(orig_module_name, GFP_KERNEL);
if (!module_name)
goto free_argv;
argv[0] = modprobe_path;
argv[1] = "-q";
argv[2] = "--";
argv[3] = module_name; /* check free_modprobe_argv() */
argv[4] = NULL;
info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
NULL, free_modprobe_argv, NULL);
if (!info)
goto free_module_name;
ret = call_usermodehelper_exec(info, wait | UMH_KILLABLE);
kmod_dup_request_announce(orig_module_name, ret);
return ret;
free_module_name:
kfree(module_name);
free_argv:
kfree(argv);
out:
kmod_dup_request_announce(orig_module_name, -ENOMEM);
return -ENOMEM;
}
/**
* __request_module - try to load a kernel module
* @wait: wait (or not) for the operation to complete
* @fmt: printf style format string for the name of the module
* @...: arguments as specified in the format string
*
* Load a module using the user mode module loader. The function returns
* zero on success, or a negative errno code or the positive exit code
* from "modprobe" on failure. Note that a successful module load does
* not mean the module did not then unload and exit on an error of its
* own. Callers must check that the service they requested is now
* available, not blindly invoke it.
*
* If module auto-loading support is disabled then this function
* simply returns -ENOENT.
*/
int __request_module(bool wait, const char *fmt, ...)
{
va_list args;
char module_name[MODULE_NAME_LEN];
int ret, dup_ret;
/*
* We don't allow synchronous module loading from async. Module
* init may invoke async_synchronize_full() which will end up
* waiting for this task which already is waiting for the module
* loading to complete, leading to a deadlock.
*/
WARN_ON_ONCE(wait && current_is_async());
if (!modprobe_path[0])
return -ENOENT;
va_start(args, fmt);
ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
va_end(args);
if (ret >= MODULE_NAME_LEN)
return -ENAMETOOLONG;
ret = security_kernel_module_request(module_name);
if (ret)
return ret;
ret = down_timeout(&kmod_concurrent_max, MAX_KMOD_ALL_BUSY_TIMEOUT * HZ);
if (ret) {
pr_warn_ratelimited("request_module: modprobe %s cannot be processed, kmod busy with %d threads for more than %d seconds now",
module_name, MAX_KMOD_CONCURRENT, MAX_KMOD_ALL_BUSY_TIMEOUT);
return ret;
}
trace_module_request(module_name, wait, _RET_IP_);
if (kmod_dup_request_exists_wait(module_name, wait, &dup_ret)) {
ret = dup_ret;
goto out;
}
ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
out:
up(&kmod_concurrent_max);
return ret;
}
EXPORT_SYMBOL(__request_module);
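/*
 * Hedged usage sketch (not part of this file): how a driver typically
 * consumes this API via the request_module() / request_module_nowait()
 * wrappers from <linux/kmod.h>. The "my-protocol-family" alias and the
 * my_family_is_registered() re-check are invented for illustration.
 */
#if 0
	/* Synchronous: wait for modprobe, then re-check the service. */
	if (request_module("my-protocol-family-%d", family) == 0 &&
	    !my_family_is_registered(family))	/* hypothetical re-check */
		return -EAFNOSUPPORT;

	/* Fire and forget: the return value is not interesting here. */
	request_module_nowait("my-optional-helper");
#endif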
| linux-master | kernel/module/kmod.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Intel Corp.
* Author: Jiang Liu <[email protected]>
*
* This file is licensed under GPLv2.
*
* This file contains common code to support Message Signaled Interrupts for
* PCI-compatible and non-PCI-compatible devices.
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/pci.h>
#include "internals.h"
/**
* struct msi_ctrl - MSI internal management control structure
* @domid: ID of the domain on which management operations should be done
* @first: First (hardware) slot index to operate on
* @last: Last (hardware) slot index to operate on
* @nirqs: The number of Linux interrupts to allocate. Can be larger
* than the range due to PCI/multi-MSI.
*/
struct msi_ctrl {
unsigned int domid;
unsigned int first;
unsigned int last;
unsigned int nirqs;
};
/* Invalid Xarray index which is outside of any searchable range */
#define MSI_XA_MAX_INDEX (ULONG_MAX - 1)
/* The maximum domain size */
#define MSI_XA_DOMAIN_SIZE (MSI_MAX_INDEX + 1)
static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl);
static unsigned int msi_domain_get_hwsize(struct device *dev, unsigned int domid);
static inline int msi_sysfs_create_group(struct device *dev);
/**
* msi_alloc_desc - Allocate an initialized msi_desc
* @dev: Pointer to the device for which this is allocated
* @nvec: The number of vectors used in this entry
* @affinity: Optional pointer to an affinity mask array size of @nvec
*
* If @affinity is not %NULL then an affinity array[@nvec] is allocated
* and the affinity masks and flags from @affinity are copied.
*
* Return: pointer to allocated &msi_desc on success or %NULL on failure
*/
static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
const struct irq_affinity_desc *affinity)
{
struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return NULL;
desc->dev = dev;
desc->nvec_used = nvec;
if (affinity) {
desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
if (!desc->affinity) {
kfree(desc);
return NULL;
}
}
return desc;
}
static void msi_free_desc(struct msi_desc *desc)
{
kfree(desc->affinity);
kfree(desc);
}
static int msi_insert_desc(struct device *dev, struct msi_desc *desc,
unsigned int domid, unsigned int index)
{
struct msi_device_data *md = dev->msi.data;
struct xarray *xa = &md->__domains[domid].store;
unsigned int hwsize;
int ret;
hwsize = msi_domain_get_hwsize(dev, domid);
if (index == MSI_ANY_INDEX) {
struct xa_limit limit = { .min = 0, .max = hwsize - 1 };
unsigned int index;
/* Let the xarray allocate a free index within the limit */
ret = xa_alloc(xa, &index, desc, limit, GFP_KERNEL);
if (ret)
goto fail;
desc->msi_index = index;
return 0;
} else {
if (index >= hwsize) {
ret = -ERANGE;
goto fail;
}
desc->msi_index = index;
ret = xa_insert(xa, index, desc, GFP_KERNEL);
if (ret)
goto fail;
return 0;
}
fail:
msi_free_desc(desc);
return ret;
}
/**
* msi_domain_insert_msi_desc - Allocate and initialize a MSI descriptor and
* insert it at @init_desc->msi_index
*
* @dev: Pointer to the device for which the descriptor is allocated
* @domid: The id of the interrupt domain to which the descriptor is added
* @init_desc: Pointer to an MSI descriptor to initialize the new descriptor
*
* Return: 0 on success or an appropriate failure code.
*/
int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
struct msi_desc *init_desc)
{
struct msi_desc *desc;
lockdep_assert_held(&dev->msi.data->mutex);
desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
if (!desc)
return -ENOMEM;
/* Copy type specific data to the new descriptor. */
desc->pci = init_desc->pci;
return msi_insert_desc(dev, desc, domid, init_desc->msi_index);
}
static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
{
switch (filter) {
case MSI_DESC_ALL:
return true;
case MSI_DESC_NOTASSOCIATED:
return !desc->irq;
case MSI_DESC_ASSOCIATED:
return !!desc->irq;
}
WARN_ON_ONCE(1);
return false;
}
static bool msi_ctrl_valid(struct device *dev, struct msi_ctrl *ctrl)
{
unsigned int hwsize;
if (WARN_ON_ONCE(ctrl->domid >= MSI_MAX_DEVICE_IRQDOMAINS ||
(dev->msi.domain &&
!dev->msi.data->__domains[ctrl->domid].domain)))
return false;
hwsize = msi_domain_get_hwsize(dev, ctrl->domid);
if (WARN_ON_ONCE(ctrl->first > ctrl->last ||
ctrl->first >= hwsize ||
ctrl->last >= hwsize))
return false;
return true;
}
static void msi_domain_free_descs(struct device *dev, struct msi_ctrl *ctrl)
{
struct msi_desc *desc;
struct xarray *xa;
unsigned long idx;
lockdep_assert_held(&dev->msi.data->mutex);
if (!msi_ctrl_valid(dev, ctrl))
return;
xa = &dev->msi.data->__domains[ctrl->domid].store;
xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
xa_erase(xa, idx);
/* Leak the descriptor when it is still referenced */
if (WARN_ON_ONCE(msi_desc_match(desc, MSI_DESC_ASSOCIATED)))
continue;
msi_free_desc(desc);
}
}
/**
* msi_domain_free_msi_descs_range - Free a range of MSI descriptors of a device in an irqdomain
* @dev: Device for which to free the descriptors
* @domid: Id of the domain to operate on
* @first: Index to start freeing from (inclusive)
* @last: Last index to be freed (inclusive)
*/
void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
unsigned int first, unsigned int last)
{
struct msi_ctrl ctrl = {
.domid = domid,
.first = first,
.last = last,
};
msi_domain_free_descs(dev, &ctrl);
}
/**
* msi_domain_add_simple_msi_descs - Allocate and initialize MSI descriptors
* @dev: Pointer to the device for which the descriptors are allocated
* @ctrl: Allocation control struct
*
* Return: 0 on success or an appropriate failure code.
*/
static int msi_domain_add_simple_msi_descs(struct device *dev, struct msi_ctrl *ctrl)
{
struct msi_desc *desc;
unsigned int idx;
int ret;
lockdep_assert_held(&dev->msi.data->mutex);
if (!msi_ctrl_valid(dev, ctrl))
return -EINVAL;
for (idx = ctrl->first; idx <= ctrl->last; idx++) {
desc = msi_alloc_desc(dev, 1, NULL);
if (!desc)
goto fail_mem;
ret = msi_insert_desc(dev, desc, ctrl->domid, idx);
if (ret)
goto fail;
}
return 0;
fail_mem:
ret = -ENOMEM;
fail:
msi_domain_free_descs(dev, ctrl);
return ret;
}
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
*msg = entry->msg;
}
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
struct msi_desc *entry = irq_get_msi_desc(irq);
__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);
static void msi_device_data_release(struct device *dev, void *res)
{
struct msi_device_data *md = res;
int i;
for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++) {
msi_remove_device_irq_domain(dev, i);
WARN_ON_ONCE(!xa_empty(&md->__domains[i].store));
xa_destroy(&md->__domains[i].store);
}
dev->msi.data = NULL;
}
/**
* msi_setup_device_data - Setup MSI device data
* @dev: Device for which MSI device data should be set up
*
* Return: 0 on success, appropriate error code otherwise
*
* This can be called more than once for @dev. If the MSI device data is
* already allocated the call succeeds. The allocated memory is
* automatically released when the device is destroyed.
*/
int msi_setup_device_data(struct device *dev)
{
struct msi_device_data *md;
int ret, i;
if (dev->msi.data)
return 0;
md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
if (!md)
return -ENOMEM;
ret = msi_sysfs_create_group(dev);
if (ret) {
devres_free(md);
return ret;
}
for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++)
xa_init_flags(&md->__domains[i].store, XA_FLAGS_ALLOC);
/*
* If @dev::msi::domain is set and is a global MSI domain, copy the
* pointer into the domain array so all code can operate on domain
* ids. The NULL pointer check is required to keep the legacy
* architecture specific PCI/MSI support working.
*/
if (dev->msi.domain && !irq_domain_is_msi_parent(dev->msi.domain))
md->__domains[MSI_DEFAULT_DOMAIN].domain = dev->msi.domain;
mutex_init(&md->mutex);
dev->msi.data = md;
devres_add(dev, md);
return 0;
}
/**
* msi_lock_descs - Lock the MSI descriptor storage of a device
* @dev: Device to operate on
*/
void msi_lock_descs(struct device *dev)
{
mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);
/**
* msi_unlock_descs - Unlock the MSI descriptor storage of a device
* @dev: Device to operate on
*/
void msi_unlock_descs(struct device *dev)
{
/* Invalidate the index which was cached by the iterator */
dev->msi.data->__iter_idx = MSI_XA_MAX_INDEX;
mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);
static struct msi_desc *msi_find_desc(struct msi_device_data *md, unsigned int domid,
enum msi_desc_filter filter)
{
struct xarray *xa = &md->__domains[domid].store;
struct msi_desc *desc;
xa_for_each_start(xa, md->__iter_idx, desc, md->__iter_idx) {
if (msi_desc_match(desc, filter))
return desc;
}
md->__iter_idx = MSI_XA_MAX_INDEX;
return NULL;
}
/**
* msi_domain_first_desc - Get the first MSI descriptor of an irqdomain associated to a device
* @dev: Device to operate on
* @domid: The id of the interrupt domain which should be walked.
* @filter: Descriptor state filter
*
* Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
* must be invoked before the call.
*
* Return: Pointer to the first MSI descriptor matching the search
* criteria, NULL if none found.
*/
struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
enum msi_desc_filter filter)
{
struct msi_device_data *md = dev->msi.data;
if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
return NULL;
lockdep_assert_held(&md->mutex);
md->__iter_idx = 0;
return msi_find_desc(md, domid, filter);
}
EXPORT_SYMBOL_GPL(msi_domain_first_desc);
/**
* msi_next_desc - Get the next MSI descriptor of a device
* @dev: Device to operate on
* @domid: The id of the interrupt domain which should be walked.
* @filter: Descriptor state filter
*
* The first invocation of msi_next_desc() has to be preceded by a
* successful invocation of msi_domain_first_desc(). Consecutive
* invocations are only valid if the previous one was successful. All
* these operations have to be done within the same MSI mutex held region.
*
* Return: Pointer to the next MSI descriptor matching the search
* criteria, NULL if none found.
*/
struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
enum msi_desc_filter filter)
{
struct msi_device_data *md = dev->msi.data;
if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
return NULL;
lockdep_assert_held(&md->mutex);
if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
return NULL;
md->__iter_idx++;
return msi_find_desc(md, domid, filter);
}
EXPORT_SYMBOL_GPL(msi_next_desc);
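/*
 * Hedged usage sketch (not part of this file): walking all associated
 * descriptors of the default MSI domain of some device @dev with the two
 * accessors above. The walk must be enclosed in a
 * msi_lock_descs()/msi_unlock_descs() pair.
 */
#if 0
	struct msi_desc *desc;

	msi_lock_descs(dev);
	for (desc = msi_domain_first_desc(dev, MSI_DEFAULT_DOMAIN, MSI_DESC_ASSOCIATED);
	     desc;
	     desc = msi_next_desc(dev, MSI_DEFAULT_DOMAIN, MSI_DESC_ASSOCIATED))
		dev_info(dev, "MSI index %u -> Linux irq %u\n",
			 desc->msi_index, desc->irq);
	msi_unlock_descs(dev);
#endif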
/**
* msi_domain_get_virq - Lookup the Linux interrupt number for a MSI index on a interrupt domain
* @dev: Device to operate on
* @domid: Domain ID of the interrupt domain associated to the device
* @index: MSI interrupt index to look for (0-based)
*
* Return: The Linux interrupt number on success (> 0), 0 if not found
*/
unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index)
{
struct msi_desc *desc;
unsigned int ret = 0;
bool pcimsi = false;
struct xarray *xa;
if (!dev->msi.data)
return 0;
if (WARN_ON_ONCE(index > MSI_MAX_INDEX || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
return 0;
/* This check is only valid for the PCI default MSI domain */
if (dev_is_pci(dev) && domid == MSI_DEFAULT_DOMAIN)
pcimsi = to_pci_dev(dev)->msi_enabled;
msi_lock_descs(dev);
xa = &dev->msi.data->__domains[domid].store;
desc = xa_load(xa, pcimsi ? 0 : index);
if (desc && desc->irq) {
/*
* PCI-MSI has only one descriptor for multiple interrupts.
* PCI-MSIX and platform MSI use a descriptor per
* interrupt.
*/
if (pcimsi) {
if (index < desc->nvec_used)
ret = desc->irq + index;
} else {
ret = desc->irq;
}
}
msi_unlock_descs(dev);
return ret;
}
EXPORT_SYMBOL_GPL(msi_domain_get_virq);
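/*
 * Hedged usage sketch (not part of this file): resolving an MSI index to
 * a Linux interrupt number and requesting it. my_handler and my_data are
 * hypothetical; request_irq() comes from <linux/interrupt.h>.
 */
#if 0
	unsigned int virq = msi_domain_get_virq(dev, MSI_DEFAULT_DOMAIN, 0);

	if (!virq)
		return -ENOENT;
	ret = request_irq(virq, my_handler, 0, dev_name(dev), my_data);
#endif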
#ifdef CONFIG_SYSFS
static struct attribute *msi_dev_attrs[] = {
NULL
};
static const struct attribute_group msi_irqs_group = {
.name = "msi_irqs",
.attrs = msi_dev_attrs,
};
static inline int msi_sysfs_create_group(struct device *dev)
{
return devm_device_add_group(dev, &msi_irqs_group);
}
static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
/* MSI vs. MSI-X is per device, not per interrupt */
bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;
return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
}
static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
{
struct device_attribute *attrs = desc->sysfs_attrs;
int i;
if (!attrs)
return;
desc->sysfs_attrs = NULL;
for (i = 0; i < desc->nvec_used; i++) {
if (attrs[i].show)
sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
kfree(attrs[i].attr.name);
}
kfree(attrs);
}
static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
{
struct device_attribute *attrs;
int ret, i;
attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return -ENOMEM;
desc->sysfs_attrs = attrs;
for (i = 0; i < desc->nvec_used; i++) {
sysfs_attr_init(&attrs[i].attr);
attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
if (!attrs[i].attr.name) {
ret = -ENOMEM;
goto fail;
}
attrs[i].attr.mode = 0444;
attrs[i].show = msi_mode_show;
ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
if (ret) {
attrs[i].show = NULL;
goto fail;
}
}
return 0;
fail:
msi_sysfs_remove_desc(dev, desc);
return ret;
}
#if defined(CONFIG_PCI_MSI_ARCH_FALLBACKS) || defined(CONFIG_PCI_XEN)
/**
* msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
* @dev: The device (PCI, platform etc) which will get sysfs entries
*/
int msi_device_populate_sysfs(struct device *dev)
{
struct msi_desc *desc;
int ret;
msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
if (desc->sysfs_attrs)
continue;
ret = msi_sysfs_populate_desc(dev, desc);
if (ret)
return ret;
}
return 0;
}
/**
* msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
* @dev: The device (PCI, platform etc) for which to remove
* sysfs entries
*/
void msi_device_destroy_sysfs(struct device *dev)
{
struct msi_desc *desc;
msi_for_each_desc(desc, dev, MSI_DESC_ALL)
msi_sysfs_remove_desc(dev, desc);
}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS || CONFIG_PCI_XEN */
#else /* CONFIG_SYSFS */
static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
#endif /* !CONFIG_SYSFS */
static struct irq_domain *msi_get_device_domain(struct device *dev, unsigned int domid)
{
struct irq_domain *domain;
lockdep_assert_held(&dev->msi.data->mutex);
if (WARN_ON_ONCE(domid >= MSI_MAX_DEVICE_IRQDOMAINS))
return NULL;
domain = dev->msi.data->__domains[domid].domain;
if (!domain)
return NULL;
if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain)))
return NULL;
return domain;
}
static unsigned int msi_domain_get_hwsize(struct device *dev, unsigned int domid)
{
struct msi_domain_info *info;
struct irq_domain *domain;
domain = msi_get_device_domain(dev, domid);
if (domain) {
info = domain->host_data;
return info->hwsize;
}
/* No domain, default to MSI_XA_DOMAIN_SIZE */
return MSI_XA_DOMAIN_SIZE;
}
static inline void irq_chip_write_msi_msg(struct irq_data *data,
struct msi_msg *msg)
{
data->chip->irq_write_msi_msg(data, msg);
}
static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
struct msi_domain_info *info = domain->host_data;
/*
* If the MSI provider has messed with the second message and
* not advertised that it is level-capable, signal the breakage.
*/
WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
(info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}
/**
* msi_domain_set_affinity - Generic affinity setter function for MSI domains
* @irq_data: The irq data associated to the interrupt
* @mask: The affinity mask to set
* @force: Flag to enforce setting (disable online checks)
*
* Intended to be used by MSI interrupt controllers which are
* implemented with hierarchical domains.
*
* Return: IRQ_SET_MASK_* result code
*/
int msi_domain_set_affinity(struct irq_data *irq_data,
const struct cpumask *mask, bool force)
{
struct irq_data *parent = irq_data->parent_data;
struct msi_msg msg[2] = { [1] = { }, };
int ret;
ret = parent->chip->irq_set_affinity(parent, mask, force);
if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
msi_check_level(irq_data->domain, msg);
irq_chip_write_msi_msg(irq_data, msg);
}
return ret;
}
static int msi_domain_activate(struct irq_domain *domain,
struct irq_data *irq_data, bool early)
{
struct msi_msg msg[2] = { [1] = { }, };
BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
msi_check_level(irq_data->domain, msg);
irq_chip_write_msi_msg(irq_data, msg);
return 0;
}
static void msi_domain_deactivate(struct irq_domain *domain,
struct irq_data *irq_data)
{
struct msi_msg msg[2];
memset(msg, 0, sizeof(msg));
irq_chip_write_msi_msg(irq_data, msg);
}
static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct msi_domain_info *info = domain->host_data;
struct msi_domain_ops *ops = info->ops;
irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
int i, ret;
if (irq_find_mapping(domain, hwirq) > 0)
return -EEXIST;
if (domain->parent) {
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
if (ret < 0)
return ret;
}
for (i = 0; i < nr_irqs; i++) {
ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
if (ret < 0) {
if (ops->msi_free) {
for (i--; i > 0; i--)
ops->msi_free(domain, info, virq + i);
}
irq_domain_free_irqs_top(domain, virq, nr_irqs);
return ret;
}
}
return 0;
}
static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct msi_domain_info *info = domain->host_data;
int i;
if (info->ops->msi_free) {
for (i = 0; i < nr_irqs; i++)
info->ops->msi_free(domain, info, virq + i);
}
irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
static const struct irq_domain_ops msi_domain_ops = {
.alloc = msi_domain_alloc,
.free = msi_domain_free,
.activate = msi_domain_activate,
.deactivate = msi_domain_deactivate,
};
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
msi_alloc_info_t *arg)
{
return arg->hwirq;
}
static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *arg)
{
memset(arg, 0, sizeof(*arg));
return 0;
}
static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
struct msi_desc *desc)
{
arg->desc = desc;
}
static int msi_domain_ops_init(struct irq_domain *domain,
struct msi_domain_info *info,
unsigned int virq, irq_hw_number_t hwirq,
msi_alloc_info_t *arg)
{
irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
info->chip_data);
if (info->handler && info->handler_name) {
__irq_set_handler(virq, info->handler, 0, info->handler_name);
if (info->handler_data)
irq_set_handler_data(virq, info->handler_data);
}
return 0;
}
static struct msi_domain_ops msi_domain_ops_default = {
.get_hwirq = msi_domain_ops_get_hwirq,
.msi_init = msi_domain_ops_init,
.msi_prepare = msi_domain_ops_prepare,
.set_desc = msi_domain_ops_set_desc,
};
static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
struct msi_domain_ops *ops = info->ops;
if (ops == NULL) {
info->ops = &msi_domain_ops_default;
return;
}
if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
return;
if (ops->get_hwirq == NULL)
ops->get_hwirq = msi_domain_ops_default.get_hwirq;
if (ops->msi_init == NULL)
ops->msi_init = msi_domain_ops_default.msi_init;
if (ops->msi_prepare == NULL)
ops->msi_prepare = msi_domain_ops_default.msi_prepare;
if (ops->set_desc == NULL)
ops->set_desc = msi_domain_ops_default.set_desc;
}
static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
struct irq_chip *chip = info->chip;
BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
if (!chip->irq_set_affinity)
chip->irq_set_affinity = msi_domain_set_affinity;
}
static struct irq_domain *__msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
unsigned int flags,
struct irq_domain *parent)
{
struct irq_domain *domain;
if (info->hwsize > MSI_XA_DOMAIN_SIZE)
return NULL;
/*
* Hardware size 0 is valid for backwards compatibility and for
* domains which are not backed by a hardware table. Grant the
* maximum index space.
*/
if (!info->hwsize)
info->hwsize = MSI_XA_DOMAIN_SIZE;
msi_domain_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
msi_domain_update_chip_ops(info);
domain = irq_domain_create_hierarchy(parent, flags | IRQ_DOMAIN_FLAG_MSI, 0,
fwnode, &msi_domain_ops, info);
if (domain)
irq_domain_update_bus_token(domain, info->bus_token);
return domain;
}
/**
* msi_create_irq_domain - Create an MSI interrupt domain
* @fwnode: Optional fwnode of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
* Return: pointer to the created &struct irq_domain or %NULL on failure
*/
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent)
{
return __msi_create_irq_domain(fwnode, info, 0, parent);
}
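/*
 * Hedged usage sketch (not part of this file): a minimal MSI domain as a
 * hypothetical interrupt controller driver could create it. The irq_chip
 * is invented; the MSI_FLAG_USE_DEF_* flags make the core fill in the
 * default domain and chip ops as seen above.
 */
#if 0
static struct msi_domain_info my_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	.chip	= &my_msi_irq_chip,		/* hypothetical irq_chip */
};

static struct irq_domain *my_init_msi(struct fwnode_handle *fwnode,
				      struct irq_domain *parent)
{
	return msi_create_irq_domain(fwnode, &my_msi_domain_info, parent);
}
#endif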
/**
* msi_parent_init_dev_msi_info - Delegate initialization of device MSI info down
* in the domain hierarchy
* @dev: The device for which the domain should be created
* @domain: The domain in the hierarchy this op is being called on
* @msi_parent_domain: The IRQ_DOMAIN_FLAG_MSI_PARENT domain for the child to
* be created
* @msi_child_info: The MSI domain info of the IRQ_DOMAIN_FLAG_MSI_DEVICE
* domain to be created
*
* Return: true on success, false otherwise
*
* This is the most complex problem of per device MSI domains and the
* underlying interrupt domain hierarchy:
*
* The device domain to be initialized requests the broadest feature set
* possible and the underlying domain hierarchy puts restrictions on it.
*
* That's trivial for a simple parent->child relationship, but it gets
* interesting with an intermediate domain: root->parent->child. The
* intermediate 'parent' can expand the capabilities which the 'root'
* domain is providing. That creates a classic chicken-and-egg problem:
* Which entity is doing the restrictions/expansions?
*
* One solution is to let the root domain handle the initialization;
* that's why there are the @domain and the @msi_parent_domain pointers.
*/
bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct irq_domain *msi_parent_domain,
struct msi_domain_info *msi_child_info)
{
struct irq_domain *parent = domain->parent;
if (WARN_ON_ONCE(!parent || !parent->msi_parent_ops ||
!parent->msi_parent_ops->init_dev_msi_info))
return false;
return parent->msi_parent_ops->init_dev_msi_info(dev, parent, msi_parent_domain,
msi_child_info);
}
/**
* msi_create_device_irq_domain - Create a device MSI interrupt domain
* @dev: Pointer to the device
* @domid: Domain id
* @template: MSI domain info bundle used as template
* @hwsize: Maximum number of MSI table entries (0 if unknown or unlimited)
* @domain_data: Optional pointer to domain specific data which is set in
* msi_domain_info::data
* @chip_data: Optional pointer to chip specific data which is set in
* msi_domain_info::chip_data
*
* Return: True on success, false otherwise
*
* There is no firmware node required for this interface because the per
* device domains are software constructs which are actually closer to the
* hardware reality than anything firmware could describe.
*
* The domain name and the irq chip name for a MSI device domain are
* composed by: "$(PREFIX)$(CHIPNAME)-$(DEVNAME)"
*
* $PREFIX: Optional prefix provided by the underlying MSI parent domain
* via msi_parent_ops::prefix. If that pointer is NULL the prefix
* is empty.
* $CHIPNAME: The name of the irq_chip in @template
* $DEVNAME: The name of the device
*
* This results in understandable chip names and hardware interrupt numbers
* in e.g. /proc/interrupts
*
* PCI-MSI-0000:00:1c.0 0-edge Parent domain has no prefix
* IR-PCI-MSI-0000:00:1c.4 0-edge Same with interrupt remapping prefix 'IR-'
*
* IR-PCI-MSIX-0000:3d:00.0 0-edge Hardware interrupt numbers reflect
* IR-PCI-MSIX-0000:3d:00.0 1-edge the real MSI-X index on that device
* IR-PCI-MSIX-0000:3d:00.0 2-edge
*
* On IMS domains the hardware interrupt number is either a table entry
* index or a purely software managed index but it is guaranteed to be
* unique.
*
* The domain pointer is stored in @dev::msi::data::__irqdomains[]. All
* subsequent operations on the domain depend on the domain id.
*
* The domain is automatically freed when the device is removed via devres
* in the context of @dev::msi::data freeing, but it can also be
* independently removed via @msi_remove_device_irq_domain().
*/
bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
const struct msi_domain_template *template,
unsigned int hwsize, void *domain_data,
void *chip_data)
{
struct irq_domain *domain, *parent = dev->msi.domain;
const struct msi_parent_ops *pops;
struct msi_domain_template *bundle;
struct fwnode_handle *fwnode;
if (!irq_domain_is_msi_parent(parent))
return false;
if (domid >= MSI_MAX_DEVICE_IRQDOMAINS)
return false;
bundle = kmemdup(template, sizeof(*bundle), GFP_KERNEL);
if (!bundle)
return false;
bundle->info.hwsize = hwsize;
bundle->info.chip = &bundle->chip;
bundle->info.ops = &bundle->ops;
bundle->info.data = domain_data;
bundle->info.chip_data = chip_data;
pops = parent->msi_parent_ops;
snprintf(bundle->name, sizeof(bundle->name), "%s%s-%s",
pops->prefix ? : "", bundle->chip.name, dev_name(dev));
bundle->chip.name = bundle->name;
fwnode = irq_domain_alloc_named_fwnode(bundle->name);
if (!fwnode)
goto free_bundle;
if (msi_setup_device_data(dev))
goto free_fwnode;
msi_lock_descs(dev);
if (WARN_ON_ONCE(msi_get_device_domain(dev, domid)))
goto fail;
if (!pops->init_dev_msi_info(dev, parent, parent, &bundle->info))
goto fail;
domain = __msi_create_irq_domain(fwnode, &bundle->info, IRQ_DOMAIN_FLAG_MSI_DEVICE, parent);
if (!domain)
goto fail;
domain->dev = dev;
dev->msi.data->__domains[domid].domain = domain;
msi_unlock_descs(dev);
return true;
fail:
msi_unlock_descs(dev);
free_fwnode:
irq_domain_free_fwnode(fwnode);
free_bundle:
kfree(bundle);
return false;
}
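/*
 * Hedged usage sketch (not part of this file): a device driver creating
 * a secondary per device MSI domain from a template, e.g. for IMS-style
 * interrupts. All names, the hwsize of 64 and the mask/unmask callbacks
 * are invented for illustration.
 */
#if 0
static const struct msi_domain_template my_template = {
	.chip = {
		.name		= "MYDEV-IMS",
		.irq_mask	= my_mask,	/* hypothetical callbacks */
		.irq_unmask	= my_unmask,
	},
	.info = {
		.flags		= MSI_FLAG_USE_DEF_DOM_OPS |
				  MSI_FLAG_USE_DEF_CHIP_OPS,
	},
};

static int my_probe_irqdomain(struct device *dev)
{
	/* 64 is an invented hardware table size */
	if (!msi_create_device_irq_domain(dev, MSI_SECONDARY_DOMAIN,
					  &my_template, 64, NULL, NULL))
		return -ENODEV;
	return 0;
}
#endif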
/**
* msi_remove_device_irq_domain - Free a device MSI interrupt domain
* @dev: Pointer to the device
* @domid: Domain id
*/
void msi_remove_device_irq_domain(struct device *dev, unsigned int domid)
{
struct fwnode_handle *fwnode = NULL;
struct msi_domain_info *info;
struct irq_domain *domain;
msi_lock_descs(dev);
domain = msi_get_device_domain(dev, domid);
if (!domain || !irq_domain_is_msi_device(domain))
goto unlock;
dev->msi.data->__domains[domid].domain = NULL;
info = domain->host_data;
if (irq_domain_is_msi_device(domain))
fwnode = domain->fwnode;
irq_domain_remove(domain);
irq_domain_free_fwnode(fwnode);
kfree(container_of(info, struct msi_domain_template, info));
unlock:
msi_unlock_descs(dev);
}
/**
* msi_match_device_irq_domain - Match a device irq domain against a bus token
* @dev: Pointer to the device
* @domid: Domain id
* @bus_token: Bus token to match against the domain bus token
*
* Return: True if device domain exists and bus tokens match.
*/
bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
enum irq_domain_bus_token bus_token)
{
struct msi_domain_info *info;
struct irq_domain *domain;
bool ret = false;
msi_lock_descs(dev);
domain = msi_get_device_domain(dev, domid);
if (domain && irq_domain_is_msi_device(domain)) {
info = domain->host_data;
ret = info->bus_token == bus_token;
}
msi_unlock_descs(dev);
return ret;
}
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *arg)
{
struct msi_domain_info *info = domain->host_data;
struct msi_domain_ops *ops = info->ops;
return ops->msi_prepare(domain, dev, nvec, arg);
}
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
int virq_base, int nvec, msi_alloc_info_t *arg)
{
struct msi_domain_info *info = domain->host_data;
struct msi_domain_ops *ops = info->ops;
struct msi_ctrl ctrl = {
.domid = MSI_DEFAULT_DOMAIN,
.first = virq_base,
.last = virq_base + nvec - 1,
};
struct msi_desc *desc;
struct xarray *xa;
int ret, virq;
msi_lock_descs(dev);
if (!msi_ctrl_valid(dev, &ctrl)) {
ret = -EINVAL;
goto unlock;
}
ret = msi_domain_add_simple_msi_descs(dev, &ctrl);
if (ret)
goto unlock;
xa = &dev->msi.data->__domains[ctrl.domid].store;
for (virq = virq_base; virq < virq_base + nvec; virq++) {
desc = xa_load(xa, virq);
desc->irq = virq;
ops->set_desc(arg, desc);
ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
if (ret)
goto fail;
irq_set_msi_desc(virq, desc);
}
msi_unlock_descs(dev);
return 0;
fail:
for (--virq; virq >= virq_base; virq--) {
msi_domain_depopulate_descs(dev, virq, 1);
irq_domain_free_irqs_common(domain, virq, 1);
}
msi_domain_free_descs(dev, &ctrl);
unlock:
msi_unlock_descs(dev);
return ret;
}
void msi_domain_depopulate_descs(struct device *dev, int virq_base, int nvec)
{
struct msi_ctrl ctrl = {
.domid = MSI_DEFAULT_DOMAIN,
.first = virq_base,
.last = virq_base + nvec - 1,
};
struct msi_desc *desc;
struct xarray *xa;
unsigned long idx;
if (!msi_ctrl_valid(dev, &ctrl))
return;
xa = &dev->msi.data->__domains[ctrl.domid].store;
xa_for_each_range(xa, idx, desc, ctrl.first, ctrl.last)
desc->irq = 0;
}
/*
* Carefully check whether the device can use reservation mode. If
* reservation mode is enabled then the early activation will assign a
* dummy vector to the device. If the PCI/MSI device does not support
* masking of the entry then this can result in spurious interrupts when
* the device driver is not absolutely careful. But even then a malfunction
* of the hardware could result in a spurious interrupt on the dummy vector
* and render the device unusable. If the entry can be masked then the core
* logic will prevent the spurious interrupt and reservation mode can be
* used. For now reservation mode is restricted to PCI/MSI.
*/
static bool msi_check_reservation_mode(struct irq_domain *domain,
struct msi_domain_info *info,
struct device *dev)
{
struct msi_desc *desc;
switch(domain->bus_token) {
case DOMAIN_BUS_PCI_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSIX:
case DOMAIN_BUS_VMD_MSI:
break;
default:
return false;
}
if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
return false;
if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
return false;
/*
* Checking the first MSI descriptor is sufficient. MSIX supports
* masking and MSI does so when the can_mask attribute is set.
*/
desc = msi_first_desc(dev, MSI_DESC_ALL);
return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}
static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
int allocated)
{
switch(domain->bus_token) {
case DOMAIN_BUS_PCI_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSIX:
case DOMAIN_BUS_VMD_MSI:
if (IS_ENABLED(CONFIG_PCI_MSI))
break;
fallthrough;
default:
return -ENOSPC;
}
/* Let a failed PCI multi MSI allocation retry */
if (desc->nvec_used > 1)
return 1;
/* If there was a successful allocation let the caller know */
return allocated ? allocated : -ENOSPC;
}
#define VIRQ_CAN_RESERVE 0x01
#define VIRQ_ACTIVATE 0x02
#define VIRQ_NOMASK_QUIRK 0x04
static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
{
struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
int ret;
if (!(vflags & VIRQ_CAN_RESERVE)) {
irqd_clr_can_reserve(irqd);
if (vflags & VIRQ_NOMASK_QUIRK)
irqd_set_msi_nomask_quirk(irqd);
/*
* If the interrupt is managed but no CPU is available to
* service it, shut it down until better times. Note that
* we only do this on the !RESERVE path as x86 (the only
* architecture using this flag) deals with this in a
* different way by using a catch-all vector.
*/
if ((vflags & VIRQ_ACTIVATE) &&
irqd_affinity_is_managed(irqd) &&
!cpumask_intersects(irq_data_get_affinity_mask(irqd),
cpu_online_mask)) {
irqd_set_managed_shutdown(irqd);
return 0;
}
}
if (!(vflags & VIRQ_ACTIVATE))
return 0;
ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
if (ret)
return ret;
/*
* If the interrupt uses reservation mode, clear the activated bit
* so request_irq() will assign the final vector.
*/
if (vflags & VIRQ_CAN_RESERVE)
irqd_clr_activated(irqd);
return 0;
}
static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain,
struct msi_ctrl *ctrl)
{
struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store;
struct msi_domain_info *info = domain->host_data;
struct msi_domain_ops *ops = info->ops;
unsigned int vflags = 0, allocated = 0;
msi_alloc_info_t arg = { };
struct msi_desc *desc;
unsigned long idx;
int i, ret, virq;
ret = msi_domain_prepare_irqs(domain, dev, ctrl->nirqs, &arg);
if (ret)
return ret;
/*
* This flag is set by the PCI layer as we need to activate
* the MSI entries before the PCI layer enables MSI in the
* card. Otherwise the card latches a random msi message.
*/
if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
vflags |= VIRQ_ACTIVATE;
/*
* Interrupt can use a reserved vector and will not occupy
* a real device vector until the interrupt is requested.
*/
if (msi_check_reservation_mode(domain, info, dev)) {
vflags |= VIRQ_CAN_RESERVE;
/*
* MSI affinity setting requires a special quirk (X86) when
* reservation mode is active.
*/
if (info->flags & MSI_FLAG_NOMASK_QUIRK)
vflags |= VIRQ_NOMASK_QUIRK;
}
xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
if (!msi_desc_match(desc, MSI_DESC_NOTASSOCIATED))
continue;
/* This should return -ECONFUSED... */
if (WARN_ON_ONCE(allocated >= ctrl->nirqs))
return -EINVAL;
if (ops->prepare_desc)
ops->prepare_desc(domain, &arg, desc);
ops->set_desc(&arg, desc);
virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
dev_to_node(dev), &arg, false,
desc->affinity);
if (virq < 0)
return msi_handle_pci_fail(domain, desc, allocated);
for (i = 0; i < desc->nvec_used; i++) {
irq_set_msi_desc_off(virq, i, desc);
irq_debugfs_copy_devname(virq + i, dev);
ret = msi_init_virq(domain, virq + i, vflags);
if (ret)
return ret;
}
if (info->flags & MSI_FLAG_DEV_SYSFS) {
ret = msi_sysfs_populate_desc(dev, desc);
if (ret)
return ret;
}
allocated++;
}
return 0;
}
static int msi_domain_alloc_simple_msi_descs(struct device *dev,
struct msi_domain_info *info,
struct msi_ctrl *ctrl)
{
if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
return 0;
return msi_domain_add_simple_msi_descs(dev, ctrl);
}
static int __msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
{
struct msi_domain_info *info;
struct msi_domain_ops *ops;
struct irq_domain *domain;
int ret;
if (!msi_ctrl_valid(dev, ctrl))
return -EINVAL;
domain = msi_get_device_domain(dev, ctrl->domid);
if (!domain)
return -ENODEV;
info = domain->host_data;
ret = msi_domain_alloc_simple_msi_descs(dev, info, ctrl);
if (ret)
return ret;
ops = info->ops;
if (ops->domain_alloc_irqs)
return ops->domain_alloc_irqs(domain, dev, ctrl->nirqs);
return __msi_domain_alloc_irqs(dev, domain, ctrl);
}
static int msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
{
int ret = __msi_domain_alloc_locked(dev, ctrl);
if (ret)
msi_domain_free_locked(dev, ctrl);
return ret;
}
/**
* msi_domain_alloc_irqs_range_locked - Allocate interrupts from a MSI interrupt domain
* @dev: Pointer to device struct of the device for which the interrupts
* are allocated
* @domid: Id of the interrupt domain to operate on
* @first: First index to allocate (inclusive)
* @last: Last index to allocate (inclusive)
*
* Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
* pair. Use this for MSI irqdomains which implement their own descriptor
* allocation/free.
*
* Return: %0 on success or an error code.
*/
int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
unsigned int first, unsigned int last)
{
struct msi_ctrl ctrl = {
.domid = domid,
.first = first,
.last = last,
.nirqs = last + 1 - first,
};
return msi_domain_alloc_locked(dev, &ctrl);
}
/**
* msi_domain_alloc_irqs_range - Allocate interrupts from a MSI interrupt domain
* @dev: Pointer to device struct of the device for which the interrupts
* are allocated
* @domid: Id of the interrupt domain to operate on
* @first: First index to allocate (inclusive)
* @last: Last index to allocate (inclusive)
*
* Return: %0 on success or an error code.
*/
int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
unsigned int first, unsigned int last)
{
int ret;
msi_lock_descs(dev);
ret = msi_domain_alloc_irqs_range_locked(dev, domid, first, last);
msi_unlock_descs(dev);
return ret;
}
/**
* msi_domain_alloc_irqs_all_locked - Allocate all interrupts from a MSI interrupt domain
*
* @dev: Pointer to device struct of the device for which the interrupts
* are allocated
* @domid: Id of the interrupt domain to operate on
* @nirqs: The number of interrupts to allocate
*
* This function scans all MSI descriptors of the MSI domain and allocates interrupts
* for all unassigned ones. This function is to be used for MSI domain usage where
* the descriptor allocation is handled at the call site, e.g. PCI/MSI[X].
*
* Return: %0 on success or an error code.
*/
int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs)
{
struct msi_ctrl ctrl = {
.domid = domid,
.first = 0,
.last = msi_domain_get_hwsize(dev, domid) - 1,
.nirqs = nirqs,
};
return msi_domain_alloc_locked(dev, &ctrl);
}
/**
* msi_domain_alloc_irq_at - Allocate an interrupt from a MSI interrupt domain at
* a given index - or at the next free index
*
* @dev: Pointer to device struct of the device for which the interrupts
* are allocated
* @domid: Id of the interrupt domain to operate on
* @index: Index for allocation. If @index == %MSI_ANY_INDEX the allocation
* uses the next free index.
* @affdesc: Optional pointer to an interrupt affinity descriptor structure
* @icookie: Optional pointer to a domain specific per instance cookie. If
* non-NULL the content of the cookie is stored in msi_desc::data.
* Must be NULL for MSI-X allocations
*
* This requires a MSI interrupt domain which lets the core code manage the
* MSI descriptors.
*
* Return: struct msi_map
*
* On success msi_map::index contains the allocated index number and
* msi_map::virq the corresponding Linux interrupt number
*
* On failure msi_map::index contains the error code and msi_map::virq
* is %0.
*/
struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
const struct irq_affinity_desc *affdesc,
union msi_instance_cookie *icookie)
{
struct msi_ctrl ctrl = { .domid = domid, .nirqs = 1, };
struct irq_domain *domain;
struct msi_map map = { };
struct msi_desc *desc;
int ret;
msi_lock_descs(dev);
domain = msi_get_device_domain(dev, domid);
if (!domain) {
map.index = -ENODEV;
goto unlock;
}
desc = msi_alloc_desc(dev, 1, affdesc);
if (!desc) {
map.index = -ENOMEM;
goto unlock;
}
if (icookie)
desc->data.icookie = *icookie;
ret = msi_insert_desc(dev, desc, domid, index);
if (ret) {
map.index = ret;
goto unlock;
}
ctrl.first = ctrl.last = desc->msi_index;
ret = __msi_domain_alloc_irqs(dev, domain, &ctrl);
if (ret) {
map.index = ret;
msi_domain_free_locked(dev, &ctrl);
} else {
map.index = desc->msi_index;
map.virq = desc->irq;
}
unlock:
msi_unlock_descs(dev);
return map;
}
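/*
 * Hedged usage sketch (not part of this file): allocating one dynamic
 * interrupt at the next free index of a hypothetical secondary domain
 * and decoding the returned msi_map as documented above.
 */
#if 0
	struct msi_map map;

	map = msi_domain_alloc_irq_at(dev, MSI_SECONDARY_DOMAIN,
				      MSI_ANY_INDEX, NULL, NULL);
	if (map.index < 0)
		return map.index;	/* negative error code */
	/* map.virq is now a usable Linux interrupt number */
#endif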
static void __msi_domain_free_irqs(struct device *dev, struct irq_domain *domain,
struct msi_ctrl *ctrl)
{
struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store;
struct msi_domain_info *info = domain->host_data;
struct irq_data *irqd;
struct msi_desc *desc;
unsigned long idx;
int i;
xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
/* Only handle MSI entries which have an interrupt associated */
if (!msi_desc_match(desc, MSI_DESC_ASSOCIATED))
continue;
/* Make sure all interrupts are deactivated */
for (i = 0; i < desc->nvec_used; i++) {
irqd = irq_domain_get_irq_data(domain, desc->irq + i);
if (irqd && irqd_is_activated(irqd))
irq_domain_deactivate_irq(irqd);
}
irq_domain_free_irqs(desc->irq, desc->nvec_used);
if (info->flags & MSI_FLAG_DEV_SYSFS)
msi_sysfs_remove_desc(dev, desc);
desc->irq = 0;
}
}
static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl)
{
struct msi_domain_info *info;
struct msi_domain_ops *ops;
struct irq_domain *domain;
if (!msi_ctrl_valid(dev, ctrl))
return;
domain = msi_get_device_domain(dev, ctrl->domid);
if (!domain)
return;
info = domain->host_data;
ops = info->ops;
if (ops->domain_free_irqs)
ops->domain_free_irqs(domain, dev);
else
__msi_domain_free_irqs(dev, domain, ctrl);
if (ops->msi_post_free)
ops->msi_post_free(domain, dev);
if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
msi_domain_free_descs(dev, ctrl);
}
/**
* msi_domain_free_irqs_range_locked - Free a range of interrupts from a MSI interrupt domain
* associated to @dev with msi_lock held
* @dev: Pointer to device struct of the device for which the interrupts
* are freed
* @domid: Id of the interrupt domain to operate on
* @first: First index to free (inclusive)
* @last: Last index to free (inclusive)
*/
void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
unsigned int first, unsigned int last)
{
struct msi_ctrl ctrl = {
.domid = domid,
.first = first,
.last = last,
};
msi_domain_free_locked(dev, &ctrl);
}
/**
* msi_domain_free_irqs_range - Free a range of interrupts from a MSI interrupt domain
* associated to @dev
* @dev: Pointer to device struct of the device for which the interrupts
* are freed
* @domid: Id of the interrupt domain to operate on
* @first: First index to free (inclusive)
* @last: Last index to free (inclusive)
*/
void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
unsigned int first, unsigned int last)
{
msi_lock_descs(dev);
msi_domain_free_irqs_range_locked(dev, domid, first, last);
msi_unlock_descs(dev);
}
/**
* msi_domain_free_irqs_all_locked - Free all interrupts from a MSI interrupt domain
* associated to a device
* @dev: Pointer to device struct of the device for which the interrupts
* are freed
* @domid: The id of the domain to operate on
*
* Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
* pair. Use this for MSI irqdomains which implement their own vector
* allocation.
*/
void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid)
{
msi_domain_free_irqs_range_locked(dev, domid, 0,
msi_domain_get_hwsize(dev, domid) - 1);
}
/**
* msi_domain_free_irqs_all - Free all interrupts from a MSI interrupt domain
* associated to a device
* @dev: Pointer to device struct of the device for which the interrupts
* are freed
* @domid: The id of the domain to operate on
*/
void msi_domain_free_irqs_all(struct device *dev, unsigned int domid)
{
msi_lock_descs(dev);
msi_domain_free_irqs_all_locked(dev, domid);
msi_unlock_descs(dev);
}
/**
* msi_get_domain_info - Get the MSI interrupt domain info for @domain
* @domain: The interrupt domain to retrieve data from
*
* Return: the pointer to the msi_domain_info stored in @domain->host_data.
*/
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
return (struct msi_domain_info *)domain->host_data;
}
/**
* msi_device_has_isolated_msi - True if the device has isolated MSI
* @dev: The device to check
*
* Isolated MSI means that HW modeled by an irq_domain on the path from the
* initiating device to the CPU will validate that the MSI message specifies an
* interrupt number that the device is authorized to trigger. This must block
* devices from triggering interrupts they are not authorized to trigger.
* Currently authorization means the MSI vector is one assigned to the device.
*
* This is interesting for securing VFIO use cases where a rogue MSI (e.g.
* created by abusing a normal PCI MemWr DMA) must not allow the VFIO
* userspace to impact anything outside its security domain, e.g. userspace
* triggering interrupts on kernel drivers, a VM triggering interrupts on
* the hypervisor, or a VM triggering interrupts on another VM.
*/
bool msi_device_has_isolated_msi(struct device *dev)
{
struct irq_domain *domain = dev_get_msi_domain(dev);
for (; domain; domain = domain->parent)
if (domain->flags & IRQ_DOMAIN_FLAG_ISOLATED_MSI)
return true;
return arch_is_isolated_msi();
}
EXPORT_SYMBOL_GPL(msi_device_has_isolated_msi);
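/*
 * Hedged usage sketch (not part of this file): roughly how a VFIO-style
 * driver could gate device assignment on MSI isolation. The
 * allow_unsafe_interrupts knob is a hypothetical opt-out.
 */
#if 0
	if (!msi_device_has_isolated_msi(dev) && !allow_unsafe_interrupts)
		return -EPERM;
#endif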
| linux-master | kernel/irq/msi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Multiplex several virtual IPIs over a single HW IPI.
*
* Copyright The Asahi Linux Contributors
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#define pr_fmt(fmt) "ipi-mux: " fmt
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/smp.h>
struct ipi_mux_cpu {
atomic_t enable;
atomic_t bits;
};
static struct ipi_mux_cpu __percpu *ipi_mux_pcpu;
static struct irq_domain *ipi_mux_domain;
static void (*ipi_mux_send)(unsigned int cpu);
static void ipi_mux_mask(struct irq_data *d)
{
struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
atomic_andnot(BIT(irqd_to_hwirq(d)), &icpu->enable);
}
static void ipi_mux_unmask(struct irq_data *d)
{
struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
u32 ibit = BIT(irqd_to_hwirq(d));
atomic_or(ibit, &icpu->enable);
/*
* The atomic_or() above must complete before the atomic_read()
* below to avoid racing ipi_mux_send_mask().
*/
smp_mb__after_atomic();
/* If a pending IPI was unmasked, raise a parent IPI immediately. */
if (atomic_read(&icpu->bits) & ibit)
ipi_mux_send(smp_processor_id());
}
static void ipi_mux_send_mask(struct irq_data *d, const struct cpumask *mask)
{
struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
u32 ibit = BIT(irqd_to_hwirq(d));
unsigned long pending;
int cpu;
for_each_cpu(cpu, mask) {
icpu = per_cpu_ptr(ipi_mux_pcpu, cpu);
/*
* This sequence is the mirror of the one in ipi_mux_unmask();
* see the comment there. Additionally, release semantics
* ensure that the vIPI flag set is ordered after any shared
* memory accesses that precede it. This therefore also pairs
* with the atomic_fetch_andnot in ipi_mux_process().
*/
pending = atomic_fetch_or_release(ibit, &icpu->bits);
/*
* The atomic_fetch_or_release() above must complete
* before the atomic_read() below to avoid racing with
* ipi_mux_unmask().
*/
smp_mb__after_atomic();
/*
* The flag writes must complete before the physical IPI is
* issued to another CPU. This is implied by the control
* dependency on the result of atomic_read() below, which is
* itself already ordered after the vIPI flag write.
*/
if (!(pending & ibit) && (atomic_read(&icpu->enable) & ibit))
ipi_mux_send(cpu);
}
}
static const struct irq_chip ipi_mux_chip = {
.name = "IPI Mux",
.irq_mask = ipi_mux_mask,
.irq_unmask = ipi_mux_unmask,
.ipi_send_mask = ipi_mux_send_mask,
};
static int ipi_mux_domain_alloc(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
int i;
for (i = 0; i < nr_irqs; i++) {
irq_set_percpu_devid(virq + i);
irq_domain_set_info(d, virq + i, i, &ipi_mux_chip, NULL,
handle_percpu_devid_irq, NULL, NULL);
}
return 0;
}
static const struct irq_domain_ops ipi_mux_domain_ops = {
.alloc = ipi_mux_domain_alloc,
.free = irq_domain_free_irqs_top,
};
/**
* ipi_mux_process - Process multiplexed virtual IPIs
*/
void ipi_mux_process(void)
{
struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
irq_hw_number_t hwirq;
unsigned long ipis;
unsigned int en;
/*
* Reading the enable mask does not need to be ordered as long as
* this function is called from the interrupt handler, because only
* the CPU itself can change its own enable mask.
*/
en = atomic_read(&icpu->enable);
/*
* Clear the IPIs we are about to handle. This pairs with the
* atomic_fetch_or_release() in ipi_mux_send_mask().
*/
ipis = atomic_fetch_andnot(en, &icpu->bits) & en;
for_each_set_bit(hwirq, &ipis, BITS_PER_TYPE(int))
generic_handle_domain_irq(ipi_mux_domain, hwirq);
}
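/*
* Example (illustrative sketch, assumed driver code): the parent irqchip
* driver is expected to call ipi_mux_process() from its hardware IPI
* handler. The handler name below is hypothetical.
*
* static irqreturn_t example_parent_ipi_handler(int irq, void *dev_id)
* {
* ipi_mux_process();
* return IRQ_HANDLED;
* }
*/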
/**
* ipi_mux_create - Create virtual IPIs multiplexed on top of a single
* parent IPI.
* @nr_ipi: number of virtual IPIs to create. This should
* be <= BITS_PER_TYPE(int)
* @mux_send: callback to trigger parent IPI for a particular CPU
*
* Returns the first virq of the newly created virtual IPIs upon success,
* or <= 0 upon failure.
*/
int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu))
{
struct fwnode_handle *fwnode;
struct irq_domain *domain;
int rc;
if (ipi_mux_domain)
return -EEXIST;
if (BITS_PER_TYPE(int) < nr_ipi || !mux_send)
return -EINVAL;
ipi_mux_pcpu = alloc_percpu(typeof(*ipi_mux_pcpu));
if (!ipi_mux_pcpu)
return -ENOMEM;
fwnode = irq_domain_alloc_named_fwnode("IPI-Mux");
if (!fwnode) {
pr_err("unable to create IPI Mux fwnode\n");
rc = -ENOMEM;
goto fail_free_cpu;
}
domain = irq_domain_create_linear(fwnode, nr_ipi,
&ipi_mux_domain_ops, NULL);
if (!domain) {
pr_err("unable to add IPI Mux domain\n");
rc = -ENOMEM;
goto fail_free_fwnode;
}
domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
irq_domain_update_bus_token(domain, DOMAIN_BUS_IPI);
rc = irq_domain_alloc_irqs(domain, nr_ipi, NUMA_NO_NODE, NULL);
if (rc <= 0) {
pr_err("unable to alloc IRQs from IPI Mux domain\n");
goto fail_free_domain;
}
ipi_mux_domain = domain;
ipi_mux_send = mux_send;
return rc;
fail_free_domain:
irq_domain_remove(domain);
fail_free_fwnode:
irq_domain_free_fwnode(fwnode);
fail_free_cpu:
free_percpu(ipi_mux_pcpu);
return rc;
}
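/*
* Example (illustrative sketch, assumed driver code): multiplex eight
* virtual IPIs over a single hardware IPI. example_hw_send_ipi() rings
* the hardware doorbell for a CPU and is hypothetical.
*
* int virq;
*
* virq = ipi_mux_create(8, example_hw_send_ipi);
* if (virq <= 0)
* return virq ? virq : -ENODEV;
*/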
| linux-master | kernel/irq/ipi-mux.c |
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "irq: " fmt
#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);
static struct irq_domain *irq_default_domain;
static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
bool realloc, const struct irq_affinity_desc *affinity);
static void irq_domain_check_hierarchy(struct irq_domain *domain);
struct irqchip_fwid {
struct fwnode_handle fwnode;
unsigned int type;
char *name;
phys_addr_t *pa;
};
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void debugfs_add_domain_dir(struct irq_domain *d);
static void debugfs_remove_domain_dir(struct irq_domain *d);
#else
static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
#endif
static const char *irqchip_fwnode_get_name(const struct fwnode_handle *fwnode)
{
struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
return fwid->name;
}
const struct fwnode_operations irqchip_fwnode_ops = {
.get_name = irqchip_fwnode_get_name,
};
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
/**
* __irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
* identifying an irq domain
* @type: Type of irqchip_fwnode. See linux/irqdomain.h
* @id: Optional user-provided id if name != NULL
* @name: Optional user-provided domain name
* @pa: Optional user-provided physical address
*
* Allocate a struct irqchip_fwid, and return a pointer to the embedded
* fwnode_handle (or NULL on failure).
*
* Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
* solely to transport name information to irqdomain creation code. The
* node is not stored. For other types the pointer is kept in the irq
* domain struct.
*/
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
const char *name,
phys_addr_t *pa)
{
struct irqchip_fwid *fwid;
char *n;
fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
switch (type) {
case IRQCHIP_FWNODE_NAMED:
n = kasprintf(GFP_KERNEL, "%s", name);
break;
case IRQCHIP_FWNODE_NAMED_ID:
n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
break;
default:
n = kasprintf(GFP_KERNEL, "irqchip@%pa", pa);
break;
}
if (!fwid || !n) {
kfree(fwid);
kfree(n);
return NULL;
}
fwid->type = type;
fwid->name = n;
fwid->pa = pa;
fwnode_init(&fwid->fwnode, &irqchip_fwnode_ops);
return &fwid->fwnode;
}
EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
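/*
* Example (illustrative sketch): allocate a named fwnode for a purely
* software-defined irqchip, as ipi-mux does earlier in this file set.
* The name "example-mux" is hypothetical.
*
* struct fwnode_handle *fwnode;
*
* fwnode = irq_domain_alloc_named_fwnode("example-mux");
* if (!fwnode)
* return -ENOMEM;
* ...
* irq_domain_free_fwnode(fwnode);
*/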
/**
* irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
*
* Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
*/
void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
{
struct irqchip_fwid *fwid;
if (!fwnode || WARN_ON(!is_fwnode_irqchip(fwnode)))
return;
fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
kfree(fwid->name);
kfree(fwid);
}
EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
unsigned int size,
irq_hw_number_t hwirq_max,
int direct_max,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irqchip_fwid *fwid;
struct irq_domain *domain;
static atomic_t unknown_domains;
if (WARN_ON((size && direct_max) ||
(!IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && direct_max) ||
(direct_max && (direct_max != hwirq_max))))
return NULL;
domain = kzalloc_node(struct_size(domain, revmap, size),
GFP_KERNEL, of_node_to_nid(to_of_node(fwnode)));
if (!domain)
return NULL;
if (is_fwnode_irqchip(fwnode)) {
fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
switch (fwid->type) {
case IRQCHIP_FWNODE_NAMED:
case IRQCHIP_FWNODE_NAMED_ID:
domain->fwnode = fwnode;
domain->name = kstrdup(fwid->name, GFP_KERNEL);
if (!domain->name) {
kfree(domain);
return NULL;
}
domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
break;
default:
domain->fwnode = fwnode;
domain->name = fwid->name;
break;
}
} else if (is_of_node(fwnode) || is_acpi_device_node(fwnode) ||
is_software_node(fwnode)) {
char *name;
/*
* fwnode paths contain '/', which debugfs is legitimately
* unhappy about. Replace them with ':', which does
* the trick and is not as offensive as '\'...
*/
name = kasprintf(GFP_KERNEL, "%pfw", fwnode);
if (!name) {
kfree(domain);
return NULL;
}
domain->name = strreplace(name, '/', ':');
domain->fwnode = fwnode;
domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
}
if (!domain->name) {
if (fwnode)
pr_err("Invalid fwnode type for irqdomain\n");
domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
atomic_inc_return(&unknown_domains));
if (!domain->name) {
kfree(domain);
return NULL;
}
domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
}
fwnode_handle_get(fwnode);
fwnode_dev_initialized(fwnode, true);
/* Fill structure */
INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
domain->ops = ops;
domain->host_data = host_data;
domain->hwirq_max = hwirq_max;
if (direct_max)
domain->flags |= IRQ_DOMAIN_FLAG_NO_MAP;
domain->revmap_size = size;
/*
* Hierarchical domains use the domain lock of the root domain
* (innermost domain).
*
* For non-hierarchical domains (as for root domains), the root
* pointer is set to the domain itself so that &domain->root->mutex
* always points to the right lock.
*/
mutex_init(&domain->mutex);
domain->root = domain;
irq_domain_check_hierarchy(domain);
return domain;
}
static void __irq_domain_publish(struct irq_domain *domain)
{
mutex_lock(&irq_domain_mutex);
debugfs_add_domain_dir(domain);
list_add(&domain->link, &irq_domain_list);
mutex_unlock(&irq_domain_mutex);
pr_debug("Added domain %s\n", domain->name);
}
/**
* __irq_domain_add() - Allocate a new irq_domain data structure
* @fwnode: firmware node for the interrupt controller
* @size: Size of linear map; 0 for radix mapping only
* @hwirq_max: Maximum number of interrupts supported by controller
* @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
* direct mapping
* @ops: domain callbacks
* @host_data: Controller private data pointer
*
* Allocates and initializes an irq_domain structure.
* Returns pointer to IRQ domain, or NULL on failure.
*/
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain;
domain = __irq_domain_create(fwnode, size, hwirq_max, direct_max,
ops, host_data);
if (domain)
__irq_domain_publish(domain);
return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
/**
* irq_domain_remove() - Remove an irq domain.
* @domain: domain to remove
*
* This routine is used to remove an irq domain. The caller must ensure
* that all mappings within the domain have been disposed of before
* calling this function.
*/
void irq_domain_remove(struct irq_domain *domain)
{
mutex_lock(&irq_domain_mutex);
debugfs_remove_domain_dir(domain);
WARN_ON(!radix_tree_empty(&domain->revmap_tree));
list_del(&domain->link);
/*
* If the going away domain is the default one, reset it.
*/
if (unlikely(irq_default_domain == domain))
irq_set_default_host(NULL);
mutex_unlock(&irq_domain_mutex);
pr_debug("Removed domain %s\n", domain->name);
fwnode_dev_initialized(domain->fwnode, false);
fwnode_handle_put(domain->fwnode);
if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
kfree(domain->name);
kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
void irq_domain_update_bus_token(struct irq_domain *domain,
enum irq_domain_bus_token bus_token)
{
char *name;
if (domain->bus_token == bus_token)
return;
mutex_lock(&irq_domain_mutex);
domain->bus_token = bus_token;
name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
if (!name) {
mutex_unlock(&irq_domain_mutex);
return;
}
debugfs_remove_domain_dir(domain);
if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
kfree(domain->name);
else
domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
domain->name = name;
debugfs_add_domain_dir(domain);
mutex_unlock(&irq_domain_mutex);
}
EXPORT_SYMBOL_GPL(irq_domain_update_bus_token);
/**
* irq_domain_create_simple() - Register an irq_domain and optionally map a range of irqs
* @fwnode: firmware node for the interrupt controller
* @size: total number of irqs in mapping
* @first_irq: first number of irq block assigned to the domain,
* pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
* pre-map all of the irqs in the domain to virqs starting at first_irq.
* @ops: domain callbacks
* @host_data: Controller private data pointer
*
* Allocates an irq_domain, and optionally if first_irq is positive then also
* allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
*
* This is intended to implement the expected behaviour for most
* interrupt controllers. If device tree is used, then first_irq will be 0 and
* irqs get mapped dynamically on the fly. However, if the controller requires
* static virq assignments (non-DT boot) then it will set that up correctly.
*/
struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
unsigned int size,
unsigned int first_irq,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain;
domain = __irq_domain_add(fwnode, size, size, 0, ops, host_data);
if (!domain)
return NULL;
if (first_irq > 0) {
if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
/* attempt to allocate irq_descs */
int rc = irq_alloc_descs(first_irq, first_irq, size,
of_node_to_nid(to_of_node(fwnode)));
if (rc < 0)
pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
first_irq);
}
irq_domain_associate_many(domain, first_irq, 0, size);
}
return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_simple);
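/*
* Example (illustrative sketch, assumed driver code): a DT-probed
* controller with 32 hwirqs that maps irqs on the fly, so first_irq is 0.
* example_domain_ops is hypothetical and would provide .map/.xlate.
*
* domain = irq_domain_create_simple(dev_fwnode(dev), 32, 0,
* &example_domain_ops, priv);
* if (!domain)
* return -ENOMEM;
*/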
/**
* irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: total number of irqs in legacy mapping
* @first_irq: first number of irq block assigned to the domain
* @first_hwirq: first hwirq number to use for the translation. Should normally
* be '0', but a positive integer can be used if the effective
* hwirq numbering does not begin at zero.
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*
* Note: the map() callback will be called before this function returns
* for all legacy interrupts except 0 (which is always the invalid irq for
* a legacy controller).
*/
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
unsigned int size,
unsigned int first_irq,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data)
{
return irq_domain_create_legacy(of_node_to_fwnode(of_node), size,
first_irq, first_hwirq, ops, host_data);
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
unsigned int size,
unsigned int first_irq,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain;
domain = __irq_domain_add(fwnode, first_hwirq + size, first_hwirq + size, 0, ops, host_data);
if (domain)
irq_domain_associate_many(domain, first_irq, first_hwirq, size);
return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_legacy);
/**
* irq_find_matching_fwspec() - Locates a domain for a given fwspec
* @fwspec: FW specifier for an interrupt
* @bus_token: domain-specific data
*/
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token)
{
struct irq_domain *h, *found = NULL;
struct fwnode_handle *fwnode = fwspec->fwnode;
int rc;
/* We might want to match the legacy controller last since
* it might potentially be set to match all interrupts in
* the absence of a device node. This hasn't been a problem
* so far, though...
*
* bus_token == DOMAIN_BUS_ANY matches any domain, any other
* values must generate an exact match for the domain to be
* selected.
*/
mutex_lock(&irq_domain_mutex);
list_for_each_entry(h, &irq_domain_list, link) {
if (h->ops->select && fwspec->param_count)
rc = h->ops->select(h, fwspec, bus_token);
else if (h->ops->match)
rc = h->ops->match(h, to_of_node(fwnode), bus_token);
else
rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
((bus_token == DOMAIN_BUS_ANY) ||
(h->bus_token == bus_token)));
if (rc) {
found = h;
break;
}
}
mutex_unlock(&irq_domain_mutex);
return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
/**
* irq_set_default_host() - Set a "default" irq domain
* @domain: default domain pointer
*
* For convenience, it's possible to set a "default" domain that will be used
* whenever NULL is passed to irq_create_mapping(). It makes life easier for
* platforms that want to manipulate a few hard coded interrupt numbers that
* aren't properly represented in the device-tree.
*/
void irq_set_default_host(struct irq_domain *domain)
{
pr_debug("Default domain set to @0x%p\n", domain);
irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
/**
* irq_get_default_host() - Retrieve the "default" irq domain
*
* Returns: the default domain, if any.
*
* Modern code should never use this. This should only be used on
* systems that cannot implement a firmware->fwnode mapping (which
* both DT and ACPI provide).
*/
struct irq_domain *irq_get_default_host(void)
{
return irq_default_domain;
}
EXPORT_SYMBOL_GPL(irq_get_default_host);
static bool irq_domain_is_nomap(struct irq_domain *domain)
{
return IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) &&
(domain->flags & IRQ_DOMAIN_FLAG_NO_MAP);
}
static void irq_domain_clear_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
lockdep_assert_held(&domain->root->mutex);
if (irq_domain_is_nomap(domain))
return;
if (hwirq < domain->revmap_size)
rcu_assign_pointer(domain->revmap[hwirq], NULL);
else
radix_tree_delete(&domain->revmap_tree, hwirq);
}
static void irq_domain_set_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq,
struct irq_data *irq_data)
{
/*
* This also makes sure that all domains point to the same root when
* called from irq_domain_insert_irq() for each domain in a hierarchy.
*/
lockdep_assert_held(&domain->root->mutex);
if (irq_domain_is_nomap(domain))
return;
if (hwirq < domain->revmap_size)
rcu_assign_pointer(domain->revmap[hwirq], irq_data);
else
radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
}
static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
struct irq_data *irq_data = irq_get_irq_data(irq);
irq_hw_number_t hwirq;
if (WARN(!irq_data || irq_data->domain != domain,
"virq%i doesn't exist; cannot disassociate\n", irq))
return;
hwirq = irq_data->hwirq;
mutex_lock(&domain->root->mutex);
irq_set_status_flags(irq, IRQ_NOREQUEST);
/* remove chip and handler */
irq_set_chip_and_handler(irq, NULL, NULL);
/* Make sure it's completed */
synchronize_irq(irq);
/* Tell the PIC about it */
if (domain->ops->unmap)
domain->ops->unmap(domain, irq);
smp_mb();
irq_data->domain = NULL;
irq_data->hwirq = 0;
domain->mapcount--;
/* Clear reverse map for this hwirq */
irq_domain_clear_mapping(domain, hwirq);
mutex_unlock(&domain->root->mutex);
}
static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
int ret;
if (WARN(hwirq >= domain->hwirq_max,
"error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
return -EINVAL;
if (WARN(!irq_data, "error: virq%i is not allocated", virq))
return -EINVAL;
if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
return -EINVAL;
irq_data->hwirq = hwirq;
irq_data->domain = domain;
if (domain->ops->map) {
ret = domain->ops->map(domain, virq, hwirq);
if (ret != 0) {
/*
* If map() returns -EPERM, this interrupt is protected
* by the firmware or some other service and shall not
* be mapped. Don't bother telling the user about it.
*/
if (ret != -EPERM) {
pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
domain->name, hwirq, virq, ret);
}
irq_data->domain = NULL;
irq_data->hwirq = 0;
return ret;
}
}
domain->mapcount++;
irq_domain_set_mapping(domain, hwirq, irq_data);
irq_clear_status_flags(virq, IRQ_NOREQUEST);
return 0;
}
int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
{
int ret;
mutex_lock(&domain->root->mutex);
ret = irq_domain_associate_locked(domain, virq, hwirq);
mutex_unlock(&domain->root->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);
void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
irq_hw_number_t hwirq_base, int count)
{
struct device_node *of_node;
int i;
of_node = irq_domain_get_of_node(domain);
pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
for (i = 0; i < count; i++)
irq_domain_associate(domain, irq_base + i, hwirq_base + i);
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);
#ifdef CONFIG_IRQ_DOMAIN_NOMAP
/**
* irq_create_direct_mapping() - Allocate an irq for direct mapping
* @domain: domain to allocate the irq for or NULL for default domain
*
* This routine is used for irq controllers which can choose the hardware
* interrupt numbers they generate. In such a case it's simplest to use
* the linux irq as the hardware interrupt number. It still uses the linear
* or radix tree to store the mapping, but the irq controller can optimize
* the revmap path by using the hwirq directly.
*/
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
struct device_node *of_node;
unsigned int virq;
if (domain == NULL)
domain = irq_default_domain;
of_node = irq_domain_get_of_node(domain);
virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
if (!virq) {
pr_debug("create_direct virq allocation failed\n");
return 0;
}
if (virq >= domain->hwirq_max) {
pr_err("ERROR: no free irqs available below %lu maximum\n",
domain->hwirq_max);
irq_free_desc(virq);
return 0;
}
pr_debug("create_direct obtained virq %d\n", virq);
if (irq_domain_associate(domain, virq, virq)) {
irq_free_desc(virq);
return 0;
}
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
#endif
static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain,
irq_hw_number_t hwirq,
const struct irq_affinity_desc *affinity)
{
struct device_node *of_node = irq_domain_get_of_node(domain);
int virq;
pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
/* Allocate a virtual interrupt number */
virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
affinity);
if (virq <= 0) {
pr_debug("-> virq allocation failed\n");
return 0;
}
if (irq_domain_associate_locked(domain, virq, hwirq)) {
irq_free_desc(virq);
return 0;
}
pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
hwirq, of_node_full_name(of_node), virq);
return virq;
}
/**
* irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
* @domain: domain owning this hardware interrupt or NULL for default domain
* @hwirq: hardware irq number in that domain space
* @affinity: irq affinity
*
* Only one mapping per hardware interrupt is permitted. Returns a linux
* irq number.
* If the sense/trigger is to be specified, irq_set_irq_type() should be
* called on the returned irq number.
*/
unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
irq_hw_number_t hwirq,
const struct irq_affinity_desc *affinity)
{
int virq;
/* Look for default domain if necessary */
if (domain == NULL)
domain = irq_default_domain;
if (domain == NULL) {
WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
return 0;
}
mutex_lock(&domain->root->mutex);
/* Check if mapping already exists */
virq = irq_find_mapping(domain, hwirq);
if (virq) {
pr_debug("existing mapping on virq %d\n", virq);
goto out;
}
virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity);
out:
mutex_unlock(&domain->root->mutex);
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping_affinity);
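/*
* Example (illustrative sketch): map hwirq 5 and then set the trigger
* type on the returned number, as described above. The hwirq value is
* hypothetical.
*
* unsigned int virq = irq_create_mapping(domain, 5);
*
* if (!virq)
* return -ENOMEM;
* irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);
*/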
static int irq_domain_translate(struct irq_domain *d,
struct irq_fwspec *fwspec,
irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
if (d->ops->translate)
return d->ops->translate(d, fwspec, hwirq, type);
#endif
if (d->ops->xlate)
return d->ops->xlate(d, to_of_node(fwspec->fwnode),
fwspec->param, fwspec->param_count,
hwirq, type);
/* If domain has no translation, then we assume interrupt line */
*hwirq = fwspec->param[0];
return 0;
}
void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
unsigned int count, struct irq_fwspec *fwspec)
{
int i;
fwspec->fwnode = of_node_to_fwnode(np);
fwspec->param_count = count;
for (i = 0; i < count; i++)
fwspec->param[i] = args[i];
}
EXPORT_SYMBOL_GPL(of_phandle_args_to_fwspec);
unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
struct irq_domain *domain;
struct irq_data *irq_data;
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
int virq;
if (fwspec->fwnode) {
domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
if (!domain)
domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
} else {
domain = irq_default_domain;
}
if (!domain) {
pr_warn("no irq domain found for %s !\n",
of_node_full_name(to_of_node(fwspec->fwnode)));
return 0;
}
if (irq_domain_translate(domain, fwspec, &hwirq, &type))
return 0;
/*
* WARN if the irqchip returns a type with bits
* outside the sense mask set and clear these bits.
*/
if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
type &= IRQ_TYPE_SENSE_MASK;
mutex_lock(&domain->root->mutex);
/*
* If we've already configured this interrupt,
* don't do it again, or hell will break loose.
*/
virq = irq_find_mapping(domain, hwirq);
if (virq) {
/*
* If the trigger type is not specified or matches the
* current trigger type then we are done so return the
* interrupt number.
*/
if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
goto out;
/*
* If the trigger type has not been set yet, then set
* it now and return the interrupt number.
*/
if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
irq_data = irq_get_irq_data(virq);
if (!irq_data) {
virq = 0;
goto out;
}
irqd_set_trigger_type(irq_data, type);
goto out;
}
pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
virq = 0;
goto out;
}
if (irq_domain_is_hierarchy(domain)) {
virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
fwspec, false, NULL);
if (virq <= 0) {
virq = 0;
goto out;
}
} else {
/* Create mapping */
virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL);
if (!virq)
goto out;
}
irq_data = irq_get_irq_data(virq);
if (WARN_ON(!irq_data)) {
virq = 0;
goto out;
}
/* Store trigger type */
irqd_set_trigger_type(irq_data, type);
out:
mutex_unlock(&domain->root->mutex);
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
struct irq_fwspec fwspec;
of_phandle_args_to_fwspec(irq_data->np, irq_data->args,
irq_data->args_count, &fwspec);
return irq_create_fwspec_mapping(&fwspec);
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
/**
* irq_dispose_mapping() - Unmap an interrupt
* @virq: linux irq number of the interrupt to unmap
*/
void irq_dispose_mapping(unsigned int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
struct irq_domain *domain;
if (!virq || !irq_data)
return;
domain = irq_data->domain;
if (WARN_ON(domain == NULL))
return;
if (irq_domain_is_hierarchy(domain)) {
irq_domain_free_irqs(virq, 1);
} else {
irq_domain_disassociate(domain, virq);
irq_free_desc(virq);
}
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
/**
* __irq_resolve_mapping() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
* @irq: optional pointer to return the Linux irq if required
*
* Returns the interrupt descriptor.
*/
struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq,
unsigned int *irq)
{
struct irq_desc *desc = NULL;
struct irq_data *data;
/* Look for default domain if necessary */
if (domain == NULL)
domain = irq_default_domain;
if (domain == NULL)
return desc;
if (irq_domain_is_nomap(domain)) {
if (hwirq < domain->hwirq_max) {
data = irq_domain_get_irq_data(domain, hwirq);
if (data && data->hwirq == hwirq)
desc = irq_data_to_desc(data);
if (irq && desc)
*irq = hwirq;
}
return desc;
}
rcu_read_lock();
/* Check if the hwirq is in the linear revmap. */
if (hwirq < domain->revmap_size)
data = rcu_dereference(domain->revmap[hwirq]);
else
data = radix_tree_lookup(&domain->revmap_tree, hwirq);
if (likely(data)) {
desc = irq_data_to_desc(data);
if (irq)
*irq = data->irq;
}
rcu_read_unlock();
return desc;
}
EXPORT_SYMBOL_GPL(__irq_resolve_mapping);
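/*
* Example (illustrative sketch): most drivers consume the reverse map via
* the irq_find_mapping() wrapper, e.g. from a chained handler walking a
* pending register. Names prefixed example_ are hypothetical.
*
* unsigned long status = example_read_pending(priv);
* int bit;
*
* for_each_set_bit(bit, &status, 32)
* generic_handle_irq(irq_find_mapping(priv->domain, bit));
*/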
/**
* irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
*
* Device Tree IRQ specifier translation function which works with one cell
* bindings where the cell value maps directly to the hwirq number.
*/
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{
if (WARN_ON(intsize < 1))
return -EINVAL;
*out_hwirq = intspec[0];
*out_type = IRQ_TYPE_NONE;
return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
/**
* irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
*
* Device Tree IRQ specifier translation function which works with two cell
* bindings where the cell values map directly to the hwirq number
* and linux irq flags.
*/
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
struct irq_fwspec fwspec;
of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec);
return irq_domain_translate_twocell(d, &fwspec, out_hwirq, out_type);
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
/**
* irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
*
* Device Tree IRQ specifier translation function which works with either one
* or two cell bindings where the cell values map directly to the hwirq number
* and linux irq flags.
*
* Note: don't use this function unless your interrupt controller explicitly
* supports both one and two cell bindings. For the majority of controllers
* the _onecell() or _twocell() variants above should be used.
*/
int irq_domain_xlate_onetwocell(struct irq_domain *d,
struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{
if (WARN_ON(intsize < 1))
return -EINVAL;
*out_hwirq = intspec[0];
if (intsize > 1)
*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
else
*out_type = IRQ_TYPE_NONE;
return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
const struct irq_domain_ops irq_domain_simple_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
/**
* irq_domain_translate_onecell() - Generic translate for direct one cell
* bindings
*/
int irq_domain_translate_onecell(struct irq_domain *d,
struct irq_fwspec *fwspec,
unsigned long *out_hwirq,
unsigned int *out_type)
{
if (WARN_ON(fwspec->param_count < 1))
return -EINVAL;
*out_hwirq = fwspec->param[0];
*out_type = IRQ_TYPE_NONE;
return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_translate_onecell);
/**
* irq_domain_translate_twocell() - Generic translate for direct two cell
* bindings
*
* Device Tree IRQ specifier translation function which works with two cell
* bindings where the cell values map directly to the hwirq number
* and linux irq flags.
*/
int irq_domain_translate_twocell(struct irq_domain *d,
struct irq_fwspec *fwspec,
unsigned long *out_hwirq,
unsigned int *out_type)
{
if (WARN_ON(fwspec->param_count < 2))
return -EINVAL;
*out_hwirq = fwspec->param[0];
*out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_translate_twocell);
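/*
* Example (illustrative): with a two cell binding, a device tree consumer
* encodes the hwirq and the trigger type as
*
* interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
*
* which irq_domain_translate_twocell() decodes to *out_hwirq = 17 and
* *out_type = IRQ_TYPE_LEVEL_HIGH.
*/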
int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
int node, const struct irq_affinity_desc *affinity)
{
unsigned int hint;
if (virq >= 0) {
virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
affinity);
} else {
hint = hwirq % nr_irqs;
if (hint == 0)
hint++;
virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
affinity);
if (virq <= 0 && hint > 1) {
virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
affinity);
}
}
return virq;
}
/**
* irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
* @irq_data: The pointer to irq_data
*/
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
irq_data->hwirq = 0;
irq_data->chip = &no_irq_chip;
irq_data->chip_data = NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
* irq_domain_create_hierarchy - Add a irqdomain into the hierarchy
* @parent: Parent irq domain to associate with the new domain
* @flags: Irq domain flags associated to the domain
* @size: Size of the domain. See below
* @fwnode: Optional fwnode of the interrupt controller
* @ops: Pointer to the interrupt domain callbacks
* @host_data: Controller private data pointer
*
* If @size is 0 a tree domain is created, otherwise a linear domain.
*
* If successful the parent is associated to the new domain and the
* domain flags are set.
* Returns pointer to IRQ domain, or NULL on failure.
*/
struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
unsigned int flags,
unsigned int size,
struct fwnode_handle *fwnode,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain;
if (size)
domain = __irq_domain_create(fwnode, size, size, 0, ops, host_data);
else
domain = __irq_domain_create(fwnode, 0, ~0, 0, ops, host_data);
if (domain) {
if (parent)
domain->root = parent->root;
domain->parent = parent;
domain->flags |= flags;
__irq_domain_publish(domain);
}
return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
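/*
* Example (illustrative sketch, assumed driver code): stack a new linear
* domain of 64 interrupts on top of an existing parent, as MSI-style
* implementations do. example_hier_ops is hypothetical.
*
* domain = irq_domain_create_hierarchy(parent, 0, 64, fwnode,
* &example_hier_ops, priv);
* if (!domain)
* return -ENOMEM;
*/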
static void irq_domain_insert_irq(int virq)
{
struct irq_data *data;
for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
struct irq_domain *domain = data->domain;
domain->mapcount++;
irq_domain_set_mapping(domain, data->hwirq, data);
}
irq_clear_status_flags(virq, IRQ_NOREQUEST);
}
static void irq_domain_remove_irq(int virq)
{
struct irq_data *data;
irq_set_status_flags(virq, IRQ_NOREQUEST);
irq_set_chip_and_handler(virq, NULL, NULL);
synchronize_irq(virq);
smp_mb();
for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
struct irq_domain *domain = data->domain;
irq_hw_number_t hwirq = data->hwirq;
domain->mapcount--;
irq_domain_clear_mapping(domain, hwirq);
}
}
static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
struct irq_data *child)
{
struct irq_data *irq_data;
irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
irq_data_get_node(child));
if (irq_data) {
child->parent_data = irq_data;
irq_data->irq = child->irq;
irq_data->common = child->common;
irq_data->domain = domain;
}
return irq_data;
}
static void __irq_domain_free_hierarchy(struct irq_data *irq_data)
{
struct irq_data *tmp;
while (irq_data) {
tmp = irq_data;
irq_data = irq_data->parent_data;
kfree(tmp);
}
}
static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *irq_data, *tmp;
int i;
for (i = 0; i < nr_irqs; i++) {
irq_data = irq_get_irq_data(virq + i);
tmp = irq_data->parent_data;
irq_data->parent_data = NULL;
irq_data->domain = NULL;
__irq_domain_free_hierarchy(tmp);
}
}
/**
* irq_domain_disconnect_hierarchy - Mark the first unused level of a hierarchy
* @domain: IRQ domain from which the hierarchy is to be disconnected
* @virq: IRQ number where the hierarchy is to be trimmed
*
* Marks the @virq level belonging to @domain as disconnected.
* Returns -EINVAL if @virq doesn't have a valid irq_data pointing
* to @domain.
*
* Its only use is to be able to trim levels of hierarchy that do not
* have any real meaning for this interrupt, and that the driver marks
* as such from its .alloc() callback.
*/
int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
unsigned int virq)
{
struct irq_data *irqd;
irqd = irq_domain_get_irq_data(domain, virq);
if (!irqd)
return -EINVAL;
irqd->chip = ERR_PTR(-ENOTCONN);
return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_disconnect_hierarchy);
static int irq_domain_trim_hierarchy(unsigned int virq)
{
struct irq_data *tail, *irqd, *irq_data;
irq_data = irq_get_irq_data(virq);
tail = NULL;
/* The first entry must have a valid irqchip */
if (!irq_data->chip || IS_ERR(irq_data->chip))
return -EINVAL;
/*
* Validate that the irq_data chain is sane in the presence of
* a hierarchy trimming marker.
*/
for (irqd = irq_data->parent_data; irqd; irq_data = irqd, irqd = irqd->parent_data) {
/* Can't have a valid irqchip after a trim marker */
if (irqd->chip && tail)
return -EINVAL;
/* Can't have an empty irqchip before a trim marker */
if (!irqd->chip && !tail)
return -EINVAL;
if (IS_ERR(irqd->chip)) {
/* Only -ENOTCONN is a valid trim marker */
if (PTR_ERR(irqd->chip) != -ENOTCONN)
return -EINVAL;
tail = irq_data;
}
}
/* No trim marker, nothing to do */
if (!tail)
return 0;
pr_info("IRQ%d: trimming hierarchy from %s\n",
virq, tail->parent_data->domain->name);
/* Sever the inner part of the hierarchy... */
irqd = tail;
tail = tail->parent_data;
irqd->parent_data = NULL;
__irq_domain_free_hierarchy(tail);
return 0;
}
static int irq_domain_alloc_irq_data(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *irq_data;
struct irq_domain *parent;
int i;
/* The outermost irq_data is embedded in struct irq_desc */
for (i = 0; i < nr_irqs; i++) {
irq_data = irq_get_irq_data(virq + i);
irq_data->domain = domain;
for (parent = domain->parent; parent; parent = parent->parent) {
irq_data = irq_domain_insert_irq_data(parent, irq_data);
if (!irq_data) {
irq_domain_free_irq_data(virq, i + 1);
return -ENOMEM;
}
}
}
return 0;
}
/**
* irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
* @domain: domain to match
* @virq: IRQ number to get irq_data
*/
struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
unsigned int virq)
{
struct irq_data *irq_data;
for (irq_data = irq_get_irq_data(virq); irq_data;
irq_data = irq_data->parent_data)
if (irq_data->domain == domain)
return irq_data;
return NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
/**
* irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
* @domain: Interrupt domain to match
* @virq: IRQ number
* @hwirq: The hwirq number
* @chip: The associated interrupt chip
* @chip_data: The associated chip data
*/
int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq,
const struct irq_chip *chip,
void *chip_data)
{
struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
if (!irq_data)
return -ENOENT;
irq_data->hwirq = hwirq;
irq_data->chip = (struct irq_chip *)(chip ? chip : &no_irq_chip);
irq_data->chip_data = chip_data;
return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
/**
* irq_domain_set_info - Set the complete data for a @virq in @domain
* @domain: Interrupt domain to match
* @virq: IRQ number
* @hwirq: The hardware interrupt number
* @chip: The associated interrupt chip
* @chip_data: The associated interrupt chip data
* @handler: The interrupt flow handler
* @handler_data: The interrupt flow handler data
* @handler_name: The interrupt handler name
*/
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq, const struct irq_chip *chip,
void *chip_data, irq_flow_handler_t handler,
void *handler_data, const char *handler_name)
{
irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
__irq_set_handler(virq, handler, 0, handler_name);
irq_set_handler_data(virq, handler_data);
}
EXPORT_SYMBOL(irq_domain_set_info);
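/*
* Example (illustrative sketch): a minimal .alloc() callback for a
* hierarchical domain, in the spirit of ipi_mux_domain_alloc() earlier in
* this file set. It assumes @arg is an irq_fwspec whose first parameter
* is the hwirq; names prefixed example_ are hypothetical.
*
* static int example_domain_alloc(struct irq_domain *d, unsigned int virq,
* unsigned int nr_irqs, void *arg)
* {
* struct irq_fwspec *fwspec = arg;
* irq_hw_number_t hwirq = fwspec->param[0];
* unsigned int i;
* int ret;
*
* ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
* if (ret)
* return ret;
* for (i = 0; i < nr_irqs; i++)
* irq_domain_set_info(d, virq + i, hwirq + i, &example_chip,
* d->host_data, handle_level_irq, NULL, NULL);
* return 0;
* }
*/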
/**
* irq_domain_free_irqs_common - Clear irq_data and free the parent
* @domain: Interrupt domain to match
* @virq: IRQ number to start with
* @nr_irqs: The number of irqs to free
*/
void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct irq_data *irq_data;
int i;
for (i = 0; i < nr_irqs; i++) {
irq_data = irq_domain_get_irq_data(domain, virq + i);
if (irq_data)
irq_domain_reset_irq_data(irq_data);
}
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
/**
* irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
* @domain: Interrupt domain to match
* @virq: IRQ number to start with
* @nr_irqs: The number of irqs to free
*/
void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
int i;
for (i = 0; i < nr_irqs; i++) {
irq_set_handler_data(virq + i, NULL);
irq_set_handler(virq + i, NULL);
}
irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs)
{
unsigned int i;
if (!domain->ops->free)
return;
for (i = 0; i < nr_irqs; i++) {
if (irq_domain_get_irq_data(domain, irq_base + i))
domain->ops->free(domain, irq_base + i, 1);
}
}
int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs, void *arg)
{
if (!domain->ops->alloc) {
pr_debug("domain->ops->alloc() is NULL\n");
return -ENOSYS;
}
return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
}
static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
bool realloc, const struct irq_affinity_desc *affinity)
{
int i, ret, virq;
if (realloc && irq_base >= 0) {
virq = irq_base;
} else {
virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
affinity);
if (virq < 0) {
pr_debug("cannot allocate IRQ(base %d, count %d)\n",
irq_base, nr_irqs);
return virq;
}
}
if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
pr_debug("cannot allocate memory for IRQ%d\n", virq);
ret = -ENOMEM;
goto out_free_desc;
}
ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
if (ret < 0)
goto out_free_irq_data;
for (i = 0; i < nr_irqs; i++) {
ret = irq_domain_trim_hierarchy(virq + i);
if (ret)
goto out_free_irq_data;
}
for (i = 0; i < nr_irqs; i++)
irq_domain_insert_irq(virq + i);
return virq;
out_free_irq_data:
irq_domain_free_irq_data(virq, nr_irqs);
out_free_desc:
irq_free_descs(virq, nr_irqs);
return ret;
}
/**
* __irq_domain_alloc_irqs - Allocate IRQs from domain
* @domain: domain to allocate from
* @irq_base: allocate specified IRQ number if irq_base >= 0
* @nr_irqs: number of IRQs to allocate
* @node: NUMA node id for memory allocation
* @arg: domain specific argument
* @realloc: IRQ descriptors have already been allocated if true
* @affinity: Optional irq affinity mask for multiqueue devices
*
* Allocate IRQ numbers and initialize all data structures to support
* hierarchy IRQ domains.
* Parameter @realloc is mainly to support legacy IRQs.
* Returns error code or allocated IRQ number
*
* The whole process to setup an IRQ has been split into two steps.
* The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
* descriptor and required hardware resources. The second step,
* irq_domain_activate_irq(), is to program the hardware with preallocated
* resources. In this way, it's easier to rollback when failing to
* allocate resources.
*/
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
bool realloc, const struct irq_affinity_desc *affinity)
{
int ret;
if (domain == NULL) {
domain = irq_default_domain;
if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
return -EINVAL;
}
mutex_lock(&domain->root->mutex);
ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg,
realloc, affinity);
mutex_unlock(&domain->root->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(__irq_domain_alloc_irqs);
/* The irq_data was moved, fix the revmap to refer to the new location */
static void irq_domain_fix_revmap(struct irq_data *d)
{
void __rcu **slot;
lockdep_assert_held(&d->domain->root->mutex);
if (irq_domain_is_nomap(d->domain))
return;
/* Fix up the revmap. */
if (d->hwirq < d->domain->revmap_size) {
/* Not using radix tree */
rcu_assign_pointer(d->domain->revmap[d->hwirq], d);
} else {
slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
if (slot)
radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
}
}
/**
* irq_domain_push_irq() - Push a domain into the top of a hierarchy.
* @domain: Domain to push.
* @virq: Irq to push the domain into.
* @arg: Passed to the irq_domain_ops alloc() function.
*
* For an already existing irqdomain hierarchy, as might be obtained
* via a call to pci_enable_msix(), add an additional domain to the
* head of the processing chain. Must be called before request_irq()
* has been called.
*/
int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
struct irq_data *parent_irq_data;
struct irq_desc *desc;
int rv = 0;
/*
* Check that no action has been set, which indicates the virq
* is in a state where this function doesn't have to deal with
* races between interrupt handling and maintaining the
* hierarchy. This will catch gross misuse. Attempting to
* make the check race free would require holding locks across
* calls to struct irq_domain_ops->alloc(), which could lead
* to deadlock, so we just do a simple check before starting.
*/
desc = irq_to_desc(virq);
if (!desc)
return -EINVAL;
if (WARN_ON(desc->action))
return -EBUSY;
if (domain == NULL)
return -EINVAL;
if (WARN_ON(!irq_domain_is_hierarchy(domain)))
return -EINVAL;
if (!irq_data)
return -EINVAL;
if (domain->parent != irq_data->domain)
return -EINVAL;
parent_irq_data = kzalloc_node(sizeof(*parent_irq_data), GFP_KERNEL,
irq_data_get_node(irq_data));
if (!parent_irq_data)
return -ENOMEM;
mutex_lock(&domain->root->mutex);
/* Copy the original irq_data. */
*parent_irq_data = *irq_data;
/*
* Overwrite the irq_data, which is embedded in struct irq_desc, with
* values for this domain.
*/
irq_data->parent_data = parent_irq_data;
irq_data->domain = domain;
irq_data->mask = 0;
irq_data->hwirq = 0;
irq_data->chip = NULL;
irq_data->chip_data = NULL;
/* May (probably does) set hwirq, chip, etc. */
rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
if (rv) {
/* Restore the original irq_data. */
*irq_data = *parent_irq_data;
kfree(parent_irq_data);
goto error;
}
irq_domain_fix_revmap(parent_irq_data);
irq_domain_set_mapping(domain, irq_data->hwirq, irq_data);
error:
mutex_unlock(&domain->root->mutex);
return rv;
}
EXPORT_SYMBOL_GPL(irq_domain_push_irq);
/**
* irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
* @domain: Domain to remove.
* @virq: Irq to remove the domain from.
*
* Undo the effects of a call to irq_domain_push_irq(). Must be
* called either before request_irq() or after free_irq().
*/
int irq_domain_pop_irq(struct irq_domain *domain, int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
struct irq_data *parent_irq_data;
struct irq_data *tmp_irq_data;
struct irq_desc *desc;
/*
* Check that no action is set, which indicates the virq is in
* a state where this function doesn't have to deal with races
* between interrupt handling and maintaining the hierarchy.
* This will catch gross misuse. Attempting to make the check
* race free would require holding locks across calls to
* struct irq_domain_ops->free(), which could lead to
* deadlock, so we just do a simple check before starting.
*/
desc = irq_to_desc(virq);
if (!desc)
return -EINVAL;
if (WARN_ON(desc->action))
return -EBUSY;
if (domain == NULL)
return -EINVAL;
if (!irq_data)
return -EINVAL;
tmp_irq_data = irq_domain_get_irq_data(domain, virq);
/* We can only "pop" if this domain is at the top of the list */
if (WARN_ON(irq_data != tmp_irq_data))
return -EINVAL;
if (WARN_ON(irq_data->domain != domain))
return -EINVAL;
parent_irq_data = irq_data->parent_data;
if (WARN_ON(!parent_irq_data))
return -EINVAL;
mutex_lock(&domain->root->mutex);
irq_data->parent_data = NULL;
irq_domain_clear_mapping(domain, irq_data->hwirq);
irq_domain_free_irqs_hierarchy(domain, virq, 1);
/* Restore the original irq_data. */
*irq_data = *parent_irq_data;
irq_domain_fix_revmap(irq_data);
mutex_unlock(&domain->root->mutex);
kfree(parent_irq_data);
return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
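/*
* Example (illustrative sketch): the push/pop pair brackets the lifetime
* of an auxiliary domain on an already allocated interrupt; both calls
* must happen while no handler is requested. example_aux_domain and
* example_arg are hypothetical.
*
* ret = irq_domain_push_irq(example_aux_domain, virq, &example_arg);
* if (ret)
* return ret;
* ...
* irq_domain_pop_irq(example_aux_domain, virq);
*/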
/**
* irq_domain_free_irqs - Free IRQ number and associated data structures
* @virq: base IRQ number
* @nr_irqs: number of IRQs to free
*/
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *data = irq_get_irq_data(virq);
struct irq_domain *domain;
int i;
if (WARN(!data || !data->domain || !data->domain->ops->free,
"NULL pointer, cannot free irq\n"))
return;
domain = data->domain;
mutex_lock(&domain->root->mutex);
for (i = 0; i < nr_irqs; i++)
irq_domain_remove_irq(virq + i);
irq_domain_free_irqs_hierarchy(domain, virq, nr_irqs);
mutex_unlock(&domain->root->mutex);
irq_domain_free_irq_data(virq, nr_irqs);
irq_free_descs(virq, nr_irqs);
}
/**
* irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
* @domain: Domain below which interrupts must be allocated
* @irq_base: Base IRQ number
* @nr_irqs: Number of IRQs to allocate
* @arg: Allocation data (arch/domain specific)
*/
int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
unsigned int irq_base, unsigned int nr_irqs,
void *arg)
{
if (!domain->parent)
return -ENOSYS;
return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
nr_irqs, arg);
}
EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
/**
* irq_domain_free_irqs_parent - Free interrupts from parent domain
* @domain: Domain below which interrupts must be freed
* @irq_base: Base IRQ number
* @nr_irqs: Number of IRQs to free
*/
void irq_domain_free_irqs_parent(struct irq_domain *domain,
unsigned int irq_base, unsigned int nr_irqs)
{
if (!domain->parent)
return;
irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
}
EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
{
if (irq_data && irq_data->domain) {
struct irq_domain *domain = irq_data->domain;
if (domain->ops->deactivate)
domain->ops->deactivate(domain, irq_data);
if (irq_data->parent_data)
__irq_domain_deactivate_irq(irq_data->parent_data);
}
}
static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
{
int ret = 0;
if (irqd && irqd->domain) {
struct irq_domain *domain = irqd->domain;
if (irqd->parent_data)
ret = __irq_domain_activate_irq(irqd->parent_data,
reserve);
if (!ret && domain->ops->activate) {
ret = domain->ops->activate(domain, irqd, reserve);
/* Rollback in case of error */
if (ret && irqd->parent_data)
__irq_domain_deactivate_irq(irqd->parent_data);
}
}
return ret;
}
/**
* irq_domain_activate_irq - Call domain_ops->activate recursively to activate
* interrupt
* @irq_data: Outermost irq_data associated with interrupt
* @reserve: If set only reserve an interrupt vector instead of assigning one
*
* This is the second step to call domain_ops->activate to program interrupt
* controllers, so the interrupt could actually get delivered.
*/
int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
{
int ret = 0;
if (!irqd_is_activated(irq_data))
ret = __irq_domain_activate_irq(irq_data, reserve);
if (!ret)
irqd_set_activated(irq_data);
return ret;
}
/**
* irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
* deactivate interrupt
* @irq_data: outermost irq_data associated with interrupt
*
* It calls domain_ops->deactivate to program interrupt controllers to disable
* interrupt delivery.
*/
void irq_domain_deactivate_irq(struct irq_data *irq_data)
{
if (irqd_is_activated(irq_data)) {
__irq_domain_deactivate_irq(irq_data);
irqd_clr_activated(irq_data);
}
}
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
/* Hierarchy irq_domains must implement callback alloc() */
if (domain->ops->alloc)
domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
/**
* irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
* @domain: domain to match
* @virq: IRQ number to get irq_data
*/
struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
unsigned int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
/**
* irq_domain_set_info - Set the complete data for a @virq in @domain
* @domain: Interrupt domain to match
* @virq: IRQ number
* @hwirq: The hardware interrupt number
* @chip: The associated interrupt chip
* @chip_data: The associated interrupt chip data
* @handler: The interrupt flow handler
* @handler_data: The interrupt flow handler data
* @handler_name: The interrupt handler name
*/
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq, const struct irq_chip *chip,
void *chip_data, irq_flow_handler_t handler,
void *handler_data, const char *handler_name)
{
irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
irq_set_chip_data(virq, chip_data);
irq_set_handler_data(virq, handler_data);
}
static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
bool realloc, const struct irq_affinity_desc *affinity)
{
return -EINVAL;
}
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include "internals.h"
static struct dentry *domain_dir;
static void
irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
{
seq_printf(m, "%*sname: %s\n", ind, "", d->name);
seq_printf(m, "%*ssize: %u\n", ind + 1, "", d->revmap_size);
seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags);
if (d->ops && d->ops->debug_show)
d->ops->debug_show(m, d, NULL, ind + 1);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
if (!d->parent)
return;
seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
irq_domain_debug_show_one(m, d->parent, ind + 4);
#endif
}
static int irq_domain_debug_show(struct seq_file *m, void *p)
{
struct irq_domain *d = m->private;
/* Default domain? Might be NULL */
if (!d) {
if (!irq_default_domain)
return 0;
d = irq_default_domain;
}
irq_domain_debug_show_one(m, d, 0);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
static void debugfs_add_domain_dir(struct irq_domain *d)
{
if (!d->name || !domain_dir)
return;
debugfs_create_file(d->name, 0444, domain_dir, d,
&irq_domain_debug_fops);
}
static void debugfs_remove_domain_dir(struct irq_domain *d)
{
debugfs_lookup_and_remove(d->name, domain_dir);
}
void __init irq_domain_debugfs_init(struct dentry *root)
{
struct irq_domain *d;
domain_dir = debugfs_create_dir("domains", root);
debugfs_create_file("default", 0444, domain_dir, NULL,
&irq_domain_debug_fops);
mutex_lock(&irq_domain_mutex);
list_for_each_entry(d, &irq_domain_list, link)
debugfs_add_domain_dir(d);
mutex_unlock(&irq_domain_mutex);
}
#endif
| linux-master | kernel/irq/irqdomain.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic cpu hotunplug interrupt migration code copied from the
* arch/arm implementation
*
* Copyright (C) Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/sched/isolation.h>
#include "internals.h"
/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
unsigned int cpu = smp_processor_id();
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
/*
* The cpumask_empty() check is a workaround for interrupt chips,
* which do not implement effective affinity, but the architecture has
* enabled the config switch. Use the general affinity mask instead.
*/
if (cpumask_empty(m))
m = irq_data_get_affinity_mask(d);
/*
* Sanity check. If the mask is not empty when excluding the outgoing
* CPU then it must contain at least one online CPU. The outgoing CPU
* has been removed from the online mask already.
*/
if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
/*
* If this happens then there was a missed IRQ fixup at some
* point. Warn about it and enforce fixup.
*/
pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
cpumask_pr_args(m), d->irq, cpu);
return true;
}
#endif
return cpumask_test_cpu(cpu, m);
}
static bool migrate_one_irq(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(d);
bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
const struct cpumask *affinity;
bool brokeaff = false;
int err;
/*
* IRQ chip might be already torn down, but the irq descriptor is
* still in the radix tree. Also if the chip has no affinity setter,
* nothing can be done here.
*/
if (!chip || !chip->irq_set_affinity) {
pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
return false;
}
/*
* No move required, if:
* - Interrupt is per cpu
* - Interrupt is not started
* - Affinity mask does not include this CPU.
*
* Note: Do not check desc->action as this might be a chained
* interrupt.
*/
if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
/*
* If an irq move is pending, abort it if the dying CPU is
* the sole target.
*/
irq_fixup_move_pending(desc, false);
return false;
}
/*
* Complete any pending irq move cleanup. If this
* interrupt was moved in hard irq context, then the vectors need
* to be cleaned up. It can't wait until this interrupt actually
* happens and this CPU was involved.
*/
irq_force_complete_move(desc);
/*
* If there is a setaffinity pending, then try to reuse the pending
* mask, so the last change of the affinity does not get lost. If
* there is no move pending or the pending mask does not contain
* any online CPU, use the current affinity mask.
*/
if (irq_fixup_move_pending(desc, true))
affinity = irq_desc_get_pending_mask(desc);
else
affinity = irq_data_get_affinity_mask(d);
/* Mask the chip for interrupts which cannot move in process context */
if (maskchip && chip->irq_mask)
chip->irq_mask(d);
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
/*
* If the interrupt is managed, then shut it down and leave
* the affinity untouched.
*/
if (irqd_affinity_is_managed(d)) {
irqd_set_managed_shutdown(d);
irq_shutdown_and_deactivate(desc);
return false;
}
affinity = cpu_online_mask;
brokeaff = true;
}
/*
* Do not set the force argument of irq_do_set_affinity() as this
* disables the masking of offline CPUs from the supplied affinity
* mask and therefore might keep/reassign the irq to the outgoing
* CPU.
*/
err = irq_do_set_affinity(d, affinity, false);
if (err) {
pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
d->irq, err);
brokeaff = false;
}
if (maskchip && chip->irq_unmask)
chip->irq_unmask(d);
return brokeaff;
}
/**
* irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
*
* The current CPU has been marked offline. Migrate IRQs off this CPU.
* If the affinity settings do not allow other CPUs, force them onto any
* available CPU.
*
* Note: we must iterate over all IRQs, whether they have an attached
* action structure or not, as we need to get chained interrupts too.
*/
void irq_migrate_all_off_this_cpu(void)
{
struct irq_desc *desc;
unsigned int irq;
for_each_active_irq(irq) {
bool affinity_broken;
desc = irq_to_desc(irq);
raw_spin_lock(&desc->lock);
affinity_broken = migrate_one_irq(desc);
raw_spin_unlock(&desc->lock);
if (affinity_broken) {
pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
irq, smp_processor_id());
}
}
}
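/*
 * Usage sketch (illustrative, not part of this file): an architecture's
 * CPU-offline path calls this with interrupts disabled, after removing
 * the CPU from the online mask. Modeled on the arm/arm64 hotplug code:
 *
 * int __cpu_disable(void)
 * {
 * ...
 * set_cpu_online(smp_processor_id(), false);
 * irq_migrate_all_off_this_cpu();
 * return 0;
 * }
 */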
static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
const struct cpumask *hk_mask;
if (!housekeeping_enabled(HK_TYPE_MANAGED_IRQ))
return false;
hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
return false;
return cpumask_test_cpu(cpu, hk_mask);
}
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
const struct cpumask *affinity = irq_data_get_affinity_mask(data);
if (!irqd_affinity_is_managed(data) || !desc->action ||
!irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
return;
if (irqd_is_managed_and_shutdown(data)) {
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
return;
}
/*
* If the interrupt can only be directed to a single target
* CPU then it is already assigned to a CPU in the affinity
* mask. No point in trying to move it around unless the
* isolation mechanism requests to move it to an upcoming
* housekeeping CPU.
*/
if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
irq_set_affinity_locked(data, affinity, false);
}
/**
* irq_affinity_online_cpu - Restore affinity for managed interrupts
* @cpu: Upcoming CPU for which interrupts should be restored
*/
int irq_affinity_online_cpu(unsigned int cpu)
{
struct irq_desc *desc;
unsigned int irq;
irq_lock_sparse();
for_each_active_irq(irq) {
desc = irq_to_desc(irq);
raw_spin_lock_irq(&desc->lock);
irq_restore_affinity_of_irq(desc, cpu);
raw_spin_unlock_irq(&desc->lock);
}
irq_unlock_sparse();
return 0;
}
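/*
 * Note: this callback is registered by the CPU hotplug core (see
 * kernel/cpu.c) as the startup handler of the CPUHP_AP_IRQ_AFFINITY_ONLINE
 * state ("irq/affinity:online"), so it runs on the upcoming CPU during
 * its onlining sequence.
 */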
| linux-master | kernel/irq/cpuhotplug.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
* Copyright (C) 2005-2006, Thomas Gleixner, Russell King
*
* This file contains the core interrupt handling code, for irq-chip based
* architectures. Detailed information is available in
* Documentation/core-api/genericirq.rst
*/
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>
#include <trace/events/irq.h>
#include "internals.h"
static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
return IRQ_NONE;
}
/*
* Chained handlers should never call an action on their IRQ. This
* default action emits a warning if that ever happens.
*/
struct irqaction chained_action = {
.handler = bad_chained_irq,
};
/**
* irq_set_chip - set the irq chip for an irq
* @irq: irq number
* @chip: pointer to irq chip description structure
*/
int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
irq_put_desc_unlock(desc, flags);
/*
* For !CONFIG_SPARSE_IRQ make the irq show up in
* allocated_irqs.
*/
irq_mark_irq(irq);
return 0;
}
EXPORT_SYMBOL(irq_set_chip);
/**
* irq_set_irq_type - set the irq trigger type for an irq
* @irq: irq number
* @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
*/
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
int ret = 0;
if (!desc)
return -EINVAL;
ret = __irq_set_trigger(desc, type);
irq_put_desc_busunlock(desc, flags);
return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
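/*
 * Example (illustrative): configure a line for rising-edge triggering
 * before it is requested:
 *
 * irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 */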
/**
* irq_set_handler_data - set irq handler data for an irq
* @irq: Interrupt number
* @data: Pointer to interrupt specific data
*
* Set the hardware irq controller data for an irq
*/
int irq_set_handler_data(unsigned int irq, void *data)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
desc->irq_common_data.handler_data = data;
irq_put_desc_unlock(desc, flags);
return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
/**
* irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
* @irq_base: Interrupt number base
* @irq_offset: Interrupt number offset
* @entry: Pointer to MSI descriptor data
*
* Set the MSI descriptor entry for an irq at offset
*/
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
struct msi_desc *entry)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
desc->irq_common_data.msi_desc = entry;
if (entry && !irq_offset)
entry->irq = irq_base;
irq_put_desc_unlock(desc, flags);
return 0;
}
/**
* irq_set_msi_desc - set MSI descriptor data for an irq
* @irq: Interrupt number
* @entry: Pointer to MSI descriptor data
*
* Set the MSI descriptor entry for an irq
*/
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
return irq_set_msi_desc_off(irq, 0, entry);
}
/**
* irq_set_chip_data - set irq chip data for an irq
* @irq: Interrupt number
* @data: Pointer to chip specific data
*
* Set the hardware irq chip data for an irq
*/
int irq_set_chip_data(unsigned int irq, void *data)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
desc->irq_data.chip_data = data;
irq_put_desc_unlock(desc, flags);
return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
struct irq_data *irq_get_irq_data(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);
static void irq_state_clr_disabled(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}
static void irq_state_clr_masked(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}
static void irq_state_clr_started(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}
static void irq_state_set_started(struct irq_desc *desc)
{
irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}
enum {
IRQ_STARTUP_NORMAL,
IRQ_STARTUP_MANAGED,
IRQ_STARTUP_ABORT,
};
#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
bool force)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
if (!irqd_affinity_is_managed(d))
return IRQ_STARTUP_NORMAL;
irqd_clr_managed_shutdown(d);
if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
/*
* Catch code which fiddles with enable_irq() on a managed
* and potentially shutdown IRQ. Chained interrupt
* installment or irq auto probing should not happen on
* managed irqs either.
*/
if (WARN_ON_ONCE(force))
return IRQ_STARTUP_ABORT;
/*
* The interrupt was requested, but there is no online CPU
* in its affinity mask. Put it into managed shutdown
* state and let the cpu hotplug mechanism start it up once
* a CPU in the mask becomes available.
*/
return IRQ_STARTUP_ABORT;
}
/*
* Managed interrupts have reserved resources, so this should not
* happen.
*/
if (WARN_ON(irq_domain_activate_irq(d, false)))
return IRQ_STARTUP_ABORT;
return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
bool force)
{
return IRQ_STARTUP_NORMAL;
}
#endif
static int __irq_startup(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
int ret = 0;
/* Warn if this interrupt is not activated but try nevertheless */
WARN_ON_ONCE(!irqd_is_activated(d));
if (d->chip->irq_startup) {
ret = d->chip->irq_startup(d);
irq_state_clr_disabled(desc);
irq_state_clr_masked(desc);
} else {
irq_enable(desc);
}
irq_state_set_started(desc);
return ret;
}
int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
const struct cpumask *aff = irq_data_get_affinity_mask(d);
int ret = 0;
desc->depth = 0;
if (irqd_is_started(d)) {
irq_enable(desc);
} else {
switch (__irq_startup_managed(desc, aff, force)) {
case IRQ_STARTUP_NORMAL:
if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
irq_setup_affinity(desc);
ret = __irq_startup(desc);
if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
irq_setup_affinity(desc);
break;
case IRQ_STARTUP_MANAGED:
irq_do_set_affinity(d, aff, false);
ret = __irq_startup(desc);
break;
case IRQ_STARTUP_ABORT:
irqd_set_managed_shutdown(d);
return 0;
}
}
if (resend)
check_irq_resend(desc, false);
return ret;
}
int irq_activate(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
if (!irqd_affinity_is_managed(d))
return irq_domain_activate_irq(d, false);
return 0;
}
int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
if (WARN_ON(irq_activate(desc)))
return 0;
return irq_startup(desc, resend, IRQ_START_FORCE);
}
static void __irq_disable(struct irq_desc *desc, bool mask);
void irq_shutdown(struct irq_desc *desc)
{
if (irqd_is_started(&desc->irq_data)) {
clear_irq_resend(desc);
desc->depth = 1;
if (desc->irq_data.chip->irq_shutdown) {
desc->irq_data.chip->irq_shutdown(&desc->irq_data);
irq_state_set_disabled(desc);
irq_state_set_masked(desc);
} else {
__irq_disable(desc, true);
}
irq_state_clr_started(desc);
}
}
void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
irq_shutdown(desc);
/*
* This must be called even if the interrupt was never started up,
* because the activation can happen before the interrupt is
* available for request/startup. It has its own state tracking, so
* it's safe to call it unconditionally.
*/
irq_domain_deactivate_irq(&desc->irq_data);
}
void irq_enable(struct irq_desc *desc)
{
if (!irqd_irq_disabled(&desc->irq_data)) {
unmask_irq(desc);
} else {
irq_state_clr_disabled(desc);
if (desc->irq_data.chip->irq_enable) {
desc->irq_data.chip->irq_enable(&desc->irq_data);
irq_state_clr_masked(desc);
} else {
unmask_irq(desc);
}
}
}
static void __irq_disable(struct irq_desc *desc, bool mask)
{
if (irqd_irq_disabled(&desc->irq_data)) {
if (mask)
mask_irq(desc);
} else {
irq_state_set_disabled(desc);
if (desc->irq_data.chip->irq_disable) {
desc->irq_data.chip->irq_disable(&desc->irq_data);
irq_state_set_masked(desc);
} else if (mask) {
mask_irq(desc);
}
}
}
/**
* irq_disable - Mark interrupt disabled
* @desc: irq descriptor which should be disabled
*
* If the chip does not implement the irq_disable callback, we
* use a lazy disable approach. That means we mark the interrupt
* disabled, but leave the hardware unmasked. That's an
* optimization because we avoid the hardware access for the
* common case where no interrupt happens after we marked it
* disabled. If an interrupt happens, then the interrupt flow
* handler masks the line at the hardware level and marks it
* pending.
*
* If the interrupt chip does not implement the irq_disable callback,
* a driver can disable the lazy approach for a particular irq line by
* calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
* be used for devices which cannot disable the interrupt at the
* device level under certain circumstances and have to use
* disable_irq[_nosync] instead.
*/
void irq_disable(struct irq_desc *desc)
{
__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
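/*
 * Example (illustrative): a driver which cannot tolerate the lazy
 * approach for its line opts out as described above:
 *
 * irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *
 * After that, irq_disable() masks the line at the hardware level
 * immediately instead of merely marking it disabled.
 */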
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
if (desc->irq_data.chip->irq_enable)
desc->irq_data.chip->irq_enable(&desc->irq_data);
else
desc->irq_data.chip->irq_unmask(&desc->irq_data);
cpumask_set_cpu(cpu, desc->percpu_enabled);
}
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
if (desc->irq_data.chip->irq_disable)
desc->irq_data.chip->irq_disable(&desc->irq_data);
else
desc->irq_data.chip->irq_mask(&desc->irq_data);
cpumask_clear_cpu(cpu, desc->percpu_enabled);
}
static inline void mask_ack_irq(struct irq_desc *desc)
{
if (desc->irq_data.chip->irq_mask_ack) {
desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
irq_state_set_masked(desc);
} else {
mask_irq(desc);
if (desc->irq_data.chip->irq_ack)
desc->irq_data.chip->irq_ack(&desc->irq_data);
}
}
void mask_irq(struct irq_desc *desc)
{
if (irqd_irq_masked(&desc->irq_data))
return;
if (desc->irq_data.chip->irq_mask) {
desc->irq_data.chip->irq_mask(&desc->irq_data);
irq_state_set_masked(desc);
}
}
void unmask_irq(struct irq_desc *desc)
{
if (!irqd_irq_masked(&desc->irq_data))
return;
if (desc->irq_data.chip->irq_unmask) {
desc->irq_data.chip->irq_unmask(&desc->irq_data);
irq_state_clr_masked(desc);
}
}
void unmask_threaded_irq(struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
if (chip->flags & IRQCHIP_EOI_THREADED)
chip->irq_eoi(&desc->irq_data);
unmask_irq(desc);
}
/*
* handle_nested_irq - Handle a nested irq from an irq thread
* @irq: the interrupt number
*
* Handle interrupts which are nested into a threaded interrupt
* handler. The handler function is called inside the calling
* thread's context.
*/
void handle_nested_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
irqreturn_t action_ret;
might_sleep();
raw_spin_lock_irq(&desc->lock);
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
action = desc->action;
if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
raw_spin_unlock_irq(&desc->lock);
return;
}
kstat_incr_irqs_this_cpu(desc);
atomic_inc(&desc->threads_active);
raw_spin_unlock_irq(&desc->lock);
action_ret = IRQ_NONE;
for_each_action_of_desc(desc, action)
action_ret |= action->thread_fn(action->irq, action->dev_id);
if (!irq_settings_no_debug(desc))
note_interrupt(desc, action_ret);
wake_threads_waitq(desc);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
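/*
 * Usage sketch (all names hypothetical): a threaded handler for a slow-bus
 * device, e.g. an I2C GPIO expander, demultiplexes its sub-interrupts by
 * calling handle_nested_irq() from its own thread context:
 *
 * static irqreturn_t demo_expander_thread(int irq, void *data)
 * {
 * struct demo_expander *e = data;
 * unsigned long pending = demo_read_pending(e);
 * int bit;
 *
 * for_each_set_bit(bit, &pending, e->nr_irqs)
 * handle_nested_irq(irq_find_mapping(e->domain, bit));
 * return IRQ_HANDLED;
 * }
 */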
static bool irq_check_poll(struct irq_desc *desc)
{
if (!(desc->istate & IRQS_POLL_INPROGRESS))
return false;
return irq_wait_for_poll(desc);
}
static bool irq_may_run(struct irq_desc *desc)
{
unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
/*
* If the interrupt is not in progress and is not an armed
* wakeup interrupt, proceed.
*/
if (!irqd_has_set(&desc->irq_data, mask))
return true;
/*
* If the interrupt is an armed wakeup source, mark it pending
* and suspended, disable it and notify the pm core about the
* event.
*/
if (irq_pm_check_wakeup(desc))
return false;
/*
* Handle a potential concurrent poll on a different core.
*/
return irq_check_poll(desc);
}
/**
* handle_simple_irq - Simple and software-decoded IRQs.
* @desc: the interrupt description structure for this irq
*
* Simple interrupts are either sent from a demultiplexing interrupt
* handler or come from hardware, where no interrupt hardware control
* is necessary.
*
* Note: The caller is expected to handle the ack, clear, mask and
* unmask issues if necessary.
*/
void handle_simple_irq(struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
if (!irq_may_run(desc))
goto out_unlock;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
goto out_unlock;
}
kstat_incr_irqs_this_cpu(desc);
handle_irq_event(desc);
out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
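/*
 * Usage sketch (illustrative): a demultiplexer whose child interrupts
 * need no hardware control can install this flow handler together with
 * the generic dummy chip:
 *
 * irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
 */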
/**
* handle_untracked_irq - Simple and software-decoded IRQs.
* @desc: the interrupt description structure for this irq
*
* Untracked interrupts are sent from a demultiplexing interrupt
* handler when the demultiplexer does not know which device in its
* multiplexed irq domain generated the interrupt. IRQs handled
* through here are not subjected to stats tracking, randomness, or
* spurious interrupt detection.
*
* Note: Like handle_simple_irq, the caller is expected to handle
* the ack, clear, mask and unmask issues if necessary.
*/
void handle_untracked_irq(struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
if (!irq_may_run(desc))
goto out_unlock;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
goto out_unlock;
}
desc->istate &= ~IRQS_PENDING;
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock(&desc->lock);
__handle_irq_event_percpu(desc);
raw_spin_lock(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);
/*
* Called unconditionally from handle_level_irq() and only for oneshot
* interrupts from handle_fasteoi_irq()
*/
static void cond_unmask_irq(struct irq_desc *desc)
{
/*
* We need to unmask in the following cases:
* - Standard level irq (IRQF_ONESHOT is not set)
* - Oneshot irq which did not wake the thread (caused by a
* spurious interrupt or a primary handler handling it
* completely).
*/
if (!irqd_irq_disabled(&desc->irq_data) &&
irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
unmask_irq(desc);
}
/**
* handle_level_irq - Level type irq handler
* @desc: the interrupt description structure for this irq
*
* Level type interrupts are active as long as the hardware line has
* the active level. This may require masking the interrupt and
* unmasking it after the associated handler has acknowledged the
* device, so that the interrupt line is back to inactive.
*/
void handle_level_irq(struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
mask_ack_irq(desc);
if (!irq_may_run(desc))
goto out_unlock;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
/*
* If it's disabled or no action is available,
* keep it masked and get out of here
*/
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
goto out_unlock;
}
kstat_incr_irqs_this_cpu(desc);
handle_irq_event(desc);
cond_unmask_irq(desc);
out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
if (!(desc->istate & IRQS_ONESHOT)) {
chip->irq_eoi(&desc->irq_data);
return;
}
/*
* We need to unmask in the following cases:
* - Oneshot irq which did not wake the thread (caused by a
* spurious interrupt or a primary handler handling it
* completely).
*/
if (!irqd_irq_disabled(&desc->irq_data) &&
irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
chip->irq_eoi(&desc->irq_data);
unmask_irq(desc);
} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
chip->irq_eoi(&desc->irq_data);
}
}
/**
* handle_fasteoi_irq - irq handler for transparent controllers
* @desc: the interrupt description structure for this irq
*
* Only a single callback will be issued to the chip: an ->eoi()
* call when the interrupt has been serviced. This enables support
* for modern forms of interrupt handlers, which handle the flow
* details in hardware, transparently.
*/
void handle_fasteoi_irq(struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
raw_spin_lock(&desc->lock);
/*
* When an affinity change races with IRQ handling, the next interrupt
* can arrive on the new CPU before the original CPU has completed
* handling the previous one - it may need to be resent.
*/
if (!irq_may_run(desc)) {
if (irqd_needs_resend_when_in_progress(&desc->irq_data))
desc->istate |= IRQS_PENDING;
goto out;
}
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
/*
* If it's disabled or no action is available,
* then mask it and get out of here:
*/
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
mask_irq(desc);
goto out;
}
kstat_incr_irqs_this_cpu(desc);
if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
handle_irq_event(desc);
cond_unmask_eoi_irq(desc, chip);
/*
* When the race described above happens this will resend the interrupt.
*/
if (unlikely(desc->istate & IRQS_PENDING))
check_irq_resend(desc, false);
raw_spin_unlock(&desc->lock);
return;
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
/**
* handle_fasteoi_nmi - irq handler for NMI interrupt lines
* @desc: the interrupt description structure for this irq
*
* A simple NMI-safe handler, considering the restrictions
* from request_nmi.
*
* Only a single callback will be issued to the chip: an ->eoi()
* call when the interrupt has been serviced. This enables support
* for modern forms of interrupt handlers, which handle the flow
* details in hardware, transparently.
*/
void handle_fasteoi_nmi(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irqaction *action = desc->action;
unsigned int irq = irq_desc_get_irq(desc);
irqreturn_t res;
__kstat_incr_irqs_this_cpu(desc);
trace_irq_handler_entry(irq, action);
/*
* NMIs cannot be shared, there is only one action.
*/
res = action->handler(irq, action->dev_id);
trace_irq_handler_exit(irq, action, res);
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
/**
* handle_edge_irq - edge type IRQ handler
* @desc: the interrupt description structure for this irq
*
* Interrupt occurs on the falling and/or rising edge of a hardware
* signal. The occurrence is latched into the irq controller hardware
* and must be acked in order to be reenabled. After the ack another
* interrupt can happen on the same source even before the first one
* is handled by the associated event handler. If this happens it
* might be necessary to disable (mask) the interrupt depending on the
* controller hardware. This requires re-enabling the interrupt inside
* the loop which handles the interrupts which have arrived while
* the handler was running. If all pending interrupts are handled, the
* loop is left.
*/
void handle_edge_irq(struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
if (!irq_may_run(desc)) {
desc->istate |= IRQS_PENDING;
mask_ack_irq(desc);
goto out_unlock;
}
/*
* If it's disabled or no action is available then mask it and get
* out of here.
*/
if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
desc->istate |= IRQS_PENDING;
mask_ack_irq(desc);
goto out_unlock;
}
kstat_incr_irqs_this_cpu(desc);
/* Start handling the irq */
desc->irq_data.chip->irq_ack(&desc->irq_data);
do {
if (unlikely(!desc->action)) {
mask_irq(desc);
goto out_unlock;
}
/*
* When another irq arrived while we were handling
* one, we could have masked the irq.
* Re-enable it, if it was not disabled in the meantime.
*/
if (unlikely(desc->istate & IRQS_PENDING)) {
if (!irqd_irq_disabled(&desc->irq_data) &&
irqd_irq_masked(&desc->irq_data))
unmask_irq(desc);
}
handle_irq_event(desc);
} while ((desc->istate & IRQS_PENDING) &&
!irqd_irq_disabled(&desc->irq_data));
out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
* handle_edge_eoi_irq - edge eoi type IRQ handler
* @desc: the interrupt description structure for this irq
*
* Similar to handle_edge_irq above, but using eoi and without the
* mask/unmask logic.
*/
void handle_edge_eoi_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
raw_spin_lock(&desc->lock);
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
if (!irq_may_run(desc)) {
desc->istate |= IRQS_PENDING;
goto out_eoi;
}
/*
* If it's disabled or no action is available then mask it and get
* out of here.
*/
if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
desc->istate |= IRQS_PENDING;
goto out_eoi;
}
kstat_incr_irqs_this_cpu(desc);
do {
if (unlikely(!desc->action))
goto out_eoi;
handle_irq_event(desc);
} while ((desc->istate & IRQS_PENDING) &&
!irqd_irq_disabled(&desc->irq_data));
out_eoi:
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
#endif
/**
* handle_percpu_irq - Per CPU local irq handler
* @desc: the interrupt description structure for this irq
*
* Per CPU interrupts on SMP machines without locking requirements
*/
void handle_percpu_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
/*
* PER CPU interrupts are not serialized. Do not touch
* desc->tot_count.
*/
__kstat_incr_irqs_this_cpu(desc);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
handle_irq_event_percpu(desc);
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
}
/**
* handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
* @desc: the interrupt description structure for this irq
*
* Per CPU interrupts on SMP machines without locking requirements. Same as
* handle_percpu_irq() above but with the following extras:
*
* action->percpu_dev_id is a pointer to percpu variables which
* contain the real device id for the cpu on which this handler is
* called
*/
void handle_percpu_devid_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irqaction *action = desc->action;
unsigned int irq = irq_desc_get_irq(desc);
irqreturn_t res;
/*
* PER CPU interrupts are not serialized. Do not touch
* desc->tot_count.
*/
__kstat_incr_irqs_this_cpu(desc);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
if (likely(action)) {
trace_irq_handler_entry(irq, action);
res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
trace_irq_handler_exit(irq, action, res);
} else {
unsigned int cpu = smp_processor_id();
bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
if (enabled)
irq_percpu_disable(desc, cpu);
pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
enabled ? " and unmasked" : "", irq, cpu);
}
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
}
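/*
 * Usage sketch (all names hypothetical): per-CPU device interrupts such
 * as a per-CPU timer are typically wired up as:
 *
 * irq_set_percpu_devid(irq);
 * irq_set_chip_and_handler(irq, &demo_chip, handle_percpu_devid_irq);
 * err = request_percpu_irq(irq, demo_timer_handler, "demo_timer",
 * &demo_percpu_dev);
 *
 * where &demo_percpu_dev is a percpu variable; the handler receives the
 * current CPU's instance of it as its dev_id cookie.
 */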
/**
* handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
* dev ids
* @desc: the interrupt description structure for this irq
*
* Similar to handle_fasteoi_nmi, but handling the dev_id cookie
* as a percpu pointer.
*/
void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irqaction *action = desc->action;
unsigned int irq = irq_desc_get_irq(desc);
irqreturn_t res;
__kstat_incr_irqs_this_cpu(desc);
trace_irq_handler_entry(irq, action);
res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
trace_irq_handler_exit(irq, action, res);
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
}
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
int is_chained, const char *name)
{
if (!handle) {
handle = handle_bad_irq;
} else {
struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/*
* With hierarchical domains we might run into a
* situation where the outermost chip is not yet set
* up, but the inner chips are there. Instead of
* bailing we install the handler, but obviously we
* cannot enable/startup the interrupt at this point.
*/
while (irq_data) {
if (irq_data->chip != &no_irq_chip)
break;
/*
* Bail out if the outer chip is not set up
* and the interrupt is supposed to be started
* right away.
*/
if (WARN_ON(is_chained))
return;
/* Try the parent */
irq_data = irq_data->parent_data;
}
#endif
if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
return;
}
/* Uninstall? */
if (handle == handle_bad_irq) {
if (desc->irq_data.chip != &no_irq_chip)
mask_ack_irq(desc);
irq_state_set_disabled(desc);
if (is_chained) {
desc->action = NULL;
WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
}
desc->depth = 1;
}
desc->handle_irq = handle;
desc->name = name;
if (handle != handle_bad_irq && is_chained) {
unsigned int type = irqd_get_trigger_type(&desc->irq_data);
/*
* We're about to start this interrupt immediately,
* hence the need to set the trigger configuration.
* But the .set_type callback may have overridden the
* flow handler, ignoring that we're dealing with a
* chained interrupt. Reset it immediately because we
* do know better.
*/
if (type != IRQ_TYPE_NONE) {
__irq_set_trigger(desc, type);
desc->handle_irq = handle;
}
irq_settings_set_noprobe(desc);
irq_settings_set_norequest(desc);
irq_settings_set_nothread(desc);
desc->action = &chained_action;
WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
irq_activate_and_startup(desc, IRQ_RESEND);
}
}
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
if (!desc)
return;
__irq_do_set_handler(desc, handle, is_chained, name);
irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
void *data)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
if (!desc)
return;
desc->irq_common_data.handler_data = data;
__irq_do_set_handler(desc, handle, 1, NULL);
irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
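/*
 * Usage sketch (all names hypothetical): a GPIO demultiplexer installs a
 * chained flow handler on its parent line. The handler runs in the
 * parent's flow context, so it is not a regular irqaction:
 *
 * static void demo_gpio_demux(struct irq_desc *desc)
 * {
 * struct demo_gpio *g = irq_desc_get_handler_data(desc);
 * struct irq_chip *chip = irq_desc_get_chip(desc);
 * unsigned long pending;
 * int bit;
 *
 * chained_irq_enter(chip, desc);
 * pending = readl(g->base + DEMO_PENDING);
 * for_each_set_bit(bit, &pending, g->ngpio)
 * generic_handle_domain_irq(g->domain, bit);
 * chained_irq_exit(chip, desc);
 * }
 *
 * irq_set_chained_handler_and_data(parent_irq, demo_gpio_demux, g);
 */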
void
irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
irq_flow_handler_t handle, const char *name)
{
irq_set_chip(irq, chip);
__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
unsigned long flags, trigger, tmp;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return;
/*
* Warn when a driver sets the no autoenable flag on an already
* active interrupt.
*/
WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
irq_settings_clr_and_set(desc, clr, set);
trigger = irqd_get_trigger_type(&desc->irq_data);
irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
if (irq_settings_has_no_balance_set(desc))
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
if (irq_settings_is_per_cpu(desc))
irqd_set(&desc->irq_data, IRQD_PER_CPU);
if (irq_settings_can_move_pcntxt(desc))
irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
if (irq_settings_is_level(desc))
irqd_set(&desc->irq_data, IRQD_LEVEL);
tmp = irq_settings_get_trigger_mask(desc);
if (tmp != IRQ_TYPE_NONE)
trigger = tmp;
irqd_set(&desc->irq_data, trigger);
irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
/**
* irq_cpu_online - Invoke all irq_cpu_online functions.
*
* Iterate through all irqs and invoke the chip.irq_cpu_online()
* for each.
*/
void irq_cpu_online(void)
{
struct irq_desc *desc;
struct irq_chip *chip;
unsigned long flags;
unsigned int irq;
for_each_active_irq(irq) {
desc = irq_to_desc(irq);
if (!desc)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
chip = irq_data_get_irq_chip(&desc->irq_data);
if (chip && chip->irq_cpu_online &&
(!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
!irqd_irq_disabled(&desc->irq_data)))
chip->irq_cpu_online(&desc->irq_data);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
/**
* irq_cpu_offline - Invoke all irq_cpu_offline functions.
*
* Iterate through all irqs and invoke the chip.irq_cpu_offline()
* for each.
*/
void irq_cpu_offline(void)
{
struct irq_desc *desc;
struct irq_chip *chip;
unsigned long flags;
unsigned int irq;
for_each_active_irq(irq) {
desc = irq_to_desc(irq);
if (!desc)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
chip = irq_data_get_irq_chip(&desc->irq_data);
if (chip && chip->irq_cpu_offline &&
(!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
!irqd_irq_disabled(&desc->irq_data)))
chip->irq_cpu_offline(&desc->irq_data);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
#endif
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
* handle_fasteoi_ack_irq - irq handler for edge hierarchy
* stacked on transparent controllers
*
* @desc: the interrupt description structure for this irq
*
* Like handle_fasteoi_irq(), but for use with hierarchy where
* the irq_chip also needs to have its ->irq_ack() function
* called.
*/
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
raw_spin_lock(&desc->lock);
if (!irq_may_run(desc))
goto out;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
/*
* If it's disabled or no action is available
* then mask it and get out of here:
*/
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
mask_irq(desc);
goto out;
}
kstat_incr_irqs_this_cpu(desc);
if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
/* Start handling the irq */
desc->irq_data.chip->irq_ack(&desc->irq_data);
handle_irq_event(desc);
cond_unmask_eoi_irq(desc, chip);
raw_spin_unlock(&desc->lock);
return;
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
/**
* handle_fasteoi_mask_irq - irq handler for level hierarchy
* stacked on transparent controllers
*
* @desc: the interrupt description structure for this irq
*
* Like handle_fasteoi_irq(), but for use with hierarchy where
* the irq_chip also needs to have its ->irq_mask_ack() function
* called.
*/
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
raw_spin_lock(&desc->lock);
mask_ack_irq(desc);
if (!irq_may_run(desc))
goto out;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
/*
* If it's disabled or no action is available
* then mask it and get out of here:
*/
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
mask_irq(desc);
goto out;
}
kstat_incr_irqs_this_cpu(desc);
if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
handle_irq_event(desc);
cond_unmask_eoi_irq(desc, chip);
raw_spin_unlock(&desc->lock);
return;
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
/**
* irq_chip_set_parent_state - set the state of a parent interrupt.
*
* @data: Pointer to interrupt specific data
* @which: State to be restored (one of IRQCHIP_STATE_*)
* @val: Value corresponding to @which
*
* Returns success if the underlying irqchip does not implement it.
*/
int irq_chip_set_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool val)
{
data = data->parent_data;
if (!data || !data->chip->irq_set_irqchip_state)
return 0;
return data->chip->irq_set_irqchip_state(data, which, val);
}
EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);
/**
* irq_chip_get_parent_state - get the state of a parent interrupt.
*
* @data: Pointer to interrupt specific data
* @which: one of IRQCHIP_STATE_* the caller wants to know
* @state: a pointer to a boolean where the state is to be stored
*
* Returns success if the underlying irqchip does not implement it.
*/
int irq_chip_get_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool *state)
{
data = data->parent_data;
if (!data || !data->chip->irq_get_irqchip_state)
return 0;
return data->chip->irq_get_irqchip_state(data, which, state);
}
EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);
/**
* irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
* NULL)
* @data: Pointer to interrupt specific data
*/
void irq_chip_enable_parent(struct irq_data *data)
{
data = data->parent_data;
if (data->chip->irq_enable)
data->chip->irq_enable(data);
else
data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
/**
* irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
* NULL)
* @data: Pointer to interrupt specific data
*/
void irq_chip_disable_parent(struct irq_data *data)
{
data = data->parent_data;
if (data->chip->irq_disable)
data->chip->irq_disable(data);
else
data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
/**
* irq_chip_ack_parent - Acknowledge the parent interrupt
* @data: Pointer to interrupt specific data
*/
void irq_chip_ack_parent(struct irq_data *data)
{
data = data->parent_data;
data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
/**
* irq_chip_mask_parent - Mask the parent interrupt
* @data: Pointer to interrupt specific data
*/
void irq_chip_mask_parent(struct irq_data *data)
{
data = data->parent_data;
data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
/**
* irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
* @data: Pointer to interrupt specific data
*/
void irq_chip_mask_ack_parent(struct irq_data *data)
{
data = data->parent_data;
data->chip->irq_mask_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);
/**
* irq_chip_unmask_parent - Unmask the parent interrupt
* @data: Pointer to interrupt specific data
*/
void irq_chip_unmask_parent(struct irq_data *data)
{
data = data->parent_data;
data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
/**
* irq_chip_eoi_parent - Invoke EOI on the parent interrupt
* @data: Pointer to interrupt specific data
*/
void irq_chip_eoi_parent(struct irq_data *data)
{
data = data->parent_data;
data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
/**
* irq_chip_set_affinity_parent - Set affinity on the parent interrupt
* @data: Pointer to interrupt specific data
* @dest: The affinity mask to set
* @force: Flag to enforce setting (disable online checks)
*
* Conditional, as the underlying parent chip might not implement it.
*/
int irq_chip_set_affinity_parent(struct irq_data *data,
const struct cpumask *dest, bool force)
{
data = data->parent_data;
if (data->chip->irq_set_affinity)
return data->chip->irq_set_affinity(data, dest, force);
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
/**
* irq_chip_set_type_parent - Set IRQ type on the parent interrupt
* @data: Pointer to interrupt specific data
* @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
*
* Conditional, as the underlying parent chip might not implement it.
*/
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
data = data->parent_data;
if (data->chip->irq_set_type)
return data->chip->irq_set_type(data, type);
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
/**
* irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
* @data: Pointer to interrupt specific data
*
* Iterate through the domain hierarchy of the interrupt and check
* whether a hw retrigger function exists. If yes, invoke it.
*/
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
for (data = data->parent_data; data; data = data->parent_data)
if (data->chip && data->chip->irq_retrigger)
return data->chip->irq_retrigger(data);
return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);
/**
* irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
* @data: Pointer to interrupt specific data
* @vcpu_info: The vcpu affinity information
*/
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
data = data->parent_data;
if (data->chip->irq_set_vcpu_affinity)
return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);
/**
* irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
* @data: Pointer to interrupt specific data
* @on: Whether to set or reset the wake-up capability of this irq
*
* Conditional, as the underlying parent chip might not implement it.
*/
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
data = data->parent_data;
if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
return 0;
if (data->chip->irq_set_wake)
return data->chip->irq_set_wake(data, on);
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);
/**
* irq_chip_request_resources_parent - Request resources on the parent interrupt
* @data: Pointer to interrupt specific data
*/
int irq_chip_request_resources_parent(struct irq_data *data)
{
data = data->parent_data;
if (data->chip->irq_request_resources)
return data->chip->irq_request_resources(data);
/* no error on missing optional irq_chip::irq_request_resources */
return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
/**
* irq_chip_release_resources_parent - Release resources on the parent interrupt
* @data: Pointer to interrupt specific data
*/
void irq_chip_release_resources_parent(struct irq_data *data)
{
data = data->parent_data;
if (data->chip->irq_release_resources)
data->chip->irq_release_resources(data);
}
EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
#endif
/**
* irq_chip_compose_msi_msg - Compose msi message for a irq chip
* @data: Pointer to interrupt specific data
* @msg: Pointer to the MSI message
*
* For hierarchical domains we find the first chip in the hierarchy
* which implements the irq_compose_msi_msg callback. For non
* hierarchical we use the top level chip.
*/
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct irq_data *pos;
for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
if (data->chip && data->chip->irq_compose_msi_msg)
pos = data;
}
if (!pos)
return -ENOSYS;
pos->chip->irq_compose_msi_msg(pos, msg);
return 0;
}
static struct device *irq_get_pm_device(struct irq_data *data)
{
if (data->domain)
return data->domain->pm_dev;
return NULL;
}
/**
* irq_chip_pm_get - Enable power for an IRQ chip
* @data: Pointer to interrupt specific data
*
* Enable the power to the IRQ chip referenced by the interrupt data
* structure.
*/
int irq_chip_pm_get(struct irq_data *data)
{
struct device *dev = irq_get_pm_device(data);
int retval = 0;
if (IS_ENABLED(CONFIG_PM) && dev)
retval = pm_runtime_resume_and_get(dev);
return retval;
}
/**
* irq_chip_pm_put - Disable power for an IRQ chip
* @data: Pointer to interrupt specific data
*
* Disable the power to the IRQ chip referenced by the interrupt data
* structure. Note that power will only be disabled once this
* function has been called for all IRQs that have called irq_chip_pm_get().
*/
int irq_chip_pm_put(struct irq_data *data)
{
struct device *dev = irq_get_pm_device(data);
int retval = 0;
if (IS_ENABLED(CONFIG_PM) && dev)
retval = pm_runtime_put(dev);
return (retval < 0) ? retval : 0;
}
| linux-master | kernel/irq/chip.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
* Copyright (C) 2005-2006, Thomas Gleixner, Russell King
*
* This file contains the core interrupt handling code. Detailed
* information is available in Documentation/core-api/genericirq.rst
*
*/
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <asm/irq_regs.h>
#include <trace/events/irq.h>
#include "internals.h"
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
#endif
/**
* handle_bad_irq - handle spurious and unhandled irqs
* @desc: description of the interrupt
*
* Handles spurious and unhandled IRQs. It also prints a debug message.
*/
void handle_bad_irq(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
print_irq_desc(irq, desc);
kstat_incr_irqs_this_cpu(desc);
ack_bad_irq(irq);
}
EXPORT_SYMBOL_GPL(handle_bad_irq);
/*
* Special, empty irq handler:
*/
irqreturn_t no_action(int cpl, void *dev_id)
{
return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(no_action);
static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
return;
printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
"but no thread function available.", irq, action->name);
}
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
/*
* In case the thread crashed and was killed we just pretend that
* we handled the interrupt. The hardirq handler has disabled the
* device interrupt, so no irq storm is lurking.
*/
if (action->thread->flags & PF_EXITING)
return;
/*
* Wake up the handler thread for this action. If the
* RUNTHREAD bit is already set, nothing to do.
*/
if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
return;
/*
* It's safe to OR the mask lockless here. We have only two
* places which write to threads_oneshot: This code and the
* irq thread.
*
* This code is the hard irq context and can never run on two
* cpus in parallel. If it ever does we have more serious
* problems than this bitmask.
*
* The irq threads of this irq which clear their "running" bit
* in threads_oneshot are serialized via desc->lock against
* each other and they are serialized against this code by
* IRQS_INPROGRESS.
*
* Hard irq handler:
*
* spin_lock(desc->lock);
* desc->state |= IRQS_INPROGRESS;
* spin_unlock(desc->lock);
* set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
* desc->threads_oneshot |= mask;
* spin_lock(desc->lock);
* desc->state &= ~IRQS_INPROGRESS;
* spin_unlock(desc->lock);
*
* irq thread:
*
* again:
* spin_lock(desc->lock);
* if (desc->state & IRQS_INPROGRESS) {
* spin_unlock(desc->lock);
* while(desc->state & IRQS_INPROGRESS)
* cpu_relax();
* goto again;
* }
* if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
* desc->threads_oneshot &= ~mask;
* spin_unlock(desc->lock);
*
* So either the thread waits for us to clear IRQS_INPROGRESS
* or we are waiting in the flow handler for desc->lock to be
* released before we reach this point. The thread also checks
* IRQTF_RUNTHREAD under desc->lock. If set it leaves
* threads_oneshot untouched and runs the thread another time.
*/
desc->threads_oneshot |= action->thread_mask;
/*
* We increment the threads_active counter in case we wake up
* the irq thread. The irq thread decrements the counter when
* it returns from the handler or in the exit path and wakes
* up waiters which are stuck in synchronize_irq() when the
* active count becomes zero. synchronize_irq() is serialized
* against this code (hard irq handler) via IRQS_INPROGRESS
* like the finalize_oneshot() code. See comment above.
*/
atomic_inc(&desc->threads_active);
wake_up_process(action->thread);
}
irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc)
{
irqreturn_t retval = IRQ_NONE;
unsigned int irq = desc->irq_data.irq;
struct irqaction *action;
record_irq_time(desc);
for_each_action_of_desc(desc, action) {
irqreturn_t res;
/*
* If this IRQ would be threaded under force_irqthreads, mark it so.
*/
if (irq_settings_can_thread(desc) &&
!(action->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)))
lockdep_hardirq_threaded();
trace_irq_handler_entry(irq, action);
res = action->handler(irq, action->dev_id);
trace_irq_handler_exit(irq, action, res);
if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pS enabled interrupts\n",
irq, action->handler))
local_irq_disable();
switch (res) {
case IRQ_WAKE_THREAD:
/*
* Catch drivers which return WAKE_THREAD but
* did not set up a thread function
*/
if (unlikely(!action->thread_fn)) {
warn_no_thread(irq, action);
break;
}
__irq_wake_thread(desc, action);
break;
default:
break;
}
retval |= res;
}
return retval;
}
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
irqreturn_t retval;
retval = __handle_irq_event_percpu(desc);
add_interrupt_randomness(desc->irq_data.irq);
if (!irq_settings_no_debug(desc))
note_interrupt(desc, retval);
return retval;
}
irqreturn_t handle_irq_event(struct irq_desc *desc)
{
irqreturn_t ret;
desc->istate &= ~IRQS_PENDING;
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock(&desc->lock);
ret = handle_irq_event_percpu(desc);
raw_spin_lock(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
return ret;
}
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
if (handle_arch_irq)
return -EBUSY;
handle_arch_irq = handle_irq;
return 0;
}
/**
* generic_handle_arch_irq - root irq handler for architectures which do
* not do entry accounting themselves
* @regs: Register file coming from the low-level handling code
*/
asmlinkage void noinstr generic_handle_arch_irq(struct pt_regs *regs)
{
struct pt_regs *old_regs;
irq_enter();
old_regs = set_irq_regs(regs);
handle_arch_irq(regs);
set_irq_regs(old_regs);
irq_exit();
}
#endif
| linux-master | kernel/irq/handle.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2016, Linaro Ltd - Daniel Lezcano <[email protected]>
#define pr_fmt(fmt) "irq_timings: " fmt
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/math64.h>
#include <linux/log2.h>
#include <trace/events/irq.h>
#include "internals.h"
DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);
DEFINE_PER_CPU(struct irq_timings, irq_timings);
static DEFINE_IDR(irqt_stats);
void irq_timings_enable(void)
{
static_branch_enable(&irq_timing_enabled);
}
void irq_timings_disable(void)
{
static_branch_disable(&irq_timing_enabled);
}
/*
* The main goal of this algorithm is to predict the next interrupt
* occurrence on the current CPU.
*
* Currently, the interrupt timings are stored in a circular array
* buffer every time there is an interrupt, as a tuple: the interrupt
* number and the associated timestamp when the event occurred <irq,
* timestamp>.
*
* For every interrupt occurring in a short period of time, we can
* measure the elapsed time between the occurrences for the same
* interrupt and we end up with a suite of intervals. Experience
* has shown that interrupts often arrive following a periodic
* pattern.
*
* The objective of the algorithm is to find this periodic pattern
* as quickly as possible and use its period to predict the next irq
* event.
*
* When the next interrupt event is requested, we are in the situation
* where the interrupts are disabled and the circular buffer
* containing the timings is filled with the events which happened
* after the previous next-interrupt-event request.
*
* At this point, we read the circular buffer and we fill the irq
* related statistics structure. After this step, the circular array
* containing the timings is empty because all the values are
* dispatched in their corresponding buffers.
*
* Now for each interrupt, we can predict the next event by using the
* suffix array, log interval and exponential moving average
*
* 1. Suffix array
*
* Suffix array is an array of all the suffixes of a string. It is
* widely used as a data structure for compression, text search, ...
* For instance for the word 'banana', the suffixes will be: 'banana'
* 'anana' 'nana' 'ana' 'na' 'a'
*
* Usually, the suffix array is sorted but for our purpose it is
* not necessary and won't provide any improvement in the context of
* the solved problem where we clearly define the boundaries of the
* search by a max period and min period.
*
* The suffix array will build a suite of intervals of different
* length and will look for the repetition of each suite. If the suite
* is repeating then we have the period because it is the length of
* the suite whatever its position in the buffer.
*
* 2. Log interval
*
* We saw that the irq timings allow us to compute the interval of the
* occurrences for a specific interrupt. We can reasonably assume that
* the longer the interval, the higher the error for the next event,
* so we store those interval values into an array where each slot
* corresponds to an interval at the power of 2 of the index. For
* example, index 12 will contain values
* between 2^11 and 2^12.
*
* At the end we have an array of values where each index defines a
* [2^(index - 1), 2^index] interval of values, allowing us to store a
* large number of values inside a small array.
*
* For example, if we have the value 1123, then we store it at
* index ilog2(1123) = 10.
*
* Storing those values at the specific index is done by computing an
* exponential moving average for this specific slot. For instance,
* the values 1800, 1123, 1453, ... all fall into the same slot (10) and
* the exponential moving average is computed every time a new value
* is stored at this slot.
*
* 3. Exponential Moving Average
*
* The EMA is largely used to track a signal for stocks or as a low
* pass filter. The magic of the formula is that it is very simple and
* the reactivity of the average can be tuned with a factor called
* alpha.
*
* The higher the alpha, the faster the average responds to the
* signal change. In our case, if a slot in the array is a big
* interval, we can have numbers with a big difference between
* them. The impact of those differences in the average computation
* can be tuned by changing the alpha value.
*
*
* -- The algorithm --
*
* We saw the different processing above, now let's see how they are
* used together.
*
* For each interrupt:
* For each interval:
* Compute the index = ilog2(interval)
* Compute a new_ema(buffer[index], interval)
* Store the index in a circular buffer
*
* Compute the suffix array of the indexes
*
* For each suffix:
* If the suffix is reverse-found 3 times
* Return suffix
*
* Return Not found
*
* However, we cannot build an endless suffix array; it would make no
* sense and would add extra overhead, so we restrict this to a
* maximum suffix length of 5 and a minimum suffix length of 2.
* Experience showed that 5 covers the majority of the maximum pattern
* periods found for different devices.
*
* The result is that finding a pattern takes less than 1us for an
* interrupt.
*
* Example based on real values:
*
* Example 1 : MMC write/read interrupt interval:
*
* 223947, 1240, 1384, 1386, 1386,
* 217416, 1236, 1384, 1386, 1387,
* 214719, 1241, 1386, 1387, 1384,
* 213696, 1234, 1384, 1386, 1388,
* 219904, 1240, 1385, 1389, 1385,
* 212240, 1240, 1386, 1386, 1386,
* 214415, 1236, 1384, 1386, 1387,
* 214276, 1234, 1384, 1388, ?
*
* For each element, apply ilog2(value)
*
* 15, 8, 8, 8, 8,
* 15, 8, 8, 8, 8,
* 15, 8, 8, 8, 8,
* 15, 8, 8, 8, 8,
* 15, 8, 8, 8, 8,
* 15, 8, 8, 8, 8,
* 15, 8, 8, 8, 8,
* 15, 8, 8, 8, ?
*
* With a max period of 5, we take the last (max_period * 3) = 15
* elements, as we can be confident that if the pattern repeats itself
* three times it is a repeating pattern.
*
* 8,
* 15, 8, 8, 8, 8,
* 15, 8, 8, 8, 8,
* 15, 8, 8, 8, ?
*
* Suffixes are:
*
* 1) 8, 15, 8, 8, 8 <- max period
* 2) 8, 15, 8, 8
* 3) 8, 15, 8
* 4) 8, 15 <- min period
*
* From there we search the repeating pattern for each suffix.
*
* buffer: 8, 15, 8, 8, 8, 8, 15, 8, 8, 8, 8, 15, 8, 8, 8
* | | | | | | | | | | | | | | |
* 8, 15, 8, 8, 8 | | | | | | | | | |
* 8, 15, 8, 8, 8 | | | | |
* 8, 15, 8, 8, 8
*
* When moving the suffix, we found exactly 3 matches.
*
* The first suffix with period 5 is repeating.
*
* The next event is (3 * max_period) % suffix_period
*
* In this example, the result is 0, so the next event is suffix[0] => 8
*
* However, 8 is the index in the array of exponential moving averages
* which were calculated on the fly when storing the values, so the
* interval is ema[8] = 1366
*
*
* Example 2:
*
* 4, 3, 5, 100,
* 3, 3, 5, 117,
* 4, 4, 5, 112,
* 4, 3, 4, 110,
* 3, 5, 3, 117,
* 4, 4, 5, 112,
* 4, 3, 4, 110,
* 3, 4, 5, 112,
* 4, 3, 4, 110
*
* ilog2
*
* 0, 0, 0, 4,
* 0, 0, 0, 4,
* 0, 0, 0, 4,
* 0, 0, 0, 4,
* 0, 0, 0, 4,
* 0, 0, 0, 4,
* 0, 0, 0, 4,
* 0, 0, 0, 4,
* 0, 0, 0, 4
*
* Max period 5:
* 0, 0, 4,
* 0, 0, 0, 4,
* 0, 0, 0, 4,
* 0, 0, 0, 4
*
* Suffixes:
*
* 1) 0, 0, 4, 0, 0
* 2) 0, 0, 4, 0
* 3) 0, 0, 4
* 4) 0, 0
*
* buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4
* | | | | | | X
* 0, 0, 4, 0, 0, | X
* 0, 0
*
* buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4
* | | | | | | | | | | | | | | |
* 0, 0, 4, 0, | | | | | | | | | | |
* 0, 0, 4, 0, | | | | | | |
* 0, 0, 4, 0, | | |
* 0 0 4
*
* The pattern is found 3 times. The remaining index results from
* (max_period * 3) % suffix_period = 15 % 4 = 3; this value is the
* index into the repeating suffix. The suffix for period 4 is
* '0, 0, 4, 0', so the predicted next value is the one at index 3.
*/
#define EMA_ALPHA_VAL 64
#define EMA_ALPHA_SHIFT 7
#define PREDICTION_PERIOD_MIN 3
#define PREDICTION_PERIOD_MAX 5
#define PREDICTION_FACTOR 4
#define PREDICTION_MAX 10 /* 2 ^ PREDICTION_MAX useconds */
#define PREDICTION_BUFFER_SIZE 16 /* slots for EMAs, hardly more than 16 */
/*
* Number of elements in the circular buffer: if it happens that the
* buffer was flushed before, then the number of elements could be
* smaller than IRQ_TIMINGS_SIZE, so the count is used; otherwise the
* array size is used, as we wrapped. The index begins from zero when
* we did not wrap. This could be done in a nicer way with a proper
* circular array structure type, but at the cost of extra computation
* in the interrupt handler hot path. We choose efficiency.
*/
#define for_each_irqts(i, irqts) \
for (i = irqts->count < IRQ_TIMINGS_SIZE ? \
0 : irqts->count & IRQ_TIMINGS_MASK, \
irqts->count = min(IRQ_TIMINGS_SIZE, \
irqts->count); \
irqts->count > 0; irqts->count--, \
i = (i + 1) & IRQ_TIMINGS_MASK)
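/*
 * A minimal usage sketch (irqs must be disabled; see
 * irq_timings_next_event() below for the real user):
 *
 *	int i;
 *
 *	for_each_irqts(i, irqts) {
 *		u64 ts;
 *		int irq = irq_timing_decode(irqts->values[i], &ts);
 *		...
 *	}
 *
 * Note the macro consumes irqts->count, so the buffer reads as
 * flushed once the loop is done.
 */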
struct irqt_stat {
u64 last_ts;
u64 ema_time[PREDICTION_BUFFER_SIZE];
int timings[IRQ_TIMINGS_SIZE];
int circ_timings[IRQ_TIMINGS_SIZE];
int count;
};
/*
* Exponential moving average computation
*/
static u64 irq_timings_ema_new(u64 value, u64 ema_old)
{
s64 diff;
if (unlikely(!ema_old))
return value;
diff = (value - ema_old) * EMA_ALPHA_VAL;
/*
 * It is safe to add the s64 diff to the u64 ema_old, as the
 * latter will never have its topmost bit set: it is always
 * smaller than a 2^63 nanosecond interrupt interval (292
 * years).
*/
return ema_old + (diff >> EMA_ALPHA_SHIFT);
}
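/*
 * Worked example of the EMA above: with EMA_ALPHA_VAL = 64 and
 * EMA_ALPHA_SHIFT = 7, the smoothing factor is 64 / 128 = 0.5, i.e.
 * each sample moves the average halfway towards the measured value:
 *
 *	irq_timings_ema_new(1000, 0)    -> 1000  (first sample as-is)
 *	irq_timings_ema_new(2000, 1000) -> 1000 + (64000 >> 7) = 1500
 *	irq_timings_ema_new(1000, 1500) -> 1500 + (-32000 >> 7) = 1250
 */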
static int irq_timings_next_event_index(int *buffer, size_t len, int period_max)
{
int period;
/*
 * Move the beginning pointer to the end minus (max period x 3).
 * This is the point from which we can begin searching for the pattern.
*/
buffer = &buffer[len - (period_max * 3)];
/* Adjust the length to the maximum allowed period x 3 */
len = period_max * 3;
/*
 * The buffer contains the sequence of intervals on an ilog2
 * basis and we are looking for a repetition. For each suffix,
 * we point the beginning of the search three period lengths
 * back from the end of the buffer.
*/
for (period = period_max; period >= PREDICTION_PERIOD_MIN; period--) {
/*
 * The first comparison would always succeed because the
 * suffix is deduced from the first 'period' elements of
 * the buffer and comparing the initial suffix with
 * itself is pointless, so we skip the first iteration.
*/
int idx = period;
size_t size = period;
/*
 * We check whether the sequence with the current
 * period repeats itself. If it is truncated at the
 * end, since it repeats we can use the modulo of the
 * period to find the next element.
*/
while (!memcmp(buffer, &buffer[idx], size * sizeof(int))) {
/*
 * Advance the index one period at a time
*/
idx += size;
/*
 * If this condition is reached, all previous
 * memcmp calls were successful, so the period is
 * found.
*/
if (idx == len)
return buffer[len % period];
/*
* If the remaining elements to compare are
* smaller than the period, readjust the size
* of the comparison for the last iteration.
*/
if (len - idx < period)
size = len - idx;
}
}
return -1;
}
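/*
 * Illustrative sketch, kept out of the build: running the linearized
 * ilog2 buffer from example 1 above through the search. The function
 * name below is hypothetical.
 */
#if 0
static int example_next_index(void)
{
	int buf[] = { 8, 15, 8, 8, 8, 8, 15, 8, 8, 8, 8, 15, 8, 8, 8 };

	/*
	 * With period_max = 5, the suffix { 8, 15, 8, 8, 8 } matches at
	 * offsets 5 and 10, idx reaches len, and the function returns
	 * buf[15 % 5] = buf[0] = 8: the ema_time[] slot of the next event.
	 */
	return irq_timings_next_event_index(buf, ARRAY_SIZE(buf), 5);
}
#endif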
static u64 __irq_timings_next_event(struct irqt_stat *irqs, int irq, u64 now)
{
int index, i, period_max, count, start, min = INT_MAX;
if ((now - irqs->last_ts) >= NSEC_PER_SEC) {
irqs->count = irqs->last_ts = 0;
return U64_MAX;
}
/*
 * As we want to find the repetition three times, we need a
 * number of intervals greater than or equal to three times
 * the maximum period; otherwise we truncate the max period.
*/
period_max = irqs->count > (3 * PREDICTION_PERIOD_MAX) ?
PREDICTION_PERIOD_MAX : irqs->count / 3;
/*
* If we don't have enough irq timings for this prediction,
* just bail out.
*/
if (period_max <= PREDICTION_PERIOD_MIN)
return U64_MAX;
/*
 * 'count' depends on whether the circular buffer wrapped or not
*/
count = irqs->count < IRQ_TIMINGS_SIZE ?
irqs->count : IRQ_TIMINGS_SIZE;
start = irqs->count < IRQ_TIMINGS_SIZE ?
0 : (irqs->count & IRQ_TIMINGS_MASK);
/*
 * Copy the content of the circular buffer into another buffer
 * in order to linearize it, instead of dealing with wrapping
 * indexes and a shifted array, which would be error prone and
 * extremely difficult to debug.
*/
for (i = 0; i < count; i++) {
int index = (start + i) & IRQ_TIMINGS_MASK;
irqs->timings[i] = irqs->circ_timings[index];
min = min_t(int, irqs->timings[i], min);
}
index = irq_timings_next_event_index(irqs->timings, count, period_max);
if (index < 0)
return irqs->last_ts + irqs->ema_time[min];
return irqs->last_ts + irqs->ema_time[index];
}
static __always_inline int irq_timings_interval_index(u64 interval)
{
/*
 * PREDICTION_FACTOR increases the interval size covered by
 * each slot of the exponential moving average array.
*/
u64 interval_us = (interval >> 10) / PREDICTION_FACTOR;
return likely(interval_us) ? ilog2(interval_us) : 0;
}
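/*
 * Worked example with the MMC trace above: interval = 1386000 ns,
 * 1386000 >> 10 = 1353 (~us), 1353 / PREDICTION_FACTOR = 338, and
 * ilog2(338) = 8, which is the ema_time[] slot seen in example 1.
 */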
static __always_inline void __irq_timings_store(int irq, struct irqt_stat *irqs,
u64 interval)
{
int index;
/*
* Get the index in the ema table for this interrupt.
*/
index = irq_timings_interval_index(interval);
if (index > PREDICTION_BUFFER_SIZE - 1) {
irqs->count = 0;
return;
}
/*
* Store the index as an element of the pattern in another
* circular array.
*/
irqs->circ_timings[irqs->count & IRQ_TIMINGS_MASK] = index;
irqs->ema_time[index] = irq_timings_ema_new(interval,
irqs->ema_time[index]);
irqs->count++;
}
static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts)
{
u64 old_ts = irqs->last_ts;
u64 interval;
/*
* The timestamps are absolute time values, we need to compute
* the timing interval between two interrupts.
*/
irqs->last_ts = ts;
/*
 * The interval type is u64 in order to keep the same type
 * throughout the computation, which prevents subtle issues
 * with overflow, sign and division.
*/
interval = ts - old_ts;
/*
 * The interrupts triggered more than one second apart; that
 * ends the sequence's predictability for our purpose. In this
 * case, assume we have the beginning of a sequence and the
 * timestamp is the first value. As it is impossible to
 * predict anything at this point, return.
*
* Note the first timestamp of the sequence will always fall
* in this test because the old_ts is zero. That is what we
* want as we need another timestamp to compute an interval.
*/
if (interval >= NSEC_PER_SEC) {
irqs->count = 0;
return;
}
__irq_timings_store(irq, irqs, interval);
}
/**
 * irq_timings_next_event - Return when the next event is supposed to arrive
 * @now: the current timestamp in nanoseconds
*
* During the last busy cycle, the number of interrupts is incremented
* and stored in the irq_timings structure. This information is
* necessary to:
*
* - know if the index in the table wrapped up:
*
* If more than the array size interrupts happened during the
* last busy/idle cycle, the index wrapped up and we have to
* begin with the next element in the array which is the last one
* in the sequence, otherwise it is at the index 0.
*
* - have an indication of the interrupts activity on this CPU
* (eg. irq/sec)
*
* The values are 'consumed' after inserting in the statistical model,
* thus the count is reinitialized.
*
 * The array of values **must** be browsed in time order: the
 * timestamp must increase from one element to the next.
*
 * Returns a nanosecond-based estimate of the earliest next interrupt,
 * or U64_MAX if none can be predicted.
*/
u64 irq_timings_next_event(u64 now)
{
struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
struct irqt_stat *irqs;
struct irqt_stat __percpu *s;
u64 ts, next_evt = U64_MAX;
int i, irq = 0;
/*
 * This function must be called with local irqs disabled in
 * order to prevent the timings circular buffer from being
 * updated while we are reading it.
*/
lockdep_assert_irqs_disabled();
if (!irqts->count)
return next_evt;
/*
 * See the comment above for_each_irqts() about the number of
 * elements in the circular buffer.
 *
 * Inject the measured irq/timestamp pairs into the pattern
 * prediction model while decrementing the counter, because we
 * consume the data from our circular buffer.
*/
for_each_irqts(i, irqts) {
irq = irq_timing_decode(irqts->values[i], &ts);
s = idr_find(&irqt_stats, irq);
if (s)
irq_timings_store(irq, this_cpu_ptr(s), ts);
}
/*
 * Look through each interrupt's statistics for the earliest
 * next event.
*/
idr_for_each_entry(&irqt_stats, s, i) {
irqs = this_cpu_ptr(s);
ts = __irq_timings_next_event(irqs, i, now);
if (ts <= now)
return now;
if (ts < next_evt)
next_evt = ts;
}
return next_evt;
}
void irq_timings_free(int irq)
{
struct irqt_stat __percpu *s;
s = idr_find(&irqt_stats, irq);
if (s) {
free_percpu(s);
idr_remove(&irqt_stats, irq);
}
}
int irq_timings_alloc(int irq)
{
struct irqt_stat __percpu *s;
int id;
/*
* Some platforms can have the same private interrupt per cpu,
* so this function may be called several times with the
* same interrupt number. Just bail out in case the per cpu
* stat structure is already allocated.
*/
s = idr_find(&irqt_stats, irq);
if (s)
return 0;
s = alloc_percpu(*s);
if (!s)
return -ENOMEM;
idr_preload(GFP_KERNEL);
id = idr_alloc(&irqt_stats, s, irq, irq + 1, GFP_NOWAIT);
idr_preload_end();
if (id < 0) {
free_percpu(s);
return id;
}
return 0;
}
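/*
 * A minimal usage sketch (hypothetical caller): allocate the per-cpu
 * statistics when setting an interrupt up, release them on teardown.
 *
 *	ret = irq_timings_alloc(irq);
 *	if (ret)
 *		return ret;
 *	...
 *	irq_timings_free(irq);
 */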
#ifdef CONFIG_TEST_IRQ_TIMINGS
struct timings_intervals {
u64 *intervals;
size_t count;
};
/*
* Intervals are given in nanosecond base
*/
static u64 intervals0[] __initdata = {
10000, 50000, 200000, 500000,
10000, 50000, 200000, 500000,
10000, 50000, 200000, 500000,
10000, 50000, 200000, 500000,
10000, 50000, 200000, 500000,
10000, 50000, 200000, 500000,
10000, 50000, 200000, 500000,
10000, 50000, 200000, 500000,
10000, 50000, 200000,
};
static u64 intervals1[] __initdata = {
223947000, 1240000, 1384000, 1386000, 1386000,
217416000, 1236000, 1384000, 1386000, 1387000,
214719000, 1241000, 1386000, 1387000, 1384000,
213696000, 1234000, 1384000, 1386000, 1388000,
219904000, 1240000, 1385000, 1389000, 1385000,
212240000, 1240000, 1386000, 1386000, 1386000,
214415000, 1236000, 1384000, 1386000, 1387000,
214276000, 1234000,
};
static u64 intervals2[] __initdata = {
4000, 3000, 5000, 100000,
3000, 3000, 5000, 117000,
4000, 4000, 5000, 112000,
4000, 3000, 4000, 110000,
3000, 5000, 3000, 117000,
4000, 4000, 5000, 112000,
4000, 3000, 4000, 110000,
3000, 4000, 5000, 112000,
4000,
};
static u64 intervals3[] __initdata = {
1385000, 212240000, 1240000,
1386000, 214415000, 1236000,
1384000, 214276000, 1234000,
1386000, 214415000, 1236000,
1385000, 212240000, 1240000,
1386000, 214415000, 1236000,
1384000, 214276000, 1234000,
1386000, 214415000, 1236000,
1385000, 212240000, 1240000,
};
static u64 intervals4[] __initdata = {
10000, 50000, 10000, 50000,
10000, 50000, 10000, 50000,
10000, 50000, 10000, 50000,
10000, 50000, 10000, 50000,
10000, 50000, 10000, 50000,
10000, 50000, 10000, 50000,
10000, 50000, 10000, 50000,
10000, 50000, 10000, 50000,
10000,
};
static struct timings_intervals tis[] __initdata = {
{ intervals0, ARRAY_SIZE(intervals0) },
{ intervals1, ARRAY_SIZE(intervals1) },
{ intervals2, ARRAY_SIZE(intervals2) },
{ intervals3, ARRAY_SIZE(intervals3) },
{ intervals4, ARRAY_SIZE(intervals4) },
};
static int __init irq_timings_test_next_index(struct timings_intervals *ti)
{
int _buffer[IRQ_TIMINGS_SIZE];
int buffer[IRQ_TIMINGS_SIZE];
int index, start, i, count, period_max;
count = ti->count - 1;
period_max = count > (3 * PREDICTION_PERIOD_MAX) ?
PREDICTION_PERIOD_MAX : count / 3;
/*
* Inject all values except the last one which will be used
* to compare with the next index result.
*/
pr_debug("index suite: ");
for (i = 0; i < count; i++) {
index = irq_timings_interval_index(ti->intervals[i]);
_buffer[i & IRQ_TIMINGS_MASK] = index;
pr_cont("%d ", index);
}
start = count < IRQ_TIMINGS_SIZE ? 0 :
count & IRQ_TIMINGS_MASK;
count = min_t(int, count, IRQ_TIMINGS_SIZE);
for (i = 0; i < count; i++) {
int index = (start + i) & IRQ_TIMINGS_MASK;
buffer[i] = _buffer[index];
}
index = irq_timings_next_event_index(buffer, count, period_max);
i = irq_timings_interval_index(ti->intervals[ti->count - 1]);
if (index != i) {
pr_err("Expected (%d) and computed (%d) next indexes differ\n",
i, index);
return -EINVAL;
}
return 0;
}
static int __init irq_timings_next_index_selftest(void)
{
int i, ret;
for (i = 0; i < ARRAY_SIZE(tis); i++) {
pr_info("---> Injecting intervals number #%d (count=%zd)\n",
i, tis[i].count);
ret = irq_timings_test_next_index(&tis[i]);
if (ret)
break;
}
return ret;
}
static int __init irq_timings_test_irqs(struct timings_intervals *ti)
{
struct irqt_stat __percpu *s;
struct irqt_stat *irqs;
int i, index, ret, irq = 0xACE5;
ret = irq_timings_alloc(irq);
if (ret) {
pr_err("Failed to allocate irq timings\n");
return ret;
}
s = idr_find(&irqt_stats, irq);
if (!s) {
ret = -EIDRM;
goto out;
}
irqs = this_cpu_ptr(s);
for (i = 0; i < ti->count; i++) {
index = irq_timings_interval_index(ti->intervals[i]);
pr_debug("%d: interval=%llu ema_index=%d\n",
i, ti->intervals[i], index);
__irq_timings_store(irq, irqs, ti->intervals[i]);
if (irqs->circ_timings[i & IRQ_TIMINGS_MASK] != index) {
ret = -EBADSLT;
pr_err("Failed to store in the circular buffer\n");
goto out;
}
}
if (irqs->count != ti->count) {
ret = -ERANGE;
pr_err("Count differs\n");
goto out;
}
ret = 0;
out:
irq_timings_free(irq);
return ret;
}
static int __init irq_timings_irqs_selftest(void)
{
int i, ret;
for (i = 0; i < ARRAY_SIZE(tis); i++) {
pr_info("---> Injecting intervals number #%d (count=%zd)\n",
i, tis[i].count);
ret = irq_timings_test_irqs(&tis[i]);
if (ret)
break;
}
return ret;
}
static int __init irq_timings_test_irqts(struct irq_timings *irqts,
unsigned count)
{
int start = count >= IRQ_TIMINGS_SIZE ? count - IRQ_TIMINGS_SIZE : 0;
int i, irq, oirq = 0xBEEF;
u64 ots = 0xDEAD, ts;
/*
* Fill the circular buffer by using the dedicated function.
*/
for (i = 0; i < count; i++) {
pr_debug("%d: index=%d, ts=%llX irq=%X\n",
i, i & IRQ_TIMINGS_MASK, ots + i, oirq + i);
irq_timings_push(ots + i, oirq + i);
}
/*
 * Compute the expected first element values, depending on
 * whether the index wrapped or not.
*/
ots += start;
oirq += start;
/*
* Test the circular buffer count is correct.
*/
pr_debug("---> Checking timings array count (%d) is right\n", count);
if (WARN_ON(irqts->count != count))
return -EINVAL;
/*
* Test the macro allowing to browse all the irqts.
*/
pr_debug("---> Checking the for_each_irqts() macro\n");
for_each_irqts(i, irqts) {
irq = irq_timing_decode(irqts->values[i], &ts);
pr_debug("index=%d, ts=%llX / %llX, irq=%X / %X\n",
i, ts, ots, irq, oirq);
if (WARN_ON(ts != ots || irq != oirq))
return -EINVAL;
ots++; oirq++;
}
/*
 * The circular buffer should have been flushed when browsed
 * with for_each_irqts()
*/
pr_debug("---> Checking timings array is empty after browsing it\n");
if (WARN_ON(irqts->count))
return -EINVAL;
return 0;
}
static int __init irq_timings_irqts_selftest(void)
{
struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
int i, ret;
/*
 * Test the circular buffer with different numbers of
 * elements. The purpose is to test at the limits (empty, half
 * full, full, wrapped with the cursor at the boundaries,
 * wrapped several times, etc.).
*/
int count[] = { 0,
IRQ_TIMINGS_SIZE >> 1,
IRQ_TIMINGS_SIZE,
IRQ_TIMINGS_SIZE + (IRQ_TIMINGS_SIZE >> 1),
2 * IRQ_TIMINGS_SIZE,
(2 * IRQ_TIMINGS_SIZE) + 3,
};
for (i = 0; i < ARRAY_SIZE(count); i++) {
pr_info("---> Checking the timings with %d/%d values\n",
count[i], IRQ_TIMINGS_SIZE);
ret = irq_timings_test_irqts(irqts, count[i]);
if (ret)
break;
}
return ret;
}
static int __init irq_timings_selftest(void)
{
int ret;
pr_info("------------------- selftest start -----------------\n");
/*
 * At this point, we don't expect any subsystem but us to use
 * the irq timings, so it should not be enabled.
*/
if (static_branch_unlikely(&irq_timing_enabled)) {
pr_warn("irq timings already initialized, skipping selftest\n");
return 0;
}
ret = irq_timings_irqts_selftest();
if (ret)
goto out;
ret = irq_timings_irqs_selftest();
if (ret)
goto out;
ret = irq_timings_next_index_selftest();
out:
pr_info("---------- selftest end with %s -----------\n",
ret ? "failure" : "success");
return ret;
}
early_initcall(irq_timings_selftest);
#endif
| linux-master | kernel/irq/timings.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright 2017 Thomas Gleixner <[email protected]>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
#include "internals.h"
static struct dentry *irq_dir;
struct irq_bit_descr {
unsigned int mask;
char *name;
};
#define BIT_MASK_DESCR(m) { .mask = m, .name = #m }
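/*
 * For illustration, BIT_MASK_DESCR(IRQCHIP_EOI_THREADED) expands to:
 *
 *	{ .mask = IRQCHIP_EOI_THREADED, .name = "IRQCHIP_EOI_THREADED" }
 *
 * i.e. '#m' stringizes the flag so irq_debug_show_bits() below can
 * print the symbolic name of each bit set in a state word.
 */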
static void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
const struct irq_bit_descr *sd, int size)
{
int i;
for (i = 0; i < size; i++, sd++) {
if (state & sd->mask)
seq_printf(m, "%*s%s\n", ind + 12, "", sd->name);
}
}
#ifdef CONFIG_SMP
static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
const struct cpumask *msk;
msk = irq_data_get_affinity_mask(data);
seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk));
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
msk = irq_data_get_effective_affinity_mask(data);
seq_printf(m, "effectiv: %*pbl\n", cpumask_pr_args(msk));
#endif
#ifdef CONFIG_GENERIC_PENDING_IRQ
msk = desc->pending_mask;
seq_printf(m, "pending: %*pbl\n", cpumask_pr_args(msk));
#endif
}
#else
static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc) { }
#endif
static const struct irq_bit_descr irqchip_flags[] = {
BIT_MASK_DESCR(IRQCHIP_SET_TYPE_MASKED),
BIT_MASK_DESCR(IRQCHIP_EOI_IF_HANDLED),
BIT_MASK_DESCR(IRQCHIP_MASK_ON_SUSPEND),
BIT_MASK_DESCR(IRQCHIP_ONOFFLINE_ENABLED),
BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
BIT_MASK_DESCR(IRQCHIP_SUPPORTS_NMI),
BIT_MASK_DESCR(IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND),
BIT_MASK_DESCR(IRQCHIP_IMMUTABLE),
};
static void
irq_debug_show_chip(struct seq_file *m, struct irq_data *data, int ind)
{
struct irq_chip *chip = data->chip;
if (!chip) {
seq_printf(m, "chip: None\n");
return;
}
seq_printf(m, "%*schip: ", ind, "");
if (chip->irq_print_chip)
chip->irq_print_chip(data, m);
else
seq_printf(m, "%s", chip->name);
seq_printf(m, "\n%*sflags: 0x%lx\n", ind + 1, "", chip->flags);
irq_debug_show_bits(m, ind, chip->flags, irqchip_flags,
ARRAY_SIZE(irqchip_flags));
}
static void
irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)
{
seq_printf(m, "%*sdomain: %s\n", ind, "",
data->domain ? data->domain->name : "");
seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq);
irq_debug_show_chip(m, data, ind + 1);
if (data->domain && data->domain->ops && data->domain->ops->debug_show)
data->domain->ops->debug_show(m, NULL, data, ind + 1);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
if (!data->parent_data)
return;
seq_printf(m, "%*sparent:\n", ind + 1, "");
irq_debug_show_data(m, data->parent_data, ind + 4);
#endif
}
static const struct irq_bit_descr irqdata_states[] = {
BIT_MASK_DESCR(IRQ_TYPE_EDGE_RISING),
BIT_MASK_DESCR(IRQ_TYPE_EDGE_FALLING),
BIT_MASK_DESCR(IRQ_TYPE_LEVEL_HIGH),
BIT_MASK_DESCR(IRQ_TYPE_LEVEL_LOW),
BIT_MASK_DESCR(IRQD_LEVEL),
BIT_MASK_DESCR(IRQD_ACTIVATED),
BIT_MASK_DESCR(IRQD_IRQ_STARTED),
BIT_MASK_DESCR(IRQD_IRQ_DISABLED),
BIT_MASK_DESCR(IRQD_IRQ_MASKED),
BIT_MASK_DESCR(IRQD_IRQ_INPROGRESS),
BIT_MASK_DESCR(IRQD_PER_CPU),
BIT_MASK_DESCR(IRQD_NO_BALANCING),
BIT_MASK_DESCR(IRQD_SINGLE_TARGET),
BIT_MASK_DESCR(IRQD_MOVE_PCNTXT),
BIT_MASK_DESCR(IRQD_AFFINITY_SET),
BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
BIT_MASK_DESCR(IRQD_AFFINITY_ON_ACTIVATE),
BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
BIT_MASK_DESCR(IRQD_CAN_RESERVE),
BIT_MASK_DESCR(IRQD_MSI_NOMASK_QUIRK),
BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
BIT_MASK_DESCR(IRQD_WAKEUP_STATE),
BIT_MASK_DESCR(IRQD_WAKEUP_ARMED),
BIT_MASK_DESCR(IRQD_DEFAULT_TRIGGER_SET),
BIT_MASK_DESCR(IRQD_HANDLE_ENFORCE_IRQCTX),
BIT_MASK_DESCR(IRQD_IRQ_ENABLED_ON_SUSPEND),
BIT_MASK_DESCR(IRQD_RESEND_WHEN_IN_PROGRESS),
};
static const struct irq_bit_descr irqdesc_states[] = {
BIT_MASK_DESCR(_IRQ_NOPROBE),
BIT_MASK_DESCR(_IRQ_NOREQUEST),
BIT_MASK_DESCR(_IRQ_NOTHREAD),
BIT_MASK_DESCR(_IRQ_NOAUTOEN),
BIT_MASK_DESCR(_IRQ_NESTED_THREAD),
BIT_MASK_DESCR(_IRQ_PER_CPU_DEVID),
BIT_MASK_DESCR(_IRQ_IS_POLLED),
BIT_MASK_DESCR(_IRQ_DISABLE_UNLAZY),
BIT_MASK_DESCR(_IRQ_HIDDEN),
};
static const struct irq_bit_descr irqdesc_istates[] = {
BIT_MASK_DESCR(IRQS_AUTODETECT),
BIT_MASK_DESCR(IRQS_SPURIOUS_DISABLED),
BIT_MASK_DESCR(IRQS_POLL_INPROGRESS),
BIT_MASK_DESCR(IRQS_ONESHOT),
BIT_MASK_DESCR(IRQS_REPLAY),
BIT_MASK_DESCR(IRQS_WAITING),
BIT_MASK_DESCR(IRQS_PENDING),
BIT_MASK_DESCR(IRQS_SUSPENDED),
BIT_MASK_DESCR(IRQS_NMI),
};
static int irq_debug_show(struct seq_file *m, void *p)
{
struct irq_desc *desc = m->private;
struct irq_data *data;
raw_spin_lock_irq(&desc->lock);
data = irq_desc_get_irq_data(desc);
seq_printf(m, "handler: %ps\n", desc->handle_irq);
seq_printf(m, "device: %s\n", desc->dev_name);
seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors);
irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
ARRAY_SIZE(irqdesc_states));
seq_printf(m, "istate: 0x%08x\n", desc->istate);
irq_debug_show_bits(m, 0, desc->istate, irqdesc_istates,
ARRAY_SIZE(irqdesc_istates));
seq_printf(m, "ddepth: %u\n", desc->depth);
seq_printf(m, "wdepth: %u\n", desc->wake_depth);
seq_printf(m, "dstate: 0x%08x\n", irqd_get(data));
irq_debug_show_bits(m, 0, irqd_get(data), irqdata_states,
ARRAY_SIZE(irqdata_states));
seq_printf(m, "node: %d\n", irq_data_get_node(data));
irq_debug_show_masks(m, desc);
irq_debug_show_data(m, data, 0);
raw_spin_unlock_irq(&desc->lock);
return 0;
}
static int irq_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, irq_debug_show, inode->i_private);
}
static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct irq_desc *desc = file_inode(file)->i_private;
char buf[8] = { 0, };
size_t size;
size = min(sizeof(buf) - 1, count);
if (copy_from_user(buf, user_buf, size))
return -EFAULT;
if (!strncmp(buf, "trigger", size)) {
int err = irq_inject_interrupt(irq_desc_get_irq(desc));
return err ? err : count;
}
return count;
}
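/*
 * Usage sketch from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug (see irq_debugfs_init() below for the layout):
 *
 *	# echo trigger > /sys/kernel/debug/irq/irqs/<irq>
 *
 * injects the interrupt; any other string is accepted and ignored.
 */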
static const struct file_operations dfs_irq_ops = {
.open = irq_debug_open,
.write = irq_debug_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
void irq_debugfs_copy_devname(int irq, struct device *dev)
{
struct irq_desc *desc = irq_to_desc(irq);
const char *name = dev_name(dev);
if (name)
desc->dev_name = kstrdup(name, GFP_KERNEL);
}
void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
{
	char name[10];
if (!irq_dir || !desc || desc->debugfs_file)
return;
sprintf(name, "%d", irq);
desc->debugfs_file = debugfs_create_file(name, 0644, irq_dir, desc,
&dfs_irq_ops);
}
static int __init irq_debugfs_init(void)
{
struct dentry *root_dir;
int irq;
root_dir = debugfs_create_dir("irq", NULL);
irq_domain_debugfs_init(root_dir);
irq_dir = debugfs_create_dir("irqs", root_dir);
irq_lock_sparse();
for_each_active_irq(irq)
irq_add_debugfs_entry(irq, irq_to_desc(irq));
irq_unlock_sparse();
return 0;
}
__initcall(irq_debugfs_init);
| linux-master | kernel/irq/debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
*
* This file contains spurious interrupt handling.
*/
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
#include "internals.h"
static int irqfixup __read_mostly;
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(struct timer_list *unused);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
static int irq_poll_cpu;
static atomic_t irq_poll_active;
/*
* We wait here for a poller to finish.
*
* If the poll runs on this CPU, then we yell loudly and return
* false. That will leave the interrupt line disabled in the worst
* case, but it should never happen.
*
 * We wait until the poller is done and then recheck the disabled
 * state and the action (both may be about to go away). Only if the
 * interrupt is still active do we return true and let the handler run.
*/
bool irq_wait_for_poll(struct irq_desc *desc)
__must_hold(&desc->lock)
{
if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
"irq poll in progress on cpu %d for irq %d\n",
smp_processor_id(), desc->irq_data.irq))
return false;
#ifdef CONFIG_SMP
do {
raw_spin_unlock(&desc->lock);
while (irqd_irq_inprogress(&desc->irq_data))
cpu_relax();
raw_spin_lock(&desc->lock);
} while (irqd_irq_inprogress(&desc->irq_data));
/* Might have been disabled in meantime */
return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
return false;
#endif
}
/*
* Recovery handler for misrouted interrupts.
*/
static int try_one_irq(struct irq_desc *desc, bool force)
{
irqreturn_t ret = IRQ_NONE;
struct irqaction *action;
raw_spin_lock(&desc->lock);
/*
* PER_CPU, nested thread interrupts and interrupts explicitly
* marked polled are excluded from polling.
*/
if (irq_settings_is_per_cpu(desc) ||
irq_settings_is_nested_thread(desc) ||
irq_settings_is_polled(desc))
goto out;
/*
* Do not poll disabled interrupts unless the spurious
* disabled poller asks explicitly.
*/
if (irqd_irq_disabled(&desc->irq_data) && !force)
goto out;
/*
* All handlers must agree on IRQF_SHARED, so we test just the
* first.
*/
action = desc->action;
if (!action || !(action->flags & IRQF_SHARED) ||
(action->flags & __IRQF_TIMER))
goto out;
/* Already running on another processor */
if (irqd_irq_inprogress(&desc->irq_data)) {
/*
 * Already running: if it is shared, get the other
 * CPU to go looking for our mystery interrupt too
*/
desc->istate |= IRQS_PENDING;
goto out;
}
/* Mark it poll in progress */
desc->istate |= IRQS_POLL_INPROGRESS;
do {
if (handle_irq_event(desc) == IRQ_HANDLED)
ret = IRQ_HANDLED;
/* Make sure that there is still a valid action */
action = desc->action;
} while ((desc->istate & IRQS_PENDING) && action);
desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
raw_spin_unlock(&desc->lock);
return ret == IRQ_HANDLED;
}
static int misrouted_irq(int irq)
{
struct irq_desc *desc;
int i, ok = 0;
if (atomic_inc_return(&irq_poll_active) != 1)
goto out;
irq_poll_cpu = smp_processor_id();
for_each_irq_desc(i, desc) {
if (!i)
continue;
if (i == irq) /* Already tried */
continue;
if (try_one_irq(desc, false))
ok = 1;
}
out:
atomic_dec(&irq_poll_active);
/* So the caller can adjust the irq error counts */
return ok;
}
static void poll_spurious_irqs(struct timer_list *unused)
{
struct irq_desc *desc;
int i;
if (atomic_inc_return(&irq_poll_active) != 1)
goto out;
irq_poll_cpu = smp_processor_id();
for_each_irq_desc(i, desc) {
unsigned int state;
if (!i)
continue;
/* Racy but it doesn't matter */
state = desc->istate;
barrier();
if (!(state & IRQS_SPURIOUS_DISABLED))
continue;
local_irq_disable();
try_one_irq(desc, true);
local_irq_enable();
}
out:
atomic_dec(&irq_poll_active);
mod_timer(&poll_spurious_irq_timer,
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
static inline int bad_action_ret(irqreturn_t action_ret)
{
unsigned int r = action_ret;
if (likely(r <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
return 0;
return 1;
}
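/*
 * For reference: with IRQ_NONE == 0, IRQ_HANDLED == 1 and
 * IRQ_WAKE_THREAD == 2, the only sane return values are 0..3:
 *
 *	bad_action_ret(IRQ_HANDLED | IRQ_WAKE_THREAD)	-> 0
 *	bad_action_ret(IRQ_NONE)			-> 0
 *	bad_action_ret((irqreturn_t)0x10)		-> 1 (bogus)
 */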
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Emit a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100 of those 100,000 interrupts may have come from a
 * correctly functioning device sharing an IRQ with the failing one.)
*/
static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
unsigned int irq = irq_desc_get_irq(desc);
struct irqaction *action;
unsigned long flags;
if (bad_action_ret(action_ret)) {
printk(KERN_ERR "irq event %d: bogus return value %x\n",
irq, action_ret);
} else {
printk(KERN_ERR "irq %d: nobody cared (try booting with "
"the \"irqpoll\" option)\n", irq);
}
dump_stack();
printk(KERN_ERR "handlers:\n");
/*
* We need to take desc->lock here. note_interrupt() is called
 * w/o desc->lock held, but with IRQD_IRQ_INPROGRESS set. We might race
* with something else removing an action. It's ok to take
* desc->lock here. See synchronize_irq().
*/
raw_spin_lock_irqsave(&desc->lock, flags);
for_each_action_of_desc(desc, action) {
printk(KERN_ERR "[<%p>] %ps", action->handler, action->handler);
if (action->thread_fn)
printk(KERN_CONT " threaded [<%p>] %ps",
action->thread_fn, action->thread_fn);
printk(KERN_CONT "\n");
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
static int count = 100;
if (count > 0) {
count--;
__report_bad_irq(desc, action_ret);
}
}
static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret)
{
struct irqaction *action;
if (!irqfixup)
return 0;
/* We didn't actually handle the IRQ - see if it was misrouted? */
if (action_ret == IRQ_NONE)
return 1;
/*
* But for 'irqfixup == 2' we also do it for handled interrupts if
* they are marked as IRQF_IRQPOLL (or for irq zero, which is the
 * traditional PC timer interrupt - legacy).
*/
if (irqfixup < 2)
return 0;
if (!irq)
return 1;
/*
* Since we don't get the descriptor lock, "action" can
* change under us. We don't really care, but we don't
* want to follow a NULL pointer. So tell the compiler to
* just load it once by using a barrier.
*/
action = desc->action;
barrier();
return action && (action->flags & IRQF_IRQPOLL);
}
#define SPURIOUS_DEFERRED 0x80000000
void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
unsigned int irq;
if (desc->istate & IRQS_POLL_INPROGRESS ||
irq_settings_is_polled(desc))
return;
if (bad_action_ret(action_ret)) {
report_bad_irq(desc, action_ret);
return;
}
/*
* We cannot call note_interrupt from the threaded handler
* because we need to look at the compound of all handlers
 * (primary and threaded). Aside from that, in the threaded
* shared case we have no serialization against an incoming
* hardware interrupt while we are dealing with a threaded
* result.
*
* So in case a thread is woken, we just note the fact and
* defer the analysis to the next hardware interrupt.
*
* The threaded handlers store whether they successfully
* handled an interrupt and we check whether that number
* changed versus the last invocation.
*
* We could handle all interrupts with the delayed by one
* mechanism, but for the non forced threaded case we'd just
* add pointless overhead to the straight hardirq interrupts
* for the sake of a few lines less code.
*/
if (action_ret & IRQ_WAKE_THREAD) {
/*
* There is a thread woken. Check whether one of the
* shared primary handlers returned IRQ_HANDLED. If
* not we defer the spurious detection to the next
* interrupt.
*/
if (action_ret == IRQ_WAKE_THREAD) {
int handled;
/*
* We use bit 31 of thread_handled_last to
* denote the deferred spurious detection
* active. No locking necessary as
* thread_handled_last is only accessed here
* and we have the guarantee that hard
* interrupts are not reentrant.
*/
if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
desc->threads_handled_last |= SPURIOUS_DEFERRED;
return;
}
/*
* Check whether one of the threaded handlers
* returned IRQ_HANDLED since the last
* interrupt happened.
*
* For simplicity we just set bit 31, as it is
* set in threads_handled_last as well. So we
* avoid extra masking. And we really do not
* care about the high bits of the handled
* count. We just care about the count being
* different than the one we saw before.
*/
handled = atomic_read(&desc->threads_handled);
handled |= SPURIOUS_DEFERRED;
if (handled != desc->threads_handled_last) {
action_ret = IRQ_HANDLED;
/*
* Note: We keep the SPURIOUS_DEFERRED
* bit set. We are handling the
* previous invocation right now.
* Keep it for the current one, so the
* next hardware interrupt will
* account for it.
*/
desc->threads_handled_last = handled;
} else {
/*
* None of the threaded handlers felt
* responsible for the last interrupt
*
* We keep the SPURIOUS_DEFERRED bit
* set in threads_handled_last as we
* need to account for the current
* interrupt as well.
*/
action_ret = IRQ_NONE;
}
} else {
/*
* One of the primary handlers returned
* IRQ_HANDLED. So we don't care about the
* threaded handlers on the same line. Clear
* the deferred detection bit.
*
* In theory we could/should check whether the
* deferred bit is set and take the result of
* the previous run into account here as
* well. But it's really not worth the
* trouble. If every other interrupt is
* handled we never trigger the spurious
* detector. And if this is just the one out
* of 100k unhandled ones which is handled
 * then we merely delay the spurious detection
* by one hard interrupt. Not a real problem.
*/
desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
}
}
if (unlikely(action_ret == IRQ_NONE)) {
/*
 * If we are seeing only the odd spurious IRQ caused by
 * bus asynchronicity then don't eventually trigger an error;
 * otherwise the counter becomes a doomsday timer for otherwise
 * working systems.
*/
if (time_after(jiffies, desc->last_unhandled + HZ/10))
desc->irqs_unhandled = 1;
else
desc->irqs_unhandled++;
desc->last_unhandled = jiffies;
}
irq = irq_desc_get_irq(desc);
if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
int ok = misrouted_irq(irq);
if (action_ret == IRQ_NONE)
desc->irqs_unhandled -= ok;
}
if (likely(!desc->irqs_unhandled))
return;
/* Now getting into unhandled irq detection */
desc->irq_count++;
if (likely(desc->irq_count < 100000))
return;
desc->irq_count = 0;
if (unlikely(desc->irqs_unhandled > 99900)) {
/*
* The interrupt is stuck
*/
__report_bad_irq(desc, action_ret);
/*
* Now kill the IRQ
*/
printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
desc->istate |= IRQS_SPURIOUS_DISABLED;
desc->depth++;
irq_disable(desc);
mod_timer(&poll_spurious_irq_timer,
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
desc->irqs_unhandled = 0;
}
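/*
 * Worked sketch of the deferral above, assuming a shared line whose
 * primary handlers only return IRQ_WAKE_THREAD:
 *
 *	irq #1: threads_handled_last has bit 31 clear
 *		-> set SPURIOUS_DEFERRED, defer the verdict
 *	irq #2: a thread ran meanwhile, threads_handled incremented,
 *		handled != threads_handled_last
 *		-> account as IRQ_HANDLED, remember the new count
 *	irq #3: no thread progress, handled == threads_handled_last
 *		-> account as IRQ_NONE, feeding irqs_unhandled
 */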
bool noirqdebug __read_mostly;
int noirqdebug_setup(char *str)
{
noirqdebug = 1;
printk(KERN_INFO "IRQ lockup detection disabled\n");
return 1;
}
__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
static int __init irqfixup_setup(char *str)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
pr_warn("irqfixup boot option not supported with PREEMPT_RT\n");
return 1;
}
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
return 1;
}
__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
pr_warn("irqpoll boot option not supported with PREEMPT_RT\n");
return 1;
}
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
printk(KERN_WARNING "This may significantly impact system "
"performance\n");
return 1;
}
__setup("irqpoll", irqpoll_setup);
| linux-master | kernel/irq/spurious.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
* Copyright (C) 2005-2006, Thomas Gleixner, Russell King
*
* This file contains the interrupt descriptor management code. Detailed
* information is available in Documentation/core-api/genericirq.rst
*
*/
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/maple_tree.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>
#include "internals.h"
/*
* lockdep: we want to handle all irq_desc locks as a single lock-class:
*/
static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
alloc_bootmem_cpumask_var(&irq_default_affinity);
cpulist_parse(str, irq_default_affinity);
/*
* Set at least the boot cpu. We don't want to end up with
* bugreports caused by random commandline masks
*/
cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
static void __init init_irq_default_affinity(void)
{
if (!cpumask_available(irq_default_affinity))
zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
if (cpumask_empty(irq_default_affinity))
cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
GFP_KERNEL, node))
return -ENOMEM;
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
GFP_KERNEL, node)) {
free_cpumask_var(desc->irq_common_data.affinity);
return -ENOMEM;
}
#endif
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
free_cpumask_var(desc->irq_common_data.affinity);
return -ENOMEM;
}
#endif
return 0;
}
static void desc_smp_init(struct irq_desc *desc, int node,
const struct cpumask *affinity)
{
if (!affinity)
affinity = irq_default_affinity;
cpumask_copy(desc->irq_common_data.affinity, affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
desc->irq_common_data.node = node;
#endif
}
#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
const struct cpumask *affinity, struct module *owner)
{
int cpu;
desc->irq_common_data.handler_data = NULL;
desc->irq_common_data.msi_desc = NULL;
desc->irq_data.common = &desc->irq_common_data;
desc->irq_data.irq = irq;
desc->irq_data.chip = &no_irq_chip;
desc->irq_data.chip_data = NULL;
irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
desc->handle_irq = handle_bad_irq;
desc->depth = 1;
desc->irq_count = 0;
desc->irqs_unhandled = 0;
desc->tot_count = 0;
desc->name = NULL;
desc->owner = owner;
for_each_possible_cpu(cpu)
*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
desc_smp_init(desc, node, affinity);
}
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
static DEFINE_MUTEX(sparse_irq_lock);
static struct maple_tree sparse_irqs = MTREE_INIT_EXT(sparse_irqs,
MT_FLAGS_ALLOC_RANGE |
MT_FLAGS_LOCK_EXTERN |
MT_FLAGS_USE_RCU,
sparse_irq_lock);
static int irq_find_free_area(unsigned int from, unsigned int cnt)
{
MA_STATE(mas, &sparse_irqs, 0, 0);
if (mas_empty_area(&mas, from, MAX_SPARSE_IRQS, cnt))
return -ENOSPC;
return mas.index;
}
static unsigned int irq_find_at_or_after(unsigned int offset)
{
unsigned long index = offset;
struct irq_desc *desc = mt_find(&sparse_irqs, &index, nr_irqs);
return desc ? irq_desc_get_irq(desc) : nr_irqs;
}
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
MA_STATE(mas, &sparse_irqs, irq, irq);
WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0);
}
static void delete_irq_desc(unsigned int irq)
{
MA_STATE(mas, &sparse_irqs, irq, irq);
mas_erase(&mas);
}
#ifdef CONFIG_SPARSE_IRQ
static void irq_kobj_release(struct kobject *kobj);
#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;
#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
static ssize_t per_cpu_count_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
ssize_t ret = 0;
char *p = "";
int cpu;
for_each_possible_cpu(cpu) {
unsigned int c = irq_desc_kstat_cpu(desc, cpu);
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
p = ",";
}
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
return ret;
}
IRQ_ATTR_RO(per_cpu_count);
static ssize_t chip_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
ssize_t ret = 0;
raw_spin_lock_irq(&desc->lock);
if (desc->irq_data.chip && desc->irq_data.chip->name) {
ret = scnprintf(buf, PAGE_SIZE, "%s\n",
desc->irq_data.chip->name);
}
raw_spin_unlock_irq(&desc->lock);
return ret;
}
IRQ_ATTR_RO(chip_name);
static ssize_t hwirq_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
ssize_t ret = 0;
raw_spin_lock_irq(&desc->lock);
if (desc->irq_data.domain)
ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
raw_spin_unlock_irq(&desc->lock);
return ret;
}
IRQ_ATTR_RO(hwirq);
static ssize_t type_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
ssize_t ret = 0;
raw_spin_lock_irq(&desc->lock);
ret = sprintf(buf, "%s\n",
irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
raw_spin_unlock_irq(&desc->lock);
return ret;
}
IRQ_ATTR_RO(type);
static ssize_t wakeup_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
ssize_t ret = 0;
raw_spin_lock_irq(&desc->lock);
ret = sprintf(buf, "%s\n",
irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
raw_spin_unlock_irq(&desc->lock);
return ret;
}
IRQ_ATTR_RO(wakeup);
static ssize_t name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
ssize_t ret = 0;
raw_spin_lock_irq(&desc->lock);
if (desc->name)
ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
raw_spin_unlock_irq(&desc->lock);
return ret;
}
IRQ_ATTR_RO(name);
static ssize_t actions_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
struct irqaction *action;
ssize_t ret = 0;
char *p = "";
raw_spin_lock_irq(&desc->lock);
for_each_action_of_desc(desc, action) {
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
p, action->name);
p = ",";
}
raw_spin_unlock_irq(&desc->lock);
if (ret)
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
return ret;
}
IRQ_ATTR_RO(actions);
static struct attribute *irq_attrs[] = {
&per_cpu_count_attr.attr,
&chip_name_attr.attr,
&hwirq_attr.attr,
&type_attr.attr,
&wakeup_attr.attr,
&name_attr.attr,
&actions_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(irq);
static const struct kobj_type irq_kobj_type = {
.release = irq_kobj_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = irq_groups,
};
static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
if (irq_kobj_base) {
/*
* Continue even in case of failure as this is nothing
* crucial and failures in the late irq_sysfs_init()
* cannot be rolled back.
*/
if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
pr_warn("Failed to add kobject for irq %d\n", irq);
else
desc->istate |= IRQS_SYSFS;
}
}
static void irq_sysfs_del(struct irq_desc *desc)
{
/*
* Only invoke kobject_del() when kobject_add() was successfully
* invoked for the descriptor. This covers both early boot, where
* sysfs is not initialized yet, and the case of a failed
* kobject_add() invocation.
*/
if (desc->istate & IRQS_SYSFS)
kobject_del(&desc->kobj);
}
static int __init irq_sysfs_init(void)
{
struct irq_desc *desc;
int irq;
/* Prevent concurrent irq alloc/free */
irq_lock_sparse();
irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
if (!irq_kobj_base) {
irq_unlock_sparse();
return -ENOMEM;
}
/* Add the already allocated interrupts */
for_each_irq_desc(irq, desc)
irq_sysfs_add(irq, desc);
irq_unlock_sparse();
return 0;
}
postcore_initcall(irq_sysfs_init);
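/*
 * Resulting layout, for reference: each allocated interrupt gets a
 * directory /sys/kernel/irq/<irq>/ containing the read-only files
 * per_cpu_count, chip_name, hwirq, type, wakeup, name and actions.
 */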
#else /* !CONFIG_SYSFS */
static const struct kobj_type irq_kobj_type = {
.release = irq_kobj_release,
};
static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
static void irq_sysfs_del(struct irq_desc *desc) {}
#endif /* CONFIG_SYSFS */
struct irq_desc *irq_to_desc(unsigned int irq)
{
return mtree_load(&sparse_irqs, irq);
}
#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
EXPORT_SYMBOL_GPL(irq_to_desc);
#endif
#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
free_cpumask_var(desc->pending_mask);
#endif
free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif
void irq_lock_sparse(void)
{
mutex_lock(&sparse_irq_lock);
}
void irq_unlock_sparse(void)
{
mutex_unlock(&sparse_irq_lock);
}
static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
const struct cpumask *affinity,
struct module *owner)
{
struct irq_desc *desc;
desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
if (!desc)
return NULL;
/* allocate based on nr_cpu_ids */
desc->kstat_irqs = alloc_percpu(unsigned int);
if (!desc->kstat_irqs)
goto err_desc;
if (alloc_masks(desc, node))
goto err_kstat;
raw_spin_lock_init(&desc->lock);
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
mutex_init(&desc->request_mutex);
init_rcu_head(&desc->rcu);
init_waitqueue_head(&desc->wait_for_threads);
desc_set_defaults(irq, desc, node, affinity, owner);
irqd_set(&desc->irq_data, flags);
kobject_init(&desc->kobj, &irq_kobj_type);
irq_resend_init(desc);
return desc;
err_kstat:
free_percpu(desc->kstat_irqs);
err_desc:
kfree(desc);
return NULL;
}
static void irq_kobj_release(struct kobject *kobj)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
free_masks(desc);
free_percpu(desc->kstat_irqs);
kfree(desc);
}
static void delayed_free_desc(struct rcu_head *rhp)
{
struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
kobject_put(&desc->kobj);
}
static void free_desc(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
irq_remove_debugfs_entry(desc);
unregister_irq_proc(irq, desc);
/*
 * sparse_irq_lock also protects show_interrupts() and
 * kstat_irqs_usr(). Once we deleted the descriptor from the
 * sparse tree we can free it. Access in proc will fail to
 * lookup the descriptor.
*
* The sysfs entry must be serialized against a concurrent
* irq_sysfs_init() as well.
*/
irq_sysfs_del(desc);
delete_irq_desc(irq);
/*
 * We free the descriptor, masks and stat fields via RCU. That
 * allows demultiplexing interrupts to do RCU-based management
 * of their child interrupts.
 * This also allows us to use RCU in kstat_irqs_usr().
*/
call_rcu(&desc->rcu, delayed_free_desc);
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
const struct irq_affinity_desc *affinity,
struct module *owner)
{
struct irq_desc *desc;
int i;
/* Validate affinity mask(s) */
if (affinity) {
for (i = 0; i < cnt; i++) {
if (cpumask_empty(&affinity[i].mask))
return -EINVAL;
}
}
for (i = 0; i < cnt; i++) {
const struct cpumask *mask = NULL;
unsigned int flags = 0;
if (affinity) {
if (affinity->is_managed) {
flags = IRQD_AFFINITY_MANAGED |
IRQD_MANAGED_SHUTDOWN;
}
mask = &affinity->mask;
node = cpu_to_node(cpumask_first(mask));
affinity++;
}
desc = alloc_desc(start + i, node, flags, mask, owner);
if (!desc)
goto err;
irq_insert_desc(start + i, desc);
irq_sysfs_add(start + i, desc);
irq_add_debugfs_entry(start + i, desc);
}
return start;
err:
for (i--; i >= 0; i--)
free_desc(start + i);
return -ENOMEM;
}
static int irq_expand_nr_irqs(unsigned int nr)
{
if (nr > MAX_SPARSE_IRQS)
return -ENOMEM;
nr_irqs = nr;
return 0;
}
int __init early_irq_init(void)
{
int i, initcnt, node = first_online_node;
struct irq_desc *desc;
init_irq_default_affinity();
/* Let arch update nr_irqs and return the nr of preallocated irqs */
initcnt = arch_probe_nr_irqs();
printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
NR_IRQS, nr_irqs, initcnt);
if (WARN_ON(nr_irqs > MAX_SPARSE_IRQS))
nr_irqs = MAX_SPARSE_IRQS;
if (WARN_ON(initcnt > MAX_SPARSE_IRQS))
initcnt = MAX_SPARSE_IRQS;
if (initcnt > nr_irqs)
nr_irqs = initcnt;
for (i = 0; i < initcnt; i++) {
desc = alloc_desc(i, node, 0, NULL, NULL);
irq_insert_desc(i, desc);
}
return arch_early_irq_init();
}
#else /* !CONFIG_SPARSE_IRQ */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS-1] = {
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
}
};
int __init early_irq_init(void)
{
int count, i, node = first_online_node;
struct irq_desc *desc;
init_irq_default_affinity();
printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
desc = irq_desc;
count = ARRAY_SIZE(irq_desc);
for (i = 0; i < count; i++) {
desc[i].kstat_irqs = alloc_percpu(unsigned int);
alloc_masks(&desc[i], node);
raw_spin_lock_init(&desc[i].lock);
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
mutex_init(&desc[i].request_mutex);
init_waitqueue_head(&desc[i].wait_for_threads);
desc_set_defaults(i, &desc[i], node, NULL, NULL);
		irq_resend_init(&desc[i]);
}
return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);
static void free_desc(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
raw_spin_lock_irqsave(&desc->lock, flags);
desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
raw_spin_unlock_irqrestore(&desc->lock, flags);
delete_irq_desc(irq);
}
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
const struct irq_affinity_desc *affinity,
struct module *owner)
{
u32 i;
for (i = 0; i < cnt; i++) {
struct irq_desc *desc = irq_to_desc(start + i);
desc->owner = owner;
irq_insert_desc(start + i, desc);
}
return start;
}
static int irq_expand_nr_irqs(unsigned int nr)
{
return -ENOMEM;
}
void irq_mark_irq(unsigned int irq)
{
mutex_lock(&sparse_irq_lock);
irq_insert_desc(irq, irq_desc + irq);
mutex_unlock(&sparse_irq_lock);
}
#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
free_desc(irq);
}
#endif
#endif /* !CONFIG_SPARSE_IRQ */
int handle_irq_desc(struct irq_desc *desc)
{
struct irq_data *data;
if (!desc)
return -EINVAL;
data = irq_desc_get_irq_data(desc);
if (WARN_ON_ONCE(!in_hardirq() && handle_enforce_irqctx(data)))
return -EPERM;
generic_handle_irq_desc(desc);
return 0;
}
/**
* generic_handle_irq - Invoke the handler for a particular irq
* @irq: The irq number to handle
*
* Returns: 0 on success, or -EINVAL if conversion has failed
*
* This function must be called from an IRQ context with irq regs
* initialized.
*/
int generic_handle_irq(unsigned int irq)
{
return handle_irq_desc(irq_to_desc(irq));
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
/**
* generic_handle_irq_safe - Invoke the handler for a particular irq from any
* context.
* @irq: The irq number to handle
*
* Returns: 0 on success, a negative value on error.
*
* This function can be called from any context (IRQ or process context). It
* will report an error if not invoked from IRQ context and the irq has been
* marked to enforce IRQ-context only.
*/
int generic_handle_irq_safe(unsigned int irq)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = handle_irq_desc(irq_to_desc(irq));
local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_irq_safe);
#ifdef CONFIG_IRQ_DOMAIN
/**
* generic_handle_domain_irq - Invoke the handler for a HW irq belonging
* to a domain.
* @domain: The domain where to perform the lookup
* @hwirq: The HW irq number to convert to a logical one
*
* Returns: 0 on success, or -EINVAL if conversion has failed
*
* This function must be called from an IRQ context with irq regs
* initialized.
*/
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
{
return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
/**
 * generic_handle_domain_irq_safe - Invoke the handler for a HW irq
 * belonging to a domain from any context.
* @domain: The domain where to perform the lookup
* @hwirq: The HW irq number to convert to a logical one
*
* Returns: 0 on success, a negative value on error.
*
* This function can be called from any context (IRQ or process
* context). If the interrupt is marked as 'enforce IRQ-context only' then
* the function must be invoked from hard interrupt context.
*/
int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq));
local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe);
/**
* generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
* to a domain.
* @domain: The domain where to perform the lookup
* @hwirq: The HW irq number to convert to a logical one
*
* Returns: 0 on success, or -EINVAL if conversion has failed
*
* This function must be called from an NMI context with irq regs
* initialized.
 */
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq)
{
WARN_ON_ONCE(!in_nmi());
return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
#endif
/* Dynamic interrupt handling */
/**
* irq_free_descs - free irq descriptors
* @from: Start of descriptor range
* @cnt: Number of consecutive irqs to free
*/
void irq_free_descs(unsigned int from, unsigned int cnt)
{
int i;
if (from >= nr_irqs || (from + cnt) > nr_irqs)
return;
mutex_lock(&sparse_irq_lock);
for (i = 0; i < cnt; i++)
free_desc(from + i);
mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);
/**
* __irq_alloc_descs - allocate and initialize a range of irq descriptors
* @irq: Allocate for specific irq number if irq >= 0
* @from: Start the search from this irq number
* @cnt: Number of consecutive irqs to allocate.
* @node: Preferred node on which the irq descriptor should be allocated
* @owner: Owning module (can be NULL)
* @affinity: Optional pointer to an affinity mask array of size @cnt which
* hints where the irq descriptors should be allocated and which
* default affinities to use
*
* Returns the first irq number or error code
*/
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
struct module *owner, const struct irq_affinity_desc *affinity)
{
int start, ret;
if (!cnt)
return -EINVAL;
if (irq >= 0) {
if (from > irq)
return -EINVAL;
from = irq;
} else {
/*
* For interrupts which are freely allocated the
* architecture can force a lower bound to the @from
* argument. x86 uses this to exclude the GSI space.
*/
from = arch_dynirq_lower_bound(from);
}
mutex_lock(&sparse_irq_lock);
start = irq_find_free_area(from, cnt);
ret = -EEXIST;
	if (irq >= 0 && start != irq)
goto unlock;
if (start + cnt > nr_irqs) {
ret = irq_expand_nr_irqs(start + cnt);
if (ret)
goto unlock;
}
ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
mutex_unlock(&sparse_irq_lock);
return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
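/*
 * A minimal usage sketch via the helper macros from <linux/irq.h>
 * (irq_alloc_descs() wraps __irq_alloc_descs() with THIS_MODULE and
 * a NULL affinity):
 *
 *	int virq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (virq < 0)
 *		return virq;
 *	...
 *	irq_free_descs(virq, 4);
 */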
/**
* irq_get_next_irq - get next allocated irq number
* @offset: where to start the search
*
* Returns next irq number after offset or nr_irqs if none is found.
*/
unsigned int irq_get_next_irq(unsigned int offset)
{
return irq_find_at_or_after(offset);
}
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
unsigned int check)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc) {
if (check & _IRQ_DESC_CHECK) {
if ((check & _IRQ_DESC_PERCPU) &&
!irq_settings_is_per_cpu_devid(desc))
return NULL;
if (!(check & _IRQ_DESC_PERCPU) &&
irq_settings_is_per_cpu_devid(desc))
return NULL;
}
if (bus)
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, *flags);
}
return desc;
}
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
__releases(&desc->lock)
{
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (bus)
chip_bus_sync_unlock(desc);
}
int irq_set_percpu_devid_partition(unsigned int irq,
const struct cpumask *affinity)
{
struct irq_desc *desc = irq_to_desc(irq);
if (!desc)
return -EINVAL;
if (desc->percpu_enabled)
return -EINVAL;
desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
if (!desc->percpu_enabled)
return -ENOMEM;
if (affinity)
desc->percpu_affinity = affinity;
else
desc->percpu_affinity = cpu_possible_mask;
irq_set_percpu_devid_flags(irq);
return 0;
}
int irq_set_percpu_devid(unsigned int irq)
{
return irq_set_percpu_devid_partition(irq, NULL);
}
int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
struct irq_desc *desc = irq_to_desc(irq);
if (!desc || !desc->percpu_enabled)
return -EINVAL;
if (affinity)
cpumask_copy(affinity, desc->percpu_affinity);
return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);
void kstat_incr_irq_this_cpu(unsigned int irq)
{
kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}
/**
* kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
* @irq: The interrupt number
* @cpu: The cpu number
*
* Returns the sum of interrupt counts on @cpu since boot for
* @irq. The caller must ensure that the interrupt is not removed
* concurrently.
*/
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
return desc && desc->kstat_irqs ?
*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}
static bool irq_is_nmi(struct irq_desc *desc)
{
return desc->istate & IRQS_NMI;
}
static unsigned int kstat_irqs(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned int sum = 0;
int cpu;
if (!desc || !desc->kstat_irqs)
return 0;
if (!irq_settings_is_per_cpu_devid(desc) &&
!irq_settings_is_per_cpu(desc) &&
!irq_is_nmi(desc))
return data_race(desc->tot_count);
for_each_possible_cpu(cpu)
sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
return sum;
}
/**
* kstat_irqs_usr - Get the statistics for an interrupt from thread context
* @irq: The interrupt number
*
* Returns the sum of interrupt counts on all cpus since boot for @irq.
*
* It uses rcu to protect the access since a concurrent removal of an
 * interrupt descriptor observes an rcu grace period before
* delayed_free_desc()/irq_kobj_release().
*/
unsigned int kstat_irqs_usr(unsigned int irq)
{
unsigned int sum;
rcu_read_lock();
sum = kstat_irqs(irq);
rcu_read_unlock();
return sum;
}
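/*
 * Illustrative sketch (not part of this file): kstat_irqs_usr() is the
 * helper behind the per-interrupt counts shown in /proc/interrupts. A
 * hypothetical diagnostic running in sleepable context could use it as:
 *
 *	unsigned int count = kstat_irqs_usr(irq);
 *
 *	pr_info("irq %u fired %u times since boot\n", irq, count);
 *
 * The rcu_read_lock() taken inside is what makes the access safe against
 * a concurrent removal of the interrupt descriptor.
 */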
#ifdef CONFIG_LOCKDEP
void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
struct lock_class_key *request_class)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc) {
lockdep_set_class(&desc->lock, lock_class);
lockdep_set_class(&desc->request_mutex, request_class);
}
}
EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
#endif
| linux-master | kernel/irq/irqdesc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
* Copyright (C) 2005-2006 Thomas Gleixner
*
* This file contains driver APIs to the irq subsystem.
*/
#define pr_fmt(fmt) "genirq: " fmt
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>
#include "internals.h"
#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);
static int __init setup_forced_irqthreads(char *arg)
{
static_branch_enable(&force_irqthreads_key);
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
struct irq_data *irqd = irq_desc_get_irq_data(desc);
bool inprogress;
do {
unsigned long flags;
/*
* Wait until we're out of the critical section. This might
* give the wrong answer due to the lack of memory barriers.
*/
while (irqd_irq_inprogress(&desc->irq_data))
cpu_relax();
/* Ok, that indicated we're done: double-check carefully. */
raw_spin_lock_irqsave(&desc->lock, flags);
inprogress = irqd_irq_inprogress(&desc->irq_data);
/*
* If requested and supported, check at the chip whether it
* is in flight at the hardware level, i.e. already pending
* in a CPU and waiting for service and acknowledge.
*/
if (!inprogress && sync_chip) {
/*
* Ignore the return code. inprogress is only updated
* when the chip supports it.
*/
__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
&inprogress);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
/* Oops, that failed? */
} while (inprogress);
}
/**
* synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
*
* This function waits for any pending hard IRQ handlers for this
* interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need, you
* will deadlock. It does not take associated threaded handlers
* into account.
*
* Do not use this for shutdown scenarios where you must be sure
* that all parts (hardirq and threaded handler) have completed.
*
* Returns: false if a threaded handler is active.
*
* This function may be called - with care - from IRQ context.
*
* It does not check whether there is an interrupt in flight at the
* hardware level, but not serviced yet, as this might deadlock when
* called with interrupts disabled and the target CPU of the interrupt
* is the current CPU.
*/
bool synchronize_hardirq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc) {
__synchronize_hardirq(desc, false);
return !atomic_read(&desc->threads_active);
}
return true;
}
EXPORT_SYMBOL(synchronize_hardirq);
static void __synchronize_irq(struct irq_desc *desc)
{
__synchronize_hardirq(desc, true);
/*
* We made sure that no hardirq handler is running. Now verify that no
* threaded handlers are active.
*/
wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
*
* This function waits for any pending IRQ handlers for this interrupt
* to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
*
* Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated with @irq.
*
* It optionally makes sure (when the irq chip supports that method)
* that the interrupt is not pending in any CPU and waiting for
* service.
*/
void synchronize_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc)
__synchronize_irq(desc);
}
EXPORT_SYMBOL(synchronize_irq);
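/*
 * Illustrative sketch (not part of this file): a minimal quiesce sequence
 * for a hypothetical driver teardown path. None of the locks held here may
 * be needed by the handler, or this deadlocks as documented above:
 *
 *	disable_irq_nosync(md->irq);
 *	synchronize_irq(md->irq);
 *	... tear down state the handler uses ...
 *	free_irq(md->irq, md);
 *
 * disable_irq_nosync() stops new invocations and synchronize_irq() waits
 * out both the hardirq part and an associated interrupt thread.
 */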
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;
static bool __irq_can_set_affinity(struct irq_desc *desc)
{
if (!desc || !irqd_can_balance(&desc->irq_data) ||
!desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
return false;
return true;
}
/**
* irq_can_set_affinity - Check if the affinity of a given irq can be set
* @irq: Interrupt to check
*
*/
int irq_can_set_affinity(unsigned int irq)
{
return __irq_can_set_affinity(irq_to_desc(irq));
}
/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
* @irq: Interrupt to check
*
* Like irq_can_set_affinity() above, but additionally checks for the
* AFFINITY_MANAGED flag.
*/
bool irq_can_set_affinity_usr(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
return __irq_can_set_affinity(desc) &&
!irqd_affinity_is_managed(&desc->irq_data);
}
/**
* irq_set_thread_affinity - Notify irq threads to adjust affinity
* @desc: irq descriptor which has affinity changed
*
* We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We cannot call
* set_cpus_allowed_ptr() here as we hold desc->lock and this
* code can be called from hard interrupt context.
*/
void irq_set_thread_affinity(struct irq_desc *desc)
{
struct irqaction *action;
for_each_action_of_desc(desc, action) {
if (action->thread)
set_bit(IRQTF_AFFINITY, &action->thread_flags);
if (action->secondary && action->secondary->thread)
set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
}
}
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
if (!cpumask_empty(m))
return;
pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
chip->name, data->irq);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
#endif
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
struct irq_desc *desc = irq_data_to_desc(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
const struct cpumask *prog_mask;
int ret;
static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
static struct cpumask tmp_mask;
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
raw_spin_lock(&tmp_mask_lock);
/*
* If this is a managed interrupt and housekeeping is enabled on
* it check whether the requested affinity mask intersects with
* a housekeeping CPU. If so, then remove the isolated CPUs from
* the mask and just keep the housekeeping CPU(s). This prevents
* the affinity setter from routing the interrupt to an isolated
* CPU to avoid that I/O submitted from a housekeeping CPU causes
* interrupts on an isolated one.
*
* If the masks do not intersect or include online CPU(s) then
* keep the requested mask. The isolated target CPUs are only
* receiving interrupts when the I/O operation was submitted
* directly from them.
*
* If all housekeeping CPUs in the affinity mask are offline, the
* interrupt will be migrated by the CPU hotplug code once a
* housekeeping CPU which belongs to the affinity mask comes
* online.
*/
if (irqd_affinity_is_managed(data) &&
housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
const struct cpumask *hk_mask;
hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
cpumask_and(&tmp_mask, mask, hk_mask);
if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
prog_mask = mask;
else
prog_mask = &tmp_mask;
} else {
prog_mask = mask;
}
/*
* Make sure we only provide online CPUs to the irqchip,
* unless we are being asked to force the affinity (in which
* case we do as we are told).
*/
cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
if (!force && !cpumask_empty(&tmp_mask))
ret = chip->irq_set_affinity(data, &tmp_mask, force);
else if (force)
ret = chip->irq_set_affinity(data, mask, force);
else
ret = -EINVAL;
raw_spin_unlock(&tmp_mask_lock);
switch (ret) {
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
cpumask_copy(desc->irq_common_data.affinity, mask);
fallthrough;
case IRQ_SET_MASK_OK_NOCOPY:
irq_validate_effective_affinity(data);
irq_set_thread_affinity(desc);
ret = 0;
}
return ret;
}
#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
const struct cpumask *dest)
{
struct irq_desc *desc = irq_data_to_desc(data);
irqd_set_move_pending(data);
irq_copy_pending(desc, dest);
return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
const struct cpumask *dest)
{
return -EBUSY;
}
#endif
static int irq_try_set_affinity(struct irq_data *data,
const struct cpumask *dest, bool force)
{
int ret = irq_do_set_affinity(data, dest, force);
/*
* In case that the underlying vector management is busy and the
* architecture supports the generic pending mechanism then utilize
* this to avoid returning an error to user space.
*/
if (ret == -EBUSY && !force)
ret = irq_set_affinity_pending(data, dest);
return ret;
}
static bool irq_set_affinity_deactivated(struct irq_data *data,
const struct cpumask *mask)
{
struct irq_desc *desc = irq_data_to_desc(data);
/*
* Handle irq chips which can handle affinity only in activated
* state correctly
*
* If the interrupt is not yet activated, just store the affinity
* mask and do not call the chip driver at all. On activation the
* driver has to make sure anyway that the interrupt is in a
* usable state so startup works.
*/
if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
irqd_is_activated(data) || !irqd_affinity_on_activate(data))
return false;
cpumask_copy(desc->irq_common_data.affinity, mask);
irq_data_update_effective_affinity(data, mask);
irqd_set(data, IRQD_AFFINITY_SET);
return true;
}
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
bool force)
{
struct irq_chip *chip = irq_data_get_irq_chip(data);
struct irq_desc *desc = irq_data_to_desc(data);
int ret = 0;
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
if (irq_set_affinity_deactivated(data, mask))
return 0;
if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
ret = irq_try_set_affinity(data, mask, force);
} else {
irqd_set_move_pending(data);
irq_copy_pending(desc, mask);
}
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
if (!schedule_work(&desc->affinity_notify->work)) {
/* Work was already scheduled, drop our extra ref */
kref_put(&desc->affinity_notify->kref,
desc->affinity_notify->release);
}
}
irqd_set(data, IRQD_AFFINITY_SET);
return ret;
}
/**
* irq_update_affinity_desc - Update affinity management for an interrupt
* @irq: The interrupt number to update
* @affinity: Pointer to the affinity descriptor
*
* This interface can be used to configure the affinity management of
* interrupts which have been allocated already.
*
 * There are certain limitations on when it may be used - attempts to use it
 * when the kernel is configured for generic IRQ reservation mode (in
* config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
* managed/non-managed interrupt accounting. In addition, attempts to use it on
* an interrupt which is already started or which has already been configured
* as managed will also fail, as these mean invalid init state or double init.
*/
int irq_update_affinity_desc(unsigned int irq,
struct irq_affinity_desc *affinity)
{
struct irq_desc *desc;
unsigned long flags;
bool activated;
int ret = 0;
/*
* Supporting this with the reservation scheme used by x86 needs
* some more thought. Fail it for now.
*/
if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
return -EOPNOTSUPP;
desc = irq_get_desc_buslock(irq, &flags, 0);
if (!desc)
return -EINVAL;
/* Requires the interrupt to be shut down */
if (irqd_is_started(&desc->irq_data)) {
ret = -EBUSY;
goto out_unlock;
}
/* Interrupts which are already managed cannot be modified */
if (irqd_affinity_is_managed(&desc->irq_data)) {
ret = -EBUSY;
goto out_unlock;
}
/*
* Deactivate the interrupt. That's required to undo
* anything an earlier activation has established.
*/
activated = irqd_is_activated(&desc->irq_data);
if (activated)
irq_domain_deactivate_irq(&desc->irq_data);
if (affinity->is_managed) {
irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
}
cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
/* Restore the activation state */
if (activated)
irq_domain_activate_irq(&desc->irq_data, false);
out_unlock:
irq_put_desc_busunlock(desc, flags);
return ret;
}
static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
bool force)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
int ret;
if (!desc)
return -EINVAL;
raw_spin_lock_irqsave(&desc->lock, flags);
ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
/**
* irq_set_affinity - Set the irq affinity of a given irq
* @irq: Interrupt to set affinity
* @cpumask: cpumask
*
* Fails if cpumask does not contain an online CPU
*/
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
return __irq_set_affinity(irq, cpumask, false);
}
EXPORT_SYMBOL_GPL(irq_set_affinity);
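/*
 * Illustrative sketch (not part of this file): pinning an interrupt to a
 * single CPU from a hypothetical driver setup path:
 *
 *	int ret = irq_set_affinity(md->irq, cpumask_of(2));
 *
 *	if (ret)
 *		pr_warn("cannot pin irq %u: %d\n", md->irq, ret);
 *
 * As documented above this fails when the mask contains no online CPU;
 * irq_force_affinity() below skips that check for early hotplug code.
 */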
/**
* irq_force_affinity - Force the irq affinity of a given irq
* @irq: Interrupt to set affinity
* @cpumask: cpumask
*
* Same as irq_set_affinity, but without checking the mask against
* online cpus.
*
* Solely for low level cpu hotplug code, where we need to make per
* cpu interrupts affine before the cpu becomes online.
*/
int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
return __irq_set_affinity(irq, cpumask, true);
}
EXPORT_SYMBOL_GPL(irq_force_affinity);
int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
bool setaffinity)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
desc->affinity_hint = m;
irq_put_desc_unlock(desc, flags);
if (m && setaffinity)
__irq_set_affinity(irq, m, false);
return 0;
}
EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
static void irq_affinity_notify(struct work_struct *work)
{
struct irq_affinity_notify *notify =
container_of(work, struct irq_affinity_notify, work);
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
goto out;
raw_spin_lock_irqsave(&desc->lock, flags);
if (irq_move_pending(&desc->irq_data))
irq_get_pending(cpumask, desc);
else
cpumask_copy(cpumask, desc->irq_common_data.affinity);
raw_spin_unlock_irqrestore(&desc->lock, flags);
notify->notify(notify, cpumask);
free_cpumask_var(cpumask);
out:
kref_put(¬ify->kref, notify->release);
}
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
* @notify: Context for notification, or %NULL to disable
* notification. Function pointers must be initialised;
* the other fields will be initialised by this function.
*
* Must be called in process context. Notification may only be enabled
* after the IRQ is allocated and must be disabled before the IRQ is
* freed using free_irq().
*/
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irq_affinity_notify *old_notify;
unsigned long flags;
/* The release function is promised process context */
might_sleep();
if (!desc || desc->istate & IRQS_NMI)
return -EINVAL;
/* Complete initialisation of *notify */
if (notify) {
notify->irq = irq;
kref_init(¬ify->kref);
INIT_WORK(¬ify->work, irq_affinity_notify);
}
raw_spin_lock_irqsave(&desc->lock, flags);
old_notify = desc->affinity_notify;
desc->affinity_notify = notify;
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (old_notify) {
if (cancel_work_sync(&old_notify->work)) {
/* Pending work had a ref, put that one too */
kref_put(&old_notify->kref, old_notify->release);
}
kref_put(&old_notify->kref, old_notify->release);
}
return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
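/*
 * Illustrative sketch (not part of this file): a hypothetical consumer
 * embeds the irq_affinity_notify block in its own state and provides the
 * two mandatory callbacks:
 *
 *	static void my_notify(struct irq_affinity_notify *notify,
 *			      const cpumask_t *mask)
 *	{
 *		... re-steer per-CPU resources to follow @mask ...
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		struct irq_affinity_notify *notify =
 *			container_of(ref, struct irq_affinity_notify, kref);
 *		... drop the reference on the embedding object ...
 *	}
 *
 *	md->notify.notify = my_notify;
 *	md->notify.release = my_release;
 *	irq_set_affinity_notifier(md->irq, &md->notify);
 *
 * The notifier must be torn down with irq_set_affinity_notifier(irq, NULL)
 * before the interrupt is freed, as documented above.
 */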
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
* Generic version of the affinity autoselector.
*/
int irq_setup_affinity(struct irq_desc *desc)
{
struct cpumask *set = irq_default_affinity;
int ret, node = irq_desc_get_node(desc);
static DEFINE_RAW_SPINLOCK(mask_lock);
static struct cpumask mask;
/* Excludes PER_CPU and NO_BALANCE interrupts */
if (!__irq_can_set_affinity(desc))
return 0;
raw_spin_lock(&mask_lock);
/*
* Preserve the managed affinity setting and a userspace affinity
* setup, but make sure that one of the targets is online.
*/
if (irqd_affinity_is_managed(&desc->irq_data) ||
irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
if (cpumask_intersects(desc->irq_common_data.affinity,
cpu_online_mask))
set = desc->irq_common_data.affinity;
else
irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
}
cpumask_and(&mask, cpu_online_mask, set);
if (cpumask_empty(&mask))
cpumask_copy(&mask, cpu_online_mask);
if (node != NUMA_NO_NODE) {
const struct cpumask *nodemask = cpumask_of_node(node);
/* make sure at least one of the cpus in nodemask is online */
if (cpumask_intersects(&mask, nodemask))
cpumask_and(&mask, &mask, nodemask);
}
ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
raw_spin_unlock(&mask_lock);
return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif /* CONFIG_AUTO_IRQ_AFFINITY */
#endif /* CONFIG_SMP */
/**
* irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
* @irq: interrupt number to set affinity
* @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
* specific data for percpu_devid interrupts
*
* This function uses the vCPU specific data to set the vCPU
* affinity for an irq. The vCPU specific data is passed from
* outside, such as KVM. One example code path is as below:
* KVM -> IOMMU -> irq_set_vcpu_affinity().
*/
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
struct irq_data *data;
struct irq_chip *chip;
int ret = -ENOSYS;
if (!desc)
return -EINVAL;
data = irq_desc_get_irq_data(desc);
do {
chip = irq_data_get_irq_chip(data);
if (chip && chip->irq_set_vcpu_affinity)
break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
data = data->parent_data;
#else
data = NULL;
#endif
} while (data);
if (data)
ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
irq_put_desc_unlock(desc, flags);
return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
void __disable_irq(struct irq_desc *desc)
{
if (!desc->depth++)
irq_disable(desc);
}
static int __disable_irq_nosync(unsigned int irq)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
__disable_irq(desc);
irq_put_desc_busunlock(desc, flags);
return 0;
}
/**
* disable_irq_nosync - disable an irq without waiting
* @irq: Interrupt to disable
*
 * Disable the selected interrupt line. Disables and enables are
* nested.
* Unlike disable_irq(), this function does not ensure existing
* instances of the IRQ handler have completed before returning.
*
* This function may be called from IRQ context.
*/
void disable_irq_nosync(unsigned int irq)
{
__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);
/**
* disable_irq - disable an irq and wait for completion
* @irq: Interrupt to disable
*
 * Disable the selected interrupt line. Enables and disables are
* nested.
* This function waits for any pending IRQ handlers for this interrupt
* to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
*
* Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated with @irq.
*
*/
void disable_irq(unsigned int irq)
{
might_sleep();
if (!__disable_irq_nosync(irq))
synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
/**
* disable_hardirq - disables an irq and waits for hardirq completion
* @irq: Interrupt to disable
*
 * Disable the selected interrupt line. Enables and disables are
* nested.
* This function waits for any pending hard IRQ handlers for this
* interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need, you will deadlock.
*
* When used to optimistically disable an interrupt from atomic context
* the return value must be checked.
*
* Returns: false if a threaded handler is active.
*
* This function may be called - with care - from IRQ context.
*/
bool disable_hardirq(unsigned int irq)
{
if (!__disable_irq_nosync(irq))
return synchronize_hardirq(irq);
return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
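/*
 * Illustrative sketch (not part of this file): optimistic disable from
 * atomic context in a hypothetical polling path; the return value must be
 * checked, as documented above:
 *
 *	if (disable_hardirq(md->irq)) {
 *		... safe: no hardirq and no threaded handler running ...
 *	} else {
 *		... fall back, a threaded handler was still active ...
 *	}
 *	enable_irq(md->irq);
 */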
/**
* disable_nmi_nosync - disable an nmi without waiting
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Disables and enables are
* nested.
* The interrupt to disable must have been requested through request_nmi.
* Unlike disable_nmi(), this function does not ensure existing
* instances of the IRQ handler have completed before returning.
*/
void disable_nmi_nosync(unsigned int irq)
{
disable_irq_nosync(irq);
}
void __enable_irq(struct irq_desc *desc)
{
switch (desc->depth) {
case 0:
err_out:
		WARN(1, "Unbalanced enable for IRQ %d\n",
irq_desc_get_irq(desc));
break;
case 1: {
if (desc->istate & IRQS_SUSPENDED)
goto err_out;
/* Prevent probing on this irq: */
irq_settings_set_noprobe(desc);
/*
* Call irq_startup() not irq_enable() here because the
* interrupt might be marked NOAUTOEN. So irq_startup()
* needs to be invoked when it gets enabled the first
* time. If it was already started up, then irq_startup()
* will invoke irq_enable() under the hood.
*/
irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
break;
}
default:
desc->depth--;
}
}
/**
* enable_irq - enable handling of an irq
* @irq: Interrupt to enable
*
* Undoes the effect of one call to disable_irq(). If this
* matches the last disable, processing of interrupts on this
* IRQ line is re-enabled.
*
* This function may be called from IRQ context only when
* desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
*/
void enable_irq(unsigned int irq)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return;
if (WARN(!desc->irq_data.chip,
KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
goto out;
__enable_irq(desc);
out:
irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
/**
* enable_nmi - enable handling of an nmi
* @irq: Interrupt to enable
*
* The interrupt to enable must have been requested through request_nmi.
* Undoes the effect of one call to disable_nmi(). If this
* matches the last disable, processing of interrupts on this
* IRQ line is re-enabled.
*/
void enable_nmi(unsigned int irq)
{
enable_irq(irq);
}
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
struct irq_desc *desc = irq_to_desc(irq);
int ret = -ENXIO;
if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
return 0;
if (desc->irq_data.chip->irq_set_wake)
ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
return ret;
}
/**
* irq_set_irq_wake - control irq power management wakeup
* @irq: interrupt to control
* @on: enable/disable power management wakeup
*
* Enable/disable power management wakeup mode, which is
* disabled by default. Enables and disables must match,
* just as they match for non-wakeup mode support.
*
* Wakeup mode lets this IRQ wake the system from sleep
* states like "suspend to RAM".
*
* Note: irq enable/disable state is completely orthogonal
* to the enable/disable state of irq wake. An irq can be
* disabled with disable_irq() and still wake the system as
* long as the irq has wake enabled. If this does not hold,
* then the underlying irq chip and the related driver need
* to be investigated.
*/
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
int ret = 0;
if (!desc)
return -EINVAL;
/* Don't use NMIs as wake up interrupts please */
if (desc->istate & IRQS_NMI) {
ret = -EINVAL;
goto out_unlock;
}
	/*
	 * Wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
if (on) {
if (desc->wake_depth++ == 0) {
ret = set_irq_wake_real(irq, on);
if (ret)
desc->wake_depth = 0;
else
irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
}
} else {
if (desc->wake_depth == 0) {
WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
} else if (--desc->wake_depth == 0) {
ret = set_irq_wake_real(irq, on);
if (ret)
desc->wake_depth = 1;
else
irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
}
}
out_unlock:
irq_put_desc_busunlock(desc, flags);
return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
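/*
 * Illustrative sketch (not part of this file): most drivers do not call
 * irq_set_irq_wake() directly but use the enable_irq_wake() and
 * disable_irq_wake() wrappers from <linux/interrupt.h>, which map onto it.
 * A hypothetical suspend/resume pair:
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(md->irq);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(md->irq);
 *		return 0;
 *	}
 *
 * Enables and disables must match, exactly as the note above says.
 */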
/*
* Internal function that tells the architecture code whether a
* particular irq has been exclusively allocated or is available
* for driver use.
*/
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
int canrequest = 0;
if (!desc)
return 0;
if (irq_settings_can_request(desc)) {
if (!desc->action ||
irqflags & desc->action->flags & IRQF_SHARED)
canrequest = 1;
}
irq_put_desc_unlock(desc, flags);
return canrequest;
}
int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
struct irq_chip *chip = desc->irq_data.chip;
int ret, unmask = 0;
if (!chip || !chip->irq_set_type) {
/*
* IRQF_TRIGGER_* but the PIC does not support multiple
* flow-types?
*/
pr_debug("No set_type function for IRQ %d (%s)\n",
irq_desc_get_irq(desc),
chip ? (chip->name ? : "unknown") : "unknown");
return 0;
}
if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
if (!irqd_irq_masked(&desc->irq_data))
mask_irq(desc);
if (!irqd_irq_disabled(&desc->irq_data))
unmask = 1;
}
/* Mask all flags except trigger mode */
flags &= IRQ_TYPE_SENSE_MASK;
ret = chip->irq_set_type(&desc->irq_data, flags);
switch (ret) {
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
irqd_set(&desc->irq_data, flags);
fallthrough;
case IRQ_SET_MASK_OK_NOCOPY:
flags = irqd_get_trigger_type(&desc->irq_data);
irq_settings_set_trigger_mask(desc, flags);
irqd_clear(&desc->irq_data, IRQD_LEVEL);
irq_settings_clr_level(desc);
if (flags & IRQ_TYPE_LEVEL_MASK) {
irq_settings_set_level(desc);
irqd_set(&desc->irq_data, IRQD_LEVEL);
}
ret = 0;
break;
default:
pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
flags, irq_desc_get_irq(desc), chip->irq_set_type);
}
if (unmask)
unmask_irq(desc);
return ret;
}
#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
desc->parent_irq = parent_irq;
irq_put_desc_unlock(desc, flags);
return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif
/*
* Default primary interrupt handler for threaded interrupts. Is
* assigned as primary handler when request_threaded_irq is called
* with handler == NULL. Useful for oneshot interrupts.
*/
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
return IRQ_WAKE_THREAD;
}
/*
* Primary handler for nested threaded interrupts. Should never be
* called.
*/
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
WARN(1, "Primary handler called for nested irq %d\n", irq);
return IRQ_NONE;
}
static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
WARN(1, "Secondary action handler called for irq %d\n", irq);
return IRQ_NONE;
}
static int irq_wait_for_interrupt(struct irqaction *action)
{
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop()) {
/* may need to run one last time */
if (test_and_clear_bit(IRQTF_RUNTHREAD,
&action->thread_flags)) {
__set_current_state(TASK_RUNNING);
return 0;
}
__set_current_state(TASK_RUNNING);
return -1;
}
if (test_and_clear_bit(IRQTF_RUNTHREAD,
&action->thread_flags)) {
__set_current_state(TASK_RUNNING);
return 0;
}
schedule();
}
}
/*
* Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled and
* is marked MASKED.
*/
static void irq_finalize_oneshot(struct irq_desc *desc,
struct irqaction *action)
{
if (!(desc->istate & IRQS_ONESHOT) ||
action->handler == irq_forced_secondary_handler)
return;
again:
chip_bus_lock(desc);
raw_spin_lock_irq(&desc->lock);
/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again, mask the line and leave due
	 * to IRQS_INPROGRESS, and the irq line stays masked forever.
*
* This also serializes the state of shared oneshot handlers
* versus "desc->threads_oneshot |= action->thread_mask;" in
* irq_wake_thread(). See the comment there which explains the
* serialization.
*/
if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
raw_spin_unlock_irq(&desc->lock);
chip_bus_sync_unlock(desc);
cpu_relax();
goto again;
}
/*
* Now check again, whether the thread should run. Otherwise
* we would clear the threads_oneshot bit of this thread which
* was just set.
*/
if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
goto out_unlock;
desc->threads_oneshot &= ~action->thread_mask;
if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
irqd_irq_masked(&desc->irq_data))
unmask_threaded_irq(desc);
out_unlock:
raw_spin_unlock_irq(&desc->lock);
chip_bus_sync_unlock(desc);
}
#ifdef CONFIG_SMP
/*
* Check whether we need to change the affinity of the interrupt thread.
*/
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
cpumask_var_t mask;
bool valid = true;
if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
return;
/*
* In case we are out of memory we set IRQTF_AFFINITY again and
* try again next time
*/
if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
set_bit(IRQTF_AFFINITY, &action->thread_flags);
return;
}
raw_spin_lock_irq(&desc->lock);
/*
* This code is triggered unconditionally. Check the affinity
* mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
*/
if (cpumask_available(desc->irq_common_data.affinity)) {
const struct cpumask *m;
m = irq_data_get_effective_affinity_mask(&desc->irq_data);
cpumask_copy(mask, m);
} else {
valid = false;
}
raw_spin_unlock_irq(&desc->lock);
if (valid)
set_cpus_allowed_ptr(current, mask);
free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif
/*
* Interrupts which are not explicitly requested as threaded
* interrupts rely on the implicit bh/preempt disable of the hard irq
* context. So we need to disable bh here to avoid deadlocks and other
* side effects.
*/
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
irqreturn_t ret;
local_bh_disable();
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_disable();
ret = action->thread_fn(action->irq, action->dev_id);
if (ret == IRQ_HANDLED)
atomic_inc(&desc->threads_handled);
irq_finalize_oneshot(desc, action);
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_enable();
local_bh_enable();
return ret;
}
/*
* Interrupts explicitly requested as threaded interrupts want to be
* preemptible - many of them need to sleep and wait for slow busses to
* complete.
*/
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
struct irqaction *action)
{
irqreturn_t ret;
ret = action->thread_fn(action->irq, action->dev_id);
if (ret == IRQ_HANDLED)
atomic_inc(&desc->threads_handled);
irq_finalize_oneshot(desc, action);
return ret;
}
void wake_threads_waitq(struct irq_desc *desc)
{
if (atomic_dec_and_test(&desc->threads_active))
wake_up(&desc->wait_for_threads);
}
static void irq_thread_dtor(struct callback_head *unused)
{
struct task_struct *tsk = current;
struct irq_desc *desc;
struct irqaction *action;
if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
return;
action = kthread_data(tsk);
pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
tsk->comm, tsk->pid, action->irq);
desc = irq_to_desc(action->irq);
/*
* If IRQTF_RUNTHREAD is set, we need to decrement
* desc->threads_active and wake possible waiters.
*/
if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
wake_threads_waitq(desc);
/* Prevent a stale desc->threads_oneshot */
irq_finalize_oneshot(desc, action);
}
static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
struct irqaction *secondary = action->secondary;
if (WARN_ON_ONCE(!secondary))
return;
raw_spin_lock_irq(&desc->lock);
__irq_wake_thread(desc, secondary);
raw_spin_unlock_irq(&desc->lock);
}
/*
 * Internal function to notify that an interrupt thread is ready.
*/
static void irq_thread_set_ready(struct irq_desc *desc,
struct irqaction *action)
{
set_bit(IRQTF_READY, &action->thread_flags);
wake_up(&desc->wait_for_threads);
}
/*
 * Internal function to wake up an interrupt thread and wait until it is
* ready.
*/
static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
struct irqaction *action)
{
if (!action || !action->thread)
return;
wake_up_process(action->thread);
wait_event(desc->wait_for_threads,
test_bit(IRQTF_READY, &action->thread_flags));
}
/*
* Interrupt handler thread
*/
static int irq_thread(void *data)
{
struct callback_head on_exit_work;
struct irqaction *action = data;
struct irq_desc *desc = irq_to_desc(action->irq);
irqreturn_t (*handler_fn)(struct irq_desc *desc,
struct irqaction *action);
irq_thread_set_ready(desc, action);
sched_set_fifo(current);
if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
&action->thread_flags))
handler_fn = irq_forced_thread_fn;
else
handler_fn = irq_thread_fn;
init_task_work(&on_exit_work, irq_thread_dtor);
task_work_add(current, &on_exit_work, TWA_NONE);
irq_thread_check_affinity(desc, action);
while (!irq_wait_for_interrupt(action)) {
irqreturn_t action_ret;
irq_thread_check_affinity(desc, action);
action_ret = handler_fn(desc, action);
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
wake_threads_waitq(desc);
}
/*
* This is the regular exit path. __free_irq() is stopping the
* thread via kthread_stop() after calling
* synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
* oneshot mask bit can be set.
*/
task_work_cancel(current, irq_thread_dtor);
return 0;
}
/**
* irq_wake_thread - wake the irq thread for the action identified by dev_id
* @irq: Interrupt line
* @dev_id: Device identity for which the thread should be woken
*
*/
void irq_wake_thread(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
unsigned long flags;
if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return;
raw_spin_lock_irqsave(&desc->lock, flags);
for_each_action_of_desc(desc, action) {
if (action->dev_id == dev_id) {
if (action->thread)
__irq_wake_thread(desc, action);
break;
}
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
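/*
 * Illustrative sketch (not part of this file): a hypothetical driver whose
 * hard handler normally returns IRQ_WAKE_THREAD, but which also kicks the
 * same threaded handler from a watchdog timer:
 *
 *	static void my_timeout(struct timer_list *t)
 *	{
 *		struct my_dev *md = from_timer(md, t, timer);
 *
 *		irq_wake_thread(md->irq, md);
 *	}
 *
 * @dev_id must match the cookie passed to request_threaded_irq() so the
 * matching action on a shared line is selected.
 */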
static int irq_setup_forced_threading(struct irqaction *new)
{
if (!force_irqthreads())
return 0;
if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
return 0;
/*
* No further action required for interrupts which are requested as
* threaded interrupts already
*/
if (new->handler == irq_default_primary_handler)
return 0;
new->flags |= IRQF_ONESHOT;
/*
* Handle the case where we have a real primary handler and a
* thread handler. We force thread them as well by creating a
* secondary action.
*/
if (new->handler && new->thread_fn) {
/* Allocate the secondary action */
new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!new->secondary)
return -ENOMEM;
new->secondary->handler = irq_forced_secondary_handler;
new->secondary->thread_fn = new->thread_fn;
new->secondary->dev_id = new->dev_id;
new->secondary->irq = new->irq;
new->secondary->name = new->name;
}
/* Deal with the primary handler */
set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
new->thread_fn = new->handler;
new->handler = irq_default_primary_handler;
return 0;
}
static int irq_request_resources(struct irq_desc *desc)
{
struct irq_data *d = &desc->irq_data;
struct irq_chip *c = d->chip;
return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}
static void irq_release_resources(struct irq_desc *desc)
{
struct irq_data *d = &desc->irq_data;
struct irq_chip *c = d->chip;
if (c->irq_release_resources)
c->irq_release_resources(d);
}
static bool irq_supports_nmi(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* Only IRQs directly managed by the root irqchip can be set as NMI */
if (d->parent_data)
return false;
#endif
/* Don't support NMIs for chips behind a slow bus */
if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
return false;
return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
}
static int irq_nmi_setup(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
struct irq_chip *c = d->chip;
return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
}
static void irq_nmi_teardown(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
struct irq_chip *c = d->chip;
if (c->irq_nmi_teardown)
c->irq_nmi_teardown(d);
}
static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
struct task_struct *t;
if (!secondary) {
t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
new->name);
} else {
t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
new->name);
}
if (IS_ERR(t))
return PTR_ERR(t);
/*
* We keep the reference to the task struct even if
* the thread dies to avoid that the interrupt code
* references an already freed task_struct.
*/
new->thread = get_task_struct(t);
/*
* Tell the thread to set its affinity. This is
* important for shared interrupt handlers as we do
* not invoke setup_affinity() for the secondary
* handlers as everything is already set up. Even for
* interrupts marked with IRQF_NO_BALANCE this is
* correct as we want the thread to move to the cpu(s)
* on which the requesting code placed the interrupt.
*/
set_bit(IRQTF_AFFINITY, &new->thread_flags);
return 0;
}
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
*
* Locking rules:
*
* desc->request_mutex Provides serialization against a concurrent free_irq()
* chip_bus_lock Provides serialization for slow bus operations
* desc->lock Provides serialization against hard interrupts
*
* chip_bus_lock and desc->lock are sufficient for all other management and
* interrupt related functions. desc->request_mutex solely serializes
* request/free_irq().
*/
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
struct irqaction *old, **old_ptr;
unsigned long flags, thread_mask = 0;
int ret, nested, shared = 0;
if (!desc)
return -EINVAL;
if (desc->irq_data.chip == &no_irq_chip)
return -ENOSYS;
if (!try_module_get(desc->owner))
return -ENODEV;
new->irq = irq;
/*
* If the trigger type is not specified by the caller,
* then use the default for this interrupt.
*/
if (!(new->flags & IRQF_TRIGGER_MASK))
new->flags |= irqd_get_trigger_type(&desc->irq_data);
/*
* Check whether the interrupt nests into another interrupt
* thread.
*/
nested = irq_settings_is_nested_thread(desc);
if (nested) {
if (!new->thread_fn) {
ret = -EINVAL;
goto out_mput;
}
/*
* Replace the primary handler which was provided from
* the driver for non nested interrupt handling by the
* dummy function which warns when called.
*/
new->handler = irq_nested_primary_handler;
} else {
if (irq_settings_can_thread(desc)) {
ret = irq_setup_forced_threading(new);
if (ret)
goto out_mput;
}
}
/*
* Create a handler thread when a thread function is supplied
* and the interrupt does not nest into another interrupt
* thread.
*/
if (new->thread_fn && !nested) {
ret = setup_irq_thread(new, irq, false);
if (ret)
goto out_mput;
if (new->secondary) {
ret = setup_irq_thread(new->secondary, irq, true);
if (ret)
goto out_thread;
}
}
/*
* Drivers are often written to work w/o knowledge about the
* underlying irq chip implementation, so a request for a
* threaded irq without a primary hard irq context handler
* requires the ONESHOT flag to be set. Some irq chips like
* MSI based interrupts are per se one shot safe. Check the
* chip flags, so we can avoid the unmask dance at the end of
* the threaded handler for those.
*/
if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
new->flags &= ~IRQF_ONESHOT;
/*
* Protects against a concurrent __free_irq() call which might wait
* for synchronize_hardirq() to complete without holding the optional
* chip bus lock and desc->lock. Also protects against handing out
* a recycled oneshot thread_mask bit while it's still in use by
* its previous owner.
*/
mutex_lock(&desc->request_mutex);
/*
* Acquire bus lock as the irq_request_resources() callback below
* might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback.
*/
chip_bus_lock(desc);
/* First installed action requests resources. */
if (!desc->action) {
ret = irq_request_resources(desc);
if (ret) {
pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
new->name, irq, desc->irq_data.chip->name);
goto out_bus_unlock;
}
}
/*
* The following block of code has to be executed atomically
* protected against a concurrent interrupt and any of the other
* management calls which are not serialized via
* desc->request_mutex or the optional bus lock.
*/
raw_spin_lock_irqsave(&desc->lock, flags);
old_ptr = &desc->action;
old = *old_ptr;
if (old) {
/*
* Can't share interrupts unless both agree to and are
* the same type (level, edge, polarity). So both flag
* fields must have IRQF_SHARED set and the bits which
* set the trigger type must match. Also all must
* agree on ONESHOT.
* Interrupt lines used for NMIs cannot be shared.
*/
unsigned int oldtype;
if (desc->istate & IRQS_NMI) {
pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
new->name, irq, desc->irq_data.chip->name);
ret = -EINVAL;
goto out_unlock;
}
/*
		 * If nobody set the trigger configuration before, inherit
* the one provided by the requester.
*/
if (irqd_trigger_type_was_set(&desc->irq_data)) {
oldtype = irqd_get_trigger_type(&desc->irq_data);
} else {
oldtype = new->flags & IRQF_TRIGGER_MASK;
irqd_set_trigger_type(&desc->irq_data, oldtype);
}
if (!((old->flags & new->flags) & IRQF_SHARED) ||
(oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
((old->flags ^ new->flags) & IRQF_ONESHOT))
goto mismatch;
/* All handlers must agree on per-cpuness */
if ((old->flags & IRQF_PERCPU) !=
(new->flags & IRQF_PERCPU))
goto mismatch;
/* add new interrupt at end of irq queue */
do {
/*
* Or all existing action->thread_mask bits,
* so we can find the next zero bit for this
* new action.
*/
thread_mask |= old->thread_mask;
old_ptr = &old->next;
old = *old_ptr;
} while (old);
shared = 1;
}
/*
* Setup the thread mask for this irqaction for ONESHOT. For
* !ONESHOT irqs the thread mask is 0 so we can avoid a
* conditional in irq_wake_thread().
*/
if (new->flags & IRQF_ONESHOT) {
/*
		 * Unlikely to have 32 or 64 (BITS_PER_LONG) irqs sharing
		 * one line, but who knows.
*/
if (thread_mask == ~0UL) {
ret = -EBUSY;
goto out_unlock;
}
/*
* The thread_mask for the action is or'ed to
* desc->thread_active to indicate that the
* IRQF_ONESHOT thread handler has been woken, but not
* yet finished. The bit is cleared when a thread
* completes. When all threads of a shared interrupt
* line have completed desc->threads_active becomes
* zero and the interrupt line is unmasked. See
* handle.c:irq_wake_thread() for further information.
*
* If no thread is woken by primary (hard irq context)
* interrupt handlers, then desc->threads_active is
* also checked for zero to unmask the irq line in the
* affected hard irq flow handlers
* (handle_[fasteoi|level]_irq).
*
* The new action gets the first zero bit of
* thread_mask assigned. See the loop above which or's
* all existing action->thread_mask bits.
*/
new->thread_mask = 1UL << ffz(thread_mask);
} else if (new->handler == irq_default_primary_handler &&
!(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
/*
* The interrupt was requested with handler = NULL, so
* we use the default primary handler for it. But it
* does not have the oneshot flag set. In combination
* with level interrupts this is deadly, because the
* default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
* has the level irq asserted. Rinse and repeat....
*
* While this works for edge type interrupts, we play
* it safe and reject unconditionally because we can't
* say for sure which type this interrupt really
* has. The type flags are unreliable as the
* underlying chip implementation can override them.
*/
pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
new->name, irq);
ret = -EINVAL;
goto out_unlock;
}
if (!shared) {
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
ret = __irq_set_trigger(desc,
new->flags & IRQF_TRIGGER_MASK);
if (ret)
goto out_unlock;
}
/*
* Activate the interrupt. That activation must happen
* independently of IRQ_NOAUTOEN. request_irq() can fail
* and the callers are supposed to handle
* that. enable_irq() of an interrupt requested with
* IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
* resources if necessary and if that's not possible it
* fails. Interrupts which are in managed shutdown mode
* will simply ignore that activation request.
*/
ret = irq_activate(desc);
if (ret)
goto out_unlock;
desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
IRQS_ONESHOT | IRQS_WAITING);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
if (new->flags & IRQF_PERCPU) {
irqd_set(&desc->irq_data, IRQD_PER_CPU);
irq_settings_set_per_cpu(desc);
if (new->flags & IRQF_NO_DEBUG)
irq_settings_set_no_debug(desc);
}
if (noirqdebug)
irq_settings_set_no_debug(desc);
if (new->flags & IRQF_ONESHOT)
desc->istate |= IRQS_ONESHOT;
/* Exclude IRQ from balancing if requested */
if (new->flags & IRQF_NOBALANCING) {
irq_settings_set_no_balancing(desc);
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
if (!(new->flags & IRQF_NO_AUTOEN) &&
irq_settings_can_autoenable(desc)) {
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
} else {
/*
* Shared interrupts do not go well with disabling
* auto enable. The sharing interrupt might request
* it while it's still disabled and then wait for
* interrupts forever.
*/
WARN_ON_ONCE(new->flags & IRQF_SHARED);
/* Undo nested disables: */
desc->depth = 1;
}
} else if (new->flags & IRQF_TRIGGER_MASK) {
unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
if (nmsk != omsk)
/* hope the handler works with current trigger mode */
pr_warn("irq %d uses trigger mode %u; requested %u\n",
irq, omsk, nmsk);
}
*old_ptr = new;
irq_pm_install_action(desc, new);
/* Reset broken irq detection when installing new handler */
desc->irq_count = 0;
desc->irqs_unhandled = 0;
/*
* Check whether we disabled the irq via the spurious handler
* before. Reenable it and give it another chance.
*/
if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
desc->istate &= ~IRQS_SPURIOUS_DISABLED;
__enable_irq(desc);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(desc);
mutex_unlock(&desc->request_mutex);
irq_setup_timings(desc, new);
wake_up_and_wait_for_irq_thread_ready(desc, new);
wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
register_irq_proc(irq, desc);
new->dir = NULL;
register_handler_proc(irq, new);
return 0;
mismatch:
if (!(new->flags & IRQF_PROBE_SHARED)) {
pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
dump_stack();
#endif
}
ret = -EBUSY;
out_unlock:
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (!desc->action)
irq_release_resources(desc);
out_bus_unlock:
chip_bus_sync_unlock(desc);
mutex_unlock(&desc->request_mutex);
out_thread:
if (new->thread) {
struct task_struct *t = new->thread;
new->thread = NULL;
kthread_stop(t);
put_task_struct(t);
}
if (new->secondary && new->secondary->thread) {
struct task_struct *t = new->secondary->thread;
new->secondary->thread = NULL;
kthread_stop(t);
put_task_struct(t);
}
out_mput:
module_put(desc->owner);
return ret;
}
/*
* Internal function to unregister an irqaction - used to free
* regular and special interrupts that are part of the architecture.
*/
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned int irq = desc->irq_data.irq;
struct irqaction *action, **action_ptr;
unsigned long flags;
WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
mutex_lock(&desc->request_mutex);
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
/*
* There can be multiple actions per IRQ descriptor, find the right
* one based on the dev_id:
*/
action_ptr = &desc->action;
for (;;) {
action = *action_ptr;
if (!action) {
WARN(1, "Trying to free already-free IRQ %d\n", irq);
raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(desc);
mutex_unlock(&desc->request_mutex);
return NULL;
}
if (action->dev_id == dev_id)
break;
action_ptr = &action->next;
}
/* Found it - now remove it from the list of entries: */
*action_ptr = action->next;
irq_pm_remove_action(desc, action);
/* If this was the last handler, shut down the IRQ line: */
if (!desc->action) {
irq_settings_clr_disable_unlazy(desc);
/* Only shutdown. Deactivate after synchronize_hardirq() */
irq_shutdown(desc);
}
#ifdef CONFIG_SMP
/* make sure affinity_hint is cleaned up */
if (WARN_ON_ONCE(desc->affinity_hint))
desc->affinity_hint = NULL;
#endif
raw_spin_unlock_irqrestore(&desc->lock, flags);
/*
* Drop bus_lock here so the changes which were done in the chip
* callbacks above are synced out to the irq chips which hang
* behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
*
* Aside of that the bus_lock can also be taken from the threaded
* handler in irq_finalize_oneshot() which results in a deadlock
* because kthread_stop() would wait forever for the thread to
* complete, which is blocked on the bus lock.
*
	 * The still held desc->request_mutex protects against a
* concurrent request_irq() of this irq so the release of resources
* and timing data is properly serialized.
*/
chip_bus_sync_unlock(desc);
unregister_handler_proc(irq, action);
/*
* Make sure it's not being used on another CPU and if the chip
* supports it also make sure that there is no (not yet serviced)
* interrupt in flight at the hardware level.
*/
__synchronize_irq(desc);
#ifdef CONFIG_DEBUG_SHIRQ
/*
* It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even while it's being freed, so let's make sure that
* is so by doing an extra call to the handler ....
*
* ( We do this after actually deregistering it, to make sure that a
* 'real' IRQ doesn't run in parallel with our fake. )
*/
if (action->flags & IRQF_SHARED) {
local_irq_save(flags);
action->handler(irq, dev_id);
local_irq_restore(flags);
}
#endif
/*
* The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. The request_mutex is
	 * held across this, which prevents __setup_irq() from handing out
* the same bit to a newly requested action.
*/
if (action->thread) {
kthread_stop(action->thread);
put_task_struct(action->thread);
if (action->secondary && action->secondary->thread) {
kthread_stop(action->secondary->thread);
put_task_struct(action->secondary->thread);
}
}
/* Last action releases resources */
if (!desc->action) {
/*
* Reacquire bus lock as irq_release_resources() might
* require it to deallocate resources over the slow bus.
*/
chip_bus_lock(desc);
/*
* There is no interrupt on the fly anymore. Deactivate it
* completely.
*/
raw_spin_lock_irqsave(&desc->lock, flags);
irq_domain_deactivate_irq(&desc->irq_data);
raw_spin_unlock_irqrestore(&desc->lock, flags);
irq_release_resources(desc);
chip_bus_sync_unlock(desc);
irq_remove_timings(desc);
}
mutex_unlock(&desc->request_mutex);
irq_chip_pm_put(&desc->irq_data);
module_put(desc->owner);
kfree(action->secondary);
return action;
}
/**
* free_irq - free an interrupt allocated with request_irq
* @irq: Interrupt line to free
* @dev_id: Device identity to free
*
* Remove an interrupt handler. The handler is removed and if the
* interrupt line is no longer in use by any driver it is disabled.
* On a shared IRQ the caller must ensure the interrupt is disabled
* on the card it drives before calling this function. The function
* does not return until any executing interrupts for this IRQ
* have completed.
*
* This function must not be called from interrupt context.
*
* Returns the devname argument passed to request_irq.
*/
const void *free_irq(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
const char *devname;
if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return NULL;
#ifdef CONFIG_SMP
if (WARN_ON(desc->affinity_notify))
desc->affinity_notify = NULL;
#endif
action = __free_irq(desc, dev_id);
if (!action)
return NULL;
devname = action->name;
kfree(action);
return devname;
}
EXPORT_SYMBOL(free_irq);
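/*
 * Illustrative sketch (not part of this file): teardown order for a
 * hypothetical driver, matching the rule above that on a shared line the
 * device must be quiesced before the handler is removed. The helper
 * my_hw_mask_interrupts() is a hypothetical device-specific routine:
 *
 *	my_hw_mask_interrupts(md);
 *	free_irq(md->irq, md);
 *	kfree(md);
 *
 * free_irq() does not return until running handlers for this irq have
 * completed, so freeing @md afterwards cannot race with a handler.
 */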
/* This function must be called with desc->lock held */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
const char *devname = NULL;
desc->istate &= ~IRQS_NMI;
if (!WARN_ON(desc->action == NULL)) {
irq_pm_remove_action(desc, desc->action);
devname = desc->action->name;
unregister_handler_proc(irq, desc->action);
kfree(desc->action);
desc->action = NULL;
}
irq_settings_clr_disable_unlazy(desc);
irq_shutdown_and_deactivate(desc);
irq_release_resources(desc);
irq_chip_pm_put(&desc->irq_data);
module_put(desc->owner);
return devname;
}
const void *free_nmi(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
const void *devname;
if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
return NULL;
if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return NULL;
/* NMI still enabled */
if (WARN_ON(desc->depth == 0))
disable_nmi_nosync(irq);
raw_spin_lock_irqsave(&desc->lock, flags);
irq_nmi_teardown(desc);
devname = __cleanup_nmi(irq, desc);
raw_spin_unlock_irqrestore(&desc->lock, flags);
return devname;
}
/**
* request_threaded_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* Primary handler for threaded interrupts.
* If handler is NULL and thread_fn != NULL
* the default primary handler is installed.
* @thread_fn: Function called from the irq handler thread
* If NULL, no irq thread is created
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
*
* This call allocates interrupt resources and enables the
* interrupt line and IRQ handling. From the point this
* call is made your handler function may be invoked. Since
* your handler function must clear any interrupt the board
* raises, you must take care both to initialise your hardware
* and to set up the interrupt handler in the right order.
*
* If you want to set up a threaded irq handler for your device
* then you need to supply @handler and @thread_fn. @handler is
* still called in hard interrupt context and has to check
* whether the interrupt originates from the device. If yes it
* needs to disable the interrupt on the device and return
* IRQ_WAKE_THREAD which will wake up the handler thread and run
* @thread_fn. This split handler design is necessary to support
* shared interrupts.
*
* Dev_id must be globally unique. Normally the address of the
* device data structure is used as the cookie. Since the handler
* receives this value it makes sense to use it.
*
 * If your interrupt is shared you must pass a non-NULL dev_id
* as this is required when freeing the interrupt.
*
* Flags:
*
* IRQF_SHARED Interrupt is shared
* IRQF_TRIGGER_* Specify active edge(s) or level
* IRQF_ONESHOT Run thread_fn with interrupt line masked
*/
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn, unsigned long irqflags,
const char *devname, void *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
int retval;
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
/*
* Sanity-check: shared interrupts must pass in a real dev-ID,
* otherwise we'll have trouble later trying to figure out
* which interrupt is which (messes up the interrupt freeing
* logic etc).
*
	 * Shared interrupts also do not go well with disabling auto enable:
	 * a second sharer might request the line while it is still disabled
	 * and then wait for interrupts forever.
*
* Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
* it cannot be set along with IRQF_NO_SUSPEND.
*/
if (((irqflags & IRQF_SHARED) && !dev_id) ||
((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
(!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
return -EINVAL;
desc = irq_to_desc(irq);
if (!desc)
return -EINVAL;
if (!irq_settings_can_request(desc) ||
WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return -EINVAL;
if (!handler) {
if (!thread_fn)
return -EINVAL;
handler = irq_default_primary_handler;
}
action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;
action->handler = handler;
action->thread_fn = thread_fn;
action->flags = irqflags;
action->name = devname;
action->dev_id = dev_id;
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0) {
kfree(action);
return retval;
}
retval = __setup_irq(irq, desc, action);
if (retval) {
irq_chip_pm_put(&desc->irq_data);
kfree(action->secondary);
kfree(action);
}
#ifdef CONFIG_DEBUG_SHIRQ_FIXME
if (!retval && (irqflags & IRQF_SHARED)) {
/*
* It's a shared IRQ -- the driver ought to be prepared for it
* to happen immediately, so let's make sure....
* We disable the irq to make sure that a 'real' IRQ doesn't
* run in parallel with our fake.
*/
unsigned long flags;
disable_irq(irq);
local_irq_save(flags);
handler(irq, dev_id);
local_irq_restore(flags);
enable_irq(irq);
}
#endif
return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
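/*
 * A minimal usage sketch of the split-handler pattern described above,
 * assuming a hypothetical "foo" device; struct foo_device and the foo_*()
 * helpers are illustrative, not real kernel APIs. The primary handler runs
 * in hard interrupt context, checks whether the (shared) line was raised
 * by this device, quiesces it and defers the real work to the thread:
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_raised(foo))
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_process_events(foo);
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */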
/**
* request_any_context_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* Threaded handler for threaded interrupts.
* @flags: Interrupt type flags
* @name: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
*
* This call allocates interrupt resources and enables the
* interrupt line and IRQ handling. It selects either a
* hardirq or threaded handling method depending on the
* context.
*
* On failure, it returns a negative value. On success,
* it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
*/
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *name, void *dev_id)
{
struct irq_desc *desc;
int ret;
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
desc = irq_to_desc(irq);
if (!desc)
return -EINVAL;
if (irq_settings_is_nested_thread(desc)) {
ret = request_threaded_irq(irq, NULL, handler,
flags, name, dev_id);
return !ret ? IRQC_IS_NESTED : ret;
}
ret = request_irq(irq, handler, flags, name, dev_id);
return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
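/*
 * A usage sketch, assuming a hypothetical "foo" driver whose interrupt may
 * sit behind a nested (e.g. I2C based) irq chip; the foo_* names are
 * illustrative. The positive return value tells the caller which handling
 * method was selected:
 *
 *	ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	foo->nested_irq = (ret == IRQC_IS_NESTED);
 */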
/**
* request_nmi - allocate an interrupt line for NMI delivery
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* Threaded handler for threaded interrupts.
* @irqflags: Interrupt type flags
* @name: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
*
* This call allocates interrupt resources and enables the
* interrupt line and IRQ handling. It sets up the IRQ line
* to be handled as an NMI.
*
* An interrupt line delivering NMIs cannot be shared and IRQ handling
* cannot be threaded.
*
 * Interrupt lines requested for NMI delivery must produce per-CPU
 * interrupts and must have automatic enabling disabled.
*
* Dev_id must be globally unique. Normally the address of the
* device data structure is used as the cookie. Since the handler
* receives this value it makes sense to use it.
*
 * If the interrupt line cannot be used to deliver NMIs, the function
 * fails and returns a negative value.
*/
int request_nmi(unsigned int irq, irq_handler_t handler,
unsigned long irqflags, const char *name, void *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
unsigned long flags;
int retval;
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
	/* NMIs cannot be shared, conditionally suspended or used for polling */
if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
return -EINVAL;
if (!(irqflags & IRQF_PERCPU))
return -EINVAL;
if (!handler)
return -EINVAL;
desc = irq_to_desc(irq);
if (!desc || (irq_settings_can_autoenable(desc) &&
!(irqflags & IRQF_NO_AUTOEN)) ||
!irq_settings_can_request(desc) ||
WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
!irq_supports_nmi(desc))
return -EINVAL;
action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;
action->handler = handler;
action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
action->name = name;
action->dev_id = dev_id;
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0)
goto err_out;
retval = __setup_irq(irq, desc, action);
if (retval)
goto err_irq_setup;
raw_spin_lock_irqsave(&desc->lock, flags);
/* Setup NMI state */
desc->istate |= IRQS_NMI;
retval = irq_nmi_setup(desc);
if (retval) {
__cleanup_nmi(irq, desc);
raw_spin_unlock_irqrestore(&desc->lock, flags);
return -EINVAL;
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
err_irq_setup:
irq_chip_pm_put(&desc->irq_data);
err_out:
kfree(action);
return retval;
}
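/*
 * A usage sketch, assuming a hypothetical watchdog driver on a line that
 * supports NMI delivery; the wd_* names are illustrative. Automatic
 * enabling must be off, either in the irq settings or via IRQF_NO_AUTOEN,
 * so the line is enabled explicitly afterwards:
 *
 *	ret = request_nmi(wd->irq, wd_nmi_handler,
 *			  IRQF_PERCPU | IRQF_NO_AUTOEN, "wd-nmi", wd);
 *	if (!ret)
 *		enable_nmi(wd->irq);
 */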
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
unsigned int cpu = smp_processor_id();
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
if (!desc)
return;
/*
* If the trigger type is not specified by the caller, then
* use the default for this interrupt.
*/
type &= IRQ_TYPE_SENSE_MASK;
if (type == IRQ_TYPE_NONE)
type = irqd_get_trigger_type(&desc->irq_data);
if (type != IRQ_TYPE_NONE) {
int ret;
ret = __irq_set_trigger(desc, type);
if (ret) {
WARN(1, "failed to set type for IRQ%d\n", irq);
goto out;
}
}
irq_percpu_enable(desc, cpu);
out:
irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);
void enable_percpu_nmi(unsigned int irq, unsigned int type)
{
enable_percpu_irq(irq, type);
}
/**
* irq_percpu_is_enabled - Check whether the per cpu irq is enabled
* @irq: Linux irq number to check for
*
 * Must be called from a non-migratable context. Returns the enable
 * state of a per-CPU interrupt on the current CPU.
*/
bool irq_percpu_is_enabled(unsigned int irq)
{
unsigned int cpu = smp_processor_id();
struct irq_desc *desc;
unsigned long flags;
bool is_enabled;
desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
if (!desc)
return false;
is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
irq_put_desc_unlock(desc, flags);
return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
void disable_percpu_irq(unsigned int irq)
{
unsigned int cpu = smp_processor_id();
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
if (!desc)
return;
irq_percpu_disable(desc, cpu);
irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);
void disable_percpu_nmi(unsigned int irq)
{
disable_percpu_irq(irq);
}
/*
* Internal function to unregister a percpu irqaction.
*/
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
unsigned long flags;
WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
if (!desc)
return NULL;
raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
if (!action || action->percpu_dev_id != dev_id) {
WARN(1, "Trying to free already-free IRQ %d\n", irq);
goto bad;
}
if (!cpumask_empty(desc->percpu_enabled)) {
WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
irq, cpumask_first(desc->percpu_enabled));
goto bad;
}
/* Found it - now remove it from the list of entries: */
desc->action = NULL;
desc->istate &= ~IRQS_NMI;
raw_spin_unlock_irqrestore(&desc->lock, flags);
unregister_handler_proc(irq, action);
irq_chip_pm_put(&desc->irq_data);
module_put(desc->owner);
return action;
bad:
raw_spin_unlock_irqrestore(&desc->lock, flags);
return NULL;
}
/**
* remove_percpu_irq - free a per-cpu interrupt
* @irq: Interrupt line to free
* @act: irqaction for the interrupt
*
* Used to remove interrupts statically setup by the early boot process.
*/
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc && irq_settings_is_per_cpu_devid(desc))
__free_percpu_irq(irq, act->percpu_dev_id);
}
/**
* free_percpu_irq - free an interrupt allocated with request_percpu_irq
* @irq: Interrupt line to free
* @dev_id: Device identity to free
*
* Remove a percpu interrupt handler. The handler is removed, but
* the interrupt line is not disabled. This must be done on each
* CPU before calling this function. The function does not return
* until any executing interrupts for this IRQ have completed.
*
* This function must not be called from interrupt context.
*/
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
if (!desc || !irq_settings_is_per_cpu_devid(desc))
return;
chip_bus_lock(desc);
kfree(__free_percpu_irq(irq, dev_id));
chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);
void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
if (!desc || !irq_settings_is_per_cpu_devid(desc))
return;
if (WARN_ON(!(desc->istate & IRQS_NMI)))
return;
kfree(__free_percpu_irq(irq, dev_id));
}
/**
* setup_percpu_irq - setup a per-cpu interrupt
* @irq: Interrupt line to setup
* @act: irqaction for the interrupt
*
* Used to statically setup per-cpu interrupts in the early boot process.
*/
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
struct irq_desc *desc = irq_to_desc(irq);
int retval;
if (!desc || !irq_settings_is_per_cpu_devid(desc))
return -EINVAL;
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0)
return retval;
retval = __setup_irq(irq, desc, act);
if (retval)
irq_chip_pm_put(&desc->irq_data);
return retval;
}
/**
* __request_percpu_irq - allocate a percpu interrupt line
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* @flags: Interrupt type flags (IRQF_TIMER only)
* @devname: An ascii name for the claiming device
* @dev_id: A percpu cookie passed back to the handler function
*
* This call allocates interrupt resources and enables the
* interrupt on the local CPU. If the interrupt is supposed to be
* enabled on other CPUs, it has to be done on each CPU using
* enable_percpu_irq().
*
* Dev_id must be globally unique. It is a per-cpu variable, and
* the handler gets called with the interrupted CPU's instance of
* that variable.
*/
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *devname,
void __percpu *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
int retval;
if (!dev_id)
return -EINVAL;
desc = irq_to_desc(irq);
if (!desc || !irq_settings_can_request(desc) ||
!irq_settings_is_per_cpu_devid(desc))
return -EINVAL;
if (flags && flags != IRQF_TIMER)
return -EINVAL;
action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;
action->handler = handler;
action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
action->name = devname;
action->percpu_dev_id = dev_id;
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0) {
kfree(action);
return retval;
}
retval = __setup_irq(irq, desc, action);
if (retval) {
irq_chip_pm_put(&desc->irq_data);
kfree(action);
}
return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
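/*
 * A usage sketch, assuming a hypothetical per-CPU timer driver; the foo_*
 * names are illustrative. The cookie is a percpu variable and each CPU
 * enables the line for itself, e.g. from a cpuhp startup callback:
 *
 *	static DEFINE_PER_CPU(struct foo_timer, foo_timer);
 *
 *	ret = request_percpu_irq(irq, foo_timer_handler, "foo-timer",
 *				 &foo_timer);
 *	...
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */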
/**
* request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* @name: An ascii name for the claiming device
* @dev_id: A percpu cookie passed back to the handler function
*
* This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
 * have to be set up on each CPU by calling prepare_percpu_nmi() before
* being enabled on the same CPU by using enable_percpu_nmi().
*
* Dev_id must be globally unique. It is a per-cpu variable, and
* the handler gets called with the interrupted CPU's instance of
* that variable.
*
 * Interrupt lines requested for NMI delivery must have automatic
 * enabling disabled.
*
 * If the interrupt line cannot be used to deliver NMIs, the function
 * fails and returns a negative value.
*/
int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
const char *name, void __percpu *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
unsigned long flags;
int retval;
if (!handler)
return -EINVAL;
desc = irq_to_desc(irq);
if (!desc || !irq_settings_can_request(desc) ||
!irq_settings_is_per_cpu_devid(desc) ||
irq_settings_can_autoenable(desc) ||
!irq_supports_nmi(desc))
return -EINVAL;
/* The line cannot already be NMI */
if (desc->istate & IRQS_NMI)
return -EINVAL;
action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;
action->handler = handler;
action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
| IRQF_NOBALANCING;
action->name = name;
action->percpu_dev_id = dev_id;
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0)
goto err_out;
retval = __setup_irq(irq, desc, action);
if (retval)
goto err_irq_setup;
raw_spin_lock_irqsave(&desc->lock, flags);
desc->istate |= IRQS_NMI;
raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
err_irq_setup:
irq_chip_pm_put(&desc->irq_data);
err_out:
kfree(action);
return retval;
}
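/*
 * A sketch of the per-CPU NMI lifecycle, assuming a hypothetical PMU style
 * driver; the foo_* names are illustrative. After the line is requested,
 * each CPU prepares and enables its own delivery and tears it down again
 * in reverse order, all from non-preemptible context:
 *
 *	ret = request_percpu_nmi(irq, foo_nmi_handler, "foo-pmu", &foo_pmu);
 *	...
 *	ret = prepare_percpu_nmi(irq);
 *	if (!ret)
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *	...
 *	disable_percpu_nmi(irq);
 *	teardown_percpu_nmi(irq);
 */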
/**
* prepare_percpu_nmi - performs CPU local setup for NMI delivery
* @irq: Interrupt line to prepare for NMI delivery
*
* This call prepares an interrupt line to deliver NMI on the current CPU,
* before that interrupt line gets enabled with enable_percpu_nmi().
*
* As a CPU local operation, this should be called from non-preemptible
* context.
*
 * If the interrupt line cannot be used to deliver NMIs, the function
 * fails and returns a negative value.
*/
int prepare_percpu_nmi(unsigned int irq)
{
unsigned long flags;
struct irq_desc *desc;
int ret = 0;
WARN_ON(preemptible());
desc = irq_get_desc_lock(irq, &flags,
IRQ_GET_DESC_CHECK_PERCPU);
if (!desc)
return -EINVAL;
if (WARN(!(desc->istate & IRQS_NMI),
KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
irq)) {
ret = -EINVAL;
goto out;
}
ret = irq_nmi_setup(desc);
if (ret) {
pr_err("Failed to setup NMI delivery: irq %u\n", irq);
goto out;
}
out:
irq_put_desc_unlock(desc, flags);
return ret;
}
/**
* teardown_percpu_nmi - undoes NMI setup of IRQ line
* @irq: Interrupt line from which CPU local NMI configuration should be
* removed
*
* This call undoes the setup done by prepare_percpu_nmi().
*
* IRQ line should not be enabled for the current CPU.
*
* As a CPU local operation, this should be called from non-preemptible
* context.
*/
void teardown_percpu_nmi(unsigned int irq)
{
unsigned long flags;
struct irq_desc *desc;
WARN_ON(preemptible());
desc = irq_get_desc_lock(irq, &flags,
IRQ_GET_DESC_CHECK_PERCPU);
if (!desc)
return;
if (WARN_ON(!(desc->istate & IRQS_NMI)))
goto out;
irq_nmi_teardown(desc);
out:
irq_put_desc_unlock(desc, flags);
}
int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
bool *state)
{
struct irq_chip *chip;
int err = -EINVAL;
do {
chip = irq_data_get_irq_chip(data);
if (WARN_ON_ONCE(!chip))
return -ENODEV;
if (chip->irq_get_irqchip_state)
break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
data = data->parent_data;
#else
data = NULL;
#endif
} while (data);
if (data)
err = chip->irq_get_irqchip_state(data, which, state);
return err;
}
/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
* @irq: Interrupt line that is forwarded to a VM
* @which: One of IRQCHIP_STATE_* the caller wants to know about
* @state: a pointer to a boolean where the state is to be stored
*
* This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * the queried state @which.
*
* This function should be called with preemption disabled if the
* interrupt controller has per-cpu registers.
*/
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
bool *state)
{
struct irq_desc *desc;
struct irq_data *data;
unsigned long flags;
int err = -EINVAL;
desc = irq_get_desc_buslock(irq, &flags, 0);
if (!desc)
return err;
data = irq_desc_get_irq_data(desc);
err = __irq_get_irqchip_state(data, which, state);
irq_put_desc_busunlock(desc, flags);
return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
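/*
 * A usage sketch for a consumer of a forwarded interrupt, checking whether
 * the line is still pending at the irqchip level (illustrative only):
 *
 *	bool pending;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!err && pending)
 *		...
 */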
/**
* irq_set_irqchip_state - set the state of a forwarded interrupt.
* @irq: Interrupt line that is forwarded to a VM
* @which: State to be restored (one of IRQCHIP_STATE_*)
* @val: Value corresponding to @which
*
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
* This function should be called with migration disabled if the
* interrupt controller has per-cpu registers.
*/
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
bool val)
{
struct irq_desc *desc;
struct irq_data *data;
struct irq_chip *chip;
unsigned long flags;
int err = -EINVAL;
desc = irq_get_desc_buslock(irq, &flags, 0);
if (!desc)
return err;
data = irq_desc_get_irq_data(desc);
do {
chip = irq_data_get_irq_chip(data);
if (WARN_ON_ONCE(!chip)) {
err = -ENODEV;
goto out_unlock;
}
if (chip->irq_set_irqchip_state)
break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
data = data->parent_data;
#else
data = NULL;
#endif
} while (data);
if (data)
err = chip->irq_set_irqchip_state(data, which, val);
out_unlock:
irq_put_desc_busunlock(desc, flags);
return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
/**
* irq_has_action - Check whether an interrupt is requested
* @irq: The linux irq number
*
* Returns: A snapshot of the current state
*/
bool irq_has_action(unsigned int irq)
{
bool res;
rcu_read_lock();
res = irq_desc_has_action(irq_to_desc(irq));
rcu_read_unlock();
return res;
}
EXPORT_SYMBOL_GPL(irq_has_action);
/**
* irq_check_status_bit - Check whether bits in the irq descriptor status are set
* @irq: The linux irq number
* @bitmask: The bitmask to evaluate
*
* Returns: True if one of the bits in @bitmask is set
*/
bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
{
struct irq_desc *desc;
bool res = false;
rcu_read_lock();
desc = irq_to_desc(irq);
if (desc)
res = !!(desc->status_use_accessors & bitmask);
rcu_read_unlock();
return res;
}
EXPORT_SYMBOL_GPL(irq_check_status_bit);
| linux-master | kernel/irq/manage.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009 Rafael J. Wysocki <[email protected]>, Novell Inc.
*
* This file contains power management functions related to interrupts.
*/
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include "internals.h"
bool irq_pm_check_wakeup(struct irq_desc *desc)
{
if (irqd_is_wakeup_armed(&desc->irq_data)) {
irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
desc->istate |= IRQS_SUSPENDED | IRQS_PENDING;
desc->depth++;
irq_disable(desc);
pm_system_irq_wakeup(irq_desc_get_irq(desc));
return true;
}
return false;
}
/*
* Called from __setup_irq() with desc->lock held after @action has
* been installed in the action chain.
*/
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
{
desc->nr_actions++;
if (action->flags & IRQF_FORCE_RESUME)
desc->force_resume_depth++;
WARN_ON_ONCE(desc->force_resume_depth &&
desc->force_resume_depth != desc->nr_actions);
if (action->flags & IRQF_NO_SUSPEND)
desc->no_suspend_depth++;
else if (action->flags & IRQF_COND_SUSPEND)
desc->cond_suspend_depth++;
WARN_ON_ONCE(desc->no_suspend_depth &&
(desc->no_suspend_depth +
desc->cond_suspend_depth) != desc->nr_actions);
}
/*
* Called from __free_irq() with desc->lock held after @action has
* been removed from the action chain.
*/
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
{
desc->nr_actions--;
if (action->flags & IRQF_FORCE_RESUME)
desc->force_resume_depth--;
if (action->flags & IRQF_NO_SUSPEND)
desc->no_suspend_depth--;
else if (action->flags & IRQF_COND_SUSPEND)
desc->cond_suspend_depth--;
}
static bool suspend_device_irq(struct irq_desc *desc)
{
unsigned long chipflags = irq_desc_get_chip(desc)->flags;
struct irq_data *irqd = &desc->irq_data;
if (!desc->action || irq_desc_is_chained(desc) ||
desc->no_suspend_depth)
return false;
if (irqd_is_wakeup_set(irqd)) {
irqd_set(irqd, IRQD_WAKEUP_ARMED);
if ((chipflags & IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND) &&
irqd_irq_disabled(irqd)) {
/*
			 * The interrupt marked for wakeup is in the disabled
			 * state. Enable it here to unmask/enable it in the
			 * irqchip so that the system can resume on it.
*/
__enable_irq(desc);
irqd_set(irqd, IRQD_IRQ_ENABLED_ON_SUSPEND);
}
/*
* We return true here to force the caller to issue
* synchronize_irq(). We need to make sure that the
* IRQD_WAKEUP_ARMED is visible before we return from
* suspend_device_irqs().
*/
return true;
}
desc->istate |= IRQS_SUSPENDED;
__disable_irq(desc);
/*
* Hardware which has no wakeup source configuration facility
* requires that the non wakeup interrupts are masked at the
* chip level. The chip implementation indicates that with
* IRQCHIP_MASK_ON_SUSPEND.
*/
if (chipflags & IRQCHIP_MASK_ON_SUSPEND)
mask_irq(desc);
return true;
}
/**
* suspend_device_irqs - disable all currently enabled interrupt lines
*
* During system-wide suspend or hibernation device drivers need to be
* prevented from receiving interrupts and this function is provided
* for this purpose.
*
* So we disable all interrupts and mark them IRQS_SUSPENDED except
* for those which are unused, those which are marked as not
* suspendable via an interrupt request with the flag IRQF_NO_SUSPEND
* set and those which are marked as active wakeup sources.
*
* The active wakeup sources are handled by the flow handler entry
* code which checks for the IRQD_WAKEUP_ARMED flag, suspends the
* interrupt and notifies the pm core about the wakeup.
*/
void suspend_device_irqs(void)
{
struct irq_desc *desc;
int irq;
for_each_irq_desc(irq, desc) {
unsigned long flags;
bool sync;
if (irq_settings_is_nested_thread(desc))
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
sync = suspend_device_irq(desc);
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (sync)
synchronize_irq(irq);
}
}
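/*
 * A sketch of how a driver keeps its interrupt alive across this path,
 * assuming a hypothetical always-on device (names illustrative): requesting
 * the line with IRQF_NO_SUSPEND raises no_suspend_depth and exempts the
 * descriptor in suspend_device_irq() above:
 *
 *	ret = request_irq(irq, foo_handler, IRQF_NO_SUSPEND, "foo", foo);
 */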
static void resume_irq(struct irq_desc *desc)
{
struct irq_data *irqd = &desc->irq_data;
irqd_clear(irqd, IRQD_WAKEUP_ARMED);
if (irqd_is_enabled_on_suspend(irqd)) {
/*
		 * The interrupt marked for wakeup was enabled during suspend
		 * entry. Disable such interrupts to restore them to their
		 * original state.
*/
__disable_irq(desc);
irqd_clear(irqd, IRQD_IRQ_ENABLED_ON_SUSPEND);
}
if (desc->istate & IRQS_SUSPENDED)
goto resume;
/* Force resume the interrupt? */
if (!desc->force_resume_depth)
return;
	/* Pretend that it got disabled! */
desc->depth++;
irq_state_set_disabled(desc);
irq_state_set_masked(desc);
resume:
desc->istate &= ~IRQS_SUSPENDED;
__enable_irq(desc);
}
static void resume_irqs(bool want_early)
{
struct irq_desc *desc;
int irq;
for_each_irq_desc(irq, desc) {
unsigned long flags;
bool is_early = desc->action &&
desc->action->flags & IRQF_EARLY_RESUME;
if (!is_early && want_early)
continue;
if (irq_settings_is_nested_thread(desc))
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
resume_irq(desc);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
/**
* rearm_wake_irq - rearm a wakeup interrupt line after signaling wakeup
* @irq: Interrupt to rearm
*/
void rearm_wake_irq(unsigned int irq)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return;
if (!(desc->istate & IRQS_SUSPENDED) ||
!irqd_is_wakeup_set(&desc->irq_data))
goto unlock;
desc->istate &= ~IRQS_SUSPENDED;
irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
__enable_irq(desc);
unlock:
irq_put_desc_busunlock(desc, flags);
}
/**
* irq_pm_syscore_resume - enable interrupt lines early
*
* Enable all interrupt lines with %IRQF_EARLY_RESUME set.
*/
static void irq_pm_syscore_resume(void)
{
resume_irqs(true);
}
static struct syscore_ops irq_pm_syscore_ops = {
.resume = irq_pm_syscore_resume,
};
static int __init irq_pm_init_ops(void)
{
register_syscore_ops(&irq_pm_syscore_ops);
return 0;
}
device_initcall(irq_pm_init_ops);
/**
* resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
*
* Enable all non-%IRQF_EARLY_RESUME interrupt lines previously
* disabled by suspend_device_irqs() that have the IRQS_SUSPENDED flag
* set as well as those with %IRQF_FORCE_RESUME.
*/
void resume_device_irqs(void)
{
resume_irqs(false);
}
| linux-master | kernel/irq/pm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2017-2018 Bartosz Golaszewski <[email protected]>
* Copyright (C) 2020 Bartosz Golaszewski <[email protected]>
*/
#include <linux/irq.h>
#include <linux/irq_sim.h>
#include <linux/irq_work.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
struct irq_sim_work_ctx {
struct irq_work work;
int irq_base;
unsigned int irq_count;
unsigned long *pending;
struct irq_domain *domain;
};
struct irq_sim_irq_ctx {
int irqnum;
bool enabled;
struct irq_sim_work_ctx *work_ctx;
};
static void irq_sim_irqmask(struct irq_data *data)
{
struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
irq_ctx->enabled = false;
}
static void irq_sim_irqunmask(struct irq_data *data)
{
struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
irq_ctx->enabled = true;
}
static int irq_sim_set_type(struct irq_data *data, unsigned int type)
{
/* We only support rising and falling edge trigger types. */
if (type & ~IRQ_TYPE_EDGE_BOTH)
return -EINVAL;
irqd_set_trigger_type(data, type);
return 0;
}
static int irq_sim_get_irqchip_state(struct irq_data *data,
enum irqchip_irq_state which, bool *state)
{
struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
irq_hw_number_t hwirq = irqd_to_hwirq(data);
switch (which) {
case IRQCHIP_STATE_PENDING:
if (irq_ctx->enabled)
*state = test_bit(hwirq, irq_ctx->work_ctx->pending);
break;
default:
return -EINVAL;
}
return 0;
}
static int irq_sim_set_irqchip_state(struct irq_data *data,
enum irqchip_irq_state which, bool state)
{
struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
irq_hw_number_t hwirq = irqd_to_hwirq(data);
switch (which) {
case IRQCHIP_STATE_PENDING:
if (irq_ctx->enabled) {
assign_bit(hwirq, irq_ctx->work_ctx->pending, state);
if (state)
irq_work_queue(&irq_ctx->work_ctx->work);
}
break;
default:
return -EINVAL;
}
return 0;
}
static struct irq_chip irq_sim_irqchip = {
.name = "irq_sim",
.irq_mask = irq_sim_irqmask,
.irq_unmask = irq_sim_irqunmask,
.irq_set_type = irq_sim_set_type,
.irq_get_irqchip_state = irq_sim_get_irqchip_state,
.irq_set_irqchip_state = irq_sim_set_irqchip_state,
};
static void irq_sim_handle_irq(struct irq_work *work)
{
struct irq_sim_work_ctx *work_ctx;
unsigned int offset = 0;
int irqnum;
work_ctx = container_of(work, struct irq_sim_work_ctx, work);
while (!bitmap_empty(work_ctx->pending, work_ctx->irq_count)) {
offset = find_next_bit(work_ctx->pending,
work_ctx->irq_count, offset);
clear_bit(offset, work_ctx->pending);
irqnum = irq_find_mapping(work_ctx->domain, offset);
handle_simple_irq(irq_to_desc(irqnum));
}
}
static int irq_sim_domain_map(struct irq_domain *domain,
unsigned int virq, irq_hw_number_t hw)
{
struct irq_sim_work_ctx *work_ctx = domain->host_data;
struct irq_sim_irq_ctx *irq_ctx;
irq_ctx = kzalloc(sizeof(*irq_ctx), GFP_KERNEL);
if (!irq_ctx)
return -ENOMEM;
irq_set_chip(virq, &irq_sim_irqchip);
irq_set_chip_data(virq, irq_ctx);
irq_set_handler(virq, handle_simple_irq);
irq_modify_status(virq, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
irq_ctx->work_ctx = work_ctx;
return 0;
}
static void irq_sim_domain_unmap(struct irq_domain *domain, unsigned int virq)
{
struct irq_sim_irq_ctx *irq_ctx;
struct irq_data *irqd;
irqd = irq_domain_get_irq_data(domain, virq);
irq_ctx = irq_data_get_irq_chip_data(irqd);
irq_set_handler(virq, NULL);
irq_domain_reset_irq_data(irqd);
kfree(irq_ctx);
}
static const struct irq_domain_ops irq_sim_domain_ops = {
.map = irq_sim_domain_map,
.unmap = irq_sim_domain_unmap,
};
/**
* irq_domain_create_sim - Create a new interrupt simulator irq_domain and
* allocate a range of dummy interrupts.
*
* @fwnode: struct fwnode_handle to be associated with this domain.
* @num_irqs: Number of interrupts to allocate.
*
* On success: return a new irq_domain object.
* On failure: a negative errno wrapped with ERR_PTR().
*/
struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
unsigned int num_irqs)
{
struct irq_sim_work_ctx *work_ctx;
work_ctx = kmalloc(sizeof(*work_ctx), GFP_KERNEL);
if (!work_ctx)
goto err_out;
work_ctx->pending = bitmap_zalloc(num_irqs, GFP_KERNEL);
if (!work_ctx->pending)
goto err_free_work_ctx;
work_ctx->domain = irq_domain_create_linear(fwnode, num_irqs,
&irq_sim_domain_ops,
work_ctx);
if (!work_ctx->domain)
goto err_free_bitmap;
work_ctx->irq_count = num_irqs;
work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq);
return work_ctx->domain;
err_free_bitmap:
bitmap_free(work_ctx->pending);
err_free_work_ctx:
kfree(work_ctx);
err_out:
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(irq_domain_create_sim);
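/*
 * A usage sketch, e.g. from a test module (names illustrative): create a
 * simulator domain, map a hwirq, request it and fire it by setting its
 * pending state:
 *
 *	domain = irq_domain_create_sim(fwnode, 8);
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *	virq = irq_create_mapping(domain, 0);
 *	ret = request_irq(virq, test_handler, 0, "sim-test", NULL);
 *	...
 *	irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, true);
 */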
/**
* irq_domain_remove_sim - Deinitialize the interrupt simulator domain: free
* the interrupt descriptors and allocated memory.
*
* @domain: The interrupt simulator domain to tear down.
*/
void irq_domain_remove_sim(struct irq_domain *domain)
{
struct irq_sim_work_ctx *work_ctx = domain->host_data;
irq_work_sync(&work_ctx->work);
bitmap_free(work_ctx->pending);
kfree(work_ctx);
irq_domain_remove(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove_sim);
static void devm_irq_domain_remove_sim(void *data)
{
struct irq_domain *domain = data;
irq_domain_remove_sim(domain);
}
/**
* devm_irq_domain_create_sim - Create a new interrupt simulator for
* a managed device.
*
* @dev: Device to initialize the simulator object for.
* @fwnode: struct fwnode_handle to be associated with this domain.
* @num_irqs: Number of interrupts to allocate
*
* On success: return a new irq_domain object.
* On failure: a negative errno wrapped with ERR_PTR().
*/
struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
struct fwnode_handle *fwnode,
unsigned int num_irqs)
{
struct irq_domain *domain;
int ret;
domain = irq_domain_create_sim(fwnode, num_irqs);
if (IS_ERR(domain))
return domain;
ret = devm_add_action_or_reset(dev, devm_irq_domain_remove_sim, domain);
if (ret)
return ERR_PTR(ret);
return domain;
}
EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim);
| linux-master | kernel/irq/irq_sim.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
* Copyright (C) 2005-2006, Thomas Gleixner
*
* This file contains the IRQ-resend code
*
* If the interrupt is waiting to be processed, we try to re-run it.
* We can't directly run it from here since the caller might be in an
* interrupt-protected region. Not all irq controller chips can
* retrigger interrupts at the hardware level, so in those cases
* we allow the resending of IRQs via a tasklet.
*/
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include "internals.h"
#ifdef CONFIG_HARDIRQS_SW_RESEND
/* hlist_head to handle software resend of interrupts: */
static HLIST_HEAD(irq_resend_list);
static DEFINE_RAW_SPINLOCK(irq_resend_lock);
/*
 * Run software resends of IRQs.
*/
static void resend_irqs(struct tasklet_struct *unused)
{
struct irq_desc *desc;
raw_spin_lock_irq(&irq_resend_lock);
while (!hlist_empty(&irq_resend_list)) {
desc = hlist_entry(irq_resend_list.first, struct irq_desc,
resend_node);
hlist_del_init(&desc->resend_node);
raw_spin_unlock(&irq_resend_lock);
desc->handle_irq(desc);
raw_spin_lock(&irq_resend_lock);
}
raw_spin_unlock_irq(&irq_resend_lock);
}
/* Tasklet to handle resend: */
static DECLARE_TASKLET(resend_tasklet, resend_irqs);
static int irq_sw_resend(struct irq_desc *desc)
{
/*
* Validate whether this interrupt can be safely injected from
* non interrupt context
*/
if (handle_enforce_irqctx(&desc->irq_data))
return -EINVAL;
/*
* If the interrupt is running in the thread context of the parent
* irq we need to be careful, because we cannot trigger it
* directly.
*/
if (irq_settings_is_nested_thread(desc)) {
/*
* If the parent_irq is valid, we retrigger the parent,
* otherwise we do nothing.
*/
if (!desc->parent_irq)
return -EINVAL;
desc = irq_to_desc(desc->parent_irq);
if (!desc)
return -EINVAL;
}
/* Add to resend_list and activate the softirq: */
raw_spin_lock(&irq_resend_lock);
if (hlist_unhashed(&desc->resend_node))
hlist_add_head(&desc->resend_node, &irq_resend_list);
raw_spin_unlock(&irq_resend_lock);
tasklet_schedule(&resend_tasklet);
return 0;
}
void clear_irq_resend(struct irq_desc *desc)
{
raw_spin_lock(&irq_resend_lock);
hlist_del_init(&desc->resend_node);
raw_spin_unlock(&irq_resend_lock);
}
void irq_resend_init(struct irq_desc *desc)
{
INIT_HLIST_NODE(&desc->resend_node);
}
#else
void clear_irq_resend(struct irq_desc *desc) {}
void irq_resend_init(struct irq_desc *desc) {}
static int irq_sw_resend(struct irq_desc *desc)
{
return -EINVAL;
}
#endif
static int try_retrigger(struct irq_desc *desc)
{
if (desc->irq_data.chip->irq_retrigger)
return desc->irq_data.chip->irq_retrigger(&desc->irq_data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
return irq_chip_retrigger_hierarchy(&desc->irq_data);
#else
return 0;
#endif
}
/*
* IRQ resend
*
* Is called with interrupts disabled and desc->lock held.
*/
int check_irq_resend(struct irq_desc *desc, bool inject)
{
int err = 0;
/*
* We do not resend level type interrupts. Level type interrupts
* are resent by hardware when they are still active. Clear the
* pending bit so suspend/resume does not get confused.
*/
if (irq_settings_is_level(desc)) {
desc->istate &= ~IRQS_PENDING;
return -EINVAL;
}
if (desc->istate & IRQS_REPLAY)
return -EBUSY;
if (!(desc->istate & IRQS_PENDING) && !inject)
return 0;
desc->istate &= ~IRQS_PENDING;
if (!try_retrigger(desc))
err = irq_sw_resend(desc);
/* If the retrigger was successful, mark it with the REPLAY bit */
if (!err)
desc->istate |= IRQS_REPLAY;
return err;
}
#ifdef CONFIG_GENERIC_IRQ_INJECTION
/**
* irq_inject_interrupt - Inject an interrupt for testing/error injection
* @irq: The interrupt number
*
* This function must only be used for debug and testing purposes!
*
* Especially on x86 this can cause a premature completion of an interrupt
* affinity change causing the interrupt line to become stale. Very
* unlikely, but possible.
*
* The injection can fail for various reasons:
* - Interrupt is not activated
* - Interrupt is NMI type or currently replaying
* - Interrupt is level type
* - Interrupt does not support hardware retrigger and software resend is
* either not enabled or not possible for the interrupt.
*/
int irq_inject_interrupt(unsigned int irq)
{
struct irq_desc *desc;
unsigned long flags;
int err;
/* Try the state injection hardware interface first */
if (!irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true))
return 0;
/* That failed, try via the resend mechanism */
desc = irq_get_desc_buslock(irq, &flags, 0);
if (!desc)
return -EINVAL;
/*
* Only try to inject when the interrupt is:
* - not NMI type
* - activated
*/
if ((desc->istate & IRQS_NMI) || !irqd_is_activated(&desc->irq_data))
err = -EINVAL;
else
err = check_irq_resend(desc, true);
irq_put_desc_busunlock(desc, flags);
return err;
}
EXPORT_SYMBOL_GPL(irq_inject_interrupt);
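/*
 * A usage sketch from a hypothetical error injection test (illustrative):
 *
 *	err = irq_inject_interrupt(irq);
 *	if (err)
 *		pr_info("irq %u could not be injected: %d\n", irq, err);
 */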
#endif
| linux-master | kernel/irq/resend.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
*
* This file contains the interrupt probing code and driver APIs.
*/
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/async.h>
#include "internals.h"
/*
* Autodetection depends on the fact that any interrupt that
 * comes in on an unassigned handler will get stuck with
* "IRQS_WAITING" cleared and the interrupt disabled.
*/
static DEFINE_MUTEX(probing_active);
/**
* probe_irq_on - begin an interrupt autodetect
*
* Commence probing for an interrupt. The interrupts are scanned
* and a mask of potential interrupt lines is returned.
*
*/
unsigned long probe_irq_on(void)
{
struct irq_desc *desc;
unsigned long mask = 0;
int i;
/*
* quiesce the kernel, or at least the asynchronous portion
*/
async_synchronize_full();
mutex_lock(&probing_active);
/*
* something may have generated an irq long ago and we want to
* flush such a longstanding irq before considering it as spurious.
*/
for_each_irq_desc_reverse(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (!desc->action && irq_settings_can_probe(desc)) {
/*
* Some chips need to know about probing in
* progress:
*/
if (desc->irq_data.chip->irq_set_type)
desc->irq_data.chip->irq_set_type(&desc->irq_data,
IRQ_TYPE_PROBE);
irq_activate_and_startup(desc, IRQ_NORESEND);
}
raw_spin_unlock_irq(&desc->lock);
}
/* Wait for longstanding interrupts to trigger. */
msleep(20);
/*
* enable any unassigned irqs
* (we must startup again here because if a longstanding irq
* happened in the previous stage, it may have masked itself)
*/
for_each_irq_desc_reverse(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (!desc->action && irq_settings_can_probe(desc)) {
desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
if (irq_activate_and_startup(desc, IRQ_NORESEND))
desc->istate |= IRQS_PENDING;
}
raw_spin_unlock_irq(&desc->lock);
}
/*
* Wait for spurious interrupts to trigger
*/
msleep(100);
/*
* Now filter out any obviously spurious interrupts
*/
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (desc->istate & IRQS_AUTODETECT) {
/* It triggered already - consider it spurious. */
if (!(desc->istate & IRQS_WAITING)) {
desc->istate &= ~IRQS_AUTODETECT;
irq_shutdown_and_deactivate(desc);
} else
if (i < 32)
mask |= 1 << i;
}
raw_spin_unlock_irq(&desc->lock);
}
return mask;
}
EXPORT_SYMBOL(probe_irq_on);
/**
* probe_irq_mask - scan a bitmap of interrupt lines
* @val: mask of interrupts to consider
*
* Scan the interrupt lines and return a bitmap of active
* autodetect interrupts. The interrupt probe logic state
* is then returned to its previous value.
*
 * Note: we need to scan all the irqs even though we will
* only return autodetect irq numbers - just so that we reset
* them all to a known state.
*/
unsigned int probe_irq_mask(unsigned long val)
{
unsigned int mask = 0;
struct irq_desc *desc;
int i;
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (desc->istate & IRQS_AUTODETECT) {
if (i < 16 && !(desc->istate & IRQS_WAITING))
mask |= 1 << i;
desc->istate &= ~IRQS_AUTODETECT;
irq_shutdown_and_deactivate(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
return mask & val;
}
EXPORT_SYMBOL(probe_irq_mask);
/**
* probe_irq_off - end an interrupt autodetect
* @val: mask of potential interrupts (unused)
*
* Scans the unused interrupt lines and returns the line which
* appears to have triggered the interrupt. If no interrupt was
* found then zero is returned. If more than one interrupt is
 * found then the negative of the first candidate is returned to
 * indicate that there is doubt.
*
* The interrupt probe logic state is returned to its previous
* value.
*
* BUGS: When used in a module (which arguably shouldn't happen)
* nothing prevents two IRQ probe callers from overlapping. The
* results of this are non-optimal.
*/
int probe_irq_off(unsigned long val)
{
int i, irq_found = 0, nr_of_irqs = 0;
struct irq_desc *desc;
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (desc->istate & IRQS_AUTODETECT) {
if (!(desc->istate & IRQS_WAITING)) {
if (!nr_of_irqs)
irq_found = i;
nr_of_irqs++;
}
desc->istate &= ~IRQS_AUTODETECT;
irq_shutdown_and_deactivate(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
if (nr_of_irqs > 1)
irq_found = -irq_found;
return irq_found;
}
EXPORT_SYMBOL(probe_irq_off);
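/*
 * The classic autoprobe sequence, assuming a hypothetical legacy "foo"
 * card (names illustrative). The card is told to assert its line between
 * the two calls; a positive return is the probed irq, zero means nothing
 * triggered and a negative value signals an ambiguous result:
 *
 *	unsigned long mask = probe_irq_on();
 *
 *	foo_trigger_interrupt(foo);
 *	mdelay(10);
 *	foo->irq = probe_irq_off(mask);
 */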
| linux-master | kernel/irq/autoprobe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
* Copyright (C) 2005-2006, Thomas Gleixner, Russell King
*
* This file contains the dummy interrupt chip implementation
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/export.h>
#include "internals.h"
/*
* What should we do if we get a hw irq event on an illegal vector?
* Each architecture has to answer this themselves.
*/
static void ack_bad(struct irq_data *data)
{
struct irq_desc *desc = irq_data_to_desc(data);
print_irq_desc(data->irq, desc);
ack_bad_irq(data->irq);
}
/*
* NOP functions
*/
static void noop(struct irq_data *data) { }
static unsigned int noop_ret(struct irq_data *data)
{
return 0;
}
/*
* Generic no controller implementation
*/
struct irq_chip no_irq_chip = {
.name = "none",
.irq_startup = noop_ret,
.irq_shutdown = noop,
.irq_enable = noop,
.irq_disable = noop,
.irq_ack = ack_bad,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
/*
* Generic dummy implementation which can be used for
* real dumb interrupt sources
*/
struct irq_chip dummy_irq_chip = {
.name = "dummy",
.irq_startup = noop_ret,
.irq_shutdown = noop,
.irq_enable = noop,
.irq_disable = noop,
.irq_ack = noop,
.irq_mask = noop,
.irq_unmask = noop,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
EXPORT_SYMBOL_GPL(dummy_irq_chip);
| linux-master | kernel/irq/dummychip.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
*
* This file contains the /proc/irq/ handling code.
*/
#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include "internals.h"
/*
* Access rules:
*
* procfs protects read/write of /proc/irq/N/ files against a
* concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes from happening and waits for
* already running read/write functions to complete.
*
* We remove the proc entries first and then delete the interrupt
* descriptor from the radix tree and free it. So it is guaranteed
* that irq_to_desc(N) is valid as long as the read/writes are
* permitted by procfs.
*
* The read from /proc/interrupts is a different problem because there
* is no protection. So the lookup and the access to irqdesc
* information must be protected by sparse_irq_lock.
*/
static struct proc_dir_entry *root_irq_dir;
#ifdef CONFIG_SMP
enum {
AFFINITY,
AFFINITY_LIST,
EFFECTIVE,
EFFECTIVE_LIST,
};
static int show_irq_affinity(int type, struct seq_file *m)
{
struct irq_desc *desc = irq_to_desc((long)m->private);
const struct cpumask *mask;
switch (type) {
case AFFINITY:
case AFFINITY_LIST:
mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (irqd_is_setaffinity_pending(&desc->irq_data))
mask = desc->pending_mask;
#endif
break;
case EFFECTIVE:
case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
break;
#endif
default:
return -EINVAL;
}
switch (type) {
case AFFINITY_LIST:
case EFFECTIVE_LIST:
seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
break;
case AFFINITY:
case EFFECTIVE:
seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
break;
}
return 0;
}
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long)m->private);
unsigned long flags;
cpumask_var_t mask;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->affinity_hint)
cpumask_copy(mask, desc->affinity_hint);
raw_spin_unlock_irqrestore(&desc->lock, flags);
seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
free_cpumask_var(mask);
return 0;
}
int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
return show_irq_affinity(AFFINITY, m);
}
static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
return show_irq_affinity(AFFINITY_LIST, m);
}
#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
/*
* If the interrupt is started up already then this fails. The
* interrupt is assigned to an online CPU already. There is no
	 * point in moving it around randomly. Tell user space that the
* selected mask is bogus.
*
* If not then any change to the affinity is pointless because the
* startup code invokes irq_setup_affinity() which will select
	 * an online CPU anyway.
*/
return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
return irq_select_affinity(irq);
}
#endif
static ssize_t write_irq_affinity(int type, struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
unsigned int irq = (int)(long)pde_data(file_inode(file));
cpumask_var_t new_value;
int err;
if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
return -EIO;
if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
return -ENOMEM;
if (type)
err = cpumask_parselist_user(buffer, count, new_value);
else
err = cpumask_parse_user(buffer, count, new_value);
if (err)
goto free_cpumask;
/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
* one online CPU still has to be targeted.
*/
if (!cpumask_intersects(new_value, cpu_online_mask)) {
/*
* Special case for empty set - allow the architecture code
* to set default SMP affinity.
*/
err = irq_select_affinity_usr(irq) ? -EINVAL : count;
} else {
err = irq_set_affinity(irq, new_value);
if (!err)
err = count;
}
free_cpumask:
free_cpumask_var(new_value);
return err;
}
static ssize_t irq_affinity_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
return write_irq_affinity(0, file, buffer, count, pos);
}
static ssize_t irq_affinity_list_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
return write_irq_affinity(1, file, buffer, count, pos);
}
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, irq_affinity_proc_show, pde_data(inode));
}
static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, irq_affinity_list_proc_show, pde_data(inode));
}
static const struct proc_ops irq_affinity_proc_ops = {
.proc_open = irq_affinity_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = irq_affinity_proc_write,
};
static const struct proc_ops irq_affinity_list_proc_ops = {
.proc_open = irq_affinity_list_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = irq_affinity_list_proc_write,
};
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
return show_irq_affinity(EFFECTIVE, m);
}
static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif
static int default_affinity_show(struct seq_file *m, void *v)
{
seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
return 0;
}
static ssize_t default_affinity_write(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos)
{
cpumask_var_t new_value;
int err;
if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
return -ENOMEM;
err = cpumask_parse_user(buffer, count, new_value);
if (err)
goto out;
/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
* one online CPU still has to be targeted.
*/
if (!cpumask_intersects(new_value, cpu_online_mask)) {
err = -EINVAL;
goto out;
}
cpumask_copy(irq_default_affinity, new_value);
err = count;
out:
free_cpumask_var(new_value);
return err;
}
static int default_affinity_open(struct inode *inode, struct file *file)
{
return single_open(file, default_affinity_show, pde_data(inode));
}
static const struct proc_ops default_affinity_proc_ops = {
.proc_open = default_affinity_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = default_affinity_write,
};
static int irq_node_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long) m->private);
seq_printf(m, "%d\n", irq_desc_get_node(desc));
return 0;
}
#endif
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long) m->private);
seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
desc->irq_count, desc->irqs_unhandled,
jiffies_to_msecs(desc->last_unhandled));
return 0;
}
#define MAX_NAMELEN 128
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
unsigned long flags;
int ret = 1;
raw_spin_lock_irqsave(&desc->lock, flags);
for_each_action_of_desc(desc, action) {
if ((action != new_action) && action->name &&
!strcmp(new_action->name, action->name)) {
ret = 0;
break;
}
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
char name [MAX_NAMELEN];
struct irq_desc *desc = irq_to_desc(irq);
if (!desc->dir || action->dir || !action->name ||
!name_unique(irq, action))
return;
snprintf(name, MAX_NAMELEN, "%s", action->name);
/* create /proc/irq/1234/handler/ */
action->dir = proc_mkdir(name, desc->dir);
}
#undef MAX_NAMELEN
#define MAX_NAMELEN 10
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
static DEFINE_MUTEX(register_lock);
void __maybe_unused *irqp = (void *)(unsigned long) irq;
char name [MAX_NAMELEN];
if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
return;
/*
* irq directories are registered only when a handler is
* added, not when the descriptor is created, so multiple
* tasks might try to register at the same time.
*/
mutex_lock(®ister_lock);
if (desc->dir)
goto out_unlock;
sprintf(name, "%d", irq);
/* create /proc/irq/1234 */
desc->dir = proc_mkdir(name, root_irq_dir);
if (!desc->dir)
goto out_unlock;
#ifdef CONFIG_SMP
/* create /proc/irq/<irq>/smp_affinity */
proc_create_data("smp_affinity", 0644, desc->dir,
&irq_affinity_proc_ops, irqp);
/* create /proc/irq/<irq>/affinity_hint */
proc_create_single_data("affinity_hint", 0444, desc->dir,
irq_affinity_hint_proc_show, irqp);
/* create /proc/irq/<irq>/smp_affinity_list */
proc_create_data("smp_affinity_list", 0644, desc->dir,
&irq_affinity_list_proc_ops, irqp);
proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
proc_create_single_data("effective_affinity", 0444, desc->dir,
irq_effective_aff_proc_show, irqp);
proc_create_single_data("effective_affinity_list", 0444, desc->dir,
irq_effective_aff_list_proc_show, irqp);
# endif
#endif
proc_create_single_data("spurious", 0444, desc->dir,
irq_spurious_proc_show, (void *)(long)irq);
out_unlock:
mutex_unlock(®ister_lock);
}
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
char name [MAX_NAMELEN];
if (!root_irq_dir || !desc->dir)
return;
#ifdef CONFIG_SMP
remove_proc_entry("smp_affinity", desc->dir);
remove_proc_entry("affinity_hint", desc->dir);
remove_proc_entry("smp_affinity_list", desc->dir);
remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
remove_proc_entry("effective_affinity", desc->dir);
remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
remove_proc_entry("spurious", desc->dir);
sprintf(name, "%u", irq);
remove_proc_entry(name, root_irq_dir);
}
#undef MAX_NAMELEN
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
proc_remove(action->dir);
}
static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
proc_create("irq/default_smp_affinity", 0644, NULL,
&default_affinity_proc_ops);
#endif
}
void init_irq_proc(void)
{
unsigned int irq;
struct irq_desc *desc;
/* create /proc/irq */
root_irq_dir = proc_mkdir("irq", NULL);
if (!root_irq_dir)
return;
register_default_affinity_proc();
/*
* Create entries for all existing IRQs.
*/
for_each_irq_desc(irq, desc)
register_irq_proc(irq, desc);
}
#ifdef CONFIG_GENERIC_IRQ_SHOW
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
return 0;
}
#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif
int show_interrupts(struct seq_file *p, void *v)
{
static int prec;
unsigned long flags, any_count = 0;
int i = *(loff_t *) v, j;
struct irqaction *action;
struct irq_desc *desc;
if (i > ACTUAL_NR_IRQS)
return 0;
if (i == ACTUAL_NR_IRQS)
return arch_show_interrupts(p, prec);
/* print header and calculate the width of the first column */
if (i == 0) {
for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
j *= 10;
seq_printf(p, "%*s", prec + 8, "");
for_each_online_cpu(j)
seq_printf(p, "CPU%-8d", j);
seq_putc(p, '\n');
}
rcu_read_lock();
desc = irq_to_desc(i);
if (!desc || irq_settings_is_hidden(desc))
goto outsparse;
if (desc->kstat_irqs) {
for_each_online_cpu(j)
any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
}
if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
goto outsparse;
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", desc->kstat_irqs ?
*per_cpu_ptr(desc->kstat_irqs, j) : 0);
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->irq_data.chip) {
if (desc->irq_data.chip->irq_print_chip)
desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
else if (desc->irq_data.chip->name)
seq_printf(p, " %8s", desc->irq_data.chip->name);
else
seq_printf(p, " %8s", "-");
} else {
seq_printf(p, " %8s", "None");
}
if (desc->irq_data.domain)
seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
else
seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
if (desc->name)
seq_printf(p, "-%-8s", desc->name);
action = desc->action;
if (action) {
seq_printf(p, " %s", action->name);
while ((action = action->next) != NULL)
seq_printf(p, ", %s", action->name);
}
seq_putc(p, '\n');
raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
rcu_read_unlock();
return 0;
}
#endif
| linux-master | kernel/irq/proc.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/irq.h>
#include "internals.h"
/*
* Device resource management aware IRQ request/free implementation.
*/
struct irq_devres {
unsigned int irq;
void *dev_id;
};
static void devm_irq_release(struct device *dev, void *res)
{
struct irq_devres *this = res;
free_irq(this->irq, this->dev_id);
}
static int devm_irq_match(struct device *dev, void *res, void *data)
{
struct irq_devres *this = res, *match = data;
return this->irq == match->irq && this->dev_id == match->dev_id;
}
/**
* devm_request_threaded_irq - allocate an interrupt line for a managed device
* @dev: device to request interrupt for
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs
* @thread_fn: function to be called in a threaded interrupt context. NULL
* for devices which handle everything in @handler
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device, dev_name(dev) if NULL
* @dev_id: A cookie passed back to the handler function
*
* Except for the extra @dev argument, this function takes the
* same arguments and performs the same function as
* request_threaded_irq(). IRQs requested with this function will be
* automatically freed on driver detach.
*
* If an IRQ allocated with this function needs to be freed
* separately, devm_free_irq() must be used.
*/
int devm_request_threaded_irq(struct device *dev, unsigned int irq,
irq_handler_t handler, irq_handler_t thread_fn,
unsigned long irqflags, const char *devname,
void *dev_id)
{
struct irq_devres *dr;
int rc;
dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
GFP_KERNEL);
if (!dr)
return -ENOMEM;
if (!devname)
devname = dev_name(dev);
rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname,
dev_id);
if (rc) {
devres_free(dr);
return rc;
}
dr->irq = irq;
dr->dev_id = dev_id;
devres_add(dev, dr);
return 0;
}
EXPORT_SYMBOL(devm_request_threaded_irq);
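/*
* Illustrative sketch (not part of this file): typical use of
* devm_request_threaded_irq() from a driver probe routine. The foo_*
* names and the platform_get_irq() wiring are assumptions made for the
* example, not taken from this file.
*/
#if 0
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
/* Quick check in hard irq context, defer the heavy work */
return IRQ_WAKE_THREAD;
}
static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
/* Slow path runs in a kernel thread with interrupts enabled */
return IRQ_HANDLED;
}
static int foo_probe(struct platform_device *pdev)
{
int irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/* Freed automatically on driver detach, no explicit free_irq() */
return devm_request_threaded_irq(&pdev->dev, irq, foo_hardirq,
foo_thread_fn, IRQF_ONESHOT, NULL, pdev);
}
#endif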
/**
* devm_request_any_context_irq - allocate an interrupt line for a managed device
* @dev: device to request interrupt for
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device, dev_name(dev) if NULL
* @dev_id: A cookie passed back to the handler function
*
* Except for the extra @dev argument, this function takes the
* same arguments and performs the same function as
* request_any_context_irq(). IRQs requested with this function will be
* automatically freed on driver detach.
*
* If an IRQ allocated with this function needs to be freed
* separately, devm_free_irq() must be used.
*/
int devm_request_any_context_irq(struct device *dev, unsigned int irq,
irq_handler_t handler, unsigned long irqflags,
const char *devname, void *dev_id)
{
struct irq_devres *dr;
int rc;
dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
GFP_KERNEL);
if (!dr)
return -ENOMEM;
if (!devname)
devname = dev_name(dev);
rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
if (rc < 0) {
devres_free(dr);
return rc;
}
dr->irq = irq;
dr->dev_id = dev_id;
devres_add(dev, dr);
return rc;
}
EXPORT_SYMBOL(devm_request_any_context_irq);
/**
* devm_free_irq - free an interrupt
* @dev: device to free interrupt for
* @irq: Interrupt line to free
* @dev_id: Device identity to free
*
* Except for the extra @dev argument, this function takes the
* same arguments and performs the same function as free_irq().
* This function, rather than free_irq(), must be used to manually
* free IRQs allocated with devm_request_irq().
*/
void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
{
struct irq_devres match_data = { irq, dev_id };
WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match,
&match_data));
free_irq(irq, dev_id);
}
EXPORT_SYMBOL(devm_free_irq);
struct irq_desc_devres {
unsigned int from;
unsigned int cnt;
};
static void devm_irq_desc_release(struct device *dev, void *res)
{
struct irq_desc_devres *this = res;
irq_free_descs(this->from, this->cnt);
}
/**
* __devm_irq_alloc_descs - Allocate and initialize a range of irq descriptors
* for a managed device
* @dev: Device to allocate the descriptors for
* @irq: Allocate for specific irq number if irq >= 0
* @from: Start the search from this irq number
* @cnt: Number of consecutive irqs to allocate
* @node: Preferred node on which the irq descriptor should be allocated
* @owner: Owning module (can be NULL)
* @affinity: Optional pointer to an irq_affinity_desc array of size @cnt
* which hints where the irq descriptors should be allocated
* and which default affinities to use
*
* Returns the first irq number or error code.
*
* Note: Use the provided wrappers (devm_irq_alloc_desc*) for simplicity.
*/
int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
unsigned int cnt, int node, struct module *owner,
const struct irq_affinity_desc *affinity)
{
struct irq_desc_devres *dr;
int base;
dr = devres_alloc(devm_irq_desc_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
base = __irq_alloc_descs(irq, from, cnt, node, owner, affinity);
if (base < 0) {
devres_free(dr);
return base;
}
dr->from = base;
dr->cnt = cnt;
devres_add(dev, dr);
return base;
}
EXPORT_SYMBOL_GPL(__devm_irq_alloc_descs);
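/*
* Illustrative sketch (not part of this file): allocating a managed
* range of descriptors via the devm_irq_alloc_descs() wrapper, which
* expands to __devm_irq_alloc_descs(). The count and the calling
* context are assumptions for the example.
*/
#if 0
static int foo_alloc_irqs(struct device *dev)
{
/* Search from 0, allocate 4 consecutive descriptors on any node */
int base = devm_irq_alloc_descs(dev, -1, 0, 4, NUMA_NO_NODE);
if (base < 0)
return base;
/* Descriptors base..base+3 are freed automatically on detach */
return base;
}
#endif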
#ifdef CONFIG_GENERIC_IRQ_CHIP
/**
* devm_irq_alloc_generic_chip - Allocate and initialize a generic chip
* for a managed device
* @dev: Device to allocate the generic chip for
* @name: Name of the irq chip
* @num_ct: Number of irq_chip_type instances associated with this
* @irq_base: Interrupt base nr for this chip
* @reg_base: Register base address (virtual)
* @handler: Default flow handler associated with this chip
*
* Returns an initialized irq_chip_generic structure. The chip defaults
* to the primary (index 0) irq_chip_type and @handler
*/
struct irq_chip_generic *
devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
unsigned int irq_base, void __iomem *reg_base,
irq_flow_handler_t handler)
{
struct irq_chip_generic *gc;
gc = devm_kzalloc(dev, struct_size(gc, chip_types, num_ct), GFP_KERNEL);
if (gc)
irq_init_generic_chip(gc, name, num_ct,
irq_base, reg_base, handler);
return gc;
}
EXPORT_SYMBOL_GPL(devm_irq_alloc_generic_chip);
struct irq_generic_chip_devres {
struct irq_chip_generic *gc;
u32 msk;
unsigned int clr;
unsigned int set;
};
static void devm_irq_remove_generic_chip(struct device *dev, void *res)
{
struct irq_generic_chip_devres *this = res;
irq_remove_generic_chip(this->gc, this->msk, this->clr, this->set);
}
/**
* devm_irq_setup_generic_chip - Setup a range of interrupts with a generic
* chip for a managed device
*
* @dev: Device to setup the generic chip for
* @gc: Generic irq chip holding all data
* @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
* @flags: Flags for initialization
* @clr: IRQ_* bits to clear
* @set: IRQ_* bits to set
*
* Set up max. 32 interrupts starting from gc->irq_base. Note, this
* initializes all interrupts to the primary irq_chip_type and its
* associated handler.
*/
int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
u32 msk, enum irq_gc_flags flags,
unsigned int clr, unsigned int set)
{
struct irq_generic_chip_devres *dr;
dr = devres_alloc(devm_irq_remove_generic_chip,
sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
irq_setup_generic_chip(gc, msk, flags, clr, set);
dr->gc = gc;
dr->msk = msk;
dr->clr = clr;
dr->set = set;
devres_add(dev, dr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_irq_setup_generic_chip);
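/*
* Illustrative sketch (not part of this file): pairing
* devm_irq_alloc_generic_chip() with devm_irq_setup_generic_chip() so
* that both the allocation and the chip registration are unwound on
* driver detach. The register offset and irq base are assumptions.
*/
#if 0
static int foo_setup_gc(struct device *dev, void __iomem *regs,
unsigned int irq_base)
{
struct irq_chip_generic *gc;
gc = devm_irq_alloc_generic_chip(dev, "foo", 1, irq_base, regs,
handle_level_irq);
if (!gc)
return -ENOMEM;
gc->chip_types[0].regs.mask = 0x04; /* assumed register layout */
gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
/* Initialize all 32 interrupts and read back the mask cache */
return devm_irq_setup_generic_chip(dev, gc, IRQ_MSK(32),
IRQ_GC_INIT_MASK_CACHE, 0, 0);
}
#endif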
#endif /* CONFIG_GENERIC_IRQ_CHIP */
| linux-master | kernel/irq/devres.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Library implementing the most common irq chip callback functions
*
* Copyright (C) 2011, Thomas Gleixner
*/
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>
#include "internals.h"
static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);
/**
* irq_gc_noop - NOOP function
* @d: irq_data
*/
void irq_gc_noop(struct irq_data *d)
{
}
EXPORT_SYMBOL_GPL(irq_gc_noop);
/**
* irq_gc_mask_disable_reg - Mask chip via disable register
* @d: irq_data
*
* Chip has separate enable/disable registers instead of a single mask
* register.
*/
void irq_gc_mask_disable_reg(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.disable);
*ct->mask_cache &= ~mask;
irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_disable_reg);
/**
* irq_gc_mask_set_bit - Mask chip via setting bit in mask register
* @d: irq_data
*
* Chip has a single mask register. Values of this register are cached
* and protected by gc->lock
*/
void irq_gc_mask_set_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
*ct->mask_cache |= mask;
irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
/**
* irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
* @d: irq_data
*
* Chip has a single mask register. Values of this register are cached
* and protected by gc->lock
*/
void irq_gc_mask_clr_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
*ct->mask_cache &= ~mask;
irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
/**
* irq_gc_unmask_enable_reg - Unmask chip via enable register
* @d: irq_data
*
* Chip has separate enable/disable registers instead of a single mask
* register.
*/
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.enable);
*ct->mask_cache |= mask;
irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_unmask_enable_reg);
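/*
* Illustrative sketch (not part of this file): an irqchip driver with
* separate enable/disable registers wires these library callbacks into
* its irq_chip_type. The register offsets are assumptions.
*/
#if 0
static void foo_wire_chip_type(struct irq_chip_generic *gc)
{
struct irq_chip_type *ct = gc->chip_types;
ct->regs.enable = 0x00; /* assumed offsets */
ct->regs.disable = 0x04;
ct->chip.irq_mask = irq_gc_mask_disable_reg;
ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
}
#endif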
/**
* irq_gc_ack_set_bit - Ack pending interrupt via setting bit
* @d: irq_data
*/
void irq_gc_ack_set_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.ack);
irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
/**
* irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
* @d: irq_data
*/
void irq_gc_ack_clr_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = ~d->mask;
irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.ack);
irq_gc_unlock(gc);
}
/**
* irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
* @d: irq_data
*
* This generic implementation of the irq_mask_ack method is for chips
* with separate enable/disable registers instead of a single mask
* register and where a pending interrupt is acknowledged by setting a
* bit.
*
* Note: This is the only permutation currently used. Similar generic
* functions should be added here if other permutations are required.
*/
void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.disable);
*ct->mask_cache &= ~mask;
irq_reg_writel(gc, mask, ct->regs.ack);
irq_gc_unlock(gc);
}
/**
* irq_gc_eoi - EOI interrupt
* @d: irq_data
*/
void irq_gc_eoi(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.eoi);
irq_gc_unlock(gc);
}
/**
* irq_gc_set_wake - Set/clr wake bit for an interrupt
* @d: irq_data
* @on: Indicates whether the wake bit should be set or cleared
*
* For chips where the wake from suspend functionality is not
* configured in a separate register and the wakeup active state is
* just stored in a bitmask.
*/
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
u32 mask = d->mask;
if (!(mask & gc->wake_enabled))
return -EINVAL;
irq_gc_lock(gc);
if (on)
gc->wake_active |= mask;
else
gc->wake_active &= ~mask;
irq_gc_unlock(gc);
return 0;
}
EXPORT_SYMBOL_GPL(irq_gc_set_wake);
static u32 irq_readl_be(void __iomem *addr)
{
return ioread32be(addr);
}
static void irq_writel_be(u32 val, void __iomem *addr)
{
iowrite32be(val, addr);
}
void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
int num_ct, unsigned int irq_base,
void __iomem *reg_base, irq_flow_handler_t handler)
{
raw_spin_lock_init(&gc->lock);
gc->num_ct = num_ct;
gc->irq_base = irq_base;
gc->reg_base = reg_base;
gc->chip_types->chip.name = name;
gc->chip_types->handler = handler;
}
/**
* irq_alloc_generic_chip - Allocate a generic chip and initialize it
* @name: Name of the irq chip
* @num_ct: Number of irq_chip_type instances associated with this
* @irq_base: Interrupt base nr for this chip
* @reg_base: Register base address (virtual)
* @handler: Default flow handler associated with this chip
*
* Returns an initialized irq_chip_generic structure. The chip defaults
* to the primary (index 0) irq_chip_type and @handler
*/
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
void __iomem *reg_base, irq_flow_handler_t handler)
{
struct irq_chip_generic *gc;
gc = kzalloc(struct_size(gc, chip_types, num_ct), GFP_KERNEL);
if (gc) {
irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
handler);
}
return gc;
}
EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
static void
irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
{
struct irq_chip_type *ct = gc->chip_types;
u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
int i;
for (i = 0; i < gc->num_ct; i++) {
if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
mskptr = &ct[i].mask_cache_priv;
mskreg = ct[i].regs.mask;
}
ct[i].mask_cache = mskptr;
if (flags & IRQ_GC_INIT_MASK_CACHE)
*mskptr = irq_reg_readl(gc, mskreg);
}
}
/**
* __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
* @d: irq domain for which to allocate chips
* @irqs_per_chip: Number of interrupts each chip handles (max 32)
* @num_ct: Number of irq_chip_type instances associated with this
* @name: Name of the irq chip
* @handler: Default flow handler associated with these chips
* @clr: IRQ_* bits to clear in the mapping function
* @set: IRQ_* bits to set in the mapping function
* @gcflags: Generic chip specific setup flags
*/
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
int num_ct, const char *name,
irq_flow_handler_t handler,
unsigned int clr, unsigned int set,
enum irq_gc_flags gcflags)
{
struct irq_domain_chip_generic *dgc;
struct irq_chip_generic *gc;
unsigned long flags;
int numchips, i;
size_t dgc_sz;
size_t gc_sz;
size_t sz;
void *tmp;
if (d->gc)
return -EBUSY;
numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
if (!numchips)
return -EINVAL;
/* Allocate a pointer, generic chip and chiptypes for each chip */
gc_sz = struct_size(gc, chip_types, num_ct);
dgc_sz = struct_size(dgc, gc, numchips);
sz = dgc_sz + numchips * gc_sz;
tmp = dgc = kzalloc(sz, GFP_KERNEL);
if (!dgc)
return -ENOMEM;
dgc->irqs_per_chip = irqs_per_chip;
dgc->num_chips = numchips;
dgc->irq_flags_to_set = set;
dgc->irq_flags_to_clear = clr;
dgc->gc_flags = gcflags;
d->gc = dgc;
/* Calc pointer to the first generic chip */
tmp += dgc_sz;
for (i = 0; i < numchips; i++) {
/* Store the pointer to the generic chip */
dgc->gc[i] = gc = tmp;
irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
NULL, handler);
gc->domain = d;
if (gcflags & IRQ_GC_BE_IO) {
gc->reg_readl = &irq_readl_be;
gc->reg_writel = &irq_writel_be;
}
raw_spin_lock_irqsave(&gc_lock, flags);
list_add_tail(&gc->list, &gc_list);
raw_spin_unlock_irqrestore(&gc_lock, flags);
/* Calc pointer to the next generic chip */
tmp += gc_sz;
}
return 0;
}
EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
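/*
* Illustrative sketch (not part of this file): a driver with a linear
* irq domain typically calls the irq_alloc_domain_generic_chips()
* wrapper around this function. The chip geometry and flags are
* assumptions for the example.
*/
#if 0
static int foo_init_domain_chips(struct irq_domain *domain)
{
/* One chip per 32 hwirqs, one chip type, level flow handler */
return irq_alloc_domain_generic_chips(domain, 32, 1, "foo",
handle_level_irq, 0, 0,
IRQ_GC_INIT_MASK_CACHE);
}
#endif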
static struct irq_chip_generic *
__irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
struct irq_domain_chip_generic *dgc = d->gc;
int idx;
if (!dgc)
return ERR_PTR(-ENODEV);
idx = hw_irq / dgc->irqs_per_chip;
if (idx >= dgc->num_chips)
return ERR_PTR(-EINVAL);
return dgc->gc[idx];
}
/**
* irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
* @d: irq domain pointer
* @hw_irq: Hardware interrupt number
*/
struct irq_chip_generic *
irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
struct irq_chip_generic *gc = __irq_get_domain_generic_chip(d, hw_irq);
return !IS_ERR(gc) ? gc : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
/*
* Separate lockdep classes for interrupt chip which can nest irq_desc
* lock and request mutex.
*/
static struct lock_class_key irq_nested_lock_class;
static struct lock_class_key irq_nested_request_class;
/*
* irq_map_generic_chip - Map a generic chip for an irq domain
*/
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq)
{
struct irq_data *data = irq_domain_get_irq_data(d, virq);
struct irq_domain_chip_generic *dgc = d->gc;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
struct irq_chip *chip;
unsigned long flags;
int idx;
gc = __irq_get_domain_generic_chip(d, hw_irq);
if (IS_ERR(gc))
return PTR_ERR(gc);
idx = hw_irq % dgc->irqs_per_chip;
if (test_bit(idx, &gc->unused))
return -ENOTSUPP;
if (test_bit(idx, &gc->installed))
return -EBUSY;
ct = gc->chip_types;
chip = &ct->chip;
/* We only init the cache for the first mapping of a generic chip */
if (!gc->installed) {
raw_spin_lock_irqsave(&gc->lock, flags);
irq_gc_init_mask_cache(gc, dgc->gc_flags);
raw_spin_unlock_irqrestore(&gc->lock, flags);
}
/* Mark the interrupt as installed */
set_bit(idx, &gc->installed);
if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
irq_set_lockdep_class(virq, &irq_nested_lock_class,
&irq_nested_request_class);
if (chip->irq_calc_mask)
chip->irq_calc_mask(data);
else
data->mask = 1 << idx;
irq_domain_set_info(d, virq, hw_irq, chip, gc, ct->handler, NULL, NULL);
irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
return 0;
}
void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
{
struct irq_data *data = irq_domain_get_irq_data(d, virq);
struct irq_domain_chip_generic *dgc = d->gc;
unsigned int hw_irq = data->hwirq;
struct irq_chip_generic *gc;
int irq_idx;
gc = irq_get_domain_generic_chip(d, hw_irq);
if (!gc)
return;
irq_idx = hw_irq % dgc->irqs_per_chip;
clear_bit(irq_idx, &gc->installed);
irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
NULL);
}
const struct irq_domain_ops irq_generic_chip_ops = {
.map = irq_map_generic_chip,
.unmap = irq_unmap_generic_chip,
.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
/**
* irq_setup_generic_chip - Setup a range of interrupts with a generic chip
* @gc: Generic irq chip holding all data
* @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
* @flags: Flags for initialization
* @clr: IRQ_* bits to clear
* @set: IRQ_* bits to set
*
* Set up max. 32 interrupts starting from gc->irq_base. Note, this
* initializes all interrupts to the primary irq_chip_type and its
* associated handler.
*/
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
enum irq_gc_flags flags, unsigned int clr,
unsigned int set)
{
struct irq_chip_type *ct = gc->chip_types;
struct irq_chip *chip = &ct->chip;
unsigned int i;
raw_spin_lock(&gc_lock);
list_add_tail(&gc->list, &gc_list);
raw_spin_unlock(&gc_lock);
irq_gc_init_mask_cache(gc, flags);
for (i = gc->irq_base; msk; msk >>= 1, i++) {
if (!(msk & 0x01))
continue;
if (flags & IRQ_GC_INIT_NESTED_LOCK)
irq_set_lockdep_class(i, &irq_nested_lock_class,
&irq_nested_request_class);
if (!(flags & IRQ_GC_NO_MASK)) {
struct irq_data *d = irq_get_irq_data(i);
if (chip->irq_calc_mask)
chip->irq_calc_mask(d);
else
d->mask = 1 << (i - gc->irq_base);
}
irq_set_chip_and_handler(i, chip, ct->handler);
irq_set_chip_data(i, gc);
irq_modify_status(i, clr, set);
}
gc->irq_cnt = i - gc->irq_base;
}
EXPORT_SYMBOL_GPL(irq_setup_generic_chip);
/**
* irq_setup_alt_chip - Switch to alternative chip
* @d: irq_data for this interrupt
* @type: Flow type to be initialized
*
* Only to be called from chip->irq_set_type() callbacks.
*/
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = gc->chip_types;
unsigned int i;
for (i = 0; i < gc->num_ct; i++, ct++) {
if (ct->type & type) {
d->chip = &ct->chip;
irq_data_to_desc(d)->handle_irq = ct->handler;
return 0;
}
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
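/*
* Illustrative sketch (not part of this file): an irq_set_type()
* callback that switches between chip types via irq_setup_alt_chip().
* It assumes the driver populated ct->type with e.g.
* IRQ_TYPE_LEVEL_MASK for one type and IRQ_TYPE_EDGE_BOTH for another.
*/
#if 0
static int foo_irq_set_type(struct irq_data *d, unsigned int type)
{
/* Pick the chip type whose ct->type matches the requested flow */
return irq_setup_alt_chip(d, type);
}
#endif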
/**
* irq_remove_generic_chip - Remove a chip
* @gc: Generic irq chip holding all data
* @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
* @clr: IRQ_* bits to clear
* @set: IRQ_* bits to set
*
* Remove up to 32 interrupts starting from gc->irq_base.
*/
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
unsigned int clr, unsigned int set)
{
unsigned int i = gc->irq_base;
raw_spin_lock(&gc_lock);
list_del(&gc->list);
raw_spin_unlock(&gc_lock);
for (; msk; msk >>= 1, i++) {
if (!(msk & 0x01))
continue;
/* Remove handler first. That will mask the irq line */
irq_set_handler(i, NULL);
irq_set_chip(i, &no_irq_chip);
irq_set_chip_data(i, NULL);
irq_modify_status(i, clr, set);
}
}
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
{
unsigned int virq;
if (!gc->domain)
return irq_get_irq_data(gc->irq_base);
/*
* We don't know which of the irqs has been actually
* installed. Use the first one.
*/
if (!gc->installed)
return NULL;
virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
return virq ? irq_get_irq_data(virq) : NULL;
}
#ifdef CONFIG_PM
static int irq_gc_suspend(void)
{
struct irq_chip_generic *gc;
list_for_each_entry(gc, &gc_list, list) {
struct irq_chip_type *ct = gc->chip_types;
if (ct->chip.irq_suspend) {
struct irq_data *data = irq_gc_get_irq_data(gc);
if (data)
ct->chip.irq_suspend(data);
}
if (gc->suspend)
gc->suspend(gc);
}
return 0;
}
static void irq_gc_resume(void)
{
struct irq_chip_generic *gc;
list_for_each_entry(gc, &gc_list, list) {
struct irq_chip_type *ct = gc->chip_types;
if (gc->resume)
gc->resume(gc);
if (ct->chip.irq_resume) {
struct irq_data *data = irq_gc_get_irq_data(gc);
if (data)
ct->chip.irq_resume(data);
}
}
}
#else
#define irq_gc_suspend NULL
#define irq_gc_resume NULL
#endif
static void irq_gc_shutdown(void)
{
struct irq_chip_generic *gc;
list_for_each_entry(gc, &gc_list, list) {
struct irq_chip_type *ct = gc->chip_types;
if (ct->chip.irq_pm_shutdown) {
struct irq_data *data = irq_gc_get_irq_data(gc);
if (data)
ct->chip.irq_pm_shutdown(data);
}
}
}
static struct syscore_ops irq_gc_syscore_ops = {
.suspend = irq_gc_suspend,
.resume = irq_gc_resume,
.shutdown = irq_gc_shutdown,
};
static int __init irq_gc_init_ops(void)
{
register_syscore_ops(&irq_gc_syscore_ops);
return 0;
}
device_initcall(irq_gc_init_ops);
| linux-master | kernel/irq/generic-chip.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Imagination Technologies Ltd
* Author: Qais Yousef <[email protected]>
*
* This file contains driver APIs to the IPI subsystem.
*/
#define pr_fmt(fmt) "genirq/ipi: " fmt
#include <linux/irqdomain.h>
#include <linux/irq.h>
/**
* irq_reserve_ipi() - Setup an IPI to destination cpumask
* @domain: IPI domain
* @dest: cpumask of CPUs which can receive the IPI
*
* Allocate a virq that can be used to send IPI to any CPU in dest mask.
*
* Return: Linux IRQ number on success or error code on failure
*/
int irq_reserve_ipi(struct irq_domain *domain,
const struct cpumask *dest)
{
unsigned int nr_irqs, offset;
struct irq_data *data;
int virq, i;
if (!domain || !irq_domain_is_ipi(domain)) {
pr_warn("Reservation on a non IPI domain\n");
return -EINVAL;
}
if (!cpumask_subset(dest, cpu_possible_mask)) {
pr_warn("Reservation is not in possible_cpu_mask\n");
return -EINVAL;
}
nr_irqs = cpumask_weight(dest);
if (!nr_irqs) {
pr_warn("Reservation for empty destination mask\n");
return -EINVAL;
}
if (irq_domain_is_ipi_single(domain)) {
/*
* If the underlying implementation uses a single HW irq on
* all cpus then we only need a single Linux irq number for
* it. We have no restrictions vs. the destination mask. The
* underlying implementation can deal with holes nicely.
*/
nr_irqs = 1;
offset = 0;
} else {
unsigned int next;
/*
* The IPI requires a separate HW irq on each CPU. We require
* that the destination mask is consecutive. If an
* implementation needs to support holes, it can reserve
* several IPI ranges.
*/
offset = cpumask_first(dest);
/*
* Find a hole and, if found, look for another set bit after the
* hole. For now we don't support this scenario.
*/
next = cpumask_next_zero(offset, dest);
if (next < nr_cpu_ids)
next = cpumask_next(next, dest);
if (next < nr_cpu_ids) {
pr_warn("Destination mask has holes\n");
return -EINVAL;
}
}
virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
if (virq <= 0) {
pr_warn("Can't reserve IPI, failed to alloc descs\n");
return -ENOMEM;
}
virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
(void *) dest, true, NULL);
if (virq <= 0) {
pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
goto free_descs;
}
for (i = 0; i < nr_irqs; i++) {
data = irq_get_irq_data(virq + i);
cpumask_copy(data->common->affinity, dest);
data->common->ipi_offset = offset;
irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
}
return virq;
free_descs:
irq_free_descs(virq, nr_irqs);
return -EBUSY;
}
/**
* irq_destroy_ipi() - unreserve an IPI that was previously allocated
* @irq: Linux IRQ number to be destroyed
* @dest: cpumask of CPUs which should have the IPI removed
*
* The IPIs allocated with irq_reserve_ipi() are returned to the system,
* destroying all virqs associated with them.
*
* Return: %0 on success or error code on failure.
*/
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
{
struct irq_data *data = irq_get_irq_data(irq);
const struct cpumask *ipimask;
struct irq_domain *domain;
unsigned int nr_irqs;
if (!irq || !data)
return -EINVAL;
domain = data->domain;
if (WARN_ON(domain == NULL))
return -EINVAL;
if (!irq_domain_is_ipi(domain)) {
pr_warn("Trying to destroy a non IPI domain!\n");
return -EINVAL;
}
ipimask = irq_data_get_affinity_mask(data);
if (!ipimask || WARN_ON(!cpumask_subset(dest, ipimask)))
/*
* Must be destroying a subset of the CPUs this IPI was
* set up to target.
*/
return -EINVAL;
if (irq_domain_is_ipi_per_cpu(domain)) {
irq = irq + cpumask_first(dest) - data->common->ipi_offset;
nr_irqs = cpumask_weight(dest);
} else {
nr_irqs = 1;
}
irq_domain_free_irqs(irq, nr_irqs);
return 0;
}
/**
* ipi_get_hwirq - Get the hwirq associated with an IPI to a CPU
* @irq: Linux IRQ number
* @cpu: the target CPU
*
* When dealing with coprocessors IPI, we need to inform the coprocessor of
* the hwirq it needs to use to receive and send IPIs.
*
* Return: hwirq value on success or INVALID_HWIRQ on failure.
*/
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
{
struct irq_data *data = irq_get_irq_data(irq);
const struct cpumask *ipimask;
if (!data || cpu >= nr_cpu_ids)
return INVALID_HWIRQ;
ipimask = irq_data_get_affinity_mask(data);
if (!ipimask || !cpumask_test_cpu(cpu, ipimask))
return INVALID_HWIRQ;
/*
* Get the real hardware irq number if the underlying implementation
* uses a separate irq per cpu. If the underlying implementation uses
* a single hardware irq for all cpus then the IPI send mechanism
* needs to take care of the cpu destinations.
*/
if (irq_domain_is_ipi_per_cpu(data->domain))
data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);
return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
}
EXPORT_SYMBOL_GPL(ipi_get_hwirq);
static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
const struct cpumask *dest, unsigned int cpu)
{
const struct cpumask *ipimask;
if (!chip || !data)
return -EINVAL;
if (!chip->ipi_send_single && !chip->ipi_send_mask)
return -EINVAL;
if (cpu >= nr_cpu_ids)
return -EINVAL;
ipimask = irq_data_get_affinity_mask(data);
if (!ipimask)
return -EINVAL;
if (dest) {
if (!cpumask_subset(dest, ipimask))
return -EINVAL;
} else {
if (!cpumask_test_cpu(cpu, ipimask))
return -EINVAL;
}
return 0;
}
/**
* __ipi_send_single - send an IPI to a target Linux SMP CPU
* @desc: pointer to irq_desc of the IRQ
* @cpu: destination CPU, must be in the destination mask passed to
* irq_reserve_ipi()
*
* This function is for architecture or core code to speed up IPI sending. Not
* usable from driver code.
*
* Return: %0 on success or negative error number on failure.
*/
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(data);
#ifdef DEBUG
/*
* Minimise the overhead by omitting the checks for Linux SMP IPIs.
* Since the callers should be arch or core code which is generally
* trusted, only check for errors when debugging.
*/
if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
return -EINVAL;
#endif
if (!chip->ipi_send_single) {
chip->ipi_send_mask(data, cpumask_of(cpu));
return 0;
}
/* FIXME: Store this information in irqdata flags */
if (irq_domain_is_ipi_per_cpu(data->domain) &&
cpu != data->common->ipi_offset) {
/* use the correct data for that cpu */
unsigned irq = data->irq + cpu - data->common->ipi_offset;
data = irq_get_irq_data(irq);
}
chip->ipi_send_single(data, cpu);
return 0;
}
/**
* __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
* @desc: pointer to irq_desc of the IRQ
* @dest: dest CPU(s), must be a subset of the mask passed to
* irq_reserve_ipi()
*
* This function is for architecture or core code to speed up IPI sending. Not
* usable from driver code.
*
* Return: %0 on success or negative error number on failure.
*/
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(data);
unsigned int cpu;
#ifdef DEBUG
/*
* Minimise the overhead by omitting the checks for Linux SMP IPIs.
* Since the callers should be arch or core code which is generally
* trusted, only check for errors when debugging.
*/
if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
return -EINVAL;
#endif
if (chip->ipi_send_mask) {
chip->ipi_send_mask(data, dest);
return 0;
}
if (irq_domain_is_ipi_per_cpu(data->domain)) {
unsigned int base = data->irq;
for_each_cpu(cpu, dest) {
unsigned irq = base + cpu - data->common->ipi_offset;
data = irq_get_irq_data(irq);
chip->ipi_send_single(data, cpu);
}
} else {
for_each_cpu(cpu, dest)
chip->ipi_send_single(data, cpu);
}
return 0;
}
/**
* ipi_send_single - Send an IPI to a single CPU
* @virq: Linux IRQ number from irq_reserve_ipi()
* @cpu: destination CPU, must be in the destination mask passed to
* irq_reserve_ipi()
*
* Return: %0 on success or negative error number on failure.
*/
int ipi_send_single(unsigned int virq, unsigned int cpu)
{
struct irq_desc *desc = irq_to_desc(virq);
struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;
if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
return -EINVAL;
return __ipi_send_single(desc, cpu);
}
EXPORT_SYMBOL_GPL(ipi_send_single);
/**
* ipi_send_mask - Send an IPI to target CPU(s)
* @virq: Linux IRQ number from irq_reserve_ipi()
* @dest: dest CPU(s), must be a subset of the mask passed to
* irq_reserve_ipi()
*
* Return: %0 on success or negative error number on failure.
*/
int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
{
struct irq_desc *desc = irq_to_desc(virq);
struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;
if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
return -EINVAL;
return __ipi_send_mask(desc, dest);
}
EXPORT_SYMBOL_GPL(ipi_send_mask);
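/*
* Illustrative sketch (not part of this file): reserving an IPI for
* all possible CPUs and sending it to one of them. How the IPI domain
* is obtained is an assumption; real users get it from their
* interrupt controller driver.
*/
#if 0
static int foo_ipi_demo(struct irq_domain *ipi_domain)
{
int virq = irq_reserve_ipi(ipi_domain, cpu_possible_mask);
if (virq < 0)
return virq;
/* Kick CPU 1; it must be part of the reservation mask */
return ipi_send_single(virq, 1);
}
#endif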
| linux-master | kernel/irq/ipi.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Thomas Gleixner <[email protected]>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS))
struct cpumap {
unsigned int available;
unsigned int allocated;
unsigned int managed;
unsigned int managed_allocated;
bool initialized;
bool online;
unsigned long alloc_map[IRQ_MATRIX_SIZE];
unsigned long managed_map[IRQ_MATRIX_SIZE];
};
struct irq_matrix {
unsigned int matrix_bits;
unsigned int alloc_start;
unsigned int alloc_end;
unsigned int alloc_size;
unsigned int global_available;
unsigned int global_reserved;
unsigned int systembits_inalloc;
unsigned int total_allocated;
unsigned int online_maps;
struct cpumap __percpu *maps;
unsigned long scratch_map[IRQ_MATRIX_SIZE];
unsigned long system_map[IRQ_MATRIX_SIZE];
};
#define CREATE_TRACE_POINTS
#include <trace/events/irq_matrix.h>
/**
* irq_alloc_matrix - Allocate a irq_matrix structure and initialize it
* @matrix_bits: Number of matrix bits must be <= IRQ_MATRIX_BITS
* @alloc_start: From which bit the allocation search starts
* @alloc_end: At which bit the allocation search ends, i.e first
* invalid bit
*/
__init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
unsigned int alloc_start,
unsigned int alloc_end)
{
struct irq_matrix *m;
if (matrix_bits > IRQ_MATRIX_BITS)
return NULL;
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return NULL;
m->matrix_bits = matrix_bits;
m->alloc_start = alloc_start;
m->alloc_end = alloc_end;
m->alloc_size = alloc_end - alloc_start;
m->maps = alloc_percpu(*m->maps);
if (!m->maps) {
kfree(m);
return NULL;
}
return m;
}
/**
* irq_matrix_online - Bring the local CPU matrix online
* @m: Matrix pointer
*/
void irq_matrix_online(struct irq_matrix *m)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
BUG_ON(cm->online);
if (!cm->initialized) {
cm->available = m->alloc_size;
cm->available -= cm->managed + m->systembits_inalloc;
cm->initialized = true;
}
m->global_available += cm->available;
cm->online = true;
m->online_maps++;
trace_irq_matrix_online(m);
}
/**
* irq_matrix_offline - Bring the local CPU matrix offline
* @m: Matrix pointer
*/
void irq_matrix_offline(struct irq_matrix *m)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
/* Update the global available size */
m->global_available -= cm->available;
cm->online = false;
m->online_maps--;
trace_irq_matrix_offline(m);
}
static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
unsigned int num, bool managed)
{
unsigned int area, start = m->alloc_start;
unsigned int end = m->alloc_end;
bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
if (area >= end)
return area;
if (managed)
bitmap_set(cm->managed_map, area, num);
else
bitmap_set(cm->alloc_map, area, num);
return area;
}
/* Find the best CPU which has the lowest vector allocation count */
static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
const struct cpumask *msk)
{
unsigned int cpu, best_cpu, maxavl = 0;
struct cpumap *cm;
best_cpu = UINT_MAX;
for_each_cpu(cpu, msk) {
cm = per_cpu_ptr(m->maps, cpu);
if (!cm->online || cm->available <= maxavl)
continue;
best_cpu = cpu;
maxavl = cm->available;
}
return best_cpu;
}
/* Find the best CPU which has the lowest number of managed IRQs allocated */
static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
const struct cpumask *msk)
{
unsigned int cpu, best_cpu, allocated = UINT_MAX;
struct cpumap *cm;
best_cpu = UINT_MAX;
for_each_cpu(cpu, msk) {
cm = per_cpu_ptr(m->maps, cpu);
if (!cm->online || cm->managed_allocated > allocated)
continue;
best_cpu = cpu;
allocated = cm->managed_allocated;
}
return best_cpu;
}
/**
* irq_matrix_assign_system - Assign system wide entry in the matrix
* @m: Matrix pointer
* @bit: Which bit to reserve
* @replace: Replace an already allocated vector with a system
* vector at the same bit position.
*
* The BUG_ON()s below are on purpose. If this goes wrong in the
* early boot process, then the chance to survive is about zero.
* If this happens when the system is live, it's not much better.
*/
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
bool replace)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
BUG_ON(bit > m->matrix_bits);
BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));
set_bit(bit, m->system_map);
if (replace) {
BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
cm->allocated--;
m->total_allocated--;
}
if (bit >= m->alloc_start && bit < m->alloc_end)
m->systembits_inalloc++;
trace_irq_matrix_assign_system(bit, m);
}
/**
* irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
* @m: Matrix pointer
* @msk: On which CPUs the bits should be reserved.
*
* Can be called for offline CPUs. Note, this will only reserve one bit
* on all CPUs in @msk, but it's not guaranteed that the bits are at the
* same offset on all CPUs.
*/
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
unsigned int cpu, failed_cpu;
for_each_cpu(cpu, msk) {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
unsigned int bit;
bit = matrix_alloc_area(m, cm, 1, true);
if (bit >= m->alloc_end)
goto cleanup;
cm->managed++;
if (cm->online) {
cm->available--;
m->global_available--;
}
trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
}
return 0;
cleanup:
failed_cpu = cpu;
for_each_cpu(cpu, msk) {
if (cpu == failed_cpu)
break;
irq_matrix_remove_managed(m, cpumask_of(cpu));
}
return -ENOSPC;
}
/**
* irq_matrix_remove_managed - Remove managed interrupts in a CPU map
* @m: Matrix pointer
* @msk: On which CPUs the bits should be removed
*
* Can be called for offline CPUs
*
* This removes unallocated managed interrupts from the map. It does
* not matter which one, because managed interrupts free their
* allocation when they shut down. If they don't, the accounting is
* broken, but all that can be done at this point is to warn about it.
*/
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
unsigned int cpu;
for_each_cpu(cpu, msk) {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
unsigned int bit, end = m->alloc_end;
if (WARN_ON_ONCE(!cm->managed))
continue;
/* Get a managed bit which is not allocated */
bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
bit = find_first_bit(m->scratch_map, end);
if (WARN_ON_ONCE(bit >= end))
continue;
clear_bit(bit, cm->managed_map);
cm->managed--;
if (cm->online) {
cm->available++;
m->global_available++;
}
trace_irq_matrix_remove_managed(bit, cpu, m, cm);
}
}
/**
* irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
* @m: Matrix pointer
* @msk: Which CPUs to search in
* @mapped_cpu: Pointer to store the CPU for which the irq was allocated
*/
int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
unsigned int *mapped_cpu)
{
unsigned int bit, cpu, end;
struct cpumap *cm;
if (cpumask_empty(msk))
return -EINVAL;
cpu = matrix_find_best_cpu_managed(m, msk);
if (cpu == UINT_MAX)
return -ENOSPC;
cm = per_cpu_ptr(m->maps, cpu);
end = m->alloc_end;
/* Get a managed bit which is not allocated */
bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
bit = find_first_bit(m->scratch_map, end);
if (bit >= end)
return -ENOSPC;
set_bit(bit, cm->alloc_map);
cm->allocated++;
cm->managed_allocated++;
m->total_allocated++;
*mapped_cpu = cpu;
trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
return bit;
}
/**
* irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
* @m: Matrix pointer
* @bit: Which bit to mark
*
* This should only be used to mark preallocated vectors
*/
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
return;
if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
return;
cm->allocated++;
m->total_allocated++;
cm->available--;
m->global_available--;
trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
}
/**
* irq_matrix_reserve - Reserve interrupts
* @m: Matrix pointer
*
* This is merely a bookkeeping call. It increments the number of globally
* reserved interrupt bits without actually allocating them. This allows
* interrupt descriptors to be set up without assigning low-level resources
* to them. The actual allocation happens when the interrupt gets activated.
*/
void irq_matrix_reserve(struct irq_matrix *m)
{
if (m->global_reserved == m->global_available)
pr_warn("Interrupt reservation exceeds available resources\n");
m->global_reserved++;
trace_irq_matrix_reserve(m);
}
/**
* irq_matrix_remove_reserved - Remove interrupt reservation
* @m: Matrix pointer
*
* This is merely a bookkeeping call. It decrements the number of globally
* reserved interrupt bits. This is used to undo irq_matrix_reserve() when the
* interrupt was never in use and a real vector allocated, which undid the
* reservation.
*/
void irq_matrix_remove_reserved(struct irq_matrix *m)
{
m->global_reserved--;
trace_irq_matrix_remove_reserved(m);
}
/**
* irq_matrix_alloc - Allocate a regular interrupt in a CPU map
* @m: Matrix pointer
* @msk: Which CPUs to search in
* @reserved: Allocate previously reserved interrupts
* @mapped_cpu: Pointer to store the CPU for which the irq was allocated
*/
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
bool reserved, unsigned int *mapped_cpu)
{
unsigned int cpu, bit;
struct cpumap *cm;
/*
* Not required in theory, but matrix_find_best_cpu() uses
* for_each_cpu() which ignores the cpumask on UP.
*/
if (cpumask_empty(msk))
return -EINVAL;
cpu = matrix_find_best_cpu(m, msk);
if (cpu == UINT_MAX)
return -ENOSPC;
cm = per_cpu_ptr(m->maps, cpu);
bit = matrix_alloc_area(m, cm, 1, false);
if (bit >= m->alloc_end)
return -ENOSPC;
cm->allocated++;
cm->available--;
m->total_allocated++;
m->global_available--;
if (reserved)
m->global_reserved--;
*mapped_cpu = cpu;
trace_irq_matrix_alloc(bit, cpu, m, cm);
return bit;
}
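/*
* Illustrative sketch (not part of this file): the x86 vector
* allocator is the main user of this API. A minimal allocation/free
* cycle, assuming a matrix created with irq_alloc_matrix(), looks
* like this.
*/
#if 0
static int foo_matrix_demo(struct irq_matrix *m)
{
unsigned int cpu;
int bit;
bit = irq_matrix_alloc(m, cpu_online_mask, false, &cpu);
if (bit < 0)
return bit;
/* ... program vector 'bit' on CPU 'cpu' ... */
irq_matrix_free(m, cpu, bit, false);
return 0;
}
#endif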
/**
* irq_matrix_free - Free allocated interrupt in the matrix
* @m: Matrix pointer
* @cpu: Which CPU map needs be updated
* @bit: The bit to remove
* @managed: If true, the interrupt is managed and not accounted
* as available.
*/
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
unsigned int bit, bool managed)
{
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
return;
if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
return;
cm->allocated--;
if (managed)
cm->managed_allocated--;
if (cm->online)
m->total_allocated--;
if (!managed) {
cm->available++;
if (cm->online)
m->global_available++;
}
trace_irq_matrix_free(bit, cpu, m, cm);
}
/**
* irq_matrix_available - Get the number of globally available irqs
* @m: Pointer to the matrix to query
* @cpudown: If true, the local CPU is about to go down, adjust
* the number of available irqs accordingly
*/
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
if (!cpudown)
return m->global_available;
return m->global_available - cm->available;
}
/**
* irq_matrix_reserved - Get the number of globally reserved irqs
* @m: Pointer to the matrix to query
*/
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
return m->global_reserved;
}
/**
* irq_matrix_allocated - Get the number of allocated irqs on the local cpu
* @m: Pointer to the matrix to search
*
* This returns number of allocated irqs
*/
unsigned int irq_matrix_allocated(struct irq_matrix *m)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
return cm->allocated;
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
* irq_matrix_debug_show - Show detailed allocation information
* @sf: Pointer to the seq_file to print to
* @m: Pointer to the matrix allocator
* @ind: Indentation for the print format
*
* Note, this is a lockless snapshot.
*/
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
int cpu;
seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps);
seq_printf(sf, "Global available: %6u\n", m->global_available);
seq_printf(sf, "Global reserved: %6u\n", m->global_reserved);
seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
m->system_map);
seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
cpus_read_lock();
for_each_online_cpu(cpu) {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
seq_printf(sf, "%*s %4d %4u %4u %4u %4u %*pbl\n", ind, " ",
cpu, cm->available, cm->managed,
cm->managed_allocated, cm->allocated,
m->matrix_bits, cm->alloc_map);
}
cpus_read_unlock();
}
#endif
| linux-master | kernel/irq/matrix.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/irq.h>
#include <linux/interrupt.h>
#include "internals.h"
/**
* irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
* @desc: Interrupt descriptor to clean up
* @force_clear: If set clear the move pending bit unconditionally.
* If not set, clear it only when the dying CPU is the
* last one in the pending mask.
*
* Returns true if the pending bit was set and the pending mask contains an
* online CPU other than the dying CPU.
*/
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
if (!irqd_is_setaffinity_pending(data))
return false;
/*
* The outgoing CPU might be the last online target in a pending
* interrupt move. If that's the case clear the pending move bit.
*/
if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
irqd_clr_move_pending(data);
return false;
}
if (force_clear)
irqd_clr_move_pending(data);
return true;
}
void irq_move_masked_irq(struct irq_data *idata)
{
struct irq_desc *desc = irq_data_to_desc(idata);
struct irq_data *data = &desc->irq_data;
struct irq_chip *chip = data->chip;
if (likely(!irqd_is_setaffinity_pending(data)))
return;
irqd_clr_move_pending(data);
/*
* Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
*/
if (irqd_is_per_cpu(data)) {
WARN_ON(1);
return;
}
if (unlikely(cpumask_empty(desc->pending_mask)))
return;
if (!chip->irq_set_affinity)
return;
assert_raw_spin_locked(&desc->lock);
/*
* If there is a valid mask to work with, do the
* disable, re-program, enable sequence.
* This is *not* particularly important for level-triggered
* interrupts, but in an edge-triggered case we might be
* setting the RTE while an active trigger is coming in.
* This could cause some ioapics to malfunction.
* Being paranoid, I guess!
*
* For correct operation this depends on the caller
* masking the irqs.
*/
if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
int ret;
ret = irq_do_set_affinity(data, desc->pending_mask, false);
/*
* If there is a cleanup pending in the underlying
* vector management, reschedule the move for the next
* interrupt. Leave desc->pending_mask intact.
*/
if (ret == -EBUSY) {
irqd_set_move_pending(data);
return;
}
}
cpumask_clear(desc->pending_mask);
}
void __irq_move_irq(struct irq_data *idata)
{
bool masked;
/*
* Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
* and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
* disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
*/
idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
if (unlikely(irqd_irq_disabled(idata)))
return;
/*
* Be careful vs. already masked interrupts. If this is a
* threaded interrupt with ONESHOT set, we can end up with an
* interrupt storm.
*/
masked = irqd_irq_masked(idata);
if (!masked)
idata->chip->irq_mask(idata);
irq_move_masked_irq(idata);
if (!masked)
idata->chip->irq_unmask(idata);
}
| linux-master | kernel/irq/migration.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 Thomas Gleixner.
* Copyright (C) 2016-2017 Christoph Hellwig.
*/
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>
static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
{
affd->nr_sets = 1;
affd->set_size[0] = affvecs;
}
/**
* irq_create_affinity_masks - Create affinity masks for multiqueue spreading
* @nvecs: The total number of vectors
* @affd: Description of the affinity requirements
*
* Returns the irq_affinity_desc pointer or NULL if allocation failed.
*/
struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
unsigned int affvecs, curvec, usedvecs, i;
struct irq_affinity_desc *masks = NULL;
/*
* Determine the number of vectors which need interrupt affinities
* assigned. If the pre/post request exhausts the available vectors
* then nothing to do here except for invoking the calc_sets()
* callback so the device driver can adjust to the situation.
*/
if (nvecs > affd->pre_vectors + affd->post_vectors)
affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
else
affvecs = 0;
/*
* Simple invocations do not provide a calc_sets() callback. Install
* the generic one.
*/
if (!affd->calc_sets)
affd->calc_sets = default_calc_sets;
/* Recalculate the sets */
affd->calc_sets(affd, affvecs);
if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
return NULL;
/* Nothing to assign? */
if (!affvecs)
return NULL;
masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
if (!masks)
return NULL;
/* Fill out vectors at the beginning that don't need affinity */
for (curvec = 0; curvec < affd->pre_vectors; curvec++)
cpumask_copy(&masks[curvec].mask, irq_default_affinity);
/*
* Spread on present CPUs starting from affd->pre_vectors. If we
* have multiple sets, build each sets affinity mask separately.
*/
for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
unsigned int this_vecs = affd->set_size[i];
int j;
struct cpumask *result = group_cpus_evenly(this_vecs);
if (!result) {
kfree(masks);
return NULL;
}
for (j = 0; j < this_vecs; j++)
cpumask_copy(&masks[curvec + j].mask, &result[j]);
kfree(result);
curvec += this_vecs;
usedvecs += this_vecs;
}
/* Fill out vectors at the end that don't need affinity */
if (usedvecs >= affvecs)
curvec = affd->pre_vectors + affvecs;
else
curvec = affd->pre_vectors + usedvecs;
for (; curvec < nvecs; curvec++)
cpumask_copy(&masks[curvec].mask, irq_default_affinity);
/* Mark the managed interrupts */
for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
masks[i].is_managed = 1;
return masks;
}
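/*
* Illustrative sketch (not part of this file): a multiqueue driver
* describing its vector layout. One pre vector (e.g. for admin
* events) keeps the default affinity, the rest are spread across
* present CPUs. The vector split is an assumption for the example.
*/
#if 0
static struct irq_affinity_desc *foo_spread(unsigned int nvecs)
{
struct irq_affinity affd = {
.pre_vectors = 1, /* admin vector, default affinity */
.post_vectors = 0,
};
return irq_create_affinity_masks(nvecs, &affd);
}
#endif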
/**
* irq_calc_affinity_vectors - Calculate the optimal number of vectors
* @minvec: The minimum number of vectors available
* @maxvec: The maximum number of vectors available
* @affd: Description of the affinity requirements
*/
unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
const struct irq_affinity *affd)
{
unsigned int resv = affd->pre_vectors + affd->post_vectors;
unsigned int set_vecs;
if (resv > minvec)
return 0;
if (affd->calc_sets) {
set_vecs = maxvec - resv;
} else {
cpus_read_lock();
set_vecs = cpumask_weight(cpu_possible_mask);
cpus_read_unlock();
}
return resv + min(set_vecs, maxvec - resv);
}
| linux-master | kernel/irq/affinity.c |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Authors: Waiman Long <[email protected]>
*/
/*
* Collect locking event counts
*/
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>
#include "lock_events.h"
#undef LOCK_EVENT
#define LOCK_EVENT(name) [LOCKEVENT_ ## name] = #name,
#define LOCK_EVENTS_DIR "lock_event_counts"
/*
* When CONFIG_LOCK_EVENT_COUNTS is enabled, event counts of different
* types of locks will be reported under the <debugfs>/lock_event_counts/
* directory. See lock_events_list.h for the list of available locking
* events.
*
* Writing to the special ".reset_counts" file will reset all the above
* locking event counts. This is a very slow operation and so should not
* be done frequently.
*
* These event counts are implemented as per-cpu variables which are
* summed and computed whenever the corresponding debugfs files are read. This
* minimizes added overhead, making the counts usable even in a production
* environment.
*/
static const char * const lockevent_names[lockevent_num + 1] = {
#include "lock_events_list.h"
[LOCKEVENT_reset_cnts] = ".reset_counts",
};
/*
* Per-cpu counts
*/
DEFINE_PER_CPU(unsigned long, lockevents[lockevent_num]);
/*
* The lockevent_read() function can be overridden.
*/
ssize_t __weak lockevent_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
char buf[64];
int cpu, id, len;
u64 sum = 0;
/*
* Get the counter ID stored in file->f_inode->i_private
*/
id = (long)file_inode(file)->i_private;
if (id >= lockevent_num)
return -EBADF;
for_each_possible_cpu(cpu)
sum += per_cpu(lockevents[id], cpu);
len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/*
* Function to handle write request
*
* When idx = reset_cnts, reset all the counts.
*/
static ssize_t lockevent_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
int cpu;
/*
* Get the counter ID stored in file->f_inode->i_private
*/
if ((long)file_inode(file)->i_private != LOCKEVENT_reset_cnts)
return count;
for_each_possible_cpu(cpu) {
int i;
unsigned long *ptr = per_cpu_ptr(lockevents, cpu);
for (i = 0 ; i < lockevent_num; i++)
WRITE_ONCE(ptr[i], 0);
}
return count;
}
/*
* Debugfs data structures
*/
static const struct file_operations fops_lockevent = {
.read = lockevent_read,
.write = lockevent_write,
.llseek = default_llseek,
};
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#include <asm/paravirt.h>
static bool __init skip_lockevent(const char *name)
{
static int pv_on __initdata = -1;
if (pv_on < 0)
pv_on = !pv_is_native_spin_unlock();
/*
* Skip PV qspinlock events on bare metal.
*/
if (!pv_on && !memcmp(name, "pv_", 3))
return true;
return false;
}
#else
static inline bool skip_lockevent(const char *name)
{
return false;
}
#endif
/*
* Initialize debugfs for the locking event counts.
*/
static int __init init_lockevent_counts(void)
{
struct dentry *d_counts = debugfs_create_dir(LOCK_EVENTS_DIR, NULL);
int i;
if (!d_counts)
goto out;
/*
* Create the debugfs files
*
* As reading from and writing to the stat files can be slow, only
* root is allowed to do the read/write to limit impact to system
* performance.
*/
for (i = 0; i < lockevent_num; i++) {
if (skip_lockevent(lockevent_names[i]))
continue;
if (!debugfs_create_file(lockevent_names[i], 0400, d_counts,
(void *)(long)i, &fops_lockevent))
goto fail_undo;
}
if (!debugfs_create_file(lockevent_names[LOCKEVENT_reset_cnts], 0200,
d_counts, (void *)(long)LOCKEVENT_reset_cnts,
&fops_lockevent))
goto fail_undo;
return 0;
fail_undo:
debugfs_remove_recursive(d_counts);
out:
pr_warn("Could not create '%s' debugfs entries\n", LOCK_EVENTS_DIR);
return -ENOMEM;
}
fs_initcall(init_lockevent_counts);
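/*
* Illustrative sketch (not part of this file): reading one event count
* from userspace. The event name is an assumption; the available names
* come from lock_events_list.h. Run as root, e.g. after mounting
* debugfs at /sys/kernel/debug.
*/
#if 0
#include <stdio.h>
int main(void)
{
unsigned long long sum;
FILE *f = fopen("/sys/kernel/debug/lock_event_counts/rwsem_sleep_reader", "r");
if (!f)
return 1;
/* The kernel side sums the per-cpu counters on each read */
if (fscanf(f, "%llu", &sum) == 1)
printf("rwsem_sleep_reader: %llu\n", sum);
fclose(f);
return 0;
}
#endif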
| linux-master | kernel/locking/lock_events.c |
/*
* Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*
* This file contains the spinlock/rwlock implementations for
* DEBUG_SPINLOCK.
*/
#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key, short inner)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
#endif
lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__raw_spin_lock_init);
#ifndef CONFIG_PREEMPT_RT
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
#endif
lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
lock->magic = RWLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__rwlock_init);
#endif
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
struct task_struct *owner = READ_ONCE(lock->owner);
if (owner == SPINLOCK_OWNER_INIT)
owner = NULL;
printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
msg, raw_smp_processor_id(),
current->comm, task_pid_nr(current));
printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
".owner_cpu: %d\n",
lock, READ_ONCE(lock->magic),
owner ? owner->comm : "<none>",
owner ? task_pid_nr(owner) : -1,
READ_ONCE(lock->owner_cpu));
dump_stack();
}
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
if (!debug_locks_off())
return;
spin_dump(lock, msg);
}
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
lock, "cpu recursion");
}
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
WRITE_ONCE(lock->owner, current);
}
static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
WRITE_ONCE(lock->owner_cpu, -1);
}
/*
* We are now relying on the NMI watchdog to detect lockups instead of doing
* the detection here with an unfair lock which can cause problems of its own.
*/
void do_raw_spin_lock(raw_spinlock_t *lock)
{
debug_spin_lock_before(lock);
arch_spin_lock(&lock->raw_lock);
mmiowb_spin_lock();
debug_spin_lock_after(lock);
}
int do_raw_spin_trylock(raw_spinlock_t *lock)
{
int ret = arch_spin_trylock(&lock->raw_lock);
if (ret) {
mmiowb_spin_lock();
debug_spin_lock_after(lock);
}
#ifndef CONFIG_SMP
/*
* Must not happen on UP:
*/
SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
return ret;
}
void do_raw_spin_unlock(raw_spinlock_t *lock)
{
mmiowb_spin_unlock();
debug_spin_unlock(lock);
arch_spin_unlock(&lock->raw_lock);
}
#ifndef CONFIG_PREEMPT_RT
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
return;
printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
msg, raw_smp_processor_id(), current->comm,
task_pid_nr(current), lock);
dump_stack();
}
#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
void do_raw_read_lock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
arch_read_lock(&lock->raw_lock);
}
int do_raw_read_trylock(rwlock_t *lock)
{
int ret = arch_read_trylock(&lock->raw_lock);
#ifndef CONFIG_SMP
/*
* Must not happen on UP:
*/
RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
return ret;
}
void do_raw_read_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
arch_read_unlock(&lock->raw_lock);
}
static inline void debug_write_lock_before(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
lock, "cpu recursion");
}
static inline void debug_write_lock_after(rwlock_t *lock)
{
WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
WRITE_ONCE(lock->owner, current);
}
static inline void debug_write_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
WRITE_ONCE(lock->owner_cpu, -1);
}
void do_raw_write_lock(rwlock_t *lock)
{
debug_write_lock_before(lock);
arch_write_lock(&lock->raw_lock);
debug_write_lock_after(lock);
}
int do_raw_write_trylock(rwlock_t *lock)
{
int ret = arch_write_trylock(&lock->raw_lock);
if (ret)
debug_write_lock_after(lock);
#ifndef CONFIG_SMP
/*
* Must not happen on UP:
*/
RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
return ret;
}
void do_raw_write_unlock(rwlock_t *lock)
{
debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock);
}
#endif /* !CONFIG_PREEMPT_RT */
| linux-master | kernel/locking/spinlock_debug.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Module-based API test facility for ww_mutexes
*/
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>
static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
ww_acquire_init((a), (b)); \
(a)->deadlock_inject_countdown = ~0U; \
} while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif
struct test_mutex {
struct work_struct work;
struct ww_mutex mutex;
struct completion ready, go, done;
unsigned int flags;
};
#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)
static void test_mutex_work(struct work_struct *work)
{
struct test_mutex *mtx = container_of(work, typeof(*mtx), work);
complete(&mtx->ready);
wait_for_completion(&mtx->go);
if (mtx->flags & TEST_MTX_TRY) {
while (!ww_mutex_trylock(&mtx->mutex, NULL))
cond_resched();
} else {
ww_mutex_lock(&mtx->mutex, NULL);
}
complete(&mtx->done);
ww_mutex_unlock(&mtx->mutex);
}
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
struct test_mutex mtx;
struct ww_acquire_ctx ctx;
int ret;
ww_mutex_init(&mtx.mutex, &ww_class);
ww_acquire_init(&ctx, &ww_class);
INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
init_completion(&mtx.ready);
init_completion(&mtx.go);
init_completion(&mtx.done);
mtx.flags = flags;
schedule_work(&mtx.work);
wait_for_completion(&mtx.ready);
ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
complete(&mtx.go);
if (flags & TEST_MTX_SPIN) {
unsigned long timeout = jiffies + TIMEOUT;
ret = 0;
do {
if (completion_done(&mtx.done)) {
ret = -EINVAL;
break;
}
cond_resched();
} while (time_before(jiffies, timeout));
} else {
ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
}
ww_mutex_unlock(&mtx.mutex);
ww_acquire_fini(&ctx);
if (ret) {
pr_err("%s(flags=%x): mutual exclusion failure\n",
__func__, flags);
ret = -EINVAL;
}
flush_work(&mtx.work);
destroy_work_on_stack(&mtx.work);
return ret;
#undef TIMEOUT
}
static int test_mutex(void)
{
int ret;
int i;
for (i = 0; i < __TEST_MTX_LAST; i++) {
ret = __test_mutex(i);
if (ret)
return ret;
}
return 0;
}
static int test_aa(bool trylock)
{
struct ww_mutex mutex;
struct ww_acquire_ctx ctx;
int ret;
const char *from = trylock ? "trylock" : "lock";
ww_mutex_init(&mutex, &ww_class);
ww_acquire_init(&ctx, &ww_class);
if (!trylock) {
ret = ww_mutex_lock(&mutex, &ctx);
if (ret) {
pr_err("%s: initial lock failed!\n", __func__);
goto out;
}
} else {
ret = !ww_mutex_trylock(&mutex, &ctx);
if (ret) {
pr_err("%s: initial trylock failed!\n", __func__);
goto out;
}
}
if (ww_mutex_trylock(&mutex, NULL)) {
pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
ww_mutex_unlock(&mutex);
ret = -EINVAL;
goto out;
}
if (ww_mutex_trylock(&mutex, &ctx)) {
pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
ww_mutex_unlock(&mutex);
ret = -EINVAL;
goto out;
}
ret = ww_mutex_lock(&mutex, &ctx);
if (ret != -EALREADY) {
pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
__func__, ret, from);
if (!ret)
ww_mutex_unlock(&mutex);
ret = -EINVAL;
goto out;
}
ww_mutex_unlock(&mutex);
ret = 0;
out:
ww_acquire_fini(&ctx);
return ret;
}
struct test_abba {
struct work_struct work;
struct ww_mutex a_mutex;
struct ww_mutex b_mutex;
struct completion a_ready;
struct completion b_ready;
bool resolve, trylock;
int result;
};
static void test_abba_work(struct work_struct *work)
{
struct test_abba *abba = container_of(work, typeof(*abba), work);
struct ww_acquire_ctx ctx;
int err;
ww_acquire_init_noinject(&ctx, &ww_class);
if (!abba->trylock)
ww_mutex_lock(&abba->b_mutex, &ctx);
else
WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));
WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);
complete(&abba->b_ready);
wait_for_completion(&abba->a_ready);
err = ww_mutex_lock(&abba->a_mutex, &ctx);
if (abba->resolve && err == -EDEADLK) {
ww_mutex_unlock(&abba->b_mutex);
ww_mutex_lock_slow(&abba->a_mutex, &ctx);
err = ww_mutex_lock(&abba->b_mutex, &ctx);
}
if (!err)
ww_mutex_unlock(&abba->a_mutex);
ww_mutex_unlock(&abba->b_mutex);
ww_acquire_fini(&ctx);
abba->result = err;
}
static int test_abba(bool trylock, bool resolve)
{
struct test_abba abba;
struct ww_acquire_ctx ctx;
int err, ret;
ww_mutex_init(&abba.a_mutex, &ww_class);
ww_mutex_init(&abba.b_mutex, &ww_class);
INIT_WORK_ONSTACK(&abba.work, test_abba_work);
init_completion(&abba.a_ready);
init_completion(&abba.b_ready);
abba.trylock = trylock;
abba.resolve = resolve;
schedule_work(&abba.work);
ww_acquire_init_noinject(&ctx, &ww_class);
if (!trylock)
ww_mutex_lock(&abba.a_mutex, &ctx);
else
WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));
WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);
complete(&abba.a_ready);
wait_for_completion(&abba.b_ready);
err = ww_mutex_lock(&abba.b_mutex, &ctx);
if (resolve && err == -EDEADLK) {
ww_mutex_unlock(&abba.a_mutex);
ww_mutex_lock_slow(&abba.b_mutex, &ctx);
err = ww_mutex_lock(&abba.a_mutex, &ctx);
}
if (!err)
ww_mutex_unlock(&abba.b_mutex);
ww_mutex_unlock(&abba.a_mutex);
ww_acquire_fini(&ctx);
flush_work(&abba.work);
destroy_work_on_stack(&abba.work);
ret = 0;
if (resolve) {
if (err || abba.result) {
pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
__func__, err, abba.result);
ret = -EINVAL;
}
} else {
if (err != -EDEADLK && abba.result != -EDEADLK) {
pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
__func__, err, abba.result);
ret = -EINVAL;
}
}
return ret;
}
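/*
 * Distilled sketch of the backoff pattern that test_abba() above
 * exercises; it is for reference only and is not called by the
 * selftest. The ww_mutex calls are the real API, "demo_lock_two" is an
 * invented name, and @ctx is assumed to hold no other locks on entry.
 */
static int demo_lock_two(struct ww_mutex *m1, struct ww_mutex *m2,
			 struct ww_acquire_ctx *ctx)
{
	int err;

	err = ww_mutex_lock(m1, ctx);
	if (err)
		return err;

	err = ww_mutex_lock(m2, ctx);
	if (err == -EDEADLK) {
		/* Back off, then sleep waiting for the contended lock. */
		ww_mutex_unlock(m1);
		ww_mutex_lock_slow(m2, ctx);
		err = ww_mutex_lock(m1, ctx);
		if (err) {
			ww_mutex_unlock(m2);
			return err;
		}
	} else if (err) {
		ww_mutex_unlock(m1);
		return err;
	}
	return 0;	/* caller unlocks both and calls ww_acquire_fini() */
}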
struct test_cycle {
struct work_struct work;
struct ww_mutex a_mutex;
struct ww_mutex *b_mutex;
struct completion *a_signal;
struct completion b_signal;
int result;
};
static void test_cycle_work(struct work_struct *work)
{
struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
struct ww_acquire_ctx ctx;
int err, erra = 0;
ww_acquire_init_noinject(&ctx, &ww_class);
ww_mutex_lock(&cycle->a_mutex, &ctx);
complete(cycle->a_signal);
wait_for_completion(&cycle->b_signal);
err = ww_mutex_lock(cycle->b_mutex, &ctx);
if (err == -EDEADLK) {
err = 0;
ww_mutex_unlock(&cycle->a_mutex);
ww_mutex_lock_slow(cycle->b_mutex, &ctx);
erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
}
if (!err)
ww_mutex_unlock(cycle->b_mutex);
if (!erra)
ww_mutex_unlock(&cycle->a_mutex);
ww_acquire_fini(&ctx);
cycle->result = err ?: erra;
}
static int __test_cycle(unsigned int nthreads)
{
struct test_cycle *cycles;
unsigned int n, last = nthreads - 1;
int ret;
cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
if (!cycles)
return -ENOMEM;
for (n = 0; n < nthreads; n++) {
struct test_cycle *cycle = &cycles[n];
ww_mutex_init(&cycle->a_mutex, &ww_class);
if (n == last)
cycle->b_mutex = &cycles[0].a_mutex;
else
cycle->b_mutex = &cycles[n + 1].a_mutex;
if (n == 0)
cycle->a_signal = &cycles[last].b_signal;
else
cycle->a_signal = &cycles[n - 1].b_signal;
init_completion(&cycle->b_signal);
INIT_WORK(&cycle->work, test_cycle_work);
cycle->result = 0;
}
for (n = 0; n < nthreads; n++)
queue_work(wq, &cycles[n].work);
flush_workqueue(wq);
ret = 0;
for (n = 0; n < nthreads; n++) {
struct test_cycle *cycle = &cycles[n];
if (!cycle->result)
continue;
pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
n, nthreads, cycle->result);
ret = -EINVAL;
break;
}
for (n = 0; n < nthreads; n++)
ww_mutex_destroy(&cycles[n].a_mutex);
kfree(cycles);
return ret;
}
static int test_cycle(unsigned int ncpus)
{
unsigned int n;
int ret;
for (n = 2; n <= ncpus + 1; n++) {
ret = __test_cycle(n);
if (ret)
return ret;
}
return 0;
}
struct stress {
struct work_struct work;
struct ww_mutex *locks;
unsigned long timeout;
int nlocks;
};
static int *get_random_order(int count)
{
int *order;
int n, r, tmp;
order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
if (!order)
return order;
for (n = 0; n < count; n++)
order[n] = n;
for (n = count - 1; n > 1; n--) {
r = get_random_u32_below(n + 1);
if (r != n) {
tmp = order[n];
order[n] = order[r];
order[r] = tmp;
}
}
return order;
}
static void dummy_load(struct stress *stress)
{
usleep_range(1000, 2000);
}
static void stress_inorder_work(struct work_struct *work)
{
struct stress *stress = container_of(work, typeof(*stress), work);
const int nlocks = stress->nlocks;
struct ww_mutex *locks = stress->locks;
struct ww_acquire_ctx ctx;
int *order;
order = get_random_order(nlocks);
if (!order)
return;
do {
int contended = -1;
int n, err;
ww_acquire_init(&ctx, &ww_class);
retry:
err = 0;
for (n = 0; n < nlocks; n++) {
if (n == contended)
continue;
err = ww_mutex_lock(&locks[order[n]], &ctx);
if (err < 0)
break;
}
if (!err)
dummy_load(stress);
if (contended > n)
ww_mutex_unlock(&locks[order[contended]]);
contended = n;
while (n--)
ww_mutex_unlock(&locks[order[n]]);
if (err == -EDEADLK) {
ww_mutex_lock_slow(&locks[order[contended]], &ctx);
goto retry;
}
if (err) {
pr_err_once("stress (%s) failed with %d\n",
__func__, err);
break;
}
ww_acquire_fini(&ctx);
} while (!time_after(jiffies, stress->timeout));
kfree(order);
kfree(stress);
}
struct reorder_lock {
struct list_head link;
struct ww_mutex *lock;
};
static void stress_reorder_work(struct work_struct *work)
{
struct stress *stress = container_of(work, typeof(*stress), work);
LIST_HEAD(locks);
struct ww_acquire_ctx ctx;
struct reorder_lock *ll, *ln;
int *order;
int n, err;
order = get_random_order(stress->nlocks);
if (!order)
return;
for (n = 0; n < stress->nlocks; n++) {
ll = kmalloc(sizeof(*ll), GFP_KERNEL);
if (!ll)
goto out;
ll->lock = &stress->locks[order[n]];
list_add(&ll->link, &locks);
}
kfree(order);
order = NULL;
do {
ww_acquire_init(&ctx, &ww_class);
list_for_each_entry(ll, &locks, link) {
err = ww_mutex_lock(ll->lock, &ctx);
if (!err)
continue;
ln = ll;
list_for_each_entry_continue_reverse(ln, &locks, link)
ww_mutex_unlock(ln->lock);
if (err != -EDEADLK) {
pr_err_once("stress (%s) failed with %d\n",
__func__, err);
break;
}
ww_mutex_lock_slow(ll->lock, &ctx);
list_move(&ll->link, &locks); /* restarts iteration */
}
dummy_load(stress);
list_for_each_entry(ll, &locks, link)
ww_mutex_unlock(ll->lock);
ww_acquire_fini(&ctx);
} while (!time_after(jiffies, stress->timeout));
out:
list_for_each_entry_safe(ll, ln, &locks, link)
kfree(ll);
kfree(order);
kfree(stress);
}
static void stress_one_work(struct work_struct *work)
{
struct stress *stress = container_of(work, typeof(*stress), work);
const int nlocks = stress->nlocks;
struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
int err;
do {
err = ww_mutex_lock(lock, NULL);
if (!err) {
dummy_load(stress);
ww_mutex_unlock(lock);
} else {
pr_err_once("stress (%s) failed with %d\n",
__func__, err);
break;
}
} while (!time_after(jiffies, stress->timeout));
kfree(stress);
}
#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)
static int stress(int nlocks, int nthreads, unsigned int flags)
{
struct ww_mutex *locks;
int n;
locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
if (!locks)
return -ENOMEM;
for (n = 0; n < nlocks; n++)
ww_mutex_init(&locks[n], &ww_class);
for (n = 0; nthreads; n++) {
struct stress *stress;
void (*fn)(struct work_struct *work);
fn = NULL;
switch (n & 3) {
case 0:
if (flags & STRESS_INORDER)
fn = stress_inorder_work;
break;
case 1:
if (flags & STRESS_REORDER)
fn = stress_reorder_work;
break;
case 2:
if (flags & STRESS_ONE)
fn = stress_one_work;
break;
}
if (!fn)
continue;
stress = kmalloc(sizeof(*stress), GFP_KERNEL);
if (!stress)
break;
INIT_WORK(&stress->work, fn);
stress->locks = locks;
stress->nlocks = nlocks;
stress->timeout = jiffies + 2*HZ;
queue_work(wq, &stress->work);
nthreads--;
}
flush_workqueue(wq);
for (n = 0; n < nlocks; n++)
ww_mutex_destroy(&locks[n]);
kfree(locks);
return 0;
}
static int __init test_ww_mutex_init(void)
{
int ncpus = num_online_cpus();
int ret, i;
printk(KERN_INFO "Beginning ww mutex selftests\n");
wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
if (!wq)
return -ENOMEM;
ret = test_mutex();
if (ret)
return ret;
ret = test_aa(false);
if (ret)
return ret;
ret = test_aa(true);
if (ret)
return ret;
for (i = 0; i < 4; i++) {
ret = test_abba(i & 1, i & 2);
if (ret)
return ret;
}
ret = test_cycle(ncpus);
if (ret)
return ret;
ret = stress(16, 2*ncpus, STRESS_INORDER);
if (ret)
return ret;
ret = stress(16, 2*ncpus, STRESS_REORDER);
if (ret)
return ret;
ret = stress(2047, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
if (ret)
return ret;
printk(KERN_INFO "All ww mutex selftests passed\n");
return 0;
}
static void __exit test_ww_mutex_exit(void)
{
destroy_workqueue(wq);
}
module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
| linux-master | kernel/locking/test-ww_mutex.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/errno.h>
#include <trace/events/lock.h>
int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
const char *name, struct lock_class_key *key)
{
sem->read_count = alloc_percpu(int);
if (unlikely(!sem->read_count))
return -ENOMEM;
rcu_sync_init(&sem->rss);
rcuwait_init(&sem->writer);
init_waitqueue_head(&sem->waiters);
atomic_set(&sem->block, 0);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
/*
* XXX: temporary kludge. The error path in alloc_super()
* assumes that percpu_free_rwsem() is safe after kzalloc().
*/
if (!sem->read_count)
return;
rcu_sync_dtor(&sem->rss);
free_percpu(sem->read_count);
sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);
static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
this_cpu_inc(*sem->read_count);
/*
* Due to having preemption disabled the decrement happens on
* the same CPU as the increment, avoiding the
* increment-on-one-CPU-and-decrement-on-another problem.
*
* If the reader misses the writer's assignment of sem->block, then the
* writer is guaranteed to see the reader's increment.
*
* Conversely, any readers that increment their sem->read_count after
* the writer looks are guaranteed to see the sem->block value, which
* in turn means that they are guaranteed to immediately decrement
* their sem->read_count, so that it doesn't matter that the writer
* missed them.
*/
smp_mb(); /* A matches D */
/*
* If !sem->block the critical section starts here, matched by the
* release in percpu_up_write().
*/
if (likely(!atomic_read_acquire(&sem->block)))
return true;
this_cpu_dec(*sem->read_count);
/* Prod writer to re-evaluate readers_active_check() */
rcuwait_wake_up(&sem->writer);
return false;
}
static inline bool __percpu_down_write_trylock(struct percpu_rw_semaphore *sem)
{
if (atomic_read(&sem->block))
return false;
return atomic_xchg(&sem->block, 1) == 0;
}
static bool __percpu_rwsem_trylock(struct percpu_rw_semaphore *sem, bool reader)
{
if (reader) {
bool ret;
preempt_disable();
ret = __percpu_down_read_trylock(sem);
preempt_enable();
return ret;
}
return __percpu_down_write_trylock(sem);
}
/*
* The return value of wait_queue_entry::func means:
*
* <0 - error, wakeup is terminated and the error is returned
* 0 - no wakeup, a next waiter is tried
* >0 - woken, if EXCLUSIVE, counted towards @nr_exclusive.
*
* We use EXCLUSIVE for both readers and writers to preserve FIFO order,
* and play games with the return value to allow waking multiple readers.
*
* Specifically, we wake readers until we've woken a single writer, or until a
* trylock fails.
*/
static int percpu_rwsem_wake_function(struct wait_queue_entry *wq_entry,
unsigned int mode, int wake_flags,
void *key)
{
bool reader = wq_entry->flags & WQ_FLAG_CUSTOM;
struct percpu_rw_semaphore *sem = key;
struct task_struct *p;
/* concurrent against percpu_down_write(), can get stolen */
if (!__percpu_rwsem_trylock(sem, reader))
return 1;
p = get_task_struct(wq_entry->private);
list_del_init(&wq_entry->entry);
smp_store_release(&wq_entry->private, NULL);
wake_up_process(p);
put_task_struct(p);
return !reader; /* wake (readers until) 1 writer */
}
static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
{
DEFINE_WAIT_FUNC(wq_entry, percpu_rwsem_wake_function);
bool wait;
spin_lock_irq(&sem->waiters.lock);
/*
* Serialize against the wakeup in percpu_up_write(), if we fail
* the trylock, the wakeup must see us on the list.
*/
wait = !__percpu_rwsem_trylock(sem, reader);
if (wait) {
wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
}
spin_unlock_irq(&sem->waiters.lock);
while (wait) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!smp_load_acquire(&wq_entry.private))
break;
schedule();
}
__set_current_state(TASK_RUNNING);
}
bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
if (__percpu_down_read_trylock(sem))
return true;
if (try)
return false;
trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_READ);
preempt_enable();
percpu_rwsem_wait(sem, /* .reader = */ true);
preempt_disable();
trace_contention_end(sem, 0);
return true;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);
#define per_cpu_sum(var) \
({ \
typeof(var) __sum = 0; \
int cpu; \
compiletime_assert_atomic_type(__sum); \
for_each_possible_cpu(cpu) \
__sum += per_cpu(var, cpu); \
__sum; \
})
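/*
 * Illustration, not part of this file: per_cpu_sum() above is just a
 * snapshot sum over all possible CPUs. With an invented per-CPU counter
 * "demo_cnt" it behaves like the open-coded loop below.
 */
static DEFINE_PER_CPU(int, demo_cnt);

static int demo_total(void)
{
	int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_cnt, cpu);	/* per_cpu_sum(demo_cnt) */
	return sum;
}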
bool percpu_is_read_locked(struct percpu_rw_semaphore *sem)
{
return per_cpu_sum(*sem->read_count) != 0 && !atomic_read(&sem->block);
}
EXPORT_SYMBOL_GPL(percpu_is_read_locked);
/*
* Return true if the modular sum of the sem->read_count per-CPU variable is
* zero. If this sum is zero, then it is stable due to the fact that if any
* newly arriving readers increment a given counter, they will immediately
* decrement that same counter.
*
* Assumes sem->block is set.
*/
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
if (per_cpu_sum(*sem->read_count) != 0)
return false;
/*
* If we observed the decrement; ensure we see the entire critical
* section.
*/
smp_mb(); /* C matches B */
return true;
}
void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
{
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_WRITE);
/* Notify readers to take the slow path. */
rcu_sync_enter(&sem->rss);
/*
* Try to set sem->block; this provides writer-writer exclusion.
* Having sem->block set makes new readers block.
*/
if (!__percpu_down_write_trylock(sem))
percpu_rwsem_wait(sem, /* .reader = */ false);
/* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */
/*
* If they don't see our store of sem->block, then we are guaranteed to
* see their sem->read_count increment, and therefore will wait for
* them.
*/
/* Wait for all active readers to complete. */
rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
trace_contention_end(sem, 0);
}
EXPORT_SYMBOL_GPL(percpu_down_write);
void percpu_up_write(struct percpu_rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, _RET_IP_);
/*
* Signal the writer is done, no fast path yet.
*
* One reason that we cannot just immediately flip to readers_fast is
* that new readers might fail to see the results of this writer's
* critical section.
*
* Therefore we force it through the slow path which guarantees an
* acquire and thereby guarantees the critical section's consistency.
*/
atomic_set_release(&sem->block, 0);
/*
* Prod any pending reader/writer to make progress.
*/
__wake_up(&sem->waiters, TASK_NORMAL, 1, sem);
/*
* Once this completes (at least one RCU-sched grace period hence) the
* reader fast path will be available again. Safe to use outside the
* exclusive write lock because it's counting.
*/
rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
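/*
 * Usage sketch, not part of this file: the intended read-mostly
 * pattern. DEFINE_STATIC_PERCPU_RWSEM(), percpu_down_read()/
 * percpu_up_read() and percpu_down_write()/percpu_up_write() are the
 * real API; "demo_mode" and the demo_* helpers are invented here.
 */
static DEFINE_STATIC_PERCPU_RWSEM(demo_rwsem);
static int demo_mode;

static int demo_get_mode(void)
{
	int mode;

	percpu_down_read(&demo_rwsem);	/* usually just a per-CPU increment */
	mode = demo_mode;
	percpu_up_read(&demo_rwsem);
	return mode;
}

static void demo_set_mode(int mode)
{
	percpu_down_write(&demo_rwsem);	/* forces readers into the slow path */
	demo_mode = mode;
	percpu_up_write(&demo_rwsem);
}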
| linux-master | kernel/locking/percpu-rwsem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* RT-Mutexes: simple blocking mutual exclusion locks with PI support
*
* started by Ingo Molnar and Thomas Gleixner.
*
* Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <[email protected]>
* Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <[email protected]>
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
* Copyright (C) 2006 Esben Nielsen
* Adaptive Spinlocks:
* Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
* and Peter Morreale,
* Adaptive Spinlocks simplification:
* Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <[email protected]>
*
* See Documentation/locking/rt-mutex-design.rst for details.
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/deadline.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/ww_mutex.h>
#include <trace/events/lock.h>
#include "rtmutex_common.h"
#ifndef WW_RT
# define build_ww_mutex() (false)
# define ww_container_of(rtm) NULL
static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
struct rt_mutex *lock,
struct ww_acquire_ctx *ww_ctx)
{
return 0;
}
static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
struct ww_acquire_ctx *ww_ctx)
{
}
static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
struct ww_acquire_ctx *ww_ctx)
{
}
static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct ww_acquire_ctx *ww_ctx)
{
return 0;
}
#else
# define build_ww_mutex() (true)
# define ww_container_of(rtm) container_of(rtm, struct ww_mutex, base)
# include "ww_mutex.h"
#endif
/*
* lock->owner state tracking:
*
* lock->owner holds the task_struct pointer of the owner. Bit 0
* is used to keep track of the "lock has waiters" state.
*
* owner bit0
* NULL 0 lock is free (fast acquire possible)
* NULL 1 lock is free and has waiters and the top waiter
* is going to take the lock*
* taskpointer 0 lock is held (fast release possible)
* taskpointer 1 lock is held and has waiters**
*
* The fast atomic compare exchange based acquire and release is only
* possible when bit 0 of lock->owner is 0.
*
* (*) It can also be a transitional state when grabbing the lock
* while ->wait_lock is held. To prevent any fast path cmpxchg on the lock,
* we need to set bit0 before looking at the lock, and the owner may be
* NULL during this small window, hence this can be a transitional state.
*
* (**) There is a small window when bit 0 is set but there are no
* waiters. This can happen when grabbing the lock in the slow path.
* To prevent a cmpxchg of the owner releasing the lock, we need to
* set this bit before looking at the lock.
*/
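/*
 * Standalone illustration of the bit 0 encoding described above; plain
 * userspace C, not part of this file. task_struct pointers are always
 * word-aligned, so the low bit is free to carry the "has waiters" flag,
 * just as rt_mutex_owner_encode() below folds RT_MUTEX_HAS_WAITERS into
 * lock->owner. The "demo" names are invented for the example.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_HAS_WAITERS	1UL

static uintptr_t demo_encode(void *owner, int has_waiters)
{
	return (uintptr_t)owner | (has_waiters ? DEMO_HAS_WAITERS : 0);
}

static void *demo_owner(uintptr_t val)
{
	return (void *)(val & ~DEMO_HAS_WAITERS);
}

int main(void)
{
	long task;				/* stand-in for a task_struct */
	uintptr_t val = demo_encode(&task, 1);

	assert(demo_owner(val) == (void *)&task);	/* pointer survives the tag */
	assert(val & DEMO_HAS_WAITERS);			/* flag rides in bit 0 */
	return 0;
}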
static __always_inline struct task_struct *
rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
{
unsigned long val = (unsigned long)owner;
if (rt_mutex_has_waiters(lock))
val |= RT_MUTEX_HAS_WAITERS;
return (struct task_struct *)val;
}
static __always_inline void
rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
{
/*
* lock->wait_lock is held but explicit acquire semantics are needed
* for a new lock owner so WRITE_ONCE is insufficient.
*/
xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
}
static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
{
/* lock->wait_lock is held so the unlock provides release semantics. */
WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
}
static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
{
lock->owner = (struct task_struct *)
((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}
static __always_inline void
fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
{
unsigned long owner, *p = (unsigned long *) &lock->owner;
if (rt_mutex_has_waiters(lock))
return;
/*
* The rbtree has no waiters enqueued, now make sure that the
* lock->owner still has the waiters bit set, otherwise the
* following can happen:
*
* CPU 0 CPU 1 CPU2
* l->owner=T1
* rt_mutex_lock(l)
* lock(l->lock)
* l->owner = T1 | HAS_WAITERS;
* enqueue(T2)
* boost()
* unlock(l->lock)
* block()
*
* rt_mutex_lock(l)
* lock(l->lock)
* l->owner = T1 | HAS_WAITERS;
* enqueue(T3)
* boost()
* unlock(l->lock)
* block()
* signal(->T2) signal(->T3)
* lock(l->lock)
* dequeue(T2)
* deboost()
* unlock(l->lock)
* lock(l->lock)
* dequeue(T3)
* ==> wait list is empty
* deboost()
* unlock(l->lock)
* lock(l->lock)
* fixup_rt_mutex_waiters()
* if (wait_list_empty(l) {
* l->owner = owner
* owner = l->owner & ~HAS_WAITERS;
* ==> l->owner = T1
* }
* lock(l->lock)
* rt_mutex_unlock(l) fixup_rt_mutex_waiters()
* if (wait_list_empty(l) {
* owner = l->owner & ~HAS_WAITERS;
* cmpxchg(l->owner, T1, NULL)
* ===> Success (l->owner = NULL)
*
* l->owner = owner
* ==> l->owner = T1
* }
*
* With the check for the waiter bit in place T3 on CPU2 will not
* overwrite. All tasks fiddling with the waiters bit are
* serialized by l->lock, so nothing else can modify the waiters
* bit. If the bit is set then nothing can change l->owner either
* so the simple RMW is safe. The cmpxchg() will simply fail if it
* happens in the middle of the RMW because the waiters bit is
* still set.
*/
owner = READ_ONCE(*p);
if (owner & RT_MUTEX_HAS_WAITERS) {
/*
* See rt_mutex_set_owner() and rt_mutex_clear_owner() on
* why xchg_acquire() is used for updating owner for
* locking and WRITE_ONCE() for unlocking.
*
* WRITE_ONCE() would work for the acquire case too, but
* in case that the lock acquisition failed it might
* force other lockers into the slow path unnecessarily.
*/
if (acquire_lock)
xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
else
WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
}
/*
* We can speed up the acquire/release, if there's no debugging state to be
* set up.
*/
#ifndef CONFIG_DEBUG_RT_MUTEXES
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
struct task_struct *old,
struct task_struct *new)
{
return try_cmpxchg_acquire(&lock->owner, &old, new);
}
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
struct task_struct *old,
struct task_struct *new)
{
return try_cmpxchg_release(&lock->owner, &old, new);
}
/*
* Callers must hold the ->wait_lock -- which is the whole purpose as we force
* all future threads that attempt to [Rmw] the lock to the slowpath. As such
* relaxed semantics suffice.
*/
static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{
unsigned long owner, *p = (unsigned long *) &lock->owner;
do {
owner = *p;
} while (cmpxchg_relaxed(p, owner,
owner | RT_MUTEX_HAS_WAITERS) != owner);
/*
* The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
* operations in the event of contention. Ensure the successful
* cmpxchg is visible.
*/
smp_mb__after_atomic();
}
/*
* Safe fastpath aware unlock:
* 1) Clear the waiters bit
* 2) Drop lock->wait_lock
* 3) Try to unlock the lock with cmpxchg
*/
static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
unsigned long flags)
__releases(lock->wait_lock)
{
struct task_struct *owner = rt_mutex_owner(lock);
clear_rt_mutex_waiters(lock);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
/*
* If a new waiter comes in between the unlock and the cmpxchg
* we have two situations:
*
* unlock(wait_lock);
* lock(wait_lock);
* cmpxchg(p, owner, 0) == owner
* mark_rt_mutex_waiters(lock);
* acquire(lock);
* or:
*
* unlock(wait_lock);
* lock(wait_lock);
* mark_rt_mutex_waiters(lock);
*
* cmpxchg(p, owner, 0) != owner
* enqueue_waiter();
* unlock(wait_lock);
* lock(wait_lock);
* wake waiter();
* unlock(wait_lock);
* lock(wait_lock);
* acquire(lock);
*/
return rt_mutex_cmpxchg_release(lock, owner, NULL);
}
#else
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
struct task_struct *old,
struct task_struct *new)
{
return false;
}
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
struct task_struct *old,
struct task_struct *new)
{
return false;
}
static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{
lock->owner = (struct task_struct *)
((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
/*
* Simple slow path only version: lock->owner is protected by lock->wait_lock.
*/
static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
unsigned long flags)
__releases(lock->wait_lock)
{
lock->owner = NULL;
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return true;
}
#endif
static __always_inline int __waiter_prio(struct task_struct *task)
{
int prio = task->prio;
if (!rt_prio(prio))
return DEFAULT_PRIO;
return prio;
}
/*
* Update the waiter->tree copy of the sort keys.
*/
static __always_inline void
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{
lockdep_assert_held(&waiter->lock->wait_lock);
lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
waiter->tree.prio = __waiter_prio(task);
waiter->tree.deadline = task->dl.deadline;
}
/*
* Update the waiter->pi_tree copy of the sort keys (from the tree copy).
*/
static __always_inline void
waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{
lockdep_assert_held(&waiter->lock->wait_lock);
lockdep_assert_held(&task->pi_lock);
lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry));
waiter->pi_tree.prio = waiter->tree.prio;
waiter->pi_tree.deadline = waiter->tree.deadline;
}
/*
* Only use with rt_waiter_node_{less,equal}()
*/
#define task_to_waiter_node(p) \
&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
#define task_to_waiter(p) \
&(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
struct rt_waiter_node *right)
{
if (left->prio < right->prio)
return 1;
/*
* If both waiters have dl_prio(), we check the deadlines of the
* associated tasks.
* If left waiter has a dl_prio(), and we didn't return 1 above,
* then right waiter has a dl_prio() too.
*/
if (dl_prio(left->prio))
return dl_time_before(left->deadline, right->deadline);
return 0;
}
static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
struct rt_waiter_node *right)
{
if (left->prio != right->prio)
return 0;
/*
* If both waiters have dl_prio(), we check the deadlines of the
* associated tasks.
* If left waiter has a dl_prio(), and we didn't return 0 above,
* then right waiter has a dl_prio() too.
*/
if (dl_prio(left->prio))
return left->deadline == right->deadline;
return 1;
}
static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
struct rt_mutex_waiter *top_waiter)
{
if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree))
return true;
#ifdef RT_MUTEX_BUILD_SPINLOCKS
/*
* Note that RT tasks are excluded from same priority (lateral)
* steals to prevent the introduction of an unbounded latency.
*/
if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio))
return false;
return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
#else
return false;
#endif
}
#define __node_2_waiter(node) \
rb_entry((node), struct rt_mutex_waiter, tree.entry)
static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
{
struct rt_mutex_waiter *aw = __node_2_waiter(a);
struct rt_mutex_waiter *bw = __node_2_waiter(b);
if (rt_waiter_node_less(&aw->tree, &bw->tree))
return 1;
if (!build_ww_mutex())
return 0;
if (rt_waiter_node_less(&bw->tree, &aw->tree))
return 0;
/* NOTE: relies on waiter->ww_ctx being set before insertion */
if (aw->ww_ctx) {
if (!bw->ww_ctx)
return 1;
return (signed long)(aw->ww_ctx->stamp -
bw->ww_ctx->stamp) < 0;
}
return 0;
}
static __always_inline void
rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{
lockdep_assert_held(&lock->wait_lock);
rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less);
}
static __always_inline void
rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{
lockdep_assert_held(&lock->wait_lock);
if (RB_EMPTY_NODE(&waiter->tree.entry))
return;
rb_erase_cached(&waiter->tree.entry, &lock->waiters);
RB_CLEAR_NODE(&waiter->tree.entry);
}
#define __node_2_rt_node(node) \
rb_entry((node), struct rt_waiter_node, entry)
static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
{
return rt_waiter_node_less(__node_2_rt_node(a), __node_2_rt_node(b));
}
static __always_inline void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
lockdep_assert_held(&task->pi_lock);
rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less);
}
static __always_inline void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
lockdep_assert_held(&task->pi_lock);
if (RB_EMPTY_NODE(&waiter->pi_tree.entry))
return;
rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters);
RB_CLEAR_NODE(&waiter->pi_tree.entry);
}
static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock,
struct task_struct *p)
{
struct task_struct *pi_task = NULL;
lockdep_assert_held(&lock->wait_lock);
lockdep_assert(rt_mutex_owner(lock) == p);
lockdep_assert_held(&p->pi_lock);
if (task_has_pi_waiters(p))
pi_task = task_top_pi_waiter(p)->task;
rt_mutex_setprio(p, pi_task);
}
/* RT mutex specific wake_q wrappers */
static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh,
struct task_struct *task,
unsigned int wake_state)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT) && wake_state == TASK_RTLOCK_WAIT) {
if (IS_ENABLED(CONFIG_PROVE_LOCKING))
WARN_ON_ONCE(wqh->rtlock_task);
get_task_struct(task);
wqh->rtlock_task = task;
} else {
wake_q_add(&wqh->head, task);
}
}
static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
struct rt_mutex_waiter *w)
{
rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state);
}
static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
put_task_struct(wqh->rtlock_task);
wqh->rtlock_task = NULL;
}
if (!wake_q_empty(&wqh->head))
wake_up_q(&wqh->head);
/* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
preempt_enable();
}
/*
* Deadlock detection is conditional:
*
* If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
* if the detect argument == RT_MUTEX_FULL_CHAINWALK.
*
* If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
* conducted independent of the detect argument.
*
* If the waiter argument is NULL this indicates the deboost path and
* deadlock detection is disabled independent of the detect argument
* and the config settings.
*/
static __always_inline bool
rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
enum rtmutex_chainwalk chwalk)
{
if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
return waiter != NULL;
return chwalk == RT_MUTEX_FULL_CHAINWALK;
}
static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p)
{
return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}
/*
* Adjust the priority chain. Also used for deadlock detection.
* Decreases task's usage by one - may thus free the task.
*
* @task: the task owning the mutex (owner) for which a chain walk is
* probably needed
* @chwalk: do we have to carry out deadlock detection?
* @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
* things for a task that has just got its priority adjusted, and
* is waiting on a mutex)
* @next_lock: the mutex on which the owner of @orig_lock was blocked before
* we dropped its pi_lock. Is never dereferenced, only used for
* comparison to detect lock chain changes.
* @orig_waiter: rt_mutex_waiter struct for the task that has just donated
* its priority to the mutex owner (can be NULL in the case
* depicted above or if the top waiter is gone away and we are
* actually deboosting the owner)
* @top_task: the current top waiter
*
* Returns 0 or -EDEADLK.
*
* Chain walk basics and protection scope
*
* [R] refcount on task
* [Pn] task->pi_lock held
* [L] rtmutex->wait_lock held
*
* Normal locking order:
*
* rtmutex->wait_lock
* task->pi_lock
*
* Step Description Protected by
* function arguments:
* @task [R]
* @orig_lock if != NULL @top_task is blocked on it
* @next_lock Unprotected. Cannot be
* dereferenced. Only used for
* comparison.
* @orig_waiter if != NULL @top_task is blocked on it
* @top_task current, or in case of proxy
* locking protected by calling
* code
* again:
* loop_sanity_check();
* retry:
* [1] lock(task->pi_lock); [R] acquire [P1]
* [2] waiter = task->pi_blocked_on; [P1]
* [3] check_exit_conditions_1(); [P1]
* [4] lock = waiter->lock; [P1]
* [5] if (!try_lock(lock->wait_lock)) { [P1] try to acquire [L]
* unlock(task->pi_lock); release [P1]
* goto retry;
* }
* [6] check_exit_conditions_2(); [P1] + [L]
* [7] requeue_lock_waiter(lock, waiter); [P1] + [L]
* [8] unlock(task->pi_lock); release [P1]
* put_task_struct(task); release [R]
* [9] check_exit_conditions_3(); [L]
* [10] task = owner(lock); [L]
* get_task_struct(task); [L] acquire [R]
* lock(task->pi_lock); [L] acquire [P2]
* [11] requeue_pi_waiter(tsk, waiters(lock));[P2] + [L]
* [12] check_exit_conditions_4(); [P2] + [L]
* [13] unlock(task->pi_lock); release [P2]
* unlock(lock->wait_lock); release [L]
* goto again;
*
* Where P1 is the blocking task and P2 is the lock owner; going up one step
* the owner becomes the next blocked task, etc.
*
*
*/
static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
enum rtmutex_chainwalk chwalk,
struct rt_mutex_base *orig_lock,
struct rt_mutex_base *next_lock,
struct rt_mutex_waiter *orig_waiter,
struct task_struct *top_task)
{
struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
struct rt_mutex_waiter *prerequeue_top_waiter;
int ret = 0, depth = 0;
struct rt_mutex_base *lock;
bool detect_deadlock;
bool requeue = true;
detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
/*
* The (de)boosting is a step by step approach with a lot of
* pitfalls. We want this to be preemptible and we want to hold a
* maximum of two locks per step. So we have to check
* carefully whether things change under us.
*/
again:
/*
* We limit the lock chain length for each invocation.
*/
if (++depth > max_lock_depth) {
static int prev_max;
/*
* Print this only once. If the admin changes the limit,
* print a new message when reaching the limit again.
*/
if (prev_max != max_lock_depth) {
prev_max = max_lock_depth;
printk(KERN_WARNING "Maximum lock depth %d reached "
"task: %s (%d)\n", max_lock_depth,
top_task->comm, task_pid_nr(top_task));
}
put_task_struct(task);
return -EDEADLK;
}
/*
* We are fully preemptible here and only hold the refcount on
* @task. So everything can have changed under us since the
* caller or our own code below (goto retry/again) dropped all
* locks.
*/
retry:
/*
* [1] Task cannot go away as we did a get_task() before !
*/
raw_spin_lock_irq(&task->pi_lock);
/*
* [2] Get the waiter on which @task is blocked on.
*/
waiter = task->pi_blocked_on;
/*
* [3] check_exit_conditions_1() protected by task->pi_lock.
*/
/*
* Check whether the end of the boosting chain has been
* reached or the state of the chain has changed while we
* dropped the locks.
*/
if (!waiter)
goto out_unlock_pi;
/*
* Check the orig_waiter state. After we dropped the locks,
* the previous owner of the lock might have released the lock.
*/
if (orig_waiter && !rt_mutex_owner(orig_lock))
goto out_unlock_pi;
/*
* We dropped all locks after taking a refcount on @task, so
* the task might have moved on in the lock chain or even left
* the chain completely and blocks now on an unrelated lock or
* on @orig_lock.
*
* We stored the lock on which @task was blocked in @next_lock,
* so we can detect the chain change.
*/
if (next_lock != waiter->lock)
goto out_unlock_pi;
/*
* There could be 'spurious' loops in the lock graph due to ww_mutex,
* consider:
*
* P1: A, ww_A, ww_B
* P2: ww_B, ww_A
* P3: A
*
* P3 should not return -EDEADLK because it gets trapped in the cycle
* created by P1 and P2 (which will resolve -- and runs into
* max_lock_depth above). Therefore disable detect_deadlock such that
* the below termination condition can trigger once all relevant tasks
* are boosted.
*
* Even when we start with ww_mutex we can disable deadlock detection,
* since we would suppress a ww_mutex induced deadlock at [6] anyway.
* Suppressing it here however is not sufficient since we might still
* hit [6] due to adjustment driven iteration.
*
* NOTE: if someone were to create a deadlock between 2 ww_classes we'd
* utterly fail to report it; lockdep should.
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock)
detect_deadlock = false;
/*
* Drop out when the task has no waiters. Note that top_waiter
* can be NULL when we are in deboosting mode!
*/
if (top_waiter) {
if (!task_has_pi_waiters(task))
goto out_unlock_pi;
/*
* If deadlock detection is off, we stop here if we
* are not the top pi waiter of the task. If deadlock
* detection is enabled we continue, but stop the
* requeueing in the chain walk.
*/
if (top_waiter != task_top_pi_waiter(task)) {
if (!detect_deadlock)
goto out_unlock_pi;
else
requeue = false;
}
}
/*
* If the waiter priority is the same as the task priority
* then there is no further priority adjustment necessary. If
* deadlock detection is off, we stop the chain walk. If it's
* enabled we continue, but stop the requeueing in the chain
* walk.
*/
if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
if (!detect_deadlock)
goto out_unlock_pi;
else
requeue = false;
}
/*
* [4] Get the next lock; holding task->pi_lock means @task cannot
* unblock, which guarantees @lock's existence.
*/
lock = waiter->lock;
/*
* [5] We need to trylock here as we are holding task->pi_lock,
* which is the reverse lock order versus the other rtmutex
* operations.
*
* Per the above, holding task->pi_lock guarantees lock exists, so
* inverting this lock order is infeasible from a life-time
* perspective.
*/
if (!raw_spin_trylock(&lock->wait_lock)) {
raw_spin_unlock_irq(&task->pi_lock);
cpu_relax();
goto retry;
}
/*
* [6] check_exit_conditions_2() protected by task->pi_lock and
* lock->wait_lock.
*
* Deadlock detection. If the lock is the same as the original
* lock which caused us to walk the lock chain or if the
* current lock is owned by the task which initiated the chain
* walk, we detected a deadlock.
*/
if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
ret = -EDEADLK;
/*
* When the deadlock is due to ww_mutex; also see above. Don't
* report the deadlock and instead let the ww_mutex wound/die
* logic pick which of the contending threads gets -EDEADLK.
*
* NOTE: assumes the cycle only contains a single ww_class; any
* other configuration and we fail to report; also, see
* lockdep.
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx)
ret = 0;
raw_spin_unlock(&lock->wait_lock);
goto out_unlock_pi;
}
/*
* If we just follow the lock chain for deadlock detection, no
* need to do all the requeue operations. To avoid a truckload
* of conditionals around the various places below, just do the
* minimum chain walk checks.
*/
if (!requeue) {
/*
* No requeue[7] here. Just release @task [8]
*/
raw_spin_unlock(&task->pi_lock);
put_task_struct(task);
/*
* [9] check_exit_conditions_3 protected by lock->wait_lock.
* If there is no owner of the lock, end of chain.
*/
if (!rt_mutex_owner(lock)) {
raw_spin_unlock_irq(&lock->wait_lock);
return 0;
}
/* [10] Grab the next task, i.e. owner of @lock */
task = get_task_struct(rt_mutex_owner(lock));
raw_spin_lock(&task->pi_lock);
/*
* No requeue [11] here. We just do deadlock detection.
*
* [12] Store whether owner is blocked
* itself. Decision is made after dropping the locks
*/
next_lock = task_blocked_on_lock(task);
/*
* Get the top waiter for the next iteration
*/
top_waiter = rt_mutex_top_waiter(lock);
/* [13] Drop locks */
raw_spin_unlock(&task->pi_lock);
raw_spin_unlock_irq(&lock->wait_lock);
/* If owner is not blocked, end of chain. */
if (!next_lock)
goto out_put_task;
goto again;
}
/*
* Store the current top waiter before doing the requeue
* operation on @lock. We need it for the boost/deboost
* decision below.
*/
prerequeue_top_waiter = rt_mutex_top_waiter(lock);
/* [7] Requeue the waiter in the lock waiter tree. */
rt_mutex_dequeue(lock, waiter);
/*
* Update the waiter prio fields now that we're dequeued.
*
* These values can have changed through either:
*
* sys_sched_setscheduler() / sys_sched_setattr()
*
* or
*
* DL CBS enforcement advancing the effective deadline.
*/
waiter_update_prio(waiter, task);
rt_mutex_enqueue(lock, waiter);
/*
* [8] Release the (blocking) task in preparation for
* taking the owner task in [10].
*
* Since we hold lock->wait_lock, task cannot unblock, even if we
* release task->pi_lock.
*/
raw_spin_unlock(&task->pi_lock);
put_task_struct(task);
/*
* [9] check_exit_conditions_3 protected by lock->wait_lock.
*
* We must abort the chain walk if there is no lock owner even
* in the dead lock detection case, as we have nothing to
* follow here. This is the end of the chain we are walking.
*/
if (!rt_mutex_owner(lock)) {
/*
* If the requeue [7] above changed the top waiter,
* then we need to wake the new top waiter up to try
* to get the lock.
*/
top_waiter = rt_mutex_top_waiter(lock);
if (prerequeue_top_waiter != top_waiter)
wake_up_state(top_waiter->task, top_waiter->wake_state);
raw_spin_unlock_irq(&lock->wait_lock);
return 0;
}
/*
* [10] Grab the next task, i.e. the owner of @lock
*
* Per holding lock->wait_lock and checking for !owner above, there
* must be an owner and it cannot go away.
*/
task = get_task_struct(rt_mutex_owner(lock));
raw_spin_lock(&task->pi_lock);
/* [11] requeue the pi waiters if necessary */
if (waiter == rt_mutex_top_waiter(lock)) {
/*
* The waiter became the new top (highest priority)
* waiter on the lock. Replace the previous top waiter
* in the owner tasks pi waiters tree with this waiter
* and adjust the priority of the owner.
*/
rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
waiter_clone_prio(waiter, task);
rt_mutex_enqueue_pi(task, waiter);
rt_mutex_adjust_prio(lock, task);
} else if (prerequeue_top_waiter == waiter) {
/*
* The waiter was the top waiter on the lock, but is
* no longer the top priority waiter. Replace waiter in
* the owner tasks pi waiters tree with the new top
* (highest priority) waiter and adjust the priority
* of the owner.
* The new top waiter is stored in @waiter so that
* @waiter == @top_waiter evaluates to true below and
* we continue to deboost the rest of the chain.
*/
rt_mutex_dequeue_pi(task, waiter);
waiter = rt_mutex_top_waiter(lock);
waiter_clone_prio(waiter, task);
rt_mutex_enqueue_pi(task, waiter);
rt_mutex_adjust_prio(lock, task);
} else {
/*
* Nothing changed. No need to do any priority
* adjustment.
*/
}
/*
* [12] check_exit_conditions_4() protected by task->pi_lock
* and lock->wait_lock. The actual decisions are made after we
* dropped the locks.
*
* Check whether the task which owns the current lock is pi
* blocked itself. If yes we store a pointer to the lock for
* the lock chain change detection above. After we dropped
* task->pi_lock next_lock cannot be dereferenced anymore.
*/
next_lock = task_blocked_on_lock(task);
/*
* Store the top waiter of @lock for the end of chain walk
* decision below.
*/
top_waiter = rt_mutex_top_waiter(lock);
/* [13] Drop the locks */
raw_spin_unlock(&task->pi_lock);
raw_spin_unlock_irq(&lock->wait_lock);
/*
* Make the actual exit decisions [12], based on the stored
* values.
*
* We reached the end of the lock chain. Stop right here. No
* point to go back just to figure that out.
*/
if (!next_lock)
goto out_put_task;
/*
* If the current waiter is not the top waiter on the lock,
* then we can stop the chain walk here if we are not in full
* deadlock detection mode.
*/
if (!detect_deadlock && waiter != top_waiter)
goto out_put_task;
goto again;
out_unlock_pi:
raw_spin_unlock_irq(&task->pi_lock);
out_put_task:
put_task_struct(task);
return ret;
}
/*
* Try to take an rt-mutex
*
* Must be called with lock->wait_lock held and interrupts disabled
*
* @lock: The lock to be acquired.
* @task: The task which wants to acquire the lock
* @waiter: The waiter that is queued to the lock's wait tree if the
* callsite called task_blocked_on_lock(), otherwise NULL
*/
static int __sched
try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
{
lockdep_assert_held(&lock->wait_lock);
/*
* Before testing whether we can acquire @lock, we set the
* RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
* other tasks which try to modify @lock into the slow path
* and they serialize on @lock->wait_lock.
*
* The RT_MUTEX_HAS_WAITERS bit can have a transitional state
* as explained at the top of this file if and only if:
*
* - There is a lock owner. The caller must fixup the
* transient state if it does a trylock or leaves the lock
* function due to a signal or timeout.
*
* - @task acquires the lock and there are no other
* waiters. This is undone in rt_mutex_set_owner(@task) at
* the end of this function.
*/
mark_rt_mutex_waiters(lock);
/*
* If @lock has an owner, give up.
*/
if (rt_mutex_owner(lock))
return 0;
/*
* If @waiter != NULL, @task has already enqueued the waiter
* into @lock waiter tree. If @waiter == NULL then this is a
* trylock attempt.
*/
if (waiter) {
struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
/*
* If waiter is the highest priority waiter of @lock,
* or allowed to steal it, take it over.
*/
if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) {
/*
* We can acquire the lock. Remove the waiter from the
* lock waiters tree.
*/
rt_mutex_dequeue(lock, waiter);
} else {
return 0;
}
} else {
/*
* If the lock has waiters already we check whether @task is
* eligible to take over the lock.
*
* If there are no other waiters, @task can acquire
* the lock. @task->pi_blocked_on is NULL, so it does
* not need to be dequeued.
*/
if (rt_mutex_has_waiters(lock)) {
/* Check whether the trylock can steal it. */
if (!rt_mutex_steal(task_to_waiter(task),
rt_mutex_top_waiter(lock)))
return 0;
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
* waiters order.
*/
} else {
/*
* No waiters. Take the lock without the
* pi_lock dance. @task->pi_blocked_on is NULL
* and we have no waiters to enqueue in @task
* pi waiters tree.
*/
goto takeit;
}
}
/*
* Clear @task->pi_blocked_on. Requires protection by
* @task->pi_lock. Redundant operation for the @waiter == NULL
* case, but conditionals are more expensive than a redundant
* store.
*/
raw_spin_lock(&task->pi_lock);
task->pi_blocked_on = NULL;
/*
* Finish the lock acquisition. @task is the new owner. If
* other waiters exist we have to insert the highest priority
* waiter into @task->pi_waiters tree.
*/
if (rt_mutex_has_waiters(lock))
rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
raw_spin_unlock(&task->pi_lock);
takeit:
/*
* This either preserves the RT_MUTEX_HAS_WAITERS bit if there
* are still waiters or clears it.
*/
rt_mutex_set_owner(lock, task);
return 1;
}
/*
* Task blocks on lock.
*
* Prepare waiter and propagate pi chain
*
* This must be called with lock->wait_lock held and interrupts disabled
*/
static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task,
struct ww_acquire_ctx *ww_ctx,
enum rtmutex_chainwalk chwalk)
{
struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex_waiter *top_waiter = waiter;
struct rt_mutex_base *next_lock;
int chain_walk = 0, res;
lockdep_assert_held(&lock->wait_lock);
/*
* Early deadlock detection. We really don't want the task to
* enqueue on itself just to untangle the mess later. It's not
* only an optimization. We drop the locks, so another waiter
* can come in before the chain walk detects the deadlock. So
* the other will detect the deadlock and return -EDEADLOCK,
* which is wrong, as the other waiter is not in a deadlock
* situation.
*
* Except for ww_mutex, in that case the chain walk must already deal
* with spurious cycles, see the comments at [3] and [6].
*/
if (owner == task && !(build_ww_mutex() && ww_ctx))
return -EDEADLK;
raw_spin_lock(&task->pi_lock);
waiter->task = task;
waiter->lock = lock;
waiter_update_prio(waiter, task);
waiter_clone_prio(waiter, task);
/* Get the top priority waiter on the lock */
if (rt_mutex_has_waiters(lock))
top_waiter = rt_mutex_top_waiter(lock);
rt_mutex_enqueue(lock, waiter);
task->pi_blocked_on = waiter;
raw_spin_unlock(&task->pi_lock);
if (build_ww_mutex() && ww_ctx) {
struct rt_mutex *rtm;
/* Check whether the waiter should back out immediately */
rtm = container_of(lock, struct rt_mutex, rtmutex);
res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
if (res) {
raw_spin_lock(&task->pi_lock);
rt_mutex_dequeue(lock, waiter);
task->pi_blocked_on = NULL;
raw_spin_unlock(&task->pi_lock);
return res;
}
}
if (!owner)
return 0;
raw_spin_lock(&owner->pi_lock);
if (waiter == rt_mutex_top_waiter(lock)) {
rt_mutex_dequeue_pi(owner, top_waiter);
rt_mutex_enqueue_pi(owner, waiter);
rt_mutex_adjust_prio(lock, owner);
if (owner->pi_blocked_on)
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
}
/* Store the lock on which owner is blocked or NULL */
next_lock = task_blocked_on_lock(owner);
raw_spin_unlock(&owner->pi_lock);
/*
* Even if full deadlock detection is on, if the owner is not
* blocked itself, we can avoid finding this out in the chain
* walk.
*/
if (!chain_walk || !next_lock)
return 0;
/*
* The owner can't disappear while holding a lock,
* so the owner struct is protected by wait_lock.
* Gets dropped in rt_mutex_adjust_prio_chain()!
*/
get_task_struct(owner);
raw_spin_unlock_irq(&lock->wait_lock);
res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
next_lock, waiter, task);
raw_spin_lock_irq(&lock->wait_lock);
return res;
}
/*
* Remove the top waiter from the current task's pi waiter tree and
* queue it up.
*
* Called with lock->wait_lock held and interrupts disabled.
*/
static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
struct rt_mutex_base *lock)
{
struct rt_mutex_waiter *waiter;
lockdep_assert_held(&lock->wait_lock);
raw_spin_lock(&current->pi_lock);
waiter = rt_mutex_top_waiter(lock);
/*
* Remove it from current->pi_waiters and deboost.
*
* We must in fact deboost here in order to ensure we call
* rt_mutex_setprio() to update p->pi_top_task before the
* task unblocks.
*/
rt_mutex_dequeue_pi(current, waiter);
rt_mutex_adjust_prio(lock, current);
/*
* As we are waking up the top waiter, and the waiter stays
* queued on the lock until it gets the lock, this lock
* obviously has waiters. Just set the bit here and this has
* the added benefit of forcing all new tasks into the
* slow path making sure no task of lower priority than
* the top waiter can steal this lock.
*/
lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
/*
* We deboosted before waking the top waiter task such that we don't
* run two tasks with the 'same' priority (and ensure the
* p->pi_top_task pointer points to a blocked task). This however can
* lead to priority inversion if we would get preempted after the
* deboost but before waking our donor task, hence the preempt_disable()
* before unlock.
*
* Pairs with preempt_enable() in rt_mutex_wake_up_q();
*/
preempt_disable();
rt_mutex_wake_q_add(wqh, waiter);
raw_spin_unlock(&current->pi_lock);
}
static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
int ret = try_to_take_rt_mutex(lock, current, NULL);
/*
* try_to_take_rt_mutex() sets the lock waiters bit
* unconditionally. Clean this up.
*/
fixup_rt_mutex_waiters(lock, true);
return ret;
}
/*
* Slow path try-lock function:
*/
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
unsigned long flags;
int ret;
/*
* If the lock already has an owner we fail to get the lock.
* This can be done without taking the @lock->wait_lock as
* it is only being read, and this is a trylock anyway.
*/
if (rt_mutex_owner(lock))
return 0;
/*
* The mutex currently has no owner. Lock the wait lock and try to
* acquire the lock. We use irqsave here to support early boot calls.
*/
raw_spin_lock_irqsave(&lock->wait_lock, flags);
ret = __rt_mutex_slowtrylock(lock);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return ret;
}
static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
{
if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 1;
return rt_mutex_slowtrylock(lock);
}
/*
* Slow path to release a rt-mutex.
*/
static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
{
DEFINE_RT_WAKE_Q(wqh);
unsigned long flags;
/* irqsave required to support early boot calls */
raw_spin_lock_irqsave(&lock->wait_lock, flags);
debug_rt_mutex_unlock(lock);
/*
* We must be careful here if the fast path is enabled. If we
* have no waiters queued we cannot set owner to NULL here
* because of:
*
* foo->lock->owner = NULL;
* rtmutex_lock(foo->lock); <- fast path
* free = atomic_dec_and_test(foo->refcnt);
* rtmutex_unlock(foo->lock); <- fast path
* if (free)
* kfree(foo);
* raw_spin_unlock(foo->lock->wait_lock);
*
* So for the fastpath enabled kernel:
*
* Nothing can set the waiters bit as long as we hold
* lock->wait_lock. So we do the following sequence:
*
* owner = rt_mutex_owner(lock);
* clear_rt_mutex_waiters(lock);
* raw_spin_unlock(&lock->wait_lock);
* if (cmpxchg(&lock->owner, owner, 0) == owner)
* return;
* goto retry;
*
* The fastpath disabled variant is simple as all access to
* lock->owner is serialized by lock->wait_lock:
*
* lock->owner = NULL;
* raw_spin_unlock(&lock->wait_lock);
*/
while (!rt_mutex_has_waiters(lock)) {
/* Drops lock->wait_lock ! */
if (unlock_rt_mutex_safe(lock, flags) == true)
return;
/* Relock the rtmutex and try again */
raw_spin_lock_irqsave(&lock->wait_lock, flags);
}
/*
* The wakeup next waiter path does not suffer from the above
* race. See the comments there.
*
* Queue the next waiter for wakeup once we release the wait_lock.
*/
mark_wakeup_next_waiter(&wqh, lock);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
rt_mutex_wake_up_q(&wqh);
}
static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
{
if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
return;
rt_mutex_slowunlock(lock);
}
#ifdef CONFIG_SMP
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *owner)
{
bool res = true;
rcu_read_lock();
for (;;) {
/* If owner changed, trylock again. */
if (owner != rt_mutex_owner(lock))
break;
/*
* Ensure that @owner is dereferenced after checking that
* the lock owner still matches @owner. If that fails,
* @owner might point to freed memory. If it still matches,
* the rcu_read_lock() ensures the memory stays valid.
*/
barrier();
/*
* Stop spinning when:
* - the lock owner has been scheduled out
* - current is no longer the top waiter
* - current is requested to reschedule (redundant
* for CONFIG_PREEMPT_RCU=y)
* - the VCPU on which owner runs is preempted
*/
if (!owner_on_cpu(owner) || need_resched() ||
!rt_mutex_waiter_is_top_waiter(lock, waiter)) {
res = false;
break;
}
cpu_relax();
}
rcu_read_unlock();
return res;
}
#else
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *owner)
{
return false;
}
#endif
#ifdef RT_MUTEX_BUILD_MUTEX
/*
* Functions required for:
* - rtmutex, futex on all kernels
* - mutex and rwsem substitutions on RT kernels
*/
/*
* Remove a waiter from a lock and give up
*
* Must be called with lock->wait_lock held and interrupts disabled. It must
* have just failed to try_to_take_rt_mutex().
*/
static void __sched remove_waiter(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter)
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex_base *next_lock;
lockdep_assert_held(&lock->wait_lock);
raw_spin_lock(&current->pi_lock);
rt_mutex_dequeue(lock, waiter);
current->pi_blocked_on = NULL;
raw_spin_unlock(&current->pi_lock);
/*
* Only update priority if the waiter was the highest priority
* waiter of the lock and there is an owner to update.
*/
if (!owner || !is_top_waiter)
return;
raw_spin_lock(&owner->pi_lock);
rt_mutex_dequeue_pi(owner, waiter);
if (rt_mutex_has_waiters(lock))
rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
rt_mutex_adjust_prio(lock, owner);
/* Store the lock on which owner is blocked or NULL */
next_lock = task_blocked_on_lock(owner);
raw_spin_unlock(&owner->pi_lock);
/*
* Don't walk the chain, if the owner task is not blocked
* itself.
*/
if (!next_lock)
return;
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(owner);
raw_spin_unlock_irq(&lock->wait_lock);
rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
next_lock, NULL, current);
raw_spin_lock_irq(&lock->wait_lock);
}
/**
* rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
* @lock: the rt_mutex to take
* @ww_ctx: WW mutex context pointer
* @state: the state the task should block in (TASK_INTERRUPTIBLE
* or TASK_UNINTERRUPTIBLE)
* @timeout: the pre-initialized and started timer, or NULL for none
* @waiter: the pre-initialized rt_mutex_waiter
*
* Must be called with lock->wait_lock held and interrupts disabled
*/
static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
unsigned int state,
struct hrtimer_sleeper *timeout,
struct rt_mutex_waiter *waiter)
{
struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
struct task_struct *owner;
int ret = 0;
for (;;) {
/* Try to acquire the lock: */
if (try_to_take_rt_mutex(lock, current, waiter))
break;
if (timeout && !timeout->task) {
ret = -ETIMEDOUT;
break;
}
if (signal_pending_state(state, current)) {
ret = -EINTR;
break;
}
if (build_ww_mutex() && ww_ctx) {
ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
if (ret)
break;
}
if (waiter == rt_mutex_top_waiter(lock))
owner = rt_mutex_owner(lock);
else
owner = NULL;
raw_spin_unlock_irq(&lock->wait_lock);
if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
schedule();
raw_spin_lock_irq(&lock->wait_lock);
set_current_state(state);
}
__set_current_state(TASK_RUNNING);
return ret;
}
static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
struct rt_mutex_waiter *w)
{
/*
* If the result is not -EDEADLOCK or the caller requested
* deadlock detection, nothing to do here.
*/
if (res != -EDEADLOCK || detect_deadlock)
return;
if (build_ww_mutex() && w->ww_ctx)
return;
/*
* Yell loudly and stop the task right here.
*/
WARN(1, "rtmutex deadlock detected\n");
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
}
/**
* __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
* @lock: The rtmutex to block lock
* @ww_ctx: WW mutex context pointer
* @state: The task state for sleeping
* @chwalk: Indicator whether full or partial chainwalk is requested
* @waiter: Initialized waiter for blocking
*/
static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
unsigned int state,
enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *waiter)
{
struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
struct ww_mutex *ww = ww_container_of(rtm);
int ret;
lockdep_assert_held(&lock->wait_lock);
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
if (build_ww_mutex() && ww_ctx) {
__ww_mutex_check_waiters(rtm, ww_ctx);
ww_mutex_lock_acquired(ww, ww_ctx);
}
return 0;
}
set_current_state(state);
trace_contention_begin(lock, LCB_F_RT);
ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
if (likely(!ret))
ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
if (likely(!ret)) {
/* acquired the lock */
if (build_ww_mutex() && ww_ctx) {
if (!ww_ctx->is_wait_die)
__ww_mutex_check_waiters(rtm, ww_ctx);
ww_mutex_lock_acquired(ww, ww_ctx);
}
} else {
__set_current_state(TASK_RUNNING);
remove_waiter(lock, waiter);
rt_mutex_handle_deadlock(ret, chwalk, waiter);
}
/*
* try_to_take_rt_mutex() sets the waiter bit
* unconditionally. We might have to fix that up.
*/
fixup_rt_mutex_waiters(lock, true);
trace_contention_end(lock, ret);
return ret;
}
static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
unsigned int state)
{
struct rt_mutex_waiter waiter;
int ret;
rt_mutex_init_waiter(&waiter);
waiter.ww_ctx = ww_ctx;
ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
&waiter);
debug_rt_mutex_free_waiter(&waiter);
return ret;
}
/*
* rt_mutex_slowlock - Locking slowpath invoked when fast path fails
* @lock: The rtmutex to block lock
* @ww_ctx: WW mutex context pointer
* @state: The task state for sleeping
*/
static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
unsigned int state)
{
unsigned long flags;
int ret;
/*
* Technically we could use raw_spin_[un]lock_irq() here, but this can
* be called in early boot if the cmpxchg() fast path is disabled
* (debug, no architecture support). In this case we will acquire the
* rtmutex with lock->wait_lock held. But we cannot unconditionally
* enable interrupts in that early boot case. So we need to use the
* irqsave/restore variants.
*/
raw_spin_lock_irqsave(&lock->wait_lock, flags);
ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return ret;
}
static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
unsigned int state)
{
if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 0;
return rt_mutex_slowlock(lock, NULL, state);
}
#endif /* RT_MUTEX_BUILD_MUTEX */
#ifdef RT_MUTEX_BUILD_SPINLOCKS
/*
* Functions required for spin/rw_lock substitution on RT kernels
*/
/**
* rtlock_slowlock_locked - Slow path lock acquisition for RT locks
* @lock: The underlying RT mutex
*/
static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
{
struct rt_mutex_waiter waiter;
struct task_struct *owner;
lockdep_assert_held(&lock->wait_lock);
if (try_to_take_rt_mutex(lock, current, NULL))
return;
rt_mutex_init_rtlock_waiter(&waiter);
/* Save current state and set state to TASK_RTLOCK_WAIT */
current_save_and_set_rtlock_wait_state();
trace_contention_begin(lock, LCB_F_RT);
task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
for (;;) {
/* Try to acquire the lock again */
if (try_to_take_rt_mutex(lock, current, &waiter))
break;
if (&waiter == rt_mutex_top_waiter(lock))
owner = rt_mutex_owner(lock);
else
owner = NULL;
raw_spin_unlock_irq(&lock->wait_lock);
if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
schedule_rtlock();
raw_spin_lock_irq(&lock->wait_lock);
set_current_state(TASK_RTLOCK_WAIT);
}
/* Restore the task state */
current_restore_rtlock_saved_state();
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally.
* We might have to fix that up:
*/
fixup_rt_mutex_waiters(lock, true);
debug_rt_mutex_free_waiter(&waiter);
trace_contention_end(lock, 0);
}
static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
{
unsigned long flags;
raw_spin_lock_irqsave(&lock->wait_lock, flags);
rtlock_slowlock_locked(lock);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
}
#endif /* RT_MUTEX_BUILD_SPINLOCKS */
| linux-master | kernel/locking/rtmutex.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* rtmutex API
*/
#include <linux/spinlock.h>
#include <linux/export.h>
#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"
/*
* Max number of times we'll walk the boosting chain:
*/
int max_lock_depth = 1024;
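/*
* This limit is also exposed as the kernel.max_lock_depth sysctl
* (/proc/sys/kernel/max_lock_depth), so the chain walk depth is
* tunable at runtime.
*/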
/*
* Debug aware fast / slowpath lock,trylock,unlock
*
* The atomic acquire/release ops are compiled away, when either the
* architecture does not support cmpxchg or when debugging is enabled.
*/
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
unsigned int state,
struct lockdep_map *nest_lock,
unsigned int subclass)
{
int ret;
might_sleep();
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
ret = __rt_mutex_lock(&lock->rtmutex, state);
if (ret)
mutex_release(&lock->dep_map, _RET_IP_);
return ret;
}
void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
__rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
* rt_mutex_lock_nested - lock a rt_mutex
*
* @lock: the rt_mutex to be locked
* @subclass: the lockdep subclass
*/
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
{
__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
}
EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);
#else /* !CONFIG_DEBUG_LOCK_ALLOC */
/**
* rt_mutex_lock - lock a rt_mutex
*
* @lock: the rt_mutex to be locked
*/
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif
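/*
* Illustrative usage sketch (not part of this file; 'struct foo' is a
* hypothetical caller-defined type): state guarded by an rt_mutex. The
* lock must be released by the same task that acquired it.
*
*	struct foo {
*		struct rt_mutex lock;
*		int state;
*	};
*
*	static void foo_set_state(struct foo *f, int state)
*	{
*		rt_mutex_lock(&f->lock);
*		f->state = state;
*		rt_mutex_unlock(&f->lock);
*	}
*/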
/**
* rt_mutex_lock_interruptible - lock a rt_mutex interruptible
*
* @lock: the rt_mutex to be locked
*
* Returns:
* 0 on success
* -EINTR when interrupted by a signal
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/**
* rt_mutex_lock_killable - lock a rt_mutex killable
*
* @lock: the rt_mutex to be locked
*
* Returns:
* 0 on success
* -EINTR when interrupted by a signal
*/
int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
{
return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
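/*
* Illustrative sketch (not part of this file; 'struct foo' is
* hypothetical): the interruptible/killable variants do not hold the
* lock when they return -EINTR, so the return value must be checked
* and the error path must not unlock.
*
*	static int foo_set_state_interruptible(struct foo *f, int state)
*	{
*		int ret = rt_mutex_lock_interruptible(&f->lock);
*
*		if (ret)
*			return ret;
*		f->state = state;
*		rt_mutex_unlock(&f->lock);
*		return 0;
*	}
*/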
/**
* rt_mutex_trylock - try to lock a rt_mutex
*
* @lock: the rt_mutex to be locked
*
* This function can only be called in thread context. It's safe to call it
* from atomic regions, but not from hard or soft interrupt context.
*
* Returns:
* 1 on success
* 0 on contention
*/
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
int ret;
if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
return 0;
ret = __rt_mutex_trylock(&lock->rtmutex);
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
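/*
* Illustrative sketch (not part of this file): like spin_trylock(),
* a non-zero return means the lock was taken and must be released.
*
*	if (rt_mutex_trylock(&f->lock)) {
*		f->state++;
*		rt_mutex_unlock(&f->lock);
*	}
*/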
/**
* rt_mutex_unlock - unlock a rt_mutex
*
* @lock: the rt_mutex to be unlocked
*/
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, _RET_IP_);
__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
/*
* Futex variants, must not use fastpath.
*/
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
return rt_mutex_slowtrylock(lock);
}
int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
return __rt_mutex_slowtrylock(lock);
}
/**
* __rt_mutex_futex_unlock - Futex variant; since futex variants do not
* use the fast path, it can be simple and does not need to retry.
*
* @lock: The rt_mutex to be unlocked
* @wqh: The wake queue head from which to get the next lock waiter
*/
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
struct rt_wake_q_head *wqh)
{
lockdep_assert_held(&lock->wait_lock);
debug_rt_mutex_unlock(lock);
if (!rt_mutex_has_waiters(lock)) {
lock->owner = NULL;
return false; /* done */
}
/*
* We've already deboosted, mark_wakeup_next_waiter() will
* retain preempt_disabled when we drop the wait_lock, to
* avoid inversion prior to the wakeup. preempt_disable()
* therein pairs with rt_mutex_postunlock().
*/
mark_wakeup_next_waiter(wqh, lock);
return true; /* call postunlock() */
}
void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
DEFINE_RT_WAKE_Q(wqh);
unsigned long flags;
bool postunlock;
raw_spin_lock_irqsave(&lock->wait_lock, flags);
postunlock = __rt_mutex_futex_unlock(lock, &wqh);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
if (postunlock)
rt_mutex_postunlock(&wqh);
}
/**
* __rt_mutex_init - initialize the rt_mutex
*
* @lock: The rt_mutex to be initialized
* @name: The lock name used for debugging
* @key: The lock class key used for debugging
*
* Initialize the rt_mutex to unlocked state.
*
* Initializing of a locked rt_mutex is not allowed
*/
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
struct lock_class_key *key)
{
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
__rt_mutex_base_init(&lock->rtmutex);
lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
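/*
* Illustrative sketch (not part of this file): callers normally go
* through the rt_mutex_init() wrapper macro or define locks statically
* with DEFINE_RT_MUTEX(); both supply the lockdep name and class key
* that __rt_mutex_init() expects.
*
*	static DEFINE_RT_MUTEX(foo_lock);
*
*	struct rt_mutex bar_lock;
*	rt_mutex_init(&bar_lock);
*/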
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
* proxy owner
*
* @lock: the rt_mutex to be locked
* @proxy_owner: the task to set as owner
*
* No locking. The caller has to provide serialization itself.
*
* Special API call for PI-futex support. This initializes the rtmutex and
* assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
* possible at this point because the pi_state which contains the rtmutex
* is not yet visible to other tasks.
*/
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
struct task_struct *proxy_owner)
{
static struct lock_class_key pi_futex_key;
__rt_mutex_base_init(lock);
/*
* On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
* and rtmutex based. That causes a lockdep false positive, because
* some of the futex functions invoke spin_unlock(&hb->lock) with
* the wait_lock of the rtmutex associated to the pi_futex held.
* spin_unlock() in turn takes wait_lock of the rtmutex on which
* the spinlock is based, which makes lockdep notice a lock
* recursion. Give the futex/rtmutex wait_lock a separate key.
*/
lockdep_set_class(&lock->wait_lock, &pi_futex_key);
rt_mutex_set_owner(lock, proxy_owner);
}
/**
* rt_mutex_proxy_unlock - release a lock on behalf of owner
*
* @lock: the rt_mutex to be locked
*
* No locking. The caller has to provide serialization itself.
*
* Special API call for PI-futex support. This just cleans up the rtmutex
* (debugging) state. Concurrent operations on this rt_mutex are not
* possible because it belongs to the pi_state which is about to be freed
* and it is no longer visible to other tasks.
*/
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
debug_rt_mutex_proxy_unlock(lock);
rt_mutex_clear_owner(lock);
}
/**
* __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
* @lock: the rt_mutex to take
* @waiter: the pre-initialized rt_mutex_waiter
* @task: the task to prepare
*
* Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
* detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
*
* NOTE: does _NOT_ remove the @waiter on failure; must either call
* rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
*
* Returns:
* 0 - task blocked on lock
* 1 - acquired the lock for task, caller should wake it up
* <0 - error
*
* Special API call for PI-futex support.
*/
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task)
{
int ret;
lockdep_assert_held(&lock->wait_lock);
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
/* We enforce deadlock detection for futexes */
ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
RT_MUTEX_FULL_CHAINWALK);
if (ret && !rt_mutex_owner(lock)) {
/*
* Reset the return value. We might have
* returned with -EDEADLK and the owner
* released the lock while we were walking the
* pi chain. Let the waiter sort it out.
*/
ret = 0;
}
return ret;
}
/**
* rt_mutex_start_proxy_lock() - Start lock acquisition for another task
* @lock: the rt_mutex to take
* @waiter: the pre-initialized rt_mutex_waiter
* @task: the task to prepare
*
* Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
* detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
*
* NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
* on failure.
*
* Returns:
* 0 - task blocked on lock
* 1 - acquired the lock for task, caller should wake it up
* <0 - error
*
* Special API call for PI-futex support.
*/
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task)
{
int ret;
raw_spin_lock_irq(&lock->wait_lock);
ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
if (unlikely(ret))
remove_waiter(lock, waiter);
raw_spin_unlock_irq(&lock->wait_lock);
return ret;
}
/**
* rt_mutex_wait_proxy_lock() - Wait for lock acquisition
* @lock: the rt_mutex we were woken on
* @to: the timeout, null if none. hrtimer should already have
* been started.
* @waiter: the pre-initialized rt_mutex_waiter
*
* Wait for the lock acquisition started on our behalf by
* rt_mutex_start_proxy_lock(). Upon failure, the caller must call
* rt_mutex_cleanup_proxy_lock().
*
* Returns:
* 0 - success
* <0 - error, one of -EINTR, -ETIMEDOUT
*
* Special API call for PI-futex support
*/
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
struct hrtimer_sleeper *to,
struct rt_mutex_waiter *waiter)
{
int ret;
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
*/
fixup_rt_mutex_waiters(lock, true);
raw_spin_unlock_irq(&lock->wait_lock);
return ret;
}
/**
* rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
* @lock: the rt_mutex we were woken on
* @waiter: the pre-initialized rt_mutex_waiter
*
* Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
* rt_mutex_wait_proxy_lock().
*
* Unless we acquired the lock, we're still enqueued on the wait-list and can
* in fact still be granted ownership until we're removed. Therefore we can
* find we are in fact the owner and must disregard the
* rt_mutex_wait_proxy_lock() failure.
*
* Returns:
* true - did the cleanup, we are done.
* false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
* caller should disregard its return value.
*
* Special API call for PI-futex support
*/
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter)
{
bool cleanup = false;
raw_spin_lock_irq(&lock->wait_lock);
/*
* Do an unconditional try-lock, this deals with the lock stealing
* state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
* sets a NULL owner.
*
* We're not interested in the return value, because the subsequent
* test on rt_mutex_owner() will infer that. If the trylock succeeded,
* we will own the lock and it will have removed the waiter. If we
* failed the trylock, we're still not owner and we need to remove
* ourselves.
*/
try_to_take_rt_mutex(lock, current, waiter);
/*
* Unless we're the owner, we're still enqueued on the wait_list.
* So check if we became owner, if not, take us off the wait_list.
*/
if (rt_mutex_owner(lock) != current) {
remove_waiter(lock, waiter);
cleanup = true;
}
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
*/
fixup_rt_mutex_waiters(lock, false);
raw_spin_unlock_irq(&lock->wait_lock);
return cleanup;
}
/*
* Recheck the pi chain, in case we got a priority setting
*
* Called from sched_setscheduler
*/
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
struct rt_mutex_waiter *waiter;
struct rt_mutex_base *next_lock;
unsigned long flags;
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
if (!waiter || rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
next_lock = waiter->lock;
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
next_lock, NULL, task);
}
/*
* Performs the wakeup of the top-waiter and re-enables preemption.
*/
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
rt_mutex_wake_up_q(wqh);
}
#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif
#ifdef CONFIG_PREEMPT_RT
/* Mutexes */
void __mutex_rt_init(struct mutex *mutex, const char *name,
struct lock_class_key *key)
{
debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);
static __always_inline int __mutex_lock_common(struct mutex *lock,
unsigned int state,
unsigned int subclass,
struct lockdep_map *nest_lock,
unsigned long ip)
{
int ret;
might_sleep();
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
ret = __rt_mutex_lock(&lock->rtmutex, state);
if (ret)
mutex_release(&lock->dep_map, ip);
else
lock_acquired(&lock->dep_map, ip);
return ret;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
void __sched _mutex_lock_nest_lock(struct mutex *lock,
struct lockdep_map *nest_lock)
{
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
int __sched mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass)
{
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
int __sched mutex_lock_killable_nested(struct mutex *lock,
unsigned int subclass)
{
return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
int token;
might_sleep();
token = io_schedule_prepare();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
#else /* CONFIG_DEBUG_LOCK_ALLOC */
void __sched mutex_lock(struct mutex *lock)
{
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);
int __sched mutex_lock_interruptible(struct mutex *lock)
{
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
int __sched mutex_lock_killable(struct mutex *lock)
{
return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);
void __sched mutex_lock_io(struct mutex *lock)
{
int token = io_schedule_prepare();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
int __sched mutex_trylock(struct mutex *lock)
{
int ret;
if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
return 0;
ret = __rt_mutex_trylock(&lock->rtmutex);
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(mutex_trylock);
void __sched mutex_unlock(struct mutex *lock)
{
mutex_release(&lock->dep_map, _RET_IP_);
__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);
#endif /* CONFIG_PREEMPT_RT */
| linux-master | kernel/locking/rtmutex_api.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* kernel/locking/mutex.c
*
* Mutexes: blocking mutual exclusion locks
*
* Started by Ingo Molnar:
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <[email protected]>
*
* Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
* David Howells for suggestions and improvements.
*
* - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
* from the -rt tree, where it was originally implemented for rtmutexes
* by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
* and Sven Dietrich.
*
* Also see Documentation/locking/mutex-design.rst.
*/
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>
#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"
#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
atomic_long_set(&lock->owner, 0);
raw_spin_lock_init(&lock->wait_lock);
INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
osq_lock_init(&lock->osq);
#endif
debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
/*
* @owner: contains: 'struct task_struct *' to the current lock owner,
* NULL means not owned. Since task_struct pointers are aligned to
* at least L1_CACHE_BYTES, we have low bits to store extra state.
*
* Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
* Bit1 indicates unlock needs to hand the lock to the top-waiter.
* Bit2 indicates handoff has been done and we're waiting for pickup.
*/
#define MUTEX_FLAG_WAITERS 0x01
#define MUTEX_FLAG_HANDOFF 0x02
#define MUTEX_FLAG_PICKUP 0x04
#define MUTEX_FLAGS 0x07
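/*
* Illustrative sketch (not part of this file): with a (hypothetical)
* task_struct at 0xffff888012345640, a locked mutex with pending
* waiters stores
*
*	owner = 0xffff888012345640 | MUTEX_FLAG_WAITERS
*	      = 0xffff888012345641
*
* __owner_task() below masks off the low flag bits to recover the
* pointer, while __owner_flags() keeps only them.
*/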
/*
* Internal helper function; C doesn't allow us to hide it :/
*
* DO NOT USE (outside of mutex code).
*/
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}
static inline struct task_struct *__owner_task(unsigned long owner)
{
return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}
bool mutex_is_locked(struct mutex *lock)
{
return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);
static inline unsigned long __owner_flags(unsigned long owner)
{
return owner & MUTEX_FLAGS;
}
/*
* Returns: __mutex_owner(lock) on failure or NULL on success.
*/
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
unsigned long owner, curr = (unsigned long)current;
owner = atomic_long_read(&lock->owner);
for (;;) { /* must loop, can race against a flag */
unsigned long flags = __owner_flags(owner);
unsigned long task = owner & ~MUTEX_FLAGS;
if (task) {
if (flags & MUTEX_FLAG_PICKUP) {
if (task != curr)
break;
flags &= ~MUTEX_FLAG_PICKUP;
} else if (handoff) {
if (flags & MUTEX_FLAG_HANDOFF)
break;
flags |= MUTEX_FLAG_HANDOFF;
} else {
break;
}
} else {
MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
task = curr;
}
if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
if (task == curr)
return NULL;
break;
}
}
return __owner_task(owner);
}
/*
* Trylock or set HANDOFF
*/
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
return !__mutex_trylock_common(lock, handoff);
}
/*
* Actual trylock that will work on any unlocked state.
*/
static inline bool __mutex_trylock(struct mutex *lock)
{
return !__mutex_trylock_common(lock, false);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
* Lockdep annotations are contained to the slow paths for simplicity.
* There is nothing that would stop spreading the lockdep annotations outwards
* except more code.
*/
/*
* Optimistic trylock that only works in the uncontended case. Make sure to
* follow with a __mutex_trylock() before failing.
*/
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
unsigned long curr = (unsigned long)current;
unsigned long zero = 0UL;
if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
return true;
return false;
}
static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
unsigned long curr = (unsigned long)current;
return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif
static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
atomic_long_or(flag, &lock->owner);
}
static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
atomic_long_andnot(flag, &lock->owner);
}
static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}
/*
* Add @waiter to a given location in the lock wait_list and set the
* FLAG_WAITERS flag if it's the first waiter.
*/
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct list_head *list)
{
debug_mutex_add_waiter(lock, waiter, current);
list_add_tail(&waiter->list, list);
if (__mutex_waiter_is_first(lock, waiter))
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}
static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
list_del(&waiter->list);
if (likely(list_empty(&lock->wait_list)))
__mutex_clear_flag(lock, MUTEX_FLAGS);
debug_mutex_remove_waiter(lock, waiter, current);
}
/*
* Give up ownership to a specific task; when @task = NULL, this is equivalent
* to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
* WAITERS. Provides RELEASE semantics like a regular unlock; the
* __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
*/
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
unsigned long owner = atomic_long_read(&lock->owner);
for (;;) {
unsigned long new;
MUTEX_WARN_ON(__owner_task(owner) != current);
MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
new = (owner & MUTEX_FLAG_WAITERS);
new |= (unsigned long)task;
if (task)
new |= MUTEX_FLAG_PICKUP;
if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
break;
}
}
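/*
* Illustrative state sketch (not part of this file): handing off to
* top waiter T transitions the owner word from
*
*	current | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
* to
*	T | MUTEX_FLAG_WAITERS | MUTEX_FLAG_PICKUP
*
* and T's subsequent __mutex_trylock() clears PICKUP when it takes
* ownership.
*/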
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
* We split the mutex lock/unlock logic into separate fastpath and
* slowpath functions, to reduce the register pressure on the fastpath.
* We also put the fastpath first in the kernel image, to make sure the
* branch is predicted by the CPU as default-untaken.
*/
static void __sched __mutex_lock_slowpath(struct mutex *lock);
/**
* mutex_lock - acquire the mutex
* @lock: the mutex to be acquired
*
* Lock the mutex exclusively for this task. If the mutex is not
* available right now, it will sleep until it can get it.
*
* The mutex must later on be released by the same task that
* acquired it. Recursive locking is not allowed. The task
* may not exit without first unlocking the mutex. Also, kernel
* memory where the mutex resides must not be freed with
* the mutex still locked. The mutex must first be initialized
* (or statically defined) before it can be locked. memset()-ing
* the mutex to 0 is not allowed.
*
* (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
* checks that will enforce the restrictions and will also do
* deadlock debugging)
*
* This function is similar to (but not equivalent to) down().
*/
void __sched mutex_lock(struct mutex *lock)
{
might_sleep();
if (!__mutex_trylock_fast(lock))
__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
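/*
* Illustrative usage sketch (not part of this file; 'struct foo' is
* hypothetical): the canonical pattern guarding caller-owned state.
*
*	struct foo {
*		struct mutex lock;
*		int count;
*	};
*
*	static void foo_inc(struct foo *f)
*	{
*		mutex_lock(&f->lock);
*		f->count++;
*		mutex_unlock(&f->lock);
*	}
*/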
#include "ww_mutex.h"
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
* Trylock variant that returns the owning task on failure.
*/
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
return __mutex_trylock_common(lock, false);
}
static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
struct mutex_waiter *waiter)
{
struct ww_mutex *ww;
ww = container_of(lock, struct ww_mutex, base);
/*
* If ww->ctx is set, the contents are undefined; only
* by acquiring wait_lock is there a guarantee that
* they are not invalid when reading.
*
* As such, when deadlock detection needs to be
* performed the optimistic spinning cannot be done.
*
* Check this in every inner iteration because we may
* be racing against another thread's ww_mutex_lock.
*/
if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
return false;
/*
* If we aren't on the wait list yet, cancel the spin
* if there are waiters. We want to avoid stealing the
* lock from a waiter with an earlier stamp, since the
* other thread may already own a lock that we also
* need.
*/
if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
return false;
/*
* Similarly, stop spinning if we are no longer the
* first waiter.
*/
if (waiter && !__mutex_waiter_is_first(lock, waiter))
return false;
return true;
}
/*
* Look out! "owner" is an entirely speculative pointer access and not
* reliable.
*
* "noinline" so that this function shows up on perf profiles.
*/
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
bool ret = true;
lockdep_assert_preemption_disabled();
while (__mutex_owner(lock) == owner) {
/*
* Ensure we emit the owner->on_cpu dereference _after_
* checking that lock->owner still matches owner. We already
* disabled preemption, which is equivalent to the RCU read-side
* critical section in optimistic spinning code, so the
* task_struct won't go away during the spinning
* period.
*/
barrier();
/*
* Use vcpu_is_preempted to detect lock holder preemption issue.
*/
if (!owner_on_cpu(owner) || need_resched()) {
ret = false;
break;
}
if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
ret = false;
break;
}
cpu_relax();
}
return ret;
}
/*
* Initial check for entering the mutex spinning loop
*/
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
struct task_struct *owner;
int retval = 1;
lockdep_assert_preemption_disabled();
if (need_resched())
return 0;
/*
* We already disabled preemption, which is equivalent to the RCU read-side
* critical section in optimistic spinning code. Thus the task_struct
* structure won't go away during the spinning period.
*/
owner = __mutex_owner(lock);
if (owner)
retval = owner_on_cpu(owner);
/*
* If lock->owner is not set, the mutex has been released. Return true
* such that we'll trylock in the spin path, which is a faster option
* than the blocking slow path.
*/
return retval;
}
/*
* Optimistic spinning.
*
* We try to spin for acquisition when we find that the lock owner
* is currently running on a (different) CPU and while we don't
* need to reschedule. The rationale is that if the lock owner is
* running, it is likely to release the lock soon.
*
* The mutex spinners are queued up using MCS lock so that only one
* spinner can compete for the mutex. However, if mutex spinning isn't
* going to happen, there is no point in going through the lock/unlock
* overhead.
*
* Returns true when the lock was taken, otherwise false, indicating
* that we need to jump to the slowpath and sleep.
*
* The waiter flag is set to true if the spinner is a waiter in the wait
* queue. The waiter-spinner will spin on the lock directly and concurrently
* with the spinner at the head of the OSQ, if present, until the owner is
* changed to itself.
*/
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
struct mutex_waiter *waiter)
{
if (!waiter) {
/*
* The purpose of the mutex_can_spin_on_owner() function is
* to eliminate the overhead of osq_lock() and osq_unlock()
* in case spinning isn't possible. As a waiter-spinner
* is not going to take OSQ lock anyway, there is no need
* to call mutex_can_spin_on_owner().
*/
if (!mutex_can_spin_on_owner(lock))
goto fail;
/*
* In order to avoid a stampede of mutex spinners trying to
* acquire the mutex all at once, the spinners need to take a
* MCS (queued) lock first before spinning on the owner field.
*/
if (!osq_lock(&lock->osq))
goto fail;
}
for (;;) {
struct task_struct *owner;
/* Try to acquire the mutex... */
owner = __mutex_trylock_or_owner(lock);
if (!owner)
break;
/*
* There's an owner, wait for it to either
* release the lock or go to sleep.
*/
if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
goto fail_unlock;
/*
* The cpu_relax() call is a compiler barrier which forces
* everything in this loop to be re-loaded. We don't need
* memory barriers as we'll eventually observe the right
* values at the cost of a few extra spins.
*/
cpu_relax();
}
if (!waiter)
osq_unlock(&lock->osq);
return true;
fail_unlock:
if (!waiter)
osq_unlock(&lock->osq);
fail:
/*
* If we fell out of the spin path because of need_resched(),
* reschedule now, before we try-lock the mutex. This avoids getting
* scheduled out right after we obtained the mutex.
*/
if (need_resched()) {
/*
* We _should_ have TASK_RUNNING here, but just in case
* we do not, make it so, otherwise we might get stuck.
*/
__set_current_state(TASK_RUNNING);
schedule_preempt_disabled();
}
return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
struct mutex_waiter *waiter)
{
return false;
}
#endif
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
/**
* mutex_unlock - release the mutex
* @lock: the mutex to be released
*
* Unlock a mutex that has been locked by this task previously.
*
* This function must not be used in interrupt context. Unlocking
* of a mutex that is not locked is not allowed.
*
* This function is similar to (but not equivalent to) up().
*/
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
if (__mutex_unlock_fast(lock))
return;
#endif
__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);
/**
* ww_mutex_unlock - release the w/w mutex
* @lock: the mutex to be released
*
* Unlock a mutex that has been locked by this task previously with any of the
* ww_mutex_lock* functions (with or without an acquire context). It is
* forbidden to release the locks after releasing the acquire context.
*
* This function must not be used in interrupt context. Unlocking
* of an unlocked mutex is not allowed.
*/
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
__ww_mutex_unlock(lock);
mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
/*
* Lock a mutex (possibly interruptible), slowpath:
*/
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip,
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
struct mutex_waiter waiter;
struct ww_mutex *ww;
int ret;
if (!use_ww_ctx)
ww_ctx = NULL;
might_sleep();
MUTEX_WARN_ON(lock->magic != lock);
ww = container_of(lock, struct ww_mutex, base);
if (ww_ctx) {
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
return -EALREADY;
/*
* Reset the wounded flag after a kill. No other process can
* race and wound us here since they can't have a valid owner
* pointer if we don't have any locks held.
*/
if (ww_ctx->acquired == 0)
ww_ctx->wounded = 0;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
nest_lock = &ww_ctx->dep_map;
#endif
}
preempt_disable();
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
if (__mutex_trylock(lock) ||
mutex_optimistic_spin(lock, ww_ctx, NULL)) {
/* got the lock, yay! */
lock_acquired(&lock->dep_map, ip);
if (ww_ctx)
ww_mutex_set_context_fastpath(ww, ww_ctx);
trace_contention_end(lock, 0);
preempt_enable();
return 0;
}
raw_spin_lock(&lock->wait_lock);
/*
* After waiting to acquire the wait_lock, try again.
*/
if (__mutex_trylock(lock)) {
if (ww_ctx)
__ww_mutex_check_waiters(lock, ww_ctx);
goto skip_wait;
}
debug_mutex_lock_common(lock, &waiter);
waiter.task = current;
if (use_ww_ctx)
waiter.ww_ctx = ww_ctx;
lock_contended(&lock->dep_map, ip);
if (!use_ww_ctx) {
/* add waiting tasks to the end of the waitqueue (FIFO): */
__mutex_add_waiter(lock, &waiter, &lock->wait_list);
} else {
/*
* Add in stamp order, waking up waiters that must kill
* themselves.
*/
ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
if (ret)
goto err_early_kill;
}
set_current_state(state);
trace_contention_begin(lock, LCB_F_MUTEX);
for (;;) {
bool first;
/*
* Once we hold wait_lock, we're serialized against
* mutex_unlock() handing the lock off to us, do a trylock
* before testing the error conditions to make sure we pick up
* the handoff.
*/
if (__mutex_trylock(lock))
goto acquired;
/*
* Check for signals and kill conditions while holding
* wait_lock. This ensures the lock cancellation is ordered
* against mutex_unlock() and wake-ups do not go missing.
*/
if (signal_pending_state(state, current)) {
ret = -EINTR;
goto err;
}
if (ww_ctx) {
ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
if (ret)
goto err;
}
raw_spin_unlock(&lock->wait_lock);
schedule_preempt_disabled();
first = __mutex_waiter_is_first(lock, &waiter);
set_current_state(state);
/*
* Here we order against unlock; we must either see it change
* state back to RUNNING and fall through the next schedule(),
* or we must see its unlock and acquire.
*/
if (__mutex_trylock_or_handoff(lock, first))
break;
if (first) {
trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
break;
trace_contention_begin(lock, LCB_F_MUTEX);
}
raw_spin_lock(&lock->wait_lock);
}
raw_spin_lock(&lock->wait_lock);
acquired:
__set_current_state(TASK_RUNNING);
if (ww_ctx) {
/*
* Wound-Wait; we stole the lock (!first_waiter), check the
* waiters as anyone might want to wound us.
*/
if (!ww_ctx->is_wait_die &&
!__mutex_waiter_is_first(lock, &waiter))
__ww_mutex_check_waiters(lock, ww_ctx);
}
__mutex_remove_waiter(lock, &waiter);
debug_mutex_free_waiter(&waiter);
skip_wait:
/* got the lock - cleanup and rejoice! */
lock_acquired(&lock->dep_map, ip);
trace_contention_end(lock, 0);
if (ww_ctx)
ww_mutex_lock_acquired(ww, ww_ctx);
raw_spin_unlock(&lock->wait_lock);
preempt_enable();
return 0;
err:
__set_current_state(TASK_RUNNING);
__mutex_remove_waiter(lock, &waiter);
err_early_kill:
trace_contention_end(lock, ret);
raw_spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
mutex_release(&lock->dep_map, ip);
preempt_enable();
return ret;
}
static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip)
{
return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}
static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}
/**
* ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
* @ww: mutex to lock
* @ww_ctx: optional w/w acquire context
*
* Trylocks a mutex with the optional acquire context; no deadlock detection is
* possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
*
* Unlike ww_mutex_lock, no deadlock handling is performed. However, if a @ctx is
* specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
if (!ww_ctx)
return mutex_trylock(&ww->base);
MUTEX_WARN_ON(ww->base.magic != &ww->base);
/*
* Reset the wounded flag after a kill. No other process can
* race and wound us here, since they can't have a valid owner
* pointer if we don't have any locks held.
*/
if (ww_ctx->acquired == 0)
ww_ctx->wounded = 0;
if (__mutex_trylock(&ww->base)) {
ww_mutex_set_context_fastpath(ww, ww_ctx);
mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
return 1;
}
return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
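/*
* Illustrative sketch (not part of this file) of the documented
* acquire/backoff pattern; see Documentation/locking/ww-mutex-design.rst.
* 'ww_class', 'lock_a' and 'lock_b' are hypothetical, and a full
* implementation loops on further -EDEADLK returns.
*
*	struct ww_acquire_ctx ctx;
*	int ret;
*
*	ww_acquire_init(&ctx, &ww_class);
*	ret = ww_mutex_lock(&lock_a, &ctx);
*	if (!ret) {
*		ret = ww_mutex_lock(&lock_b, &ctx);
*		if (ret == -EDEADLK) {
*			ww_mutex_unlock(&lock_a);
*			ww_mutex_lock_slow(&lock_b, &ctx);
*			ret = ww_mutex_lock(&lock_a, &ctx);
*		}
*	}
*	ww_acquire_done(&ctx);
*	... critical section ...
*	ww_mutex_unlock(&lock_a);
*	ww_mutex_unlock(&lock_b);
*	ww_acquire_fini(&ctx);
*/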
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
int token;
might_sleep();
token = io_schedule_prepare();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
subclass, NULL, _RET_IP_, NULL, 0);
io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
unsigned tmp;
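/*
* Once the countdown reaches zero, grow the injection interval by
* roughly 3.5x (tmp*2 + tmp + tmp/2), clamped against overflow, so
* the forced -EDEADLK injections become progressively rarer.
*/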
if (ctx->deadlock_inject_countdown-- == 0) {
tmp = ctx->deadlock_inject_interval;
if (tmp > UINT_MAX/4)
tmp = UINT_MAX;
else
tmp = tmp*2 + tmp + tmp/2;
ctx->deadlock_inject_interval = tmp;
ctx->deadlock_inject_countdown = tmp;
ctx->contending_lock = lock;
ww_mutex_unlock(lock);
return -EDEADLK;
}
#endif
return 0;
}
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;
might_sleep();
ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
0, _RET_IP_, ctx);
if (!ret && ctx && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);
int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;
might_sleep();
ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
0, _RET_IP_, ctx);
if (!ret && ctx && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
#endif
/*
* Release the lock, slowpath:
*/
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
struct task_struct *next = NULL;
DEFINE_WAKE_Q(wake_q);
unsigned long owner;
mutex_release(&lock->dep_map, ip);
/*
* Release the lock before (potentially) taking the spinlock such that
* other contenders can get on with things ASAP.
*
* Except when HANDOFF, in that case we must not clear the owner field,
* but instead set it to the top waiter.
*/
owner = atomic_long_read(&lock->owner);
for (;;) {
MUTEX_WARN_ON(__owner_task(owner) != current);
MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
if (owner & MUTEX_FLAG_HANDOFF)
break;
if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
if (owner & MUTEX_FLAG_WAITERS)
break;
return;
}
}
raw_spin_lock(&lock->wait_lock);
debug_mutex_unlock(lock);
if (!list_empty(&lock->wait_list)) {
/* get the first entry from the wait-list: */
struct mutex_waiter *waiter =
list_first_entry(&lock->wait_list,
struct mutex_waiter, list);
next = waiter->task;
debug_mutex_wake_waiter(lock, waiter);
wake_q_add(&wake_q, next);
}
if (owner & MUTEX_FLAG_HANDOFF)
__mutex_handoff(lock, next);
raw_spin_unlock(&lock->wait_lock);
wake_up_q(&wake_q);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
* Here come the less common (and hence less performance-critical) APIs:
* mutex_lock_interruptible() and mutex_trylock().
*/
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);
static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
* mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
* @lock: The mutex to be acquired.
*
* Lock the mutex like mutex_lock(). If a signal is delivered while the
* process is sleeping, this function will return without acquiring the
* mutex.
*
* Context: Process context.
* Return: 0 if the lock was successfully acquired or %-EINTR if a
* signal arrived.
*/
int __sched mutex_lock_interruptible(struct mutex *lock)
{
might_sleep();
if (__mutex_trylock_fast(lock))
return 0;
return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
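/*
* Illustrative sketch (not part of this file; 'struct foo' is
* hypothetical): -EINTR means the mutex was never acquired, so the
* caller must not unlock on that path.
*
*	static int foo_inc_interruptible(struct foo *f)
*	{
*		int ret = mutex_lock_interruptible(&f->lock);
*
*		if (ret)
*			return ret;
*		f->count++;
*		mutex_unlock(&f->lock);
*		return 0;
*	}
*/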
/**
* mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
* @lock: The mutex to be acquired.
*
* Lock the mutex like mutex_lock(). If a signal which will be fatal to
* the current process is delivered while the process is sleeping, this
* function will return without acquiring the mutex.
*
* Context: Process context.
* Return: 0 if the lock was successfully acquired or %-EINTR if a
* fatal signal arrived.
*/
int __sched mutex_lock_killable(struct mutex *lock)
{
might_sleep();
if (__mutex_trylock_fast(lock))
return 0;
return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
/**
* mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
* @lock: The mutex to be acquired.
*
* Lock the mutex like mutex_lock(). While the task is waiting for this
* mutex, it will be accounted as being in the IO wait state by the
* scheduler.
*
* Context: Process context.
*/
void __sched mutex_lock_io(struct mutex *lock)
{
int token;
token = io_schedule_prepare();
mutex_lock(lock);
io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);
static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
_RET_IP_, ctx);
}
static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
_RET_IP_, ctx);
}
#endif
/**
* mutex_trylock - try to acquire the mutex, without waiting
* @lock: the mutex to be acquired
*
* Try to acquire the mutex atomically. Returns 1 if the mutex
* has been acquired successfully, and 0 on contention.
*
* NOTE: this function follows the spin_trylock() convention, so
* it is negated from the down_trylock() return values! Be careful
* about this when converting semaphore users to mutexes.
*
* This function must not be used in interrupt context. The
* mutex must be released by the same task that acquired it.
*/
int __sched mutex_trylock(struct mutex *lock)
{
bool locked;
MUTEX_WARN_ON(lock->magic != lock);
locked = __mutex_trylock(lock);
if (locked)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return locked;
}
EXPORT_SYMBOL(mutex_trylock);
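/*
 * Editor's sketch (illustrative only, compiled out): because
 * mutex_trylock() follows the spin_trylock() convention (1 == acquired),
 * the natural idiom reads as below. All names are hypothetical.
 */
#if 0
static void example_poll(struct example_dev *dev)
{
	if (mutex_trylock(&dev->lock)) {
		/* uncontended: do the optional work now */
		example_do_work(dev);
		mutex_unlock(&dev->lock);
	} else {
		/* contended: skip or defer, never spin here */
	}
}
#endif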
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
might_sleep();
if (__mutex_trylock_fast(&lock->base)) {
if (ctx)
ww_mutex_set_context_fastpath(lock, ctx);
return 0;
}
return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);
int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
might_sleep();
if (__mutex_trylock_fast(&lock->base)) {
if (ctx)
ww_mutex_set_context_fastpath(lock, ctx);
return 0;
}
return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */
/**
* atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
* @cnt: the atomic which we are to dec
* @lock: the mutex to return holding if we dec to 0
*
* return true and hold lock if we dec to 0, return false otherwise
*/
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
/* dec if we can't possibly hit 0 */
if (atomic_add_unless(cnt, -1, 1))
return 0;
/* we might hit 0, so take the lock */
mutex_lock(lock);
if (!atomic_dec_and_test(cnt)) {
/* when we actually did the dec, we didn't hit 0 */
mutex_unlock(lock);
return 0;
}
/* we hit 0, and we hold the lock */
return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
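/*
 * Editor's sketch (illustrative only, compiled out): the classic use of
 * atomic_dec_and_mutex_lock() is refcounted teardown, where the final put
 * must remove the object from a list under the list lock. All names are
 * hypothetical.
 */
#if 0
static void example_obj_put(struct example_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcnt, &example_list_lock))
		return;			/* refcount still non-zero */
	/* refcount hit 0 and example_list_lock is now held */
	list_del(&obj->node);
	mutex_unlock(&example_list_lock);
	kfree(obj);
}
#endif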
| linux-master | kernel/locking/mutex.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/irqflags.h>
noinstr void warn_bogus_irq_restore(void)
{
instrumentation_begin();
WARN_ONCE(1, "raw_local_irq_restore() called with IRQs enabled\n");
instrumentation_end();
}
EXPORT_SYMBOL(warn_bogus_irq_restore);
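/*
 * Editor's sketch (illustrative only, compiled out): the kind of bug this
 * warning flags - restoring a saved flags word while interrupts are already
 * enabled - assuming CONFIG_DEBUG_IRQFLAGS hooks the restore path into
 * warn_bogus_irq_restore().
 */
#if 0
static void example_bad_restore(void)
{
	unsigned long flags;

	local_irq_save(flags);
	local_irq_enable();		/* buggy: IRQs back on under 'flags' */
	local_irq_restore(flags);	/* restore with IRQs enabled -> warning */
}
#endif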
| linux-master | kernel/locking/irqflag-debug.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* RT-specific reader/writer semaphores and reader/writer locks
*
* down_write/write_lock()
* 1) Lock rtmutex
* 2) Remove the reader BIAS to force readers into the slow path
* 3) Wait until all readers have left the critical section
* 4) Mark it write locked
*
* up_write/write_unlock()
* 1) Remove the write locked marker
* 2) Set the reader BIAS, so readers can use the fast path again
* 3) Unlock rtmutex, to release blocked readers
*
* down_read/read_lock()
* 1) Try fast path acquisition (reader BIAS is set)
 * 2) Take rtmutex::wait_lock, which protects the writelocked flag
 * 3) If !writelocked, acquire it for read
 * 4) If writelocked, block on rtmutex
* 5) unlock rtmutex, goto 1)
*
* up_read/read_unlock()
* 1) Try fast path release (reader count != 1)
* 2) Wake the writer waiting in down_write()/write_lock() #3
*
 * down_read/read_lock() #3 has the consequence that rw semaphores and rw
 * locks on RT are not writer fair. However writers, which should be avoided
 * in RT tasks (think mmap_sem), are subject to the rtmutex priority/DL
 * inheritance mechanism.
*
* It's possible to make the rw primitives writer fair by keeping a list of
* active readers. A blocked writer would force all newly incoming readers
* to block on the rtmutex, but the rtmutex would have to be proxy locked
* for one reader after the other. We can't use multi-reader inheritance
* because there is no way to support that with SCHED_DEADLINE.
* Implementing the one by one reader boosting/handover mechanism is a
* major surgery for a very dubious value.
*
* The risk of writer starvation is there, but the pathological use cases
* which trigger it are not necessarily the typical RT workloads.
*
* Fast-path orderings:
* The lock/unlock of readers can run in fast paths: lock and unlock are only
* atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
* semantics of rwbase_rt. Atomic ops should thus provide _acquire()
* and _release() (or stronger).
*
* Common code shared between RT rw_semaphore and rwlock
*/
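/*
 * Editor's note (illustrative; the BIAS constants are defined in the
 * rwbase_rt header): with 'readers' initialized to READER_BIAS, which is
 * negative as a signed int, the counter encodes:
 *
 *	readers < 0	no writer, reader fast path permitted
 *	readers >= 0	a writer removed the bias, readers take the slow path
 */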
static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
int r;
/*
	 * Increment the reader count if sem->readers < 0, i.e. READER_BIAS
	 * is set.
*/
for (r = atomic_read(&rwb->readers); r < 0;) {
if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
return 1;
}
return 0;
}
static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
unsigned int state)
{
struct rt_mutex_base *rtm = &rwb->rtmutex;
int ret;
raw_spin_lock_irq(&rtm->wait_lock);
/*
* Call into the slow lock path with the rtmutex->wait_lock
* held, so this can't result in the following race:
*
* Reader1 Reader2 Writer
* down_read()
* down_write()
* rtmutex_lock(m)
* wait()
* down_read()
* unlock(m->wait_lock)
* up_read()
* wake(Writer)
* lock(m->wait_lock)
* sem->writelocked=true
* unlock(m->wait_lock)
*
* up_write()
* sem->writelocked=false
* rtmutex_unlock(m)
* down_read()
* down_write()
* rtmutex_lock(m)
* wait()
* rtmutex_lock(m)
*
* That would put Reader1 behind the writer waiting on
* Reader2 to call up_read(), which might be unbound.
*/
trace_contention_begin(rwb, LCB_F_RT | LCB_F_READ);
/*
* For rwlocks this returns 0 unconditionally, so the below
* !ret conditionals are optimized out.
*/
ret = rwbase_rtmutex_slowlock_locked(rtm, state);
/*
* On success the rtmutex is held, so there can't be a writer
* active. Increment the reader count and immediately drop the
* rtmutex again.
*
* rtmutex->wait_lock has to be unlocked in any case of course.
*/
if (!ret)
atomic_inc(&rwb->readers);
raw_spin_unlock_irq(&rtm->wait_lock);
if (!ret)
rwbase_rtmutex_unlock(rtm);
trace_contention_end(rwb, ret);
return ret;
}
static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
unsigned int state)
{
if (rwbase_read_trylock(rwb))
return 0;
return __rwbase_read_lock(rwb, state);
}
static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
unsigned int state)
{
struct rt_mutex_base *rtm = &rwb->rtmutex;
struct task_struct *owner;
DEFINE_RT_WAKE_Q(wqh);
raw_spin_lock_irq(&rtm->wait_lock);
/*
* Wake the writer, i.e. the rtmutex owner. It might release the
* rtmutex concurrently in the fast path (due to a signal), but to
* clean up rwb->readers it needs to acquire rtm->wait_lock. The
* worst case which can happen is a spurious wakeup.
*/
owner = rt_mutex_owner(rtm);
if (owner)
rt_mutex_wake_q_add_task(&wqh, owner, state);
/* Pairs with the preempt_enable in rt_mutex_wake_up_q() */
preempt_disable();
raw_spin_unlock_irq(&rtm->wait_lock);
rt_mutex_wake_up_q(&wqh);
}
static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
unsigned int state)
{
/*
* rwb->readers can only hit 0 when a writer is waiting for the
* active readers to leave the critical section.
*
* dec_and_test() is fully ordered, provides RELEASE.
*/
if (unlikely(atomic_dec_and_test(&rwb->readers)))
__rwbase_read_unlock(rwb, state);
}
static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
unsigned long flags)
{
struct rt_mutex_base *rtm = &rwb->rtmutex;
/*
* _release() is needed in case that reader is in fast path, pairing
* with atomic_try_cmpxchg_acquire() in rwbase_read_trylock().
*/
(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
rwbase_rtmutex_unlock(rtm);
}
static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
struct rt_mutex_base *rtm = &rwb->rtmutex;
unsigned long flags;
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
__rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}
static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
struct rt_mutex_base *rtm = &rwb->rtmutex;
unsigned long flags;
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
/* Release it and account current as reader */
__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}
static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
{
/* Can do without CAS because we're serialized by wait_lock. */
lockdep_assert_held(&rwb->rtmutex.wait_lock);
/*
	 * _acquire is needed in case the reader is in the fast path; pairing
	 * with rwbase_read_unlock(), it provides ACQUIRE semantics.
*/
if (!atomic_read_acquire(&rwb->readers)) {
atomic_set(&rwb->readers, WRITER_BIAS);
		return true;
}
	return false;
}
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
unsigned int state)
{
struct rt_mutex_base *rtm = &rwb->rtmutex;
unsigned long flags;
/* Take the rtmutex as a first step */
if (rwbase_rtmutex_lock_state(rtm, state))
return -EINTR;
/* Force readers into slow path */
atomic_sub(READER_BIAS, &rwb->readers);
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
if (__rwbase_write_trylock(rwb))
goto out_unlock;
rwbase_set_and_save_current_state(state);
trace_contention_begin(rwb, LCB_F_RT | LCB_F_WRITE);
for (;;) {
/* Optimized out for rwlocks */
if (rwbase_signal_pending_state(state, current)) {
rwbase_restore_current_state();
__rwbase_write_unlock(rwb, 0, flags);
trace_contention_end(rwb, -EINTR);
return -EINTR;
}
if (__rwbase_write_trylock(rwb))
break;
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
rwbase_schedule();
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
set_current_state(state);
}
rwbase_restore_current_state();
trace_contention_end(rwb, 0);
out_unlock:
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
return 0;
}
static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
struct rt_mutex_base *rtm = &rwb->rtmutex;
unsigned long flags;
if (!rwbase_rtmutex_trylock(rtm))
return 0;
atomic_sub(READER_BIAS, &rwb->readers);
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
if (__rwbase_write_trylock(rwb)) {
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
return 1;
}
__rwbase_write_unlock(rwb, 0, flags);
return 0;
}
| linux-master | kernel/locking/rwbase_rt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Module-based torture test facility for locking
*
* Copyright (C) IBM Corporation, 2014
*
* Authors: Paul E. McKenney <[email protected]>
* Davidlohr Bueso <[email protected]>
* Based on kernel/rcu/torture.c.
*/
#define pr_fmt(fmt) fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <[email protected]>");
torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, rt_boost, 2,
"Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8
static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
"Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;
struct lock_stress_stats {
long n_lock_fail;
long n_lock_acquired;
};
/* Forward reference. */
static void lock_torture_cleanup(void);
/*
* Operations vector for selecting different types of tests.
*/
struct lock_torture_ops {
void (*init)(void);
void (*exit)(void);
int (*nested_lock)(int tid, u32 lockset);
int (*writelock)(int tid);
void (*write_delay)(struct torture_random_state *trsp);
void (*task_boost)(struct torture_random_state *trsp);
void (*writeunlock)(int tid);
void (*nested_unlock)(int tid, u32 lockset);
int (*readlock)(int tid);
void (*read_delay)(struct torture_random_state *trsp);
void (*readunlock)(int tid);
unsigned long flags; /* for irq spinlocks */
const char *name;
};
struct lock_torture_cxt {
int nrealwriters_stress;
int nrealreaders_stress;
bool debug_lock;
bool init_called;
atomic_t n_lock_torture_errors;
struct lock_torture_ops *cur_ops;
struct lock_stress_stats *lwsa; /* writer statistics */
struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
ATOMIC_INIT(0),
NULL, NULL};
/*
* Definitions for lock torture testing.
*/
static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
return 0; /* BUGGY, do not use in real life!!! */
}
static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
/* BUGGY, do not use in real life!!! */
}
static void __torture_rt_boost(struct torture_random_state *trsp)
{
const unsigned int factor = rt_boost_factor;
if (!rt_task(current)) {
/*
* Boost priority once every rt_boost_factor operations. When
		 * the task tries to take the lock, the rtmutex will account
* for the new priority, and do any corresponding pi-dance.
*/
if (trsp && !(torture_random(trsp) %
(cxt.nrealwriters_stress * factor))) {
sched_set_fifo(current);
} else /* common case, do nothing */
return;
} else {
/*
* The task will remain boosted for another 10 * rt_boost_factor
* operations, then restored back to its original prio, and so
* forth.
*
		 * When @trsp is NULL, we want to force-reset the task's
		 * priority when stopping the kthread.
*/
if (!trsp || !(torture_random(trsp) %
(cxt.nrealwriters_stress * factor * 2))) {
sched_set_normal(current, 0);
} else /* common case, do nothing */
return;
}
}
static void torture_rt_boost(struct torture_random_state *trsp)
{
if (rt_boost != 2)
return;
__torture_rt_boost(trsp);
}
static struct lock_torture_ops lock_busted_ops = {
.writelock = torture_lock_busted_write_lock,
.write_delay = torture_lock_busted_write_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_lock_busted_write_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "lock_busted"
};
static DEFINE_SPINLOCK(torture_spinlock);
static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
spin_lock(&torture_spinlock);
return 0;
}
static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
const unsigned long shortdelay_us = 2;
const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
unsigned long j;
/* We want a short delay mostly to emulate likely code, and
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * longdelay_ms))) {
j = jiffies;
mdelay(longdelay_ms);
pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
}
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
udelay(shortdelay_us);
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
spin_unlock(&torture_spinlock);
}
static struct lock_torture_ops spin_lock_ops = {
.writelock = torture_spin_lock_write_lock,
.write_delay = torture_spin_lock_write_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_spin_lock_write_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "spin_lock"
};
static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
unsigned long flags;
spin_lock_irqsave(&torture_spinlock, flags);
cxt.cur_ops->flags = flags;
return 0;
}
static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops spin_lock_irq_ops = {
.writelock = torture_spin_lock_write_lock_irq,
.write_delay = torture_spin_lock_write_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_lock_spin_write_unlock_irq,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "spin_lock_irq"
};
static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);
static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
raw_spin_lock(&torture_raw_spinlock);
return 0;
}
static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
raw_spin_unlock(&torture_raw_spinlock);
}
static struct lock_torture_ops raw_spin_lock_ops = {
.writelock = torture_raw_spin_lock_write_lock,
.write_delay = torture_spin_lock_write_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_raw_spin_lock_write_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "raw_spin_lock"
};
static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
unsigned long flags;
raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
cxt.cur_ops->flags = flags;
return 0;
}
static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops raw_spin_lock_irq_ops = {
.writelock = torture_raw_spin_lock_write_lock_irq,
.write_delay = torture_spin_lock_write_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_raw_spin_lock_write_unlock_irq,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "raw_spin_lock_irq"
};
static DEFINE_RWLOCK(torture_rwlock);
static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
write_lock(&torture_rwlock);
return 0;
}
static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
const unsigned long shortdelay_us = 2;
const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
/* We want a short delay mostly to emulate likely code, and
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
else
udelay(shortdelay_us);
}
static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
write_unlock(&torture_rwlock);
}
static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
read_lock(&torture_rwlock);
return 0;
}
static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
const unsigned long shortdelay_us = 10;
const unsigned long longdelay_ms = 100;
/* We want a short delay mostly to emulate likely code, and
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
(cxt.nrealreaders_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
else
udelay(shortdelay_us);
}
static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
read_unlock(&torture_rwlock);
}
static struct lock_torture_ops rw_lock_ops = {
.writelock = torture_rwlock_write_lock,
.write_delay = torture_rwlock_write_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_rwlock_write_unlock,
.readlock = torture_rwlock_read_lock,
.read_delay = torture_rwlock_read_delay,
.readunlock = torture_rwlock_read_unlock,
.name = "rw_lock"
};
static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
unsigned long flags;
write_lock_irqsave(&torture_rwlock, flags);
cxt.cur_ops->flags = flags;
return 0;
}
static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
unsigned long flags;
read_lock_irqsave(&torture_rwlock, flags);
cxt.cur_ops->flags = flags;
return 0;
}
static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops rw_lock_irq_ops = {
.writelock = torture_rwlock_write_lock_irq,
.write_delay = torture_rwlock_write_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_rwlock_write_unlock_irq,
.readlock = torture_rwlock_read_lock_irq,
.read_delay = torture_rwlock_read_delay,
.readunlock = torture_rwlock_read_unlock_irq,
.name = "rw_lock_irq"
};
static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];
static void torture_mutex_init(void)
{
int i;
for (i = 0; i < MAX_NESTED_LOCKS; i++)
__mutex_init(&torture_nested_mutexes[i], __func__,
&nested_mutex_keys[i]);
}
static int torture_mutex_nested_lock(int tid __maybe_unused,
u32 lockset)
{
int i;
for (i = 0; i < nested_locks; i++)
if (lockset & (1 << i))
mutex_lock(&torture_nested_mutexes[i]);
return 0;
}
static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
mutex_lock(&torture_mutex);
return 0;
}
static void torture_mutex_delay(struct torture_random_state *trsp)
{
const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms * 5);
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
mutex_unlock(&torture_mutex);
}
static void torture_mutex_nested_unlock(int tid __maybe_unused,
u32 lockset)
{
int i;
for (i = nested_locks - 1; i >= 0; i--)
if (lockset & (1 << i))
mutex_unlock(&torture_nested_mutexes[i]);
}
static struct lock_torture_ops mutex_lock_ops = {
.init = torture_mutex_init,
.nested_lock = torture_mutex_nested_lock,
.writelock = torture_mutex_lock,
.write_delay = torture_mutex_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_mutex_unlock,
.nested_unlock = torture_mutex_nested_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "mutex_lock"
};
#include <linux/ww_mutex.h>
/*
* The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems. The ww_mutex_init()
* function is called for initialization to ensure that.
*/
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;
static void torture_ww_mutex_init(void)
{
ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);
ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
sizeof(*ww_acquire_ctxs),
GFP_KERNEL);
if (!ww_acquire_ctxs)
VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}
static void torture_ww_mutex_exit(void)
{
kfree(ww_acquire_ctxs);
}
static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
LIST_HEAD(list);
struct reorder_lock {
struct list_head link;
struct ww_mutex *lock;
} locks[3], *ll, *ln;
struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];
locks[0].lock = &torture_ww_mutex_0;
list_add(&locks[0].link, &list);
locks[1].lock = &torture_ww_mutex_1;
list_add(&locks[1].link, &list);
locks[2].lock = &torture_ww_mutex_2;
list_add(&locks[2].link, &list);
ww_acquire_init(ctx, &torture_ww_class);
list_for_each_entry(ll, &list, link) {
int err;
err = ww_mutex_lock(ll->lock, ctx);
if (!err)
continue;
ln = ll;
list_for_each_entry_continue_reverse(ln, &list, link)
ww_mutex_unlock(ln->lock);
if (err != -EDEADLK)
return err;
ww_mutex_lock_slow(ll->lock, ctx);
list_move(&ll->link, &list);
}
return 0;
}
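/*
 * Editor's note: the loop above is the standard wound/wait backoff
 * protocol - on -EDEADLK, drop every lock acquired so far, take the
 * contended lock with ww_mutex_lock_slow(), move it to the head of the
 * list, and retry the remaining locks.
 */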
static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];
ww_mutex_unlock(&torture_ww_mutex_0);
ww_mutex_unlock(&torture_ww_mutex_1);
ww_mutex_unlock(&torture_ww_mutex_2);
ww_acquire_fini(ctx);
}
static struct lock_torture_ops ww_mutex_lock_ops = {
.init = torture_ww_mutex_init,
.exit = torture_ww_mutex_exit,
.writelock = torture_ww_mutex_lock,
.write_delay = torture_mutex_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_ww_mutex_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "ww_mutex_lock"
};
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];
static void torture_rtmutex_init(void)
{
int i;
for (i = 0; i < MAX_NESTED_LOCKS; i++)
__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
&nested_rtmutex_keys[i]);
}
static int torture_rtmutex_nested_lock(int tid __maybe_unused,
u32 lockset)
{
int i;
for (i = 0; i < nested_locks; i++)
if (lockset & (1 << i))
rt_mutex_lock(&torture_nested_rtmutexes[i]);
return 0;
}
static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
rt_mutex_lock(&torture_rtmutex);
return 0;
}
static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
const unsigned long shortdelay_us = 2;
const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
/*
* We want a short delay mostly to emulate likely code, and
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 200 * shortdelay_us)))
udelay(shortdelay_us);
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
rt_mutex_unlock(&torture_rtmutex);
}
static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
if (!rt_boost)
return;
__torture_rt_boost(trsp);
}
static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
u32 lockset)
{
int i;
for (i = nested_locks - 1; i >= 0; i--)
if (lockset & (1 << i))
rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}
static struct lock_torture_ops rtmutex_lock_ops = {
.init = torture_rtmutex_init,
.nested_lock = torture_rtmutex_nested_lock,
.writelock = torture_rtmutex_lock,
.write_delay = torture_rtmutex_delay,
.task_boost = torture_rt_boost_rtmutex,
.writeunlock = torture_rtmutex_unlock,
.nested_unlock = torture_rtmutex_nested_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "rtmutex_lock"
};
#endif
static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
down_write(&torture_rwsem);
return 0;
}
static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms * 10);
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
up_write(&torture_rwsem);
}
static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
down_read(&torture_rwsem);
return 0;
}
static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
const unsigned long longdelay_ms = 100;
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
(cxt.nrealreaders_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms * 2);
else
mdelay(longdelay_ms / 2);
if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
up_read(&torture_rwsem);
}
static struct lock_torture_ops rwsem_lock_ops = {
.writelock = torture_rwsem_down_write,
.write_delay = torture_rwsem_write_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_rwsem_up_write,
.readlock = torture_rwsem_down_read,
.read_delay = torture_rwsem_read_delay,
.readunlock = torture_rwsem_up_read,
.name = "rwsem_lock"
};
#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;
static void torture_percpu_rwsem_init(void)
{
BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}
static void torture_percpu_rwsem_exit(void)
{
percpu_free_rwsem(&pcpu_rwsem);
}
static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
percpu_down_write(&pcpu_rwsem);
return 0;
}
static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
percpu_up_write(&pcpu_rwsem);
}
static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
percpu_down_read(&pcpu_rwsem);
return 0;
}
static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
percpu_up_read(&pcpu_rwsem);
}
static struct lock_torture_ops percpu_rwsem_lock_ops = {
.init = torture_percpu_rwsem_init,
.exit = torture_percpu_rwsem_exit,
.writelock = torture_percpu_rwsem_down_write,
.write_delay = torture_rwsem_write_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_percpu_rwsem_up_write,
.readlock = torture_percpu_rwsem_down_read,
.read_delay = torture_rwsem_read_delay,
.readunlock = torture_percpu_rwsem_up_read,
.name = "percpu_rwsem_lock"
};
/*
* Lock torture writer kthread. Repeatedly acquires and releases
* the lock, checking for duplicate acquisitions.
*/
static int lock_torture_writer(void *arg)
{
struct lock_stress_stats *lwsp = arg;
int tid = lwsp - cxt.lwsa;
DEFINE_TORTURE_RANDOM(rand);
u32 lockset_mask;
bool skip_main_lock;
VERBOSE_TOROUT_STRING("lock_torture_writer task started");
if (!rt_task(current))
set_user_nice(current, MAX_NICE);
do {
if ((torture_random(&rand) & 0xfffff) == 0)
schedule_timeout_uninterruptible(1);
lockset_mask = torture_random(&rand);
/*
* When using nested_locks, we want to occasionally
* skip the main lock so we can avoid always serializing
* the lock chains on that central lock. By skipping the
* main lock occasionally, we can create different
* contention patterns (allowing for multiple disjoint
* blocked trees)
*/
skip_main_lock = (nested_locks &&
!(torture_random(&rand) % 100));
cxt.cur_ops->task_boost(&rand);
if (cxt.cur_ops->nested_lock)
cxt.cur_ops->nested_lock(tid, lockset_mask);
if (!skip_main_lock) {
cxt.cur_ops->writelock(tid);
if (WARN_ON_ONCE(lock_is_write_held))
lwsp->n_lock_fail++;
lock_is_write_held = true;
if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
lwsp->n_lock_fail++; /* rare, but... */
lwsp->n_lock_acquired++;
}
if (!skip_main_lock) {
cxt.cur_ops->write_delay(&rand);
lock_is_write_held = false;
WRITE_ONCE(last_lock_release, jiffies);
cxt.cur_ops->writeunlock(tid);
}
if (cxt.cur_ops->nested_unlock)
cxt.cur_ops->nested_unlock(tid, lockset_mask);
stutter_wait("lock_torture_writer");
} while (!torture_must_stop());
cxt.cur_ops->task_boost(NULL); /* reset prio */
torture_kthread_stopping("lock_torture_writer");
return 0;
}
/*
* Lock torture reader kthread. Repeatedly acquires and releases
* the reader lock.
*/
static int lock_torture_reader(void *arg)
{
struct lock_stress_stats *lrsp = arg;
int tid = lrsp - cxt.lrsa;
DEFINE_TORTURE_RANDOM(rand);
VERBOSE_TOROUT_STRING("lock_torture_reader task started");
set_user_nice(current, MAX_NICE);
do {
if ((torture_random(&rand) & 0xfffff) == 0)
schedule_timeout_uninterruptible(1);
cxt.cur_ops->readlock(tid);
atomic_inc(&lock_is_read_held);
if (WARN_ON_ONCE(lock_is_write_held))
lrsp->n_lock_fail++; /* rare, but... */
lrsp->n_lock_acquired++;
cxt.cur_ops->read_delay(&rand);
atomic_dec(&lock_is_read_held);
cxt.cur_ops->readunlock(tid);
stutter_wait("lock_torture_reader");
} while (!torture_must_stop());
torture_kthread_stopping("lock_torture_reader");
return 0;
}
/*
 * Create a lock-torture-statistics message in the specified buffer.
*/
static void __torture_print_stats(char *page,
struct lock_stress_stats *statp, bool write)
{
long cur;
bool fail = false;
int i, n_stress;
long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
long long sum = 0;
n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
for (i = 0; i < n_stress; i++) {
if (data_race(statp[i].n_lock_fail))
fail = true;
cur = data_race(statp[i].n_lock_acquired);
sum += cur;
if (max < cur)
max = cur;
if (min > cur)
min = cur;
}
page += sprintf(page,
"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
write ? "Writes" : "Reads ",
sum, max, min,
!onoff_interval && max / 2 > min ? "???" : "",
fail, fail ? "!!!" : "");
if (fail)
atomic_inc(&cxt.n_lock_torture_errors);
}
/*
* Print torture statistics. Caller must ensure that there is only one
* call to this function at a given time!!! This is normally accomplished
* by relying on the module system to only have one copy of the module
* loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when the lock_torture_stats thread is not
* running).
*/
static void lock_torture_stats_print(void)
{
int size = cxt.nrealwriters_stress * 200 + 8192;
char *buf;
if (cxt.cur_ops->readlock)
size += cxt.nrealreaders_stress * 200 + 8192;
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
pr_err("lock_torture_stats_print: Out of memory, need: %d",
size);
return;
}
__torture_print_stats(buf, cxt.lwsa, true);
pr_alert("%s", buf);
kfree(buf);
if (cxt.cur_ops->readlock) {
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
pr_err("lock_torture_stats_print: Out of memory, need: %d",
size);
return;
}
__torture_print_stats(buf, cxt.lrsa, false);
pr_alert("%s", buf);
kfree(buf);
}
}
/*
* Periodically prints torture statistics, if periodic statistics printing
* was specified via the stat_interval module parameter.
*
* No need to worry about fullstop here, since this one doesn't reference
* volatile state or register callbacks.
*/
static int lock_torture_stats(void *arg)
{
VERBOSE_TOROUT_STRING("lock_torture_stats task started");
do {
schedule_timeout_interruptible(stat_interval * HZ);
lock_torture_stats_print();
torture_shutdown_absorb("lock_torture_stats");
} while (!torture_must_stop());
torture_kthread_stopping("lock_torture_stats");
return 0;
}
static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
const char *tag)
{
pr_alert("%s" TORTURE_FLAG
"--- %s%s: nwriters_stress=%d nreaders_stress=%d nested_locks=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
torture_type, tag, cxt.debug_lock ? " [debug]": "",
cxt.nrealwriters_stress, cxt.nrealreaders_stress,
nested_locks, stat_interval, verbose, shuffle_interval,
stutter, shutdown_secs, onoff_interval, onoff_holdoff);
}
static void lock_torture_cleanup(void)
{
int i;
if (torture_cleanup_begin())
return;
/*
* Indicates early cleanup, meaning that the test has not run,
* such as when passing bogus args when loading the module.
	 * However, cxt->cur_ops.init() may have been invoked, so besides
	 * performing the underlying torture-specific cleanups, cur_ops.exit()
	 * will be invoked if needed.
*/
if (!cxt.lwsa && !cxt.lrsa)
goto end;
if (writer_tasks) {
for (i = 0; i < cxt.nrealwriters_stress; i++)
torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
kfree(writer_tasks);
writer_tasks = NULL;
}
if (reader_tasks) {
for (i = 0; i < cxt.nrealreaders_stress; i++)
torture_stop_kthread(lock_torture_reader,
reader_tasks[i]);
kfree(reader_tasks);
reader_tasks = NULL;
}
torture_stop_kthread(lock_torture_stats, stats_task);
lock_torture_stats_print(); /* -After- the stats thread is stopped! */
if (atomic_read(&cxt.n_lock_torture_errors))
lock_torture_print_module_parms(cxt.cur_ops,
"End of test: FAILURE");
else if (torture_onoff_failures())
lock_torture_print_module_parms(cxt.cur_ops,
"End of test: LOCK_HOTPLUG");
else
lock_torture_print_module_parms(cxt.cur_ops,
"End of test: SUCCESS");
kfree(cxt.lwsa);
cxt.lwsa = NULL;
kfree(cxt.lrsa);
cxt.lrsa = NULL;
end:
if (cxt.init_called) {
if (cxt.cur_ops->exit)
cxt.cur_ops->exit();
cxt.init_called = false;
}
torture_cleanup_end();
}
static int __init lock_torture_init(void)
{
int i, j;
int firsterr = 0;
static struct lock_torture_ops *torture_ops[] = {
&lock_busted_ops,
&spin_lock_ops, &spin_lock_irq_ops,
&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
&rw_lock_ops, &rw_lock_irq_ops,
&mutex_lock_ops,
&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
&rtmutex_lock_ops,
#endif
&rwsem_lock_ops,
&percpu_rwsem_lock_ops,
};
if (!torture_init_begin(torture_type, verbose))
return -EBUSY;
/* Process args and tell the world that the torturer is on the job. */
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
cxt.cur_ops = torture_ops[i];
if (strcmp(torture_type, cxt.cur_ops->name) == 0)
break;
}
if (i == ARRAY_SIZE(torture_ops)) {
pr_alert("lock-torture: invalid torture type: \"%s\"\n",
torture_type);
pr_alert("lock-torture types:");
for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
pr_alert(" %s", torture_ops[i]->name);
pr_alert("\n");
firsterr = -EINVAL;
goto unwind;
}
if (nwriters_stress == 0 &&
(!cxt.cur_ops->readlock || nreaders_stress == 0)) {
pr_alert("lock-torture: must run at least one locking thread\n");
firsterr = -EINVAL;
goto unwind;
}
if (nwriters_stress >= 0)
cxt.nrealwriters_stress = nwriters_stress;
else
cxt.nrealwriters_stress = 2 * num_online_cpus();
if (cxt.cur_ops->init) {
cxt.cur_ops->init();
cxt.init_called = true;
}
#ifdef CONFIG_DEBUG_MUTEXES
if (str_has_prefix(torture_type, "mutex"))
cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
if (str_has_prefix(torture_type, "rtmutex"))
cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
if ((str_has_prefix(torture_type, "spin")) ||
(str_has_prefix(torture_type, "rw_lock")))
cxt.debug_lock = true;
#endif
/* Initialize the statistics so that each run gets its own numbers. */
if (nwriters_stress) {
lock_is_write_held = false;
cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
sizeof(*cxt.lwsa),
GFP_KERNEL);
if (cxt.lwsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
firsterr = -ENOMEM;
goto unwind;
}
for (i = 0; i < cxt.nrealwriters_stress; i++) {
cxt.lwsa[i].n_lock_fail = 0;
cxt.lwsa[i].n_lock_acquired = 0;
}
}
if (cxt.cur_ops->readlock) {
if (nreaders_stress >= 0)
cxt.nrealreaders_stress = nreaders_stress;
else {
/*
			 * By default, distribute the number of readers and
			 * writers evenly. We still run the same total number
			 * of threads as the writer-only locks default.
*/
if (nwriters_stress < 0) /* user doesn't care */
cxt.nrealwriters_stress = num_online_cpus();
cxt.nrealreaders_stress = cxt.nrealwriters_stress;
}
if (nreaders_stress) {
cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
sizeof(*cxt.lrsa),
GFP_KERNEL);
if (cxt.lrsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
firsterr = -ENOMEM;
kfree(cxt.lwsa);
cxt.lwsa = NULL;
goto unwind;
}
for (i = 0; i < cxt.nrealreaders_stress; i++) {
cxt.lrsa[i].n_lock_fail = 0;
cxt.lrsa[i].n_lock_acquired = 0;
}
}
}
lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
/* Prepare torture context. */
if (onoff_interval > 0) {
firsterr = torture_onoff_init(onoff_holdoff * HZ,
onoff_interval * HZ, NULL);
if (torture_init_error(firsterr))
goto unwind;
}
if (shuffle_interval > 0) {
firsterr = torture_shuffle_init(shuffle_interval);
if (torture_init_error(firsterr))
goto unwind;
}
if (shutdown_secs > 0) {
firsterr = torture_shutdown_init(shutdown_secs,
lock_torture_cleanup);
if (torture_init_error(firsterr))
goto unwind;
}
if (stutter > 0) {
firsterr = torture_stutter_init(stutter, stutter);
if (torture_init_error(firsterr))
goto unwind;
}
if (nwriters_stress) {
writer_tasks = kcalloc(cxt.nrealwriters_stress,
sizeof(writer_tasks[0]),
GFP_KERNEL);
if (writer_tasks == NULL) {
TOROUT_ERRSTRING("writer_tasks: Out of memory");
firsterr = -ENOMEM;
goto unwind;
}
}
/* cap nested_locks to MAX_NESTED_LOCKS */
if (nested_locks > MAX_NESTED_LOCKS)
nested_locks = MAX_NESTED_LOCKS;
if (cxt.cur_ops->readlock) {
reader_tasks = kcalloc(cxt.nrealreaders_stress,
sizeof(reader_tasks[0]),
GFP_KERNEL);
if (reader_tasks == NULL) {
TOROUT_ERRSTRING("reader_tasks: Out of memory");
kfree(writer_tasks);
writer_tasks = NULL;
firsterr = -ENOMEM;
goto unwind;
}
}
/*
* Create the kthreads and start torturing (oh, those poor little locks).
*
	 * TODO: Note that we interleave writers with readers, giving writers
	 * a slight advantage by creating their kthreads first. This can be
	 * modified for very specific needs, or we could even let the user
	 * choose the policy, if ever wanted.
*/
for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
j < cxt.nrealreaders_stress; i++, j++) {
if (i >= cxt.nrealwriters_stress)
goto create_reader;
/* Create writer. */
firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
writer_tasks[i],
writer_fifo ? sched_set_fifo : NULL);
if (torture_init_error(firsterr))
goto unwind;
create_reader:
if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
continue;
/* Create reader. */
firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
reader_tasks[j]);
if (torture_init_error(firsterr))
goto unwind;
}
if (stat_interval > 0) {
firsterr = torture_create_kthread(lock_torture_stats, NULL,
stats_task);
if (torture_init_error(firsterr))
goto unwind;
}
torture_init_end();
return 0;
unwind:
torture_init_end();
lock_torture_cleanup();
if (shutdown_secs) {
WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
kernel_power_off();
}
return firsterr;
}
module_init(lock_torture_init);
module_exit(lock_torture_cleanup);
| linux-master | kernel/locking/locktorture.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Queued spinlock
*
* (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
* (C) Copyright 2013-2014,2018 Red Hat, Inc.
* (C) Copyright 2015 Intel Corp.
* (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
*
* Authors: Waiman Long <[email protected]>
* Peter Zijlstra <[email protected]>
*/
#ifndef _GEN_PV_LOCK_SLOWPATH
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>
#include <trace/events/lock.h>
/*
* Include queued spinlock statistics code
*/
#include "qspinlock_stat.h"
/*
* The basic principle of a queue-based spinlock can best be understood
* by studying a classic queue-based spinlock implementation called the
* MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
* Synchronization on Shared-Memory Multiprocessors by Mellor-Crummey and
* Scott") is available at
*
* https://bugzilla.kernel.org/show_bug.cgi?id=206115
*
* This queued spinlock implementation is based on the MCS lock, however to
* make it fit the 4 bytes we assume spinlock_t to be, and preserve its
* existing API, we must modify it somehow.
*
 * In particular, where the traditional MCS lock consists of a tail pointer
* (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
* unlock the next pending (next->locked), we compress both these: {tail,
* next->locked} into a single u32 value.
*
 * A spinlock disables recursion of its own context, and there is a limit
 * to the contexts that can nest: task, softirq, hardirq, nmi. As there
 * are at most 4 nesting levels, the level can be encoded by a 2-bit number. Now
* we can encode the tail by combining the 2-bit nesting level with the cpu
* number. With one byte for the lock value and 3 bytes for the tail, only a
* 32-bit word is now needed. Even though we only need 1 bit for the lock,
* we extend it to a full byte to achieve better performance for architectures
* that support atomic byte write.
*
* We also change the first spinner to spin on the lock bit instead of its
* node; whereby avoiding the need to carry a node from lock to unlock, and
* preserving existing lock API. This also makes the unlock code simpler and
* faster.
*
* N.B. The current implementation only supports architectures that allow
* atomic operations on smaller 8-bit and 16-bit data types.
*
*/
#include "mcs_spinlock.h"
#define MAX_NODES 4
/*
* On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in
* size and four of them will fit nicely in one 64-byte cacheline. For
* pvqspinlock, however, we need more space for extra data. To accommodate
* that, we insert two more long words to pad it up to 32 bytes. IOW, only
* two of them can fit in a cacheline in this case. That is OK as it is rare
* to have more than 2 levels of slowpath nesting in actual use. We don't
* want to penalize pvqspinlocks to optimize for a rare case in native
* qspinlocks.
*/
struct qnode {
struct mcs_spinlock mcs;
#ifdef CONFIG_PARAVIRT_SPINLOCKS
long reserved[2];
#endif
};
/*
* The pending bit spinning loop count.
* This heuristic is used to limit the number of lockword accesses
* made by atomic_cond_read_relaxed when waiting for the lock to
* transition out of the "== _Q_PENDING_VAL" state. We don't spin
* indefinitely because there's no guarantee that we'll make forward
* progress.
*/
#ifndef _Q_PENDING_LOOPS
#define _Q_PENDING_LOOPS 1
#endif
/*
* Per-CPU queue node structures; we can never have more than 4 nested
* contexts: task, softirq, hardirq, nmi.
*
* Exactly fits one 64-byte cacheline on a 64-bit architecture.
*
* PV doubles the storage and uses the second cacheline for PV state.
*/
static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]);
/*
* We must be able to distinguish between no-tail and the tail at 0:0,
* therefore increment the cpu number by one.
*/
static inline __pure u32 encode_tail(int cpu, int idx)
{
u32 tail;
tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */
return tail;
}
static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
return per_cpu_ptr(&qnodes[idx].mcs, cpu);
}
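/*
 * Editor's worked example (illustrative; the OFFSET/MASK values are
 * arch/config dependent): for cpu = 2, idx = 1,
 *
 *	tail = ((2 + 1) << _Q_TAIL_CPU_OFFSET) | (1 << _Q_TAIL_IDX_OFFSET)
 *
 * and decode_tail() recovers cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1 = 2,
 * idx = 1. Because of the +1, encode_tail(0, 0) is non-zero, so an empty
 * queue (all tail bits clear) can never be mistaken for CPU 0 / idx 0.
 */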
static inline __pure
struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx)
{
return &((struct qnode *)base + idx)->mcs;
}
#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
#if _Q_PENDING_BITS == 8
/**
* clear_pending - clear the pending bit.
* @lock: Pointer to queued spinlock structure
*
* *,1,* -> *,0,*
*/
static __always_inline void clear_pending(struct qspinlock *lock)
{
WRITE_ONCE(lock->pending, 0);
}
/**
* clear_pending_set_locked - take ownership and clear the pending bit.
* @lock: Pointer to queued spinlock structure
*
* *,1,0 -> *,0,1
*
* Lock stealing is not allowed if this function is used.
*/
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
}
/*
* xchg_tail - Put in the new queue tail code word & retrieve previous one
* @lock : Pointer to queued spinlock structure
* @tail : The new queue tail code word
* Return: The previous queue tail code word
*
* xchg(lock, tail), which heads an address dependency
*
* p,*,* -> n,*,* ; prev = xchg(lock, node)
*/
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
/*
* We can use relaxed semantics since the caller ensures that the
* MCS node is properly initialized before updating the tail.
*/
return (u32)xchg_relaxed(&lock->tail,
tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
#else /* _Q_PENDING_BITS == 8 */
/**
* clear_pending - clear the pending bit.
* @lock: Pointer to queued spinlock structure
*
* *,1,* -> *,0,*
*/
static __always_inline void clear_pending(struct qspinlock *lock)
{
atomic_andnot(_Q_PENDING_VAL, &lock->val);
}
/**
* clear_pending_set_locked - take ownership and clear the pending bit.
* @lock: Pointer to queued spinlock structure
*
* *,1,0 -> *,0,1
*/
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}
/**
* xchg_tail - Put in the new queue tail code word & retrieve previous one
* @lock : Pointer to queued spinlock structure
* @tail : The new queue tail code word
* Return: The previous queue tail code word
*
* xchg(lock, tail)
*
* p,*,* -> n,*,* ; prev = xchg(lock, node)
*/
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
u32 old, new, val = atomic_read(&lock->val);
for (;;) {
new = (val & _Q_LOCKED_PENDING_MASK) | tail;
/*
* We can use relaxed semantics since the caller ensures that
* the MCS node is properly initialized before updating the
* tail.
*/
old = atomic_cmpxchg_relaxed(&lock->val, val, new);
if (old == val)
break;
val = old;
}
return old;
}
#endif /* _Q_PENDING_BITS == 8 */
/**
* queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
* @lock : Pointer to queued spinlock structure
* Return: The previous lock value
*
* *,*,* -> *,1,*
*/
#ifndef queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
}
#endif
/**
* set_locked - Set the lock bit and own the lock
* @lock: Pointer to queued spinlock structure
*
* *,*,0 -> *,0,1
*/
static __always_inline void set_locked(struct qspinlock *lock)
{
WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}
/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
* all the PV callbacks.
*/
static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
struct mcs_spinlock *node) { }
static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
struct mcs_spinlock *node)
{ return 0; }
#define pv_enabled() false
#define pv_init_node __pv_init_node
#define pv_wait_node __pv_wait_node
#define pv_kick_node __pv_kick_node
#define pv_wait_head_or_lock __pv_wait_head_or_lock
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
#endif
#endif /* _GEN_PV_LOCK_SLOWPATH */
/**
* queued_spin_lock_slowpath - acquire the queued spinlock
* @lock: Pointer to queued spinlock structure
* @val: Current value of the queued spinlock 32-bit word
*
* (queue tail, pending bit, lock value)
*
* fast : slow : unlock
* : :
* uncontended (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
* : | ^--------.------. / :
* : v \ \ | :
* pending : (0,1,1) +--> (0,1,0) \ | :
* : | ^--' | | :
* : v | | :
* uncontended : (n,x,y) +--> (n,0,0) --' | :
* queue : | ^--' | :
* : v | :
* contended : (*,x,y) +--> (*,0,0) ---> (*,0,1) -' :
* queue : ^--' :
*/
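/*
 * Editor's note (illustrative; assumes a configuration with
 * _Q_PENDING_BITS == 8, i.e. NR_CPUS small enough): the 32-bit lock word
 * then decomposes as { locked: 8, pending: 8, tail idx: 2, tail cpu: 14 },
 * matching the (queue tail, pending bit, lock value) triples above.
 */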
void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
struct mcs_spinlock *prev, *next, *node;
u32 old, tail;
int idx;
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
if (pv_enabled())
goto pv_queue;
if (virt_spin_lock(lock))
return;
/*
* Wait for in-progress pending->locked hand-overs with a bounded
* number of spins so that we guarantee forward progress.
*
* 0,1,0 -> 0,0,1
*/
if (val == _Q_PENDING_VAL) {
int cnt = _Q_PENDING_LOOPS;
val = atomic_cond_read_relaxed(&lock->val,
(VAL != _Q_PENDING_VAL) || !cnt--);
}
/*
* If we observe any contention; queue.
*/
if (val & ~_Q_LOCKED_MASK)
goto queue;
/*
* trylock || pending
*
* 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
*/
val = queued_fetch_set_pending_acquire(lock);
/*
* If we observe contention, there is a concurrent locker.
*
* Undo and queue; our setting of PENDING might have made the
* n,0,0 -> 0,0,0 transition fail and it will now be waiting
* on @next to become !NULL.
*/
if (unlikely(val & ~_Q_LOCKED_MASK)) {
/* Undo PENDING if we set it. */
if (!(val & _Q_PENDING_MASK))
clear_pending(lock);
goto queue;
}
/*
* We're pending, wait for the owner to go away.
*
* 0,1,1 -> *,1,0
*
* this wait loop must be a load-acquire such that we match the
* store-release that clears the locked bit and create lock
* sequentiality; this is because not all
* clear_pending_set_locked() implementations imply full
* barriers.
*/
if (val & _Q_LOCKED_MASK)
smp_cond_load_acquire(&lock->locked, !VAL);
/*
* take ownership and clear the pending bit.
*
* 0,1,0 -> 0,0,1
*/
clear_pending_set_locked(lock);
lockevent_inc(lock_pending);
return;
/*
* End of pending bit optimistic spinning and beginning of MCS
* queuing.
*/
queue:
lockevent_inc(lock_slowpath);
pv_queue:
node = this_cpu_ptr(&qnodes[0].mcs);
idx = node->count++;
tail = encode_tail(smp_processor_id(), idx);
trace_contention_begin(lock, LCB_F_SPIN);
/*
* 4 nodes are allocated based on the assumption that there will
* not be nested NMIs taking spinlocks. That may not be true in
* some architectures even though the chance of needing more than
* 4 nodes will still be extremely unlikely. When that happens,
* we fall back to spinning on the lock directly without using
* any MCS node. This is not the most elegant solution, but is
* simple enough.
*/
if (unlikely(idx >= MAX_NODES)) {
lockevent_inc(lock_no_node);
while (!queued_spin_trylock(lock))
cpu_relax();
goto release;
}
node = grab_mcs_node(node, idx);
/*
* Keep counts of non-zero index values:
*/
lockevent_cond_inc(lock_use_node2 + idx - 1, idx);
/*
* Ensure that we increment the head node->count before initialising
* the actual node. If the compiler is kind enough to reorder these
* stores, then an IRQ could overwrite our assignments.
*/
barrier();
node->locked = 0;
node->next = NULL;
pv_init_node(node);
/*
* We touched a (possibly) cold cacheline in the per-cpu queue node;
* attempt the trylock once more in the hope someone let go while we
* weren't watching.
*/
if (queued_spin_trylock(lock))
goto release;
/*
* Ensure that the initialisation of @node is complete before we
* publish the updated tail via xchg_tail() and potentially link
* @node into the waitqueue via WRITE_ONCE(prev->next, node) below.
*/
smp_wmb();
/*
* Publish the updated tail.
* We have already touched the queueing cacheline; don't bother with
* pending stuff.
*
* p,*,* -> n,*,*
*/
old = xchg_tail(lock, tail);
next = NULL;
/*
* if there was a previous node; link it and wait until reaching the
* head of the waitqueue.
*/
if (old & _Q_TAIL_MASK) {
prev = decode_tail(old);
/* Link @node into the waitqueue. */
WRITE_ONCE(prev->next, node);
pv_wait_node(node, prev);
arch_mcs_spin_lock_contended(&node->locked);
/*
* While waiting for the MCS lock, the next pointer may have
* been set by another lock waiter. We optimistically load
* the next pointer & prefetch the cacheline for writing
* to reduce latency in the upcoming MCS unlock operation.
*/
next = READ_ONCE(node->next);
if (next)
prefetchw(next);
}
	/*
	 * We're at the head of the waitqueue; wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * This wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The pv_wait_head_or_lock() function, if active, will acquire
	 * the lock and return a non-zero value, so we have to skip the
	 * atomic_cond_read_acquire() call. As the next PV queue head hasn't
	 * been designated yet, there is no way for the locked value to become
	 * _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 */
if ((val = pv_wait_head_or_lock(lock, node)))
goto locked;
val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));
locked:
/*
* claim the lock:
*
* n,0,0 -> 0,0,1 : lock, uncontended
* *,*,0 -> *,*,1 : lock, contended
*
* If the queue head is the only one in the queue (lock value == tail)
* and nobody is pending, clear the tail code and grab the lock.
* Otherwise, we only need to grab the lock.
*/
/*
* In the PV case we might already have _Q_LOCKED_VAL set, because
* of lock stealing; therefore we must also allow:
*
* n,0,1 -> 0,0,1
*
* Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
* above wait condition, therefore any concurrent setting of
* PENDING will make the uncontended transition fail.
*/
if ((val & _Q_TAIL_MASK) == tail) {
if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
goto release; /* No contention */
}
/*
* Either somebody is queued behind us or _Q_PENDING_VAL got set
* which will then detect the remaining tail and queue behind us
* ensuring we'll see a @next.
*/
set_locked(lock);
	/*
	 * Contended path; wait for next if not observed yet, then release.
	 */
if (!next)
next = smp_cond_load_relaxed(&node->next, (VAL));
arch_mcs_spin_unlock_contended(&next->locked);
pv_kick_node(lock, next);
release:
trace_contention_end(lock, 0);
/*
* release the node
*/
__this_cpu_dec(qnodes[0].mcs.count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
/*
 * Generate the paravirt variants of the slowpath by re-including
 * this file (together with qspinlock_paravirt.h) with the PV hooks
 * enabled.
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH
#undef pv_enabled
#define pv_enabled() true
#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock
#undef queued_spin_lock_slowpath
#define queued_spin_lock_slowpath __pv_queued_spin_lock_slowpath
#include "qspinlock_paravirt.h"
#include "qspinlock.c"
bool nopvspin __initdata;
static __init int parse_nopvspin(char *arg)
{
nopvspin = true;
return 0;
}
early_param("nopvspin", parse_nopvspin);
#endif
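/*
 * Note: "nopvspin" is only parsed here; the flag is consumed by the
 * hypervisor guest setup code (e.g. the KVM/Xen spinlock init paths,
 * an assumption based on callers outside this file) to opt out of
 * paravirt spinlocks. Illustrative boot usage: append "nopvspin" to
 * the kernel command line.
 */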
| linux-master | kernel/locking/qspinlock.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>
/*
 * An MCS-like lock, especially tailored for optimistic spinning in
 * sleeping lock implementations (mutex, rwsem, etc).
 *
 * Using a single MCS node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
/*
* We use the value 0 to represent "no CPU", thus the encoded value
* will be the CPU number incremented by 1.
*/
static inline int encode_cpu(int cpu_nr)
{
return cpu_nr + 1;
}
static inline int node_cpu(struct optimistic_spin_node *node)
{
return node->cpu - 1;
}
static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
int cpu_nr = encoded_cpu_val - 1;
return per_cpu_ptr(&osq_node, cpu_nr);
}
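/*
 * Example round trip: on CPU 0, encode_cpu(0) == 1 is what gets
 * stored in lock->tail, and decode_cpu(1) maps back to CPU 0's
 * osq_node. The value 0 is thereby left free to mean
 * OSQ_UNLOCKED_VAL (no CPU queued).
 */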
/*
* Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
* Can return NULL in case we were the last queued and we updated @lock instead.
*/
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
struct optimistic_spin_node *node,
struct optimistic_spin_node *prev)
{
struct optimistic_spin_node *next = NULL;
int curr = encode_cpu(smp_processor_id());
int old;
	/*
	 * If there is a prev node in the queue, then the 'old' value will be
	 * the prev node's CPU #; otherwise it's set to OSQ_UNLOCKED_VAL, since
	 * if we're currently last in the queue, the queue will become empty.
	 */
old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
for (;;) {
if (atomic_read(&lock->tail) == curr &&
atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
/*
* We were the last queued, we moved @lock back. @prev
* will now observe @lock and will complete its
* unlock()/unqueue().
*/
break;
}
/*
* We must xchg() the @node->next value, because if we were to
* leave it in, a concurrent unlock()/unqueue() from
* @node->next might complete Step-A and think its @prev is
* still valid.
*
* If the concurrent unlock()/unqueue() wins the race, we'll
* wait for either @lock to point to us, through its Step-B, or
* wait for a new @node->next from its Step-C.
*/
if (node->next) {
next = xchg(&node->next, NULL);
if (next)
break;
}
cpu_relax();
}
return next;
}
bool osq_lock(struct optimistic_spin_queue *lock)
{
struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
struct optimistic_spin_node *prev, *next;
int curr = encode_cpu(smp_processor_id());
int old;
node->locked = 0;
node->next = NULL;
node->cpu = curr;
/*
* We need both ACQUIRE (pairs with corresponding RELEASE in
* unlock() uncontended, or fastpath) and RELEASE (to publish
* the node fields we just initialised) semantics when updating
* the lock tail.
*/
old = atomic_xchg(&lock->tail, curr);
if (old == OSQ_UNLOCKED_VAL)
return true;
prev = decode_cpu(old);
node->prev = prev;
/*
* osq_lock() unqueue
*
* node->prev = prev osq_wait_next()
* WMB MB
* prev->next = node next->prev = prev // unqueue-C
*
* Here 'node->prev' and 'next->prev' are the same variable and we need
* to ensure these stores happen in-order to avoid corrupting the list.
*/
smp_wmb();
WRITE_ONCE(prev->next, node);
	/*
	 * Normally @prev is untouchable after the above store, because at that
	 * moment unlock can proceed and wipe the node element from the stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */
	/*
	 * Wait to acquire the lock, or for cancellation. Note that need_resched()
	 * will come with an IPI, which will wake smp_cond_load_relaxed() if it
	 * is implemented with a monitor-wait. vcpu_is_preempted() relies on
	 * polling; be careful.
	 */
if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
vcpu_is_preempted(node_cpu(node->prev))))
return true;
/* unqueue */
/*
* Step - A -- stabilize @prev
*
* Undo our @prev->next assignment; this will make @prev's
* unlock()/unqueue() wait for a next pointer since @lock points to us
* (or later).
*/
for (;;) {
	/*
	 * cpu_relax() below implies a compiler barrier which would
	 * prevent this comparison from being optimized away.
	 */
if (data_race(prev->next) == node &&
cmpxchg(&prev->next, node, NULL) == node)
break;
/*
* We can only fail the cmpxchg() racing against an unlock(),
* in which case we should observe @node->locked becoming
* true.
*/
if (smp_load_acquire(&node->locked))
return true;
cpu_relax();
/*
* Or we race against a concurrent unqueue()'s step-B, in which
* case its step-C will write us a new @node->prev pointer.
*/
prev = READ_ONCE(node->prev);
}
/*
* Step - B -- stabilize @next
*
* Similar to unlock(), wait for @node->next or move @lock from @node
* back to @prev.
*/
next = osq_wait_next(lock, node, prev);
if (!next)
return false;
/*
* Step - C -- unlink
*
	 * @prev is stable because it's still waiting for a new @prev->next
* pointer, @next is stable because our @node->next pointer is NULL and
* it will wait in Step-A.
*/
WRITE_ONCE(next->prev, prev);
WRITE_ONCE(prev->next, next);
return false;
}
void osq_unlock(struct optimistic_spin_queue *lock)
{
struct optimistic_spin_node *node, *next;
int curr = encode_cpu(smp_processor_id());
/*
* Fast path for the uncontended case.
*/
if (likely(atomic_cmpxchg_release(&lock->tail, curr,
OSQ_UNLOCKED_VAL) == curr))
return;
/*
* Second most likely case.
*/
node = this_cpu_ptr(&osq_node);
next = xchg(&node->next, NULL);
if (next) {
WRITE_ONCE(next->locked, 1);
return;
}
next = osq_wait_next(lock, node, NULL);
if (next)
WRITE_ONCE(next->locked, 1);
}
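/*
 * Minimal usage sketch (illustrative only; the real users are the
 * mutex/rwsem optimistic-spin paths, and owner_running() below is a
 * hypothetical spin condition):
 *
 *	preempt_disable();
 *	if (osq_lock(&sem->osq)) {
 *		while (owner_running(sem))
 *			cpu_relax();
 *		osq_unlock(&sem->osq);
 *	}
 *	preempt_enable();
 *
 * osq_lock() returning false means we unqueued (e.g. need_resched()
 * or a preempted vCPU); the caller must not call osq_unlock() then.
 */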
| linux-master | kernel/locking/osq_lock.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (2004) Linus Torvalds
*
* Author: Zwane Mwaikambo <[email protected]>
*
* Copyright (2004, 2005) Ingo Molnar
*
* This file contains the spinlock/rwlock implementations for the
* SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
*
* Note that some architectures have special knowledge about the
* stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
*/
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>
#ifdef CONFIG_MMIOWB
#ifndef arch_mmiowb_state
DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
#endif
#endif
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
* not re-enabled during lock-acquire (which the preempt-spin-ops do):
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
* The __lock_function inlines are taken from
* spinlock : include/linux/spinlock_api_smp.h
* rwlock : include/linux/rwlock_api_smp.h
*/
#else
/*
* Some architectures can relax in favour of the CPU owning the lock.
*/
#ifndef arch_read_relax
# define arch_read_relax(l) cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l) cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l) cpu_relax()
#endif
/*
* We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here there is only one user per
 * function, which embeds them into the calling _lock_function below.
*
* This could be a long-held lock. We both prepare to spin for a long
* time (making _this_ CPU preemptible if possible), and we also signal
* towards that other CPU that it should break the lock ASAP.
*/
#define BUILD_LOCK_OPS(op, locktype) \
void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
{ \
for (;;) { \
preempt_disable(); \
if (likely(do_raw_##op##_trylock(lock))) \
break; \
preempt_enable(); \
\
arch_##op##_relax(&lock->raw_lock); \
} \
} \
\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
{ \
unsigned long flags; \
\
for (;;) { \
preempt_disable(); \
local_irq_save(flags); \
if (likely(do_raw_##op##_trylock(lock))) \
break; \
local_irq_restore(flags); \
preempt_enable(); \
\
arch_##op##_relax(&lock->raw_lock); \
} \
\
return flags; \
} \
\
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
{ \
_raw_##op##_lock_irqsave(lock); \
} \
\
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
{ \
unsigned long flags; \
\
	/*							\
	 * Careful: we must exclude softirqs too, hence the	\
	 * irq-disabling. We use the generic preemption-aware	\
	 * function:						\
	 */							\
flags = _raw_##op##_lock_irqsave(lock); \
local_bh_disable(); \
local_irq_restore(flags); \
} \
/*
* Build preemption-friendly versions of the following
* lock-spinning functions:
*
* __[spin|read|write]_lock()
* __[spin|read|write]_lock_irq()
* __[spin|read|write]_lock_irqsave()
* __[spin|read|write]_lock_bh()
*/
BUILD_LOCK_OPS(spin, raw_spinlock);
#ifndef CONFIG_PREEMPT_RT
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
#endif
#endif
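/*
 * For reference, BUILD_LOCK_OPS(spin, raw_spinlock) above expands
 * (slightly simplified) to:
 *
 *	void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(do_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *			arch_spin_relax(&lock->raw_lock);
 *		}
 *	}
 *
 * plus the _irqsave/_irq/_bh variants, mirroring the macro body.
 */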
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
noinline int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif
#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
noinline int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK
noinline void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
noinline unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
noinline void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
__raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif
#ifndef CONFIG_INLINE_SPIN_LOCK_BH
noinline void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
__raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif
#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
noinline void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
noinline void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
__raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
noinline void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif
#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
noinline void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
__raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
#ifndef CONFIG_PREEMPT_RT
#ifndef CONFIG_INLINE_READ_TRYLOCK
noinline int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif
#ifndef CONFIG_INLINE_READ_LOCK
noinline void __lockfunc _raw_read_lock(rwlock_t *lock)
{
__raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif
#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
noinline unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif
#ifndef CONFIG_INLINE_READ_LOCK_IRQ
noinline void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
__raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif
#ifndef CONFIG_INLINE_READ_LOCK_BH
noinline void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
__raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif
#ifndef CONFIG_INLINE_READ_UNLOCK
noinline void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
__raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
noinline void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
__raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
noinline void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
__raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif
#ifndef CONFIG_INLINE_READ_UNLOCK_BH
noinline void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
__raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif
#ifndef CONFIG_INLINE_WRITE_TRYLOCK
noinline int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif
#ifndef CONFIG_INLINE_WRITE_LOCK
noinline void __lockfunc _raw_write_lock(rwlock_t *lock)
{
__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#define __raw_write_lock_nested(lock, subclass) __raw_write_lock(((void)(subclass), (lock)))
#endif
void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass)
{
__raw_write_lock_nested(lock, subclass);
}
EXPORT_SYMBOL(_raw_write_lock_nested);
#endif
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
noinline unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
noinline void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
__raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif
#ifndef CONFIG_INLINE_WRITE_LOCK_BH
noinline void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
__raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif
#ifndef CONFIG_INLINE_WRITE_UNLOCK
noinline void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
__raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
noinline void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
__raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
noinline void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
__raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif
#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
noinline void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
__raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
#endif /* !CONFIG_PREEMPT_RT */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);
unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
int subclass)
{
unsigned long flags;
local_irq_save(flags);
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
struct lockdep_map *nest_lock)
{
preempt_disable();
spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
#endif
notrace int in_lock_functions(unsigned long addr)
{
/* Linker adds these: start and end of __lockfunc functions */
extern char __lock_text_start[], __lock_text_end[];
return addr >= (unsigned long)__lock_text_start
&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
| linux-master | kernel/locking/spinlock.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* rtmutex API
*/
#include <linux/spinlock.h>
#include <linux/export.h>
#define RT_MUTEX_BUILD_MUTEX
#define WW_RT
#include "rtmutex.c"
int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
struct rt_mutex *rtm = &lock->base;
if (!ww_ctx)
return rt_mutex_trylock(rtm);
/*
* Reset the wounded flag after a kill. No other process can
* race and wound us here, since they can't have a valid owner
* pointer if we don't have any locks held.
*/
if (ww_ctx->acquired == 0)
ww_ctx->wounded = 0;
if (__rt_mutex_trylock(&rtm->rtmutex)) {
ww_mutex_set_context_fastpath(lock, ww_ctx);
mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
return 1;
}
return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
unsigned int state, unsigned long ip)
{
struct lockdep_map __maybe_unused *nest_lock = NULL;
struct rt_mutex *rtm = &lock->base;
int ret;
might_sleep();
if (ww_ctx) {
if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
return -EALREADY;
/*
* Reset the wounded flag after a kill. No other process can
* race and wound us here, since they can't have a valid owner
* pointer if we don't have any locks held.
*/
if (ww_ctx->acquired == 0)
ww_ctx->wounded = 0;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
nest_lock = &ww_ctx->dep_map;
#endif
}
mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
if (ww_ctx)
ww_mutex_set_context_fastpath(lock, ww_ctx);
return 0;
}
ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);
if (ret)
mutex_release(&rtm->dep_map, ip);
return ret;
}
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock);
int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
struct rt_mutex *rtm = &lock->base;
__ww_mutex_unlock(lock);
mutex_release(&rtm->dep_map, _RET_IP_);
__rt_mutex_unlock(&rtm->rtmutex);
}
EXPORT_SYMBOL(ww_mutex_unlock);
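/*
 * Typical wound/wait usage sketch (illustrative; 'a', 'b' and
 * my_ww_class are hypothetical; see
 * Documentation/locking/ww-mutex-design.rst for the complete pattern):
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ret = ww_mutex_lock(&a->lock, &ctx);
 *	if (!ret)
 *		ret = ww_mutex_lock(&b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		(drop every lock already held in this context, then
 *		 ww_mutex_lock_slow() the contended lock and retry)
 *	}
 *	ww_acquire_done(&ctx);
 *	...
 *	ww_mutex_unlock(&b->lock);
 *	ww_mutex_unlock(&a->lock);
 *	ww_acquire_fini(&ctx);
 */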
| linux-master | kernel/locking/ww_rt_mutex.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008 Intel Corporation
* Author: Matthew Wilcox <[email protected]>
*
* This file implements counting semaphores.
* A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks, which enforce
 * rules that allow code to be debugged more easily.
*/
/*
* Some notes on the implementation:
*
* The spinlock controls access to the other members of the semaphore.
* down_trylock() and up() can be called from interrupt context, so we
* have to disable interrupts when taking the lock. It turns out various
* parts of the kernel expect to be able to use down() on a semaphore in
* interrupt context when they know it will succeed, so we have to use
* irqsave variants for down(), down_interruptible() and down_killable()
* too.
*
* The ->count variable represents how many more tasks can acquire this
* semaphore. If it's zero, there may be tasks waiting on the wait_list.
*/
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>
#include <trace/events/lock.h>
static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);
/**
* down - acquire the semaphore
* @sem: the semaphore to be acquired
*
* Acquires the semaphore. If no more tasks are allowed to acquire the
* semaphore, calling this function will put the task to sleep until the
* semaphore is released.
*
* Use of this function is deprecated, please use down_interruptible() or
* down_killable() instead.
*/
void __sched down(struct semaphore *sem)
{
unsigned long flags;
might_sleep();
raw_spin_lock_irqsave(&sem->lock, flags);
if (likely(sem->count > 0))
sem->count--;
else
__down(sem);
raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);
/**
* down_interruptible - acquire the semaphore unless interrupted
* @sem: the semaphore to be acquired
*
* Attempts to acquire the semaphore. If no more tasks are allowed to
* acquire the semaphore, calling this function will put the task to sleep.
* If the sleep is interrupted by a signal, this function will return -EINTR.
* If the semaphore is successfully acquired, this function returns 0.
*/
int __sched down_interruptible(struct semaphore *sem)
{
unsigned long flags;
int result = 0;
might_sleep();
raw_spin_lock_irqsave(&sem->lock, flags);
if (likely(sem->count > 0))
sem->count--;
else
result = __down_interruptible(sem);
raw_spin_unlock_irqrestore(&sem->lock, flags);
return result;
}
EXPORT_SYMBOL(down_interruptible);
/**
* down_killable - acquire the semaphore unless killed
* @sem: the semaphore to be acquired
*
* Attempts to acquire the semaphore. If no more tasks are allowed to
* acquire the semaphore, calling this function will put the task to sleep.
* If the sleep is interrupted by a fatal signal, this function will return
* -EINTR. If the semaphore is successfully acquired, this function returns
* 0.
*/
int __sched down_killable(struct semaphore *sem)
{
unsigned long flags;
int result = 0;
might_sleep();
raw_spin_lock_irqsave(&sem->lock, flags);
if (likely(sem->count > 0))
sem->count--;
else
result = __down_killable(sem);
raw_spin_unlock_irqrestore(&sem->lock, flags);
return result;
}
EXPORT_SYMBOL(down_killable);
/**
* down_trylock - try to acquire the semaphore, without waiting
* @sem: the semaphore to be acquired
*
* Try to acquire the semaphore atomically. Returns 0 if the semaphore has
* been acquired successfully or 1 if it cannot be acquired.
*
* NOTE: This return value is inverted from both spin_trylock and
* mutex_trylock! Be careful about this when converting code.
*
* Unlike mutex_trylock, this function can be used from interrupt context,
* and the semaphore can be released by any task or interrupt.
*/
int __sched down_trylock(struct semaphore *sem)
{
unsigned long flags;
int count;
raw_spin_lock_irqsave(&sem->lock, flags);
count = sem->count - 1;
if (likely(count >= 0))
sem->count = count;
raw_spin_unlock_irqrestore(&sem->lock, flags);
return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
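/*
 * Usage note (illustrative): because of the inverted return value,
 * a trylock-or-bail pattern reads
 *
 *	if (down_trylock(&sem))
 *		return -EBUSY;	(acquisition failed)
 *	...
 *	up(&sem);
 */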
/**
* down_timeout - acquire the semaphore within a specified time
* @sem: the semaphore to be acquired
* @timeout: how long to wait before failing
*
* Attempts to acquire the semaphore. If no more tasks are allowed to
* acquire the semaphore, calling this function will put the task to sleep.
* If the semaphore is not released within the specified number of jiffies,
* this function returns -ETIME. It returns 0 if the semaphore was acquired.
*/
int __sched down_timeout(struct semaphore *sem, long timeout)
{
unsigned long flags;
int result = 0;
might_sleep();
raw_spin_lock_irqsave(&sem->lock, flags);
if (likely(sem->count > 0))
sem->count--;
else
result = __down_timeout(sem, timeout);
raw_spin_unlock_irqrestore(&sem->lock, flags);
return result;
}
EXPORT_SYMBOL(down_timeout);
/**
* up - release the semaphore
* @sem: the semaphore to release
*
* Release the semaphore. Unlike mutexes, up() may be called from any
* context and even by tasks which have never called down().
*/
void __sched up(struct semaphore *sem)
{
unsigned long flags;
raw_spin_lock_irqsave(&sem->lock, flags);
if (likely(list_empty(&sem->wait_list)))
sem->count++;
else
__up(sem);
raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);
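/*
 * Minimal usage sketch (illustrative): a semaphore initialised with a
 * count of 2 admits two concurrent holders before a third sleeps:
 *
 *	struct semaphore sem;
 *
 *	sema_init(&sem, 2);
 *
 *	if (down_interruptible(&sem))
 *		return -EINTR;
 *	(critical section, at most two tasks here)
 *	up(&sem);
 */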
/* Functions for the contended case */
struct semaphore_waiter {
struct list_head list;
struct task_struct *task;
bool up;
};
/*
* Because this function is inlined, the 'state' parameter will be
* constant, and thus optimised away by the compiler. Likewise the
* 'timeout' parameter for the cases without timeouts.
*/
static inline int __sched ___down_common(struct semaphore *sem, long state,
long timeout)
{
struct semaphore_waiter waiter;
list_add_tail(&waiter.list, &sem->wait_list);
waiter.task = current;
waiter.up = false;
for (;;) {
if (signal_pending_state(state, current))
goto interrupted;
if (unlikely(timeout <= 0))
goto timed_out;
__set_current_state(state);
raw_spin_unlock_irq(&sem->lock);
timeout = schedule_timeout(timeout);
raw_spin_lock_irq(&sem->lock);
if (waiter.up)
return 0;
}
timed_out:
list_del(&waiter.list);
return -ETIME;
interrupted:
list_del(&waiter.list);
return -EINTR;
}
static inline int __sched __down_common(struct semaphore *sem, long state,
long timeout)
{
int ret;
trace_contention_begin(sem, 0);
ret = ___down_common(sem, state, timeout);
trace_contention_end(sem, ret);
return ret;
}
static noinline void __sched __down(struct semaphore *sem)
{
__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
static noinline int __sched __down_interruptible(struct semaphore *sem)
{
return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
static noinline int __sched __down_killable(struct semaphore *sem)
{
return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}
static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}
static noinline void __sched __up(struct semaphore *sem)
{
struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
struct semaphore_waiter, list);
list_del(&waiter->list);
waiter->up = true;
wake_up_process(waiter->task);
}
| linux-master | kernel/locking/semaphore.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* kernel/lockdep.c
*
* Runtime locking correctness validator
*
* Started by Ingo Molnar:
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* this code maps all the lock dependencies as they occur in a live kernel
* and will warn about the following classes of locking bugs:
*
* - lock inversion scenarios
* - circular lock dependencies
* - hardirq/softirq safe/unsafe locking bugs
*
* Bugs are reported even if the current locking scenario does not cause
* any deadlock at this point.
*
 * I.e. if at any time in the past two locks were taken in a different order,
* even if it happened for another task, even if those were different
* locks (but of the same class as this lock), this code will detect it.
*
* Thanks to Arjan van de Ven for coming up with the initial idea of
* mapping lock dependencies runtime.
*/
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>
#include <linux/lockdep.h>
#include <linux/context_tracking.h>
#include <asm/sections.h>
#include "lockdep_internals.h"
#include <trace/events/lock.h>
#ifdef CONFIG_PROVE_LOCKING
static int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif
#ifdef CONFIG_LOCK_STAT
static int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif
#ifdef CONFIG_SYSCTL
static struct ctl_table kern_lockdep_table[] = {
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
.data = &prove_locking,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif /* CONFIG_PROVE_LOCKING */
#ifdef CONFIG_LOCK_STAT
{
.procname = "lock_stat",
.data = &lock_stat,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif /* CONFIG_LOCK_STAT */
{ }
};
static __init int kernel_lockdep_sysctls_init(void)
{
register_sysctl_init("kernel", kern_lockdep_table);
return 0;
}
late_initcall(kernel_lockdep_sysctls_init);
#endif /* CONFIG_SYSCTL */
DEFINE_PER_CPU(unsigned int, lockdep_recursion);
EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
static __always_inline bool lockdep_enabled(void)
{
if (!debug_locks)
return false;
if (this_cpu_read(lockdep_recursion))
return false;
if (current->lockdep_recursion)
return false;
return true;
}
/*
* lockdep_lock: protects the lockdep graph, the hashes and the
* class/list/hash allocators.
*
* This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
* code to recurse back into the lockdep code...
*/
static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *__owner;
static inline void lockdep_lock(void)
{
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
__this_cpu_inc(lockdep_recursion);
arch_spin_lock(&__lock);
__owner = current;
}
static inline void lockdep_unlock(void)
{
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
return;
__owner = NULL;
arch_spin_unlock(&__lock);
__this_cpu_dec(lockdep_recursion);
}
static inline bool lockdep_assert_locked(void)
{
return DEBUG_LOCKS_WARN_ON(__owner != current);
}
static struct task_struct *lockdep_selftest_task_struct;
static int graph_lock(void)
{
lockdep_lock();
/*
* Make sure that if another CPU detected a bug while
 * walking the graph we don't change it (while the other
* CPU is busy printing out stuff with the graph lock
* dropped already)
*/
if (!debug_locks) {
lockdep_unlock();
return 0;
}
return 1;
}
static inline void graph_unlock(void)
{
lockdep_unlock();
}
/*
* Turn lock debugging off and return with 0 if it was off already,
* and also release the graph lock:
*/
static inline int debug_locks_off_graph_unlock(void)
{
int ret = debug_locks_off();
lockdep_unlock();
return ret;
}
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
/*
* All data structures here are protected by the global debug_lock.
*
* nr_lock_classes is the number of elements of lock_classes[] that is
* in use.
*/
#define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
#define KEYHASH_SIZE (1UL << KEYHASH_BITS)
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
unsigned long nr_lock_classes;
unsigned long nr_zapped_classes;
unsigned long max_lock_class_idx;
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
unsigned int class_idx = hlock->class_idx;
/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
barrier();
if (!test_bit(class_idx, lock_classes_in_use)) {
/*
* Someone passed in garbage, we give up.
*/
DEBUG_LOCKS_WARN_ON(1);
return NULL;
}
/*
* At this point, if the passed hlock->class_idx is still garbage,
	 * we just have to live with it.
*/
return lock_classes + class_idx;
}
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
static inline u64 lockstat_clock(void)
{
return local_clock();
}
static int lock_point(unsigned long points[], unsigned long ip)
{
int i;
for (i = 0; i < LOCKSTAT_POINTS; i++) {
if (points[i] == 0) {
points[i] = ip;
break;
}
if (points[i] == ip)
break;
}
return i;
}
static void lock_time_inc(struct lock_time *lt, u64 time)
{
if (time > lt->max)
lt->max = time;
if (time < lt->min || !lt->nr)
lt->min = time;
lt->total += time;
lt->nr++;
}
static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
if (!src->nr)
return;
if (src->max > dst->max)
dst->max = src->max;
if (src->min < dst->min || !dst->nr)
dst->min = src->min;
dst->total += src->total;
dst->nr += src->nr;
}
struct lock_class_stats lock_stats(struct lock_class *class)
{
struct lock_class_stats stats;
int cpu, i;
memset(&stats, 0, sizeof(struct lock_class_stats));
for_each_possible_cpu(cpu) {
struct lock_class_stats *pcs =
&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
stats.contention_point[i] += pcs->contention_point[i];
for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
stats.contending_point[i] += pcs->contending_point[i];
lock_time_add(&pcs->read_waittime, &stats.read_waittime);
lock_time_add(&pcs->write_waittime, &stats.write_waittime);
lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
stats.bounces[i] += pcs->bounces[i];
}
return stats;
}
void clear_lock_stats(struct lock_class *class)
{
int cpu;
for_each_possible_cpu(cpu) {
struct lock_class_stats *cpu_stats =
&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
memset(cpu_stats, 0, sizeof(struct lock_class_stats));
}
memset(class->contention_point, 0, sizeof(class->contention_point));
memset(class->contending_point, 0, sizeof(class->contending_point));
}
static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
}
static void lock_release_holdtime(struct held_lock *hlock)
{
struct lock_class_stats *stats;
u64 holdtime;
if (!lock_stat)
return;
holdtime = lockstat_clock() - hlock->holdtime_stamp;
stats = get_lock_stats(hlock_class(hlock));
if (hlock->read)
lock_time_inc(&stats->read_holdtime, holdtime);
else
lock_time_inc(&stats->write_holdtime, holdtime);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif
/*
* We keep a global list of all lock classes. The list is only accessed with
* the lockdep spinlock lock held. free_lock_classes is a list with free
* elements. These elements are linked together by the lock_entry member in
* struct lock_class.
*/
static LIST_HEAD(all_lock_classes);
static LIST_HEAD(free_lock_classes);
/**
* struct pending_free - information about data structures about to be freed
* @zapped: Head of a list with struct lock_class elements.
* @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
* are about to be freed.
*/
struct pending_free {
struct list_head zapped;
DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
};
/**
* struct delayed_free - data structures used for delayed freeing
*
* A data structure for delayed freeing of data structures that may be
* accessed by RCU readers at the time these were freed.
*
* @rcu_head: Used to schedule an RCU callback for freeing data structures.
* @index: Index of @pf to which freed data structures are added.
* @scheduled: Whether or not an RCU callback has been scheduled.
* @pf: Array with information about data structures about to be freed.
*/
static struct delayed_free {
struct rcu_head rcu_head;
int index;
int scheduled;
struct pending_free pf[2];
} delayed_free;
/*
* The lockdep classes are in a hash-table as well, for fast lookup:
*/
#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key) (classhash_table + __classhashfn((key)))
static struct hlist_head classhash_table[CLASSHASH_SIZE];
/*
* We put the lock dependency chains into a hash-table as well, to cache
* their existence:
*/
#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
static struct hlist_head chainhash_table[CHAINHASH_SIZE];
/*
* the id of held_lock
*/
static inline u16 hlock_id(struct held_lock *hlock)
{
BUILD_BUG_ON(MAX_LOCKDEP_KEYS_BITS + 2 > 16);
return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
}
static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
{
return hlock_id & (MAX_LOCKDEP_KEYS - 1);
}
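/*
 * Example: hlock_id() packs (class_idx, read) as
 * class_idx | (read << MAX_LOCKDEP_KEYS_BITS), and
 * chain_hlock_class_idx() recovers class_idx by masking with
 * MAX_LOCKDEP_KEYS - 1. The BUILD_BUG_ON above guarantees the pair
 * always fits in a u16 (as stored in the lock chains).
 */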
/*
* The hash key of the lock dependency chains is a hash itself too:
* it's a hash of all locks taken up to that lock, including that lock.
* It's a 64-bit hash, because it's important for the keys to be
* unique.
*/
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
u32 k0 = key, k1 = key >> 32;
__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
return k0 | (u64)k1 << 32;
}
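/*
 * Illustrative: a task's chain key starts at INITIAL_CHAIN_KEY and is
 * folded once per acquired lock, e.g.
 *
 *	u64 key = INITIAL_CHAIN_KEY;
 *
 *	key = iterate_chain_key(key, hlock_id(hlock0));
 *	key = iterate_chain_key(key, hlock_id(hlock1));
 *
 * so the same locks taken in a different order hash to a different
 * chain key (hlock0/hlock1 are hypothetical held locks).
 */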
void lockdep_init_task(struct task_struct *task)
{
task->lockdep_depth = 0; /* no locks held yet */
task->curr_chain_key = INITIAL_CHAIN_KEY;
task->lockdep_recursion = 0;
}
static __always_inline void lockdep_recursion_inc(void)
{
__this_cpu_inc(lockdep_recursion);
}
static __always_inline void lockdep_recursion_finish(void)
{
if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
__this_cpu_write(lockdep_recursion, 0);
}
void lockdep_set_selftest_task(struct task_struct *task)
{
lockdep_selftest_task_struct = task;
}
/*
* Debugging switches:
*/
#define VERBOSE 0
#define VERY_VERBOSE 0
#if VERBOSE
# define HARDIRQ_VERBOSE 1
# define SOFTIRQ_VERBOSE 1
#else
# define HARDIRQ_VERBOSE 0
# define SOFTIRQ_VERBOSE 0
#endif
#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
* Quick filtering for interesting events:
*/
static int class_filter(struct lock_class *class)
{
#if 0
/* Example */
if (class->name_version == 1 &&
!strcmp(class->name, "lockname"))
return 1;
if (class->name_version == 1 &&
!strcmp(class->name, "&struct->lockfield"))
return 1;
#endif
	/* Filter everything else. Returning 1 here would allow everything else. */
return 0;
}
#endif
static int verbose(struct lock_class *class)
{
#if VERBOSE
return class_filter(class);
#endif
return 0;
}
static void print_lockdep_off(const char *bug_msg)
{
printk(KERN_DEBUG "%s\n", bug_msg);
printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}
unsigned long nr_stack_trace_entries;
#ifdef CONFIG_PROVE_LOCKING
/**
* struct lock_trace - single stack backtrace
* @hash_entry: Entry in a stack_trace_hash[] list.
* @hash: jhash() of @entries.
* @nr_entries: Number of entries in @entries.
* @entries: Actual stack backtrace.
*/
struct lock_trace {
struct hlist_node hash_entry;
u32 hash;
u32 nr_entries;
unsigned long entries[] __aligned(sizeof(unsigned long));
};
#define LOCK_TRACE_SIZE_IN_LONGS \
(sizeof(struct lock_trace) / sizeof(unsigned long))
/*
* Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
*/
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
{
return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
memcmp(t1->entries, t2->entries,
t1->nr_entries * sizeof(t1->entries[0])) == 0;
}
static struct lock_trace *save_trace(void)
{
struct lock_trace *trace, *t2;
struct hlist_head *hash_head;
u32 hash;
int max_entries;
BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
LOCK_TRACE_SIZE_IN_LONGS;
if (max_entries <= 0) {
if (!debug_locks_off_graph_unlock())
return NULL;
print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
dump_stack();
return NULL;
}
trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
hash = jhash(trace->entries, trace->nr_entries *
sizeof(trace->entries[0]), 0);
trace->hash = hash;
hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
hlist_for_each_entry(t2, hash_head, hash_entry) {
if (traces_identical(trace, t2))
return t2;
}
nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
hlist_add_head(&trace->hash_entry, hash_head);
return trace;
}
/* Return the number of stack traces in the stack_trace[] array. */
u64 lockdep_stack_trace_count(void)
{
struct lock_trace *trace;
u64 c = 0;
int i;
for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
c++;
}
}
return c;
}
/* Return the number of stack hash chains that have at least one stack trace. */
u64 lockdep_stack_hash_count(void)
{
u64 c = 0;
int i;
for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
if (!hlist_empty(&stack_trace_hash[i]))
c++;
return c;
}
#endif
unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
#ifdef CONFIG_DEBUG_LOCKDEP
/*
* Various lockdep statistics:
*/
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif
#ifdef CONFIG_PROVE_LOCKING
/*
* Locking printouts:
*/
#define __USAGE(__STATE) \
[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
[LOCK_USED] = "INITIAL USE",
[LOCK_USED_READ] = "INITIAL READ USE",
/* abused as string storage for verify_lock_unused() */
[LOCK_USAGE_STATES] = "IN-NMI",
};
#endif
const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
{
return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}
static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
return 1UL << bit;
}
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
/*
* The usage character defaults to '.' (i.e., irqs disabled and not in
* irq context), which is the safest usage category.
*/
char c = '.';
/*
 * The order of the following usage checks matters, and determines
 * the resulting character as follows:
*
* - '+': irq is enabled and not in irq context
* - '-': in irq context and irq is disabled
* - '?': in irq context and irq is enabled
*/
if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
c = '+';
if (class->usage_mask & lock_flag(bit))
c = '?';
} else if (class->usage_mask & lock_flag(bit))
c = '-';
return c;
}
void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
int i = 0;
#define LOCKDEP_STATE(__STATE) \
usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE
usage[i] = '\0';
}
static void __print_lock_name(struct held_lock *hlock, struct lock_class *class)
{
char str[KSYM_NAME_LEN];
const char *name;
name = class->name;
if (!name) {
name = __get_key_name(class->key, str);
printk(KERN_CONT "%s", name);
} else {
printk(KERN_CONT "%s", name);
if (class->name_version > 1)
printk(KERN_CONT "#%d", class->name_version);
if (class->subclass)
printk(KERN_CONT "/%d", class->subclass);
if (hlock && class->print_fn)
class->print_fn(hlock->instance);
}
}
static void print_lock_name(struct held_lock *hlock, struct lock_class *class)
{
char usage[LOCK_USAGE_CHARS];
get_usage_chars(class, usage);
printk(KERN_CONT " (");
__print_lock_name(hlock, class);
printk(KERN_CONT "){%s}-{%d:%d}", usage,
class->wait_type_outer ?: class->wait_type_inner,
class->wait_type_inner);
}
static void print_lockdep_cache(struct lockdep_map *lock)
{
const char *name;
char str[KSYM_NAME_LEN];
name = lock->name;
if (!name)
name = __get_key_name(lock->key->subkeys, str);
printk(KERN_CONT "%s", name);
}
static void print_lock(struct held_lock *hlock)
{
/*
* We can be called locklessly through debug_show_all_locks() so be
* extra careful, the hlock might have been released and cleared.
*
 * If this indeed happens, let's pretend it does not hurt to continue
* to print the lock unless the hlock class_idx does not point to a
* registered class. The rationale here is: since we don't attempt
* to distinguish whether we are in this situation, if it just
* happened we can't count on class_idx to tell either.
*/
struct lock_class *lock = hlock_class(hlock);
if (!lock) {
printk(KERN_CONT "<RELEASED>\n");
return;
}
printk(KERN_CONT "%px", hlock->instance);
print_lock_name(hlock, lock);
printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}
static void lockdep_print_held_locks(struct task_struct *p)
{
int i, depth = READ_ONCE(p->lockdep_depth);
if (!depth)
printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
else
printk("%d lock%s held by %s/%d:\n", depth,
depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
/*
* It's not reliable to print a task's held locks if it's not sleeping
* and it's not the current task.
*/
if (p != current && task_is_running(p))
return;
for (i = 0; i < depth; i++) {
printk(" #%d: ", i);
print_lock(p->held_locks + i);
}
}
static void print_kernel_ident(void)
{
printk("%s %.*s %s\n", init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version,
print_tainted());
}
static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
return class_filter(class);
#endif
return 0;
}
/*
* Is this the address of a static object:
*/
#ifdef __KERNEL__
static int static_obj(const void *obj)
{
unsigned long addr = (unsigned long) obj;
if (is_kernel_core_data(addr))
return 1;
/*
* keys are allowed in the __ro_after_init section.
*/
if (is_kernel_rodata(addr))
return 1;
/*
* in initdata section and used during bootup only?
* NOTE: On some platforms the initdata section is
* outside of the _stext ... _end range.
*/
if (system_state < SYSTEM_FREEING_INITMEM &&
init_section_contains((void *)addr, 1))
return 1;
/*
* in-kernel percpu var?
*/
if (is_kernel_percpu_address(addr))
return 1;
/*
* module static or percpu var?
*/
return is_module_address(addr) || is_module_percpu_address(addr);
}
#endif
/*
* To make lock name printouts unique, we calculate a unique
* class->name_version generation counter. The caller must hold the graph
* lock.
*/
static int count_matching_names(struct lock_class *new_class)
{
struct lock_class *class;
int count = 0;
if (!new_class->name)
return 0;
list_for_each_entry(class, &all_lock_classes, lock_entry) {
if (new_class->key - new_class->subclass == class->key)
return class->name_version;
if (class->name && !strcmp(class->name, new_class->name))
count = max(count, class->name_version);
}
return count + 1;
}
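/*
 * Example: if two distinct keys both register a class named "lock",
 * the first gets name_version 1 and is printed as "lock", while the
 * second gets name_version 2 and is printed as "lock#2" (see
 * __print_lock_name()).
 */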
/* used from NMI context -- must be lockless */
static noinstr struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
struct hlist_head *hash_head;
struct lock_class *class;
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
instrumentation_begin();
debug_locks_off();
printk(KERN_ERR
"BUG: looking up invalid subclass: %u\n", subclass);
printk(KERN_ERR
"turning off the locking correctness validator.\n");
dump_stack();
instrumentation_end();
return NULL;
}
/*
* If it is not initialised then it has never been locked,
* so it won't be present in the hash table.
*/
if (unlikely(!lock->key))
return NULL;
/*
* NOTE: the class-key must be unique. For dynamic locks, a static
* lock_class_key variable is passed in through the mutex_init()
* (or spin_lock_init()) call - which acts as the key. For static
* locks we use the lock object itself as the key.
*/
BUILD_BUG_ON(sizeof(struct lock_class_key) >
sizeof(struct lockdep_map));
key = lock->key->subkeys + subclass;
hash_head = classhashentry(key);
/*
* We do an RCU walk of the hash, see lockdep_free_key_range().
*/
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return NULL;
hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
if (class->key == key) {
/*
* Huh! same key, different name? Did someone trample
* on some memory? We're most confused.
*/
WARN_ONCE(class->name != lock->name &&
lock->key != &__lockdep_no_validate__,
"Looking for class \"%s\" with key %ps, but found a different class \"%s\" with the same key\n",
lock->name, lock->key, class->name);
return class;
}
}
return NULL;
}
/*
* Static locks do not have their class-keys yet - for them the key is
* the lock object itself. If the lock is in the per cpu area, the
* canonical address of the lock (per cpu offset removed) is used.
*/
static bool assign_lock_key(struct lockdep_map *lock)
{
unsigned long can_addr, addr = (unsigned long)lock;
#ifdef __KERNEL__
/*
* lockdep_free_key_range() assumes that struct lock_class_key
* objects do not overlap. Since we use the address of lock
* objects as class key for static objects, check whether the
* size of lock_class_key objects does not exceed the size of
* the smallest lock object.
*/
BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
#endif
if (__is_kernel_percpu_address(addr, &can_addr))
lock->key = (void *)can_addr;
else if (__is_module_percpu_address(addr, &can_addr))
lock->key = (void *)can_addr;
else if (static_obj(lock))
lock->key = (void *)lock;
else {
/* Debug-check: all keys must be persistent! */
debug_locks_off();
pr_err("INFO: trying to register non-static key.\n");
pr_err("The code is fine but needs lockdep annotation, or maybe\n");
pr_err("you didn't initialize this object before use?\n");
pr_err("turning off the locking correctness validator.\n");
dump_stack();
return false;
}
return true;
}
#ifdef CONFIG_DEBUG_LOCKDEP
/* Check whether element @e occurs in list @h */
static bool in_list(struct list_head *e, struct list_head *h)
{
struct list_head *f;
list_for_each(f, h) {
if (e == f)
return true;
}
return false;
}
/*
* Check whether entry @e occurs in any of the locks_after or locks_before
* lists.
*/
static bool in_any_class_list(struct list_head *e)
{
struct lock_class *class;
int i;
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
class = &lock_classes[i];
if (in_list(e, &class->locks_after) ||
in_list(e, &class->locks_before))
return true;
}
return false;
}
static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
{
struct lock_list *e;
list_for_each_entry(e, h, entry) {
if (e->links_to != c) {
printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
c->name ? : "(?)",
(unsigned long)(e - list_entries),
e->links_to && e->links_to->name ?
e->links_to->name : "(?)",
e->class && e->class->name ? e->class->name :
"(?)");
return false;
}
}
return true;
}
#ifdef CONFIG_PROVE_LOCKING
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
#endif
static bool check_lock_chain_key(struct lock_chain *chain)
{
#ifdef CONFIG_PROVE_LOCKING
u64 chain_key = INITIAL_CHAIN_KEY;
int i;
for (i = chain->base; i < chain->base + chain->depth; i++)
chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
/*
	 * The 'unsigned long long' casts avoid a compiler warning
	 * when building tools/lib/lockdep.
*/
if (chain->chain_key != chain_key) {
printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
(unsigned long long)(chain - lock_chains),
(unsigned long long)chain->chain_key,
(unsigned long long)chain_key);
return false;
}
#endif
return true;
}
static bool in_any_zapped_class_list(struct lock_class *class)
{
struct pending_free *pf;
int i;
for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
if (in_list(&class->lock_entry, &pf->zapped))
return true;
}
return false;
}
static bool __check_data_structures(void)
{
struct lock_class *class;
struct lock_chain *chain;
struct hlist_head *head;
struct lock_list *e;
int i;
/* Check whether all classes occur in a lock list. */
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
class = &lock_classes[i];
if (!in_list(&class->lock_entry, &all_lock_classes) &&
!in_list(&class->lock_entry, &free_lock_classes) &&
!in_any_zapped_class_list(class)) {
printk(KERN_INFO "class %px/%s is not in any class list\n",
class, class->name ? : "(?)");
return false;
}
}
/* Check whether all classes have valid lock lists. */
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
class = &lock_classes[i];
if (!class_lock_list_valid(class, &class->locks_before))
return false;
if (!class_lock_list_valid(class, &class->locks_after))
return false;
}
/* Check the chain_key of all lock chains. */
for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
head = chainhash_table + i;
hlist_for_each_entry_rcu(chain, head, entry) {
if (!check_lock_chain_key(chain))
return false;
}
}
/*
* Check whether all list entries that are in use occur in a class
* lock list.
*/
for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
e = list_entries + i;
if (!in_any_class_list(&e->entry)) {
printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
(unsigned int)(e - list_entries),
e->class->name ? : "(?)",
e->links_to->name ? : "(?)");
return false;
}
}
/*
* Check whether all list entries that are not in use do not occur in
* a class lock list.
*/
for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
e = list_entries + i;
if (in_any_class_list(&e->entry)) {
printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
(unsigned int)(e - list_entries),
e->class && e->class->name ? e->class->name :
"(?)",
e->links_to && e->links_to->name ?
e->links_to->name : "(?)");
return false;
}
}
return true;
}
int check_consistency = 0;
module_param(check_consistency, int, 0644);
static void check_data_structures(void)
{
static bool once = false;
if (check_consistency && !once) {
if (!__check_data_structures()) {
once = true;
WARN_ON(once);
}
}
}
#else /* CONFIG_DEBUG_LOCKDEP */
static inline void check_data_structures(void) { }
#endif /* CONFIG_DEBUG_LOCKDEP */
static void init_chain_block_buckets(void);
/*
* Initialize the lock_classes[] array elements, the free_lock_classes list
* and also the delayed_free structure.
*/
static void init_data_structures_once(void)
{
static bool __read_mostly ds_initialized, rcu_head_initialized;
int i;
if (likely(rcu_head_initialized))
return;
if (system_state >= SYSTEM_SCHEDULING) {
init_rcu_head(&delayed_free.rcu_head);
rcu_head_initialized = true;
}
if (ds_initialized)
return;
ds_initialized = true;
INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
INIT_LIST_HEAD(&lock_classes[i].locks_after);
INIT_LIST_HEAD(&lock_classes[i].locks_before);
}
init_chain_block_buckets();
}
static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
{
unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
return lock_keys_hash + hash;
}
/* Register a dynamically allocated key. */
void lockdep_register_key(struct lock_class_key *key)
{
struct hlist_head *hash_head;
struct lock_class_key *k;
unsigned long flags;
if (WARN_ON_ONCE(static_obj(key)))
return;
hash_head = keyhashentry(key);
raw_local_irq_save(flags);
if (!graph_lock())
goto restore_irqs;
hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
if (WARN_ON_ONCE(k == key))
goto out_unlock;
}
hlist_add_head_rcu(&key->hash_entry, hash_head);
out_unlock:
graph_unlock();
restore_irqs:
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lockdep_register_key);
/* Check whether a key has been registered as a dynamic key. */
static bool is_dynamic_key(const struct lock_class_key *key)
{
struct hlist_head *hash_head;
struct lock_class_key *k;
bool found = false;
if (WARN_ON_ONCE(static_obj(key)))
return false;
/*
* If lock debugging is disabled lock_keys_hash[] may contain
* pointers to memory that has already been freed. Avoid triggering
* a use-after-free in that case by returning early.
*/
if (!debug_locks)
return true;
hash_head = keyhashentry(key);
rcu_read_lock();
hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
if (k == key) {
found = true;
break;
}
}
rcu_read_unlock();
return found;
}
/*
* Register a lock's class in the hash-table, if the class is not present
* yet. Otherwise we look it up. We cache the result in the lock object
* itself, so the actual hash lookup happens only once per lock object.
*/
static struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
struct lockdep_subclass_key *key;
struct hlist_head *hash_head;
struct lock_class *class;
int idx;
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
class = look_up_lock_class(lock, subclass);
if (likely(class))
goto out_set_class_cache;
if (!lock->key) {
if (!assign_lock_key(lock))
return NULL;
} else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
return NULL;
}
key = lock->key->subkeys + subclass;
hash_head = classhashentry(key);
if (!graph_lock()) {
return NULL;
}
/*
* We have to do the hash-walk again, to avoid races
* with another CPU:
*/
hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
if (class->key == key)
goto out_unlock_set;
}
init_data_structures_once();
/* Allocate a new lock class and add it to the hash. */
class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
lock_entry);
if (!class) {
if (!debug_locks_off_graph_unlock()) {
return NULL;
}
print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
dump_stack();
return NULL;
}
nr_lock_classes++;
__set_bit(class - lock_classes, lock_classes_in_use);
debug_atomic_inc(nr_unused_locks);
class->key = key;
class->name = lock->name;
class->subclass = subclass;
WARN_ON_ONCE(!list_empty(&class->locks_before));
WARN_ON_ONCE(!list_empty(&class->locks_after));
class->name_version = count_matching_names(class);
class->wait_type_inner = lock->wait_type_inner;
class->wait_type_outer = lock->wait_type_outer;
class->lock_type = lock->lock_type;
/*
* We use RCU's safe list-add method to make
* parallel walking of the hash-list safe:
*/
hlist_add_head_rcu(&class->hash_entry, hash_head);
/*
* Remove the class from the free list and add it to the global list
* of classes.
*/
list_move_tail(&class->lock_entry, &all_lock_classes);
idx = class - lock_classes;
if (idx > max_lock_class_idx)
max_lock_class_idx = idx;
if (verbose(class)) {
graph_unlock();
printk("\nnew class %px: %s", class->key, class->name);
if (class->name_version > 1)
printk(KERN_CONT "#%d", class->name_version);
printk(KERN_CONT "\n");
dump_stack();
if (!graph_lock()) {
return NULL;
}
}
out_unlock_set:
graph_unlock();
out_set_class_cache:
if (!subclass || force)
lock->class_cache[0] = class;
else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
lock->class_cache[subclass] = class;
/*
* Hash collision, did we smoke some? We found a class with a matching
* hash but the subclass -- which is hashed in -- didn't match.
*/
if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
return NULL;
return class;
}
#ifdef CONFIG_PROVE_LOCKING
/*
* Allocate a lockdep entry. (assumes the graph_lock is held, returns
* NULL on failure)
*/
static struct lock_list *alloc_list_entry(void)
{
int idx = find_first_zero_bit(list_entries_in_use,
ARRAY_SIZE(list_entries));
if (idx >= ARRAY_SIZE(list_entries)) {
if (!debug_locks_off_graph_unlock())
return NULL;
print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
dump_stack();
return NULL;
}
nr_list_entries++;
__set_bit(idx, list_entries_in_use);
return list_entries + idx;
}
/*
* Add a new dependency to the head of the list:
*/
static int add_lock_to_list(struct lock_class *this,
struct lock_class *links_to, struct list_head *head,
u16 distance, u8 dep,
const struct lock_trace *trace)
{
struct lock_list *entry;
/*
* Lock not present yet - get a new dependency struct and
* add it to the list:
*/
entry = alloc_list_entry();
if (!entry)
return 0;
entry->class = this;
entry->links_to = links_to;
entry->dep = dep;
entry->distance = distance;
entry->trace = trace;
/*
* Both allocation and removal are done under the graph lock; but
* iteration is under RCU-sched; see look_up_lock_class() and
* lockdep_free_key_range().
*/
list_add_tail_rcu(&entry->entry, head);
return 1;
}
/*
* For good efficiency of the modulo operation, we use a power of 2
*/
#define MAX_CIRCULAR_QUEUE_SIZE (1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS)
#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
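/*
* For example, with CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS == 12 the queue
* has 4096 slots and CQ_MASK == 0xfff, so the index wrap-around below
* reduces to a single AND: (4095 + 1) & CQ_MASK == 0.
*/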
/*
* The circular_queue and helpers are used to implement graph
* breadth-first search (BFS) algorithm, by which we can determine
* whether there is a path from a lock to another. In deadlock checks,
* a path from the next lock to be acquired to a previous held lock
* indicates that adding the <prev> -> <next> lock dependency will
* produce a circle in the graph. Breadth-first search instead of
* depth-first search is used in order to find the shortest (circular)
* path.
*/
struct circular_queue {
struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
unsigned int front, rear;
};
static struct circular_queue lock_cq;
unsigned int max_bfs_queue_depth;
static unsigned int lockdep_dependency_gen_id;
static inline void __cq_init(struct circular_queue *cq)
{
cq->front = cq->rear = 0;
lockdep_dependency_gen_id++;
}
static inline int __cq_empty(struct circular_queue *cq)
{
return (cq->front == cq->rear);
}
static inline int __cq_full(struct circular_queue *cq)
{
return ((cq->rear + 1) & CQ_MASK) == cq->front;
}
static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
{
if (__cq_full(cq))
return -1;
cq->element[cq->rear] = elem;
cq->rear = (cq->rear + 1) & CQ_MASK;
return 0;
}
/*
* Dequeue an element from the circular_queue, return a lock_list if
* the queue is not empty, or NULL otherwise.
*/
static inline struct lock_list *__cq_dequeue(struct circular_queue *cq)
{
struct lock_list *lock;
if (__cq_empty(cq))
return NULL;
lock = cq->element[cq->front];
cq->front = (cq->front + 1) & CQ_MASK;
return lock;
}
static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
return (cq->rear - cq->front) & CQ_MASK;
}
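/*
* The count above is wrap-around safe: e.g. with 4096 slots, front ==
* 4094 and rear == 2 give (2 - 4094) & CQ_MASK == 4, i.e. the four
* elements in slots 4094, 4095, 0 and 1.
*/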
static inline void mark_lock_accessed(struct lock_list *lock)
{
lock->class->dep_gen_id = lockdep_dependency_gen_id;
}
static inline void visit_lock_entry(struct lock_list *lock,
struct lock_list *parent)
{
lock->parent = parent;
}
static inline unsigned long lock_accessed(struct lock_list *lock)
{
return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}
static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
return child->parent;
}
static inline int get_lock_depth(struct lock_list *child)
{
int depth = 0;
struct lock_list *parent;
while ((parent = get_lock_parent(child))) {
child = parent;
depth++;
}
return depth;
}
/*
* Return the forward or backward dependency list.
*
* @lock: the lock_list to get its class's dependency list
* @offset: the offset to struct lock_class to determine whether it is
* locks_after or locks_before
*/
static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
{
void *lock_class = lock->class;
return lock_class + offset;
}
/*
* Return values of a bfs search:
*
* BFS_E* indicates an error
* BFS_R* indicates a result (match or not)
*
* BFS_EINVALIDNODE: An invalid node was found in the graph.
*
* BFS_EQUEUEFULL: The queue became full during the BFS.
*
* BFS_RMATCH: A matching node was found in the graph; that node is put
* into *@target_entry.
*
* BFS_RNOMATCH: No matching node was found; *@target_entry is kept
* _unchanged_.
*/
enum bfs_result {
BFS_EINVALIDNODE = -2,
BFS_EQUEUEFULL = -1,
BFS_RMATCH = 0,
BFS_RNOMATCH = 1,
};
/*
* bfs_result < 0 means error
*/
static inline bool bfs_error(enum bfs_result res)
{
return res < 0;
}
/*
* DEP_*_BIT in lock_list::dep
*
* For dependency @prev -> @next:
*
* SR: @prev is shared reader (->read != 0) and @next is recursive reader
* (->read == 2)
* ER: @prev is exclusive locker (->read == 0) and @next is recursive reader
* SN: @prev is shared reader and @next is non-recursive locker (->read != 2)
* EN: @prev is exclusive locker and @next is non-recursive locker
*
* Note that we define the value of DEP_*_BITs so that:
* bit0 is prev->read == 0
* bit1 is next->read != 2
*/
#define DEP_SR_BIT (0 + (0 << 1)) /* 0 */
#define DEP_ER_BIT (1 + (0 << 1)) /* 1 */
#define DEP_SN_BIT (0 + (1 << 1)) /* 2 */
#define DEP_EN_BIT (1 + (1 << 1)) /* 3 */
#define DEP_SR_MASK (1U << (DEP_SR_BIT))
#define DEP_ER_MASK (1U << (DEP_ER_BIT))
#define DEP_SN_MASK (1U << (DEP_SN_BIT))
#define DEP_EN_MASK (1U << (DEP_EN_BIT))
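/*
* A worked example of how these masks are used in __bfs() below: if an
* edge has dep == DEP_SR_MASK | DEP_EN_MASK (0x9) and the previous step
* was -(*R)-> only, then dep &= ~(DEP_SR_MASK | DEP_SN_MASK) leaves just
* DEP_EN_MASK (0x8): the -(SR)-> variant cannot extend a strong path,
* but the -(EN)-> variant still can.
*/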
static inline unsigned int
__calc_dep_bit(struct held_lock *prev, struct held_lock *next)
{
return (prev->read == 0) + ((next->read != 2) << 1);
}
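/*
* E.g. prev->read == 0 (exclusive) and next->read == 2 (recursive reader)
* give 1 + (0 << 1) == DEP_ER_BIT, i.e. an -(ER)-> dependency.
*/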
static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next)
{
return 1U << __calc_dep_bit(prev, next);
}
/*
* calculate the dep_bit for backwards edges. We care about whether @prev is
* shared and whether @next is recursive.
*/
static inline unsigned int
__calc_dep_bitb(struct held_lock *prev, struct held_lock *next)
{
return (next->read != 2) + ((prev->read == 0) << 1);
}
static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next)
{
return 1U << __calc_dep_bitb(prev, next);
}
/*
* Initialize a lock_list entry @lock belonging to @class as the root for a BFS
* search.
*/
static inline void __bfs_init_root(struct lock_list *lock,
struct lock_class *class)
{
lock->class = class;
lock->parent = NULL;
lock->only_xr = 0;
}
/*
* Initialize a lock_list entry @lock based on a lock acquisition @hlock as the
* root for a BFS search.
*
* ->only_xr of the initial lock node is set to @hlock->read == 2, to make sure
* that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)->
* and -(S*)->.
*/
static inline void bfs_init_root(struct lock_list *lock,
struct held_lock *hlock)
{
__bfs_init_root(lock, hlock_class(hlock));
lock->only_xr = (hlock->read == 2);
}
/*
* Similar to bfs_init_root() but initialize the root for backwards BFS.
*
* ->only_xr of the initial lock node is set to @hlock->read != 0, to make sure
* that <next> -> @hlock and @hlock -> <whatever backwards BFS found> is not
* -(*S)-> and -(R*)-> (reverse order of -(*R)-> and -(S*)->).
*/
static inline void bfs_init_rootb(struct lock_list *lock,
struct held_lock *hlock)
{
__bfs_init_root(lock, hlock_class(hlock));
lock->only_xr = (hlock->read != 0);
}
static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset)
{
if (!lock || !lock->parent)
return NULL;
return list_next_or_null_rcu(get_dep_list(lock->parent, offset),
&lock->entry, struct lock_list, entry);
}
/*
* Breadth-First Search to find a strong path in the dependency graph.
*
* @source_entry: the source of the path we are searching for.
* @data: data used for the second parameter of @match function
* @match: match function for the search
* @target_entry: pointer to the target of a matched path
* @offset: the offset to struct lock_class to determine whether it is
* locks_after or locks_before
*
* We may have multiple edges (considering different kinds of dependencies,
* e.g. ER and SN) between two nodes in the dependency graph. But
* only the strong dependency path in the graph is relevant to deadlocks. A
* strong dependency path is a dependency path that doesn't have two adjacent
* dependencies as -(*R)-> -(S*)->, please see:
*
* Documentation/locking/lockdep-design.rst
*
* for more explanation of the definition of strong dependency paths
*
* In __bfs(), we only traverse in the strong dependency path:
*
* In lock_list::only_xr, we record whether the previous dependency only
* has -(*R)-> in the search, and if it does (prev only has -(*R)->), we
* filter out any -(S*)-> in the current dependency and after that, the
* ->only_xr is set according to whether we only have -(*R)-> left.
*/
static enum bfs_result __bfs(struct lock_list *source_entry,
void *data,
bool (*match)(struct lock_list *entry, void *data),
bool (*skip)(struct lock_list *entry, void *data),
struct lock_list **target_entry,
int offset)
{
struct circular_queue *cq = &lock_cq;
struct lock_list *lock = NULL;
struct lock_list *entry;
struct list_head *head;
unsigned int cq_depth;
bool first;
lockdep_assert_locked();
__cq_init(cq);
__cq_enqueue(cq, source_entry);
while ((lock = __bfs_next(lock, offset)) || (lock = __cq_dequeue(cq))) {
if (!lock->class)
return BFS_EINVALIDNODE;
/*
* Step 1: check whether we have already finished with this one.
*
* If we have visited all the dependencies from this @lock to
* others (iow, if we have visited all lock_list entries in
* @lock->class->locks_{after,before}) we skip, otherwise go
* and visit all the dependencies in the list and mark this
* list accessed.
*/
if (lock_accessed(lock))
continue;
else
mark_lock_accessed(lock);
/*
* Step 2: check whether prev dependency and this form a strong
* dependency path.
*/
if (lock->parent) { /* Parent exists, check prev dependency */
u8 dep = lock->dep;
bool prev_only_xr = lock->parent->only_xr;
/*
* Mask out all -(S*)-> if we only have *R in previous
* step, because -(*R)-> -(S*)-> don't make up a strong
* dependency.
*/
if (prev_only_xr)
dep &= ~(DEP_SR_MASK | DEP_SN_MASK);
/* If nothing left, we skip */
if (!dep)
continue;
/* If there are only -(*R)-> left, set that for the next step */
lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
}
/*
* Step 3: we haven't visited this and there is a strong
* dependency path to this, so check with @match.
* If @skip is provided and returns true, we skip this
* lock (and any path this lock is in).
*/
if (skip && skip(lock, data))
continue;
if (match(lock, data)) {
*target_entry = lock;
return BFS_RMATCH;
}
/*
* Step 4: if there is no match, expand the path by adding the
* forward or backwards dependencies in the search.
*/
first = true;
head = get_dep_list(lock, offset);
list_for_each_entry_rcu(entry, head, entry) {
visit_lock_entry(entry, lock);
/*
* Note we only enqueue the first entry of the list into
* the queue, because we can always find a sibling
* dependency from it (see __bfs_next()); as a result,
* queue space is saved.
*/
if (!first)
continue;
first = false;
if (__cq_enqueue(cq, entry))
return BFS_EQUEUEFULL;
cq_depth = __cq_get_elem_count(cq);
if (max_bfs_queue_depth < cq_depth)
max_bfs_queue_depth = cq_depth;
}
}
return BFS_RNOMATCH;
}
static inline enum bfs_result
__bfs_forwards(struct lock_list *src_entry,
void *data,
bool (*match)(struct lock_list *entry, void *data),
bool (*skip)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
return __bfs(src_entry, data, match, skip, target_entry,
offsetof(struct lock_class, locks_after));
}
static inline enum bfs_result
__bfs_backwards(struct lock_list *src_entry,
void *data,
bool (*match)(struct lock_list *entry, void *data),
bool (*skip)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
return __bfs(src_entry, data, match, skip, target_entry,
offsetof(struct lock_class, locks_before));
}
static void print_lock_trace(const struct lock_trace *trace,
unsigned int spaces)
{
stack_trace_print(trace->entries, trace->nr_entries, spaces);
}
/*
* Print a dependency chain entry (this is only done when a deadlock
* has been detected):
*/
static noinline void
print_circular_bug_entry(struct lock_list *target, int depth)
{
if (debug_locks_silent)
return;
printk("\n-> #%u", depth);
print_lock_name(NULL, target->class);
printk(KERN_CONT ":\n");
print_lock_trace(target->trace, 6);
}
static void
print_circular_lock_scenario(struct held_lock *src,
struct held_lock *tgt,
struct lock_list *prt)
{
struct lock_class *source = hlock_class(src);
struct lock_class *target = hlock_class(tgt);
struct lock_class *parent = prt->class;
int src_read = src->read;
int tgt_read = tgt->read;
/*
* A direct locking problem where unsafe_class lock is taken
* directly by safe_class lock, then all we need to show
* is the deadlock scenario, as it is obvious that the
* unsafe lock is taken under the safe lock.
*
* But if there is a chain instead, where the safe lock takes
* an intermediate lock (middle_class) where this lock is
* not the same as the safe lock, then the lock chain is
* used to describe the problem. Otherwise we would need
* to show a different CPU case for each link in the chain
* from the safe_class lock to the unsafe_class lock.
*/
if (parent != source) {
printk("Chain exists of:\n ");
__print_lock_name(src, source);
printk(KERN_CONT " --> ");
__print_lock_name(NULL, parent);
printk(KERN_CONT " --> ");
__print_lock_name(tgt, target);
printk(KERN_CONT "\n\n");
}
printk(" Possible unsafe locking scenario:\n\n");
printk(" CPU0 CPU1\n");
printk(" ---- ----\n");
if (tgt_read != 0)
printk(" rlock(");
else
printk(" lock(");
__print_lock_name(tgt, target);
printk(KERN_CONT ");\n");
printk(" lock(");
__print_lock_name(NULL, parent);
printk(KERN_CONT ");\n");
printk(" lock(");
__print_lock_name(tgt, target);
printk(KERN_CONT ");\n");
if (src_read != 0)
printk(" rlock(");
else if (src->sync)
printk(" sync(");
else
printk(" lock(");
__print_lock_name(src, source);
printk(KERN_CONT ");\n");
printk("\n *** DEADLOCK ***\n\n");
}
/*
* When a circular dependency is detected, print the
* header first:
*/
static noinline void
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
struct held_lock *check_src,
struct held_lock *check_tgt)
{
struct task_struct *curr = current;
if (debug_locks_silent)
return;
pr_warn("\n");
pr_warn("======================================================\n");
pr_warn("WARNING: possible circular locking dependency detected\n");
print_kernel_ident();
pr_warn("------------------------------------------------------\n");
pr_warn("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
print_lock(check_src);
pr_warn("\nbut task is already holding lock:\n");
print_lock(check_tgt);
pr_warn("\nwhich lock already depends on the new lock.\n\n");
pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
print_circular_bug_entry(entry, depth);
}
/*
* We are about to add A -> B into the dependency graph, and in __bfs() a
* strong dependency path A -> .. -> B is found: hlock_class equals
* entry->class.
*
* If A -> .. -> B can replace A -> B in any __bfs() search (meaning the former
* is _stronger_ than or equal to the latter), we consider A -> B redundant.
* For example, if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A
* -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the
* dependency graph: for any strong path ..-> A -> B ->.. we could get by
* having the dependency A -> B, we can already get an equivalent path ..-> A ->
* .. -> B -> .. via A -> .. -> B. Therefore A -> B is redundant.
*
* We need to make sure both the start and the end of A -> .. -> B is not
* weaker than A -> B. For the start part, please see the comment in
* check_redundant(). For the end part, we need:
*
* Either
*
* a) A -> B is -(*R)-> (everything is not weaker than that)
*
* or
*
* b) A -> .. -> B is -(*N)-> (nothing is stronger than this)
*
*/
static inline bool hlock_equal(struct lock_list *entry, void *data)
{
struct held_lock *hlock = (struct held_lock *)data;
return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
(hlock->read == 2 || /* A -> B is -(*R)-> */
!entry->only_xr); /* A -> .. -> B is -(*N)-> */
}
/*
* We are about to add B -> A into the dependency graph, and in __bfs() a
* strong dependency path A -> .. -> B is found: hlock_class equals
* entry->class.
*
* We will have a deadlock case (conflict) if A -> .. -> B -> A is a strong
* dependency cycle, that means:
*
* Either
*
* a) B -> A is -(E*)->
*
* or
*
* b) A -> .. -> B is -(*N)-> (i.e. A -> .. -(*N)-> B)
*
* as then we don't have -(*R)-> -(S*)-> in the cycle.
*/
static inline bool hlock_conflict(struct lock_list *entry, void *data)
{
struct held_lock *hlock = (struct held_lock *)data;
return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
(hlock->read == 0 || /* B -> A is -(E*)-> */
!entry->only_xr); /* A -> .. -> B is -(*N)-> */
}
static noinline void print_circular_bug(struct lock_list *this,
struct lock_list *target,
struct held_lock *check_src,
struct held_lock *check_tgt)
{
struct task_struct *curr = current;
struct lock_list *parent;
struct lock_list *first_parent;
int depth;
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return;
this->trace = save_trace();
if (!this->trace)
return;
depth = get_lock_depth(target);
print_circular_bug_header(target, depth, check_src, check_tgt);
parent = get_lock_parent(target);
first_parent = parent;
while (parent) {
print_circular_bug_entry(parent, --depth);
parent = get_lock_parent(parent);
}
printk("\nother info that might help us debug this:\n\n");
print_circular_lock_scenario(check_src, check_tgt,
first_parent);
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
}
static noinline void print_bfs_bug(int ret)
{
if (!debug_locks_off_graph_unlock())
return;
/*
* Breadth-first-search failed, graph got corrupted?
*/
WARN(1, "lockdep bfs error:%d\n", ret);
}
static bool noop_count(struct lock_list *entry, void *data)
{
(*(unsigned long *)data)++;
return false;
}
static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
unsigned long count = 0;
struct lock_list *target_entry;
__bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry);
return count;
}
unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
unsigned long ret, flags;
struct lock_list this;
__bfs_init_root(&this, class);
raw_local_irq_save(flags);
lockdep_lock();
ret = __lockdep_count_forward_deps(&this);
lockdep_unlock();
raw_local_irq_restore(flags);
return ret;
}
static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
unsigned long count = 0;
struct lock_list *target_entry;
__bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry);
return count;
}
unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
unsigned long ret, flags;
struct lock_list this;
__bfs_init_root(&this, class);
raw_local_irq_save(flags);
lockdep_lock();
ret = __lockdep_count_backward_deps(&this);
lockdep_unlock();
raw_local_irq_restore(flags);
return ret;
}
/*
* Check whether the dependency graph starting at <src> can lead to
* <target>.
*/
static noinline enum bfs_result
check_path(struct held_lock *target, struct lock_list *src_entry,
bool (*match)(struct lock_list *entry, void *data),
bool (*skip)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
enum bfs_result ret;
ret = __bfs_forwards(src_entry, target, match, skip, target_entry);
if (unlikely(bfs_error(ret)))
print_bfs_bug(ret);
return ret;
}
static void print_deadlock_bug(struct task_struct *, struct held_lock *, struct held_lock *);
/*
* Prove that the dependency graph starting at <src> cannot
* lead to <target>. If it can, adding the <target> -> <src>
* dependency creates a circle in the graph.
*
* Print an error and return BFS_RMATCH if it does.
*/
static noinline enum bfs_result
check_noncircular(struct held_lock *src, struct held_lock *target,
struct lock_trace **const trace)
{
enum bfs_result ret;
struct lock_list *target_entry;
struct lock_list src_entry;
bfs_init_root(&src_entry, src);
debug_atomic_inc(nr_cyclic_checks);
ret = check_path(target, &src_entry, hlock_conflict, NULL, &target_entry);
if (unlikely(ret == BFS_RMATCH)) {
if (!*trace) {
/*
* If save_trace fails here, the printing might
* trigger a WARN but because of the !nr_entries it
* should not do bad things.
*/
*trace = save_trace();
}
if (src->class_idx == target->class_idx)
print_deadlock_bug(current, src, target);
else
print_circular_bug(&src_entry, target_entry, src, target);
}
return ret;
}
#ifdef CONFIG_TRACE_IRQFLAGS
/*
* Forwards and backwards subgraph searching, for the purposes of
* proving that two subgraphs can be connected by a new dependency
* without creating any illegal irq-safe -> irq-unsafe lock dependency.
*
* An irq safe->unsafe deadlock happens under the following conditions:
*
* 1) We have a strong dependency path A -> ... -> B
*
* 2) and we have ENABLED_IRQ usage of B and USED_IN_IRQ usage of A, therefore
* irq can create a new dependency B -> A (consider the case that a holder
* of B gets interrupted by an irq whose handler will try to acquire A).
*
* 3) the dependency circle A -> ... -> B -> A we get from 1) and 2) is a
* strong circle:
*
* For the usage bits of B:
* a) if A -> B is -(*N)->, then B -> A could be any type, so any
* ENABLED_IRQ usage suffices.
* b) if A -> B is -(*R)->, then B -> A must be -(E*)->, so only
* ENABLED_IRQ_*_READ usage suffices.
*
* For the usage bits of A:
* c) if A -> B is -(E*)->, then B -> A could be any type, so any
* USED_IN_IRQ usage suffices.
* d) if A -> B is -(S*)->, then B -> A must be -(*N)->, so only
* USED_IN_IRQ_*_READ usage suffices.
*/
/*
* There is a strong dependency path in the dependency graph: A -> B, and now
* we need to decide which usage bit of A should be accumulated to detect
* safe->unsafe bugs.
*
* Note that usage_accumulate() is used in backwards search, so ->only_xr
* stands for whether A -> B only has -(S*)-> (in this case ->only_xr is true).
*
* As above, if only_xr is false, which means A -> B has -(E*)-> dependency
* path, any usage of A should be considered. Otherwise, we should only
* consider _READ usage.
*/
static inline bool usage_accumulate(struct lock_list *entry, void *mask)
{
if (!entry->only_xr)
*(unsigned long *)mask |= entry->class->usage_mask;
else /* Mask out _READ usage bits */
*(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ);
return false;
}
/*
* There is a strong dependency path in the dependency graph: A -> B, and now
* we need to decide which usage bit of B conflicts with the usage bits of A,
* i.e. which usage bit of B may introduce safe->unsafe deadlocks.
*
* As above, if only_xr is false, which means A -> B has -(*N)-> dependency
* path, any usage of B should be considered. Otherwise, we should only
* consider _READ usage.
*/
static inline bool usage_match(struct lock_list *entry, void *mask)
{
if (!entry->only_xr)
return !!(entry->class->usage_mask & *(unsigned long *)mask);
else /* Mask out _READ usage bits */
return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
}
static inline bool usage_skip(struct lock_list *entry, void *mask)
{
if (entry->class->lock_type == LD_LOCK_NORMAL)
return false;
/*
* Skip local_lock() for irq inversion detection.
*
* For !RT, local_lock() is not a real lock, so it won't carry any
* dependency.
*
* For RT, an irq inversion happens when we have lock A and B, and on
* some CPU we can have:
*
* lock(A);
* <interrupted>
* lock(B);
*
* where lock(B) cannot sleep, and we have a dependency B -> ... -> A.
*
* Now we prove local_lock() cannot exist in that dependency. First we
* have the observation for any lock chain L1 -> ... -> Ln, for any
* 1 <= i <= n, Li.inner_wait_type <= L1.inner_wait_type, otherwise
* wait context check will complain. And since B is not a sleep lock,
* therefore B.inner_wait_type >= 2, and since the inner_wait_type of
* local_lock() is 3, which is greater than 2, therefore there is no
* way the local_lock() exists in the dependency B -> ... -> A.
*
* As a result, we will skip local_lock(), when we search for irq
* inversion bugs.
*/
if (entry->class->lock_type == LD_LOCK_PERCPU &&
DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
return false;
/*
* Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually
* a lock and only used to override the wait_type.
*/
return true;
}
/*
* Find a node in the forwards-direction dependency sub-graph starting
* at @root->class whose usage mask matches @usage_mask.
*
* Return BFS_RMATCH if such a node exists in the subgraph, and put that node
* into *@target_entry.
*/
static enum bfs_result
find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
struct lock_list **target_entry)
{
enum bfs_result result;
debug_atomic_inc(nr_find_usage_forwards_checks);
result = __bfs_forwards(root, &usage_mask, usage_match, usage_skip, target_entry);
return result;
}
/*
* Find a node in the backwards-direction dependency sub-graph starting
* at @root->class whose usage mask matches @usage_mask.
*/
static enum bfs_result
find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
struct lock_list **target_entry)
{
enum bfs_result result;
debug_atomic_inc(nr_find_usage_backwards_checks);
result = __bfs_backwards(root, &usage_mask, usage_match, usage_skip, target_entry);
return result;
}
static void print_lock_class_header(struct lock_class *class, int depth)
{
int bit;
printk("%*s->", depth, "");
print_lock_name(NULL, class);
#ifdef CONFIG_DEBUG_LOCKDEP
printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
#endif
printk(KERN_CONT " {\n");
for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
if (class->usage_mask & (1 << bit)) {
int len = depth;
len += printk("%*s %s", depth, "", usage_str[bit]);
len += printk(KERN_CONT " at:\n");
print_lock_trace(class->usage_traces[bit], len);
}
}
printk("%*s }\n", depth, "");
printk("%*s ... key at: [<%px>] %pS\n",
depth, "", class->key, class->key);
}
/*
* Dependency path printing:
*
* After BFS we get a lock dependency path (linked via ->parent of lock_list),
* printing out each lock in the dependency path will help on understanding how
* the deadlock could happen. Here are some details about dependency path
* printing:
*
* 1) A lock_list can be either forwards or backwards for a lock dependency,
* for a lock dependency A -> B, there are two lock_lists:
*
* a) lock_list in the ->locks_after list of A, whose ->class is B and
* ->links_to is A. In this case, we can say the lock_list is
* "A -> B" (forwards case).
*
* b) lock_list in the ->locks_before list of B, whose ->class is A
* and ->links_to is B. In this case, we can say the lock_list is
* "B <- A" (bacwards case).
*
* The ->trace of both a) and b) point to the call trace where B was
* acquired with A held.
*
* 2) A "helper" lock_list is introduced during BFS, this lock_list doesn't
* represent a certain lock dependency, it only provides an initial entry
* for BFS. For example, BFS may introduce a "helper" lock_list whose
* ->class is A, as a result BFS will search all dependencies starting with
* A, e.g. A -> B or A -> C.
*
* The notation of a forwards helper lock_list is like "-> A", which means
* we should search the forwards dependencies starting with "A", e.g A -> B
* or A -> C.
*
* The notation of a backwards helper lock_list is like "<- B", which means
* we should search the backwards dependencies ending with "B", e.g.
* B <- A or B <- C.
*/
/*
* printk the shortest lock dependencies from @root to @leaf in reverse order.
*
* We have a lock dependency path as follows:
*
* @root @leaf
* | |
* V V
* ->parent ->parent
* | lock_list | <--------- | lock_list | ... | lock_list | <--------- | lock_list |
* | -> L1 | | L1 -> L2 | ... |Ln-2 -> Ln-1| | Ln-1 -> Ln|
*
* , so it's natural that we start from @leaf and print every ->class and
* ->trace until we reach the @root.
*/
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
struct lock_list *root)
{
struct lock_list *entry = leaf;
int depth;
/* Compute the depth from the tree generated by BFS */
depth = get_lock_depth(leaf);
do {
print_lock_class_header(entry->class, depth);
printk("%*s ... acquired at:\n", depth, "");
print_lock_trace(entry->trace, 2);
printk("\n");
if (depth == 0 && (entry != root)) {
printk("lockdep:%s bad path found in chain graph\n", __func__);
break;
}
entry = get_lock_parent(entry);
depth--;
} while (entry && (depth >= 0));
}
/*
* printk the shortest lock dependencies from @leaf to @root.
*
* We have a lock dependency path (from a backwards search) as follows:
*
* @leaf @root
* | |
* V V
* ->parent ->parent
* | lock_list | ---------> | lock_list | ... | lock_list | ---------> | lock_list |
* | L2 <- L1 | | L3 <- L2 | ... | Ln <- Ln-1 | | <- Ln |
*
* , so when we iterate from @leaf to @root, we actually print the lock
* dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
*
* Another thing to notice here is that the ->class of L2 <- L1 is L1, while
* the ->trace of L2 <- L1 is the call trace of L2. In fact, we don't have the
* call trace of L1 in the dependency path, which is alright, because most of
* the time we can figure out where L1 is held from the call trace of L2.
*/
static void __used
print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
struct lock_list *root)
{
struct lock_list *entry = leaf;
const struct lock_trace *trace = NULL;
int depth;
/* Compute the depth from the tree generated by BFS */
depth = get_lock_depth(leaf);
do {
print_lock_class_header(entry->class, depth);
if (trace) {
printk("%*s ... acquired at:\n", depth, "");
print_lock_trace(trace, 2);
printk("\n");
}
/*
* Record the pointer to the trace for the next lock_list
* entry, see the comments for the function.
*/
trace = entry->trace;
if (depth == 0 && (entry != root)) {
printk("lockdep:%s bad path found in chain graph\n", __func__);
break;
}
entry = get_lock_parent(entry);
depth--;
} while (entry && (depth >= 0));
}
static void
print_irq_lock_scenario(struct lock_list *safe_entry,
struct lock_list *unsafe_entry,
struct lock_class *prev_class,
struct lock_class *next_class)
{
struct lock_class *safe_class = safe_entry->class;
struct lock_class *unsafe_class = unsafe_entry->class;
struct lock_class *middle_class = prev_class;
if (middle_class == safe_class)
middle_class = next_class;
/*
* A direct locking problem where unsafe_class lock is taken
* directly by safe_class lock, then all we need to show
* is the deadlock scenario, as it is obvious that the
* unsafe lock is taken under the safe lock.
*
* But if there is a chain instead, where the safe lock takes
* an intermediate lock (middle_class) where this lock is
* not the same as the safe lock, then the lock chain is
* used to describe the problem. Otherwise we would need
* to show a different CPU case for each link in the chain
* from the safe_class lock to the unsafe_class lock.
*/
if (middle_class != unsafe_class) {
printk("Chain exists of:\n ");
__print_lock_name(NULL, safe_class);
printk(KERN_CONT " --> ");
__print_lock_name(NULL, middle_class);
printk(KERN_CONT " --> ");
__print_lock_name(NULL, unsafe_class);
printk(KERN_CONT "\n\n");
}
printk(" Possible interrupt unsafe locking scenario:\n\n");
printk(" CPU0 CPU1\n");
printk(" ---- ----\n");
printk(" lock(");
__print_lock_name(NULL, unsafe_class);
printk(KERN_CONT ");\n");
printk(" local_irq_disable();\n");
printk(" lock(");
__print_lock_name(NULL, safe_class);
printk(KERN_CONT ");\n");
printk(" lock(");
__print_lock_name(NULL, middle_class);
printk(KERN_CONT ");\n");
printk(" <Interrupt>\n");
printk(" lock(");
__print_lock_name(NULL, safe_class);
printk(KERN_CONT ");\n");
printk("\n *** DEADLOCK ***\n\n");
}
static void
print_bad_irq_dependency(struct task_struct *curr,
struct lock_list *prev_root,
struct lock_list *next_root,
struct lock_list *backwards_entry,
struct lock_list *forwards_entry,
struct held_lock *prev,
struct held_lock *next,
enum lock_usage_bit bit1,
enum lock_usage_bit bit2,
const char *irqclass)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return;
pr_warn("\n");
pr_warn("=====================================================\n");
pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
irqclass, irqclass);
print_kernel_ident();
pr_warn("-----------------------------------------------------\n");
pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
curr->comm, task_pid_nr(curr),
lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
lockdep_hardirqs_enabled(),
curr->softirqs_enabled);
print_lock(next);
pr_warn("\nand this task is already holding:\n");
print_lock(prev);
pr_warn("which would create a new lock dependency:\n");
print_lock_name(prev, hlock_class(prev));
pr_cont(" ->");
print_lock_name(next, hlock_class(next));
pr_cont("\n");
pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
irqclass);
print_lock_name(NULL, backwards_entry->class);
pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
print_lock_name(NULL, forwards_entry->class);
pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
pr_warn("...");
print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
pr_warn("\nother info that might help us debug this:\n\n");
print_irq_lock_scenario(backwards_entry, forwards_entry,
hlock_class(prev), hlock_class(next));
lockdep_print_held_locks(curr);
pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
pr_warn("\nthe dependencies between the lock to be acquired");
pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
next_root->trace = save_trace();
if (!next_root->trace)
return;
print_shortest_lock_dependencies(forwards_entry, next_root);
pr_warn("\nstack backtrace:\n");
dump_stack();
}
static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
__stringify(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};
static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
__stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};
static inline const char *state_name(enum lock_usage_bit bit)
{
if (bit & LOCK_USAGE_READ_MASK)
return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
else
return state_names[bit >> LOCK_USAGE_DIR_MASK];
}
/*
* The bit number is encoded like:
*
* bit0: 0 exclusive, 1 read lock
* bit1: 0 used in irq, 1 irq enabled
* bit2-n: state
*/
static int exclusive_bit(int new_bit)
{
int state = new_bit & LOCK_USAGE_STATE_MASK;
int dir = new_bit & LOCK_USAGE_DIR_MASK;
/*
* keep state, bit flip the direction and strip read.
*/
return state | (dir ^ LOCK_USAGE_DIR_MASK);
}
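/*
* With the encoding above, e.g. new_bit == 0b101 (state 1, USED_IN, READ)
* yields 0b100 | (0b00 ^ 0b10) == 0b110: the direction is flipped from
* USED_IN to ENABLED and the read bit is stripped.
*/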
/*
* Observe that when given a bitmask where each bitnr is encoded as above, a
* right shift of the mask transforms each individual bitnr into bitnr - 1,
* and conversely a left shift transforms each bitnr into bitnr + 1.
*
* So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
* create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
* instead by subtracting the bit number by 2, or shifting the mask right by 2.
*
* Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
*
* So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
* all bits set) and recompose with bitnr1 flipped.
*/
static unsigned long invert_dir_mask(unsigned long mask)
{
unsigned long excl = 0;
/* Invert dir */
excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
return excl;
}
/*
* Note that a LOCK_ENABLED_IRQ_*_READ usage and a LOCK_USED_IN_IRQ_*_READ
* usage may cause deadlock too, for example:
*
* P1 P2
* <irq disabled>
* write_lock(l1); <irq enabled>
* read_lock(l2);
* write_lock(l2);
* <in irq>
* read_lock(l1);
*
* , in the above case, l1 will be marked as LOCK_USED_IN_IRQ_HARDIRQ_READ and
* l2 will be marked as LOCK_ENABLED_IRQ_HARDIRQ_READ, and this is a possible
* deadlock.
*
* In fact, all of the following cases may cause deadlocks:
*
* LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*
* LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*
* LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*_READ
* LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*_READ
*
* As a result, to calculate the "exclusive mask", first we invert the
* direction (USED_IN/ENABLED) of the original mask, and 1) for all bits with
* bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*). 2) for all
* bits with bitnr0 cleared (LOCK_*_READ), add those with bitnr0 set (LOCK_*).
*/
static unsigned long exclusive_mask(unsigned long mask)
{
unsigned long excl = invert_dir_mask(mask);
excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
return excl;
}
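/*
* For example, if @mask is LOCKF_USED_IN_HARDIRQ, invert_dir_mask() turns
* it into LOCKF_ENABLED_HARDIRQ, and the two shifts above then also set
* LOCKF_ENABLED_HARDIRQ_READ: per the table above, both the write and the
* read variant of an irq-enabled usage conflict with a USED_IN usage.
*/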
/*
* Retrieve the _possible_ original mask to which @mask is
* exclusive. I.e. this is the opposite of exclusive_mask().
* Note that 2 possible original bits can match an exclusive
* bit: one has LOCK_USAGE_READ_MASK set, the other has it
* cleared. So both are returned for each exclusive bit.
*/
static unsigned long original_mask(unsigned long mask)
{
unsigned long excl = invert_dir_mask(mask);
/* Include read in existing usages */
excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
return excl;
}
/*
* Find the first pair of bit match between an original
* usage mask and an exclusive usage mask.
*/
static int find_exclusive_match(unsigned long mask,
unsigned long excl_mask,
enum lock_usage_bit *bitp,
enum lock_usage_bit *excl_bitp)
{
int bit, excl, excl_read;
for_each_set_bit(bit, &mask, LOCK_USED) {
/*
* exclusive_bit() strips the read bit, however,
* LOCK_ENABLED_IRQ_*_READ may cause deadlocks too, so we need
* to search excl | LOCK_USAGE_READ_MASK as well.
*/
excl = exclusive_bit(bit);
excl_read = excl | LOCK_USAGE_READ_MASK;
if (excl_mask & lock_flag(excl)) {
*bitp = bit;
*excl_bitp = excl;
return 0;
} else if (excl_mask & lock_flag(excl_read)) {
*bitp = bit;
*excl_bitp = excl_read;
return 0;
}
}
return -1;
}
/*
* Prove that the new dependency does not connect a hardirq-safe(-read)
* lock with a hardirq-unsafe lock - to achieve this we search
* the backwards-subgraph starting at <prev>, and the
* forwards-subgraph starting at <next>:
*/
static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
unsigned long usage_mask = 0, forward_mask, backward_mask;
enum lock_usage_bit forward_bit = 0, backward_bit = 0;
struct lock_list *target_entry1;
struct lock_list *target_entry;
struct lock_list this, that;
enum bfs_result ret;
/*
* Step 1: gather all hard/soft IRQs usages backward in an
* accumulated usage mask.
*/
bfs_init_rootb(&this, prev);
ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL);
if (bfs_error(ret)) {
print_bfs_bug(ret);
return 0;
}
usage_mask &= LOCKF_USED_IN_IRQ_ALL;
if (!usage_mask)
return 1;
/*
* Step 2: find exclusive uses forward that match the previous
* backward accumulated mask.
*/
forward_mask = exclusive_mask(usage_mask);
bfs_init_root(&that, next);
ret = find_usage_forwards(&that, forward_mask, &target_entry1);
if (bfs_error(ret)) {
print_bfs_bug(ret);
return 0;
}
if (ret == BFS_RNOMATCH)
return 1;
/*
* Step 3: we found a bad match! Now retrieve a lock from the backward
* list whose usage mask matches the exclusive usage mask from the
* lock found on the forward list.
*
* Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
* the following case:
*
* When trying to add A -> B to the graph, we find that there is a
* hardirq-safe L, with L -> ... -> A, and another hardirq-unsafe M,
* with B -> ... -> M. However, M is **softirq-safe**; if we used the
* exact inverted bits of M's usage_mask, we would find another lock N
* that is **softirq-unsafe** with N -> ... -> A, yet N -> .. -> M would
* not cause an inversion deadlock.
*/
backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
ret = find_usage_backwards(&this, backward_mask, &target_entry);
if (bfs_error(ret)) {
print_bfs_bug(ret);
return 0;
}
if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH))
return 1;
/*
* Step 4: narrow down to a pair of incompatible usage bits
* and report it.
*/
ret = find_exclusive_match(target_entry->class->usage_mask,
target_entry1->class->usage_mask,
&backward_bit, &forward_bit);
if (DEBUG_LOCKS_WARN_ON(ret == -1))
return 1;
print_bad_irq_dependency(curr, &this, &that,
target_entry, target_entry1,
prev, next,
backward_bit, forward_bit,
state_name(backward_bit));
return 0;
}
#else
static inline int check_irq_usage(struct task_struct *curr,
struct held_lock *prev, struct held_lock *next)
{
return 1;
}
static inline bool usage_skip(struct lock_list *entry, void *mask)
{
return false;
}
#endif /* CONFIG_TRACE_IRQFLAGS */
#ifdef CONFIG_LOCKDEP_SMALL
/*
* Check whether the dependency graph starting at <src> can lead to
* <target>. If it can, the <src> -> <target> dependency is already
* in the graph.
*
* Return BFS_RMATCH if it can, BFS_RNOMATCH if it cannot, or BFS_E* if
* an error occurs during the BFS.
*/
static noinline enum bfs_result
check_redundant(struct held_lock *src, struct held_lock *target)
{
enum bfs_result ret;
struct lock_list *target_entry;
struct lock_list src_entry;
bfs_init_root(&src_entry, src);
/*
* Special setup for check_redundant().
*
* To report redundant, we need to find a strong dependency path that
* is equal to or stronger than <src> -> <target>. So if <src> is E,
* we need to let __bfs() only search for a path starting at a -(E*)->,
* we achieve this by setting the initial node's ->only_xr to true in
* that case. And if <src> is S, we set the initial ->only_xr to false
* because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant.
*/
src_entry.only_xr = src->read == 0;
debug_atomic_inc(nr_redundant_checks);
/*
* Note: we skip local_lock() for redundant check, because as the
* comment in usage_skip(), A -> local_lock() -> B and A -> B are not
* the same.
*/
ret = check_path(target, &src_entry, hlock_equal, usage_skip, &target_entry);
if (ret == BFS_RMATCH)
debug_atomic_inc(nr_redundant);
return ret;
}
#else
static inline enum bfs_result
check_redundant(struct held_lock *src, struct held_lock *target)
{
return BFS_RNOMATCH;
}
#endif
static void inc_chains(int irq_context)
{
if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
nr_hardirq_chains++;
else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
nr_softirq_chains++;
else
nr_process_chains++;
}
static void dec_chains(int irq_context)
{
if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
nr_hardirq_chains--;
else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
nr_softirq_chains--;
else
nr_process_chains--;
}
static void
print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
{
struct lock_class *next = hlock_class(nxt);
struct lock_class *prev = hlock_class(prv);
printk(" Possible unsafe locking scenario:\n\n");
printk(" CPU0\n");
printk(" ----\n");
printk(" lock(");
__print_lock_name(prv, prev);
printk(KERN_CONT ");\n");
printk(" lock(");
__print_lock_name(nxt, next);
printk(KERN_CONT ");\n");
printk("\n *** DEADLOCK ***\n\n");
printk(" May be due to missing lock nesting notation\n\n");
}
static void
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
struct lock_class *class = hlock_class(prev);
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return;
pr_warn("\n");
pr_warn("============================================\n");
pr_warn("WARNING: possible recursive locking detected\n");
print_kernel_ident();
pr_warn("--------------------------------------------\n");
pr_warn("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
print_lock(next);
pr_warn("\nbut task is already holding lock:\n");
print_lock(prev);
if (class->cmp_fn) {
pr_warn("and the lock comparison function returns %i:\n",
class->cmp_fn(prev->instance, next->instance));
}
pr_warn("\nother info that might help us debug this:\n");
print_deadlock_scenario(next, prev);
lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
}
/*
* Check whether we are holding such a class already.
*
* (Note that this has to be done separately, because the graph cannot
* detect such classes of deadlocks.)
*
* Returns: 0 on deadlock detected, 1 on OK, 2 if another lock with the same
* lock class is held but nest_lock is also held, i.e. we rely on the
* nest_lock to avoid the deadlock.
*/
static int
check_deadlock(struct task_struct *curr, struct held_lock *next)
{
struct lock_class *class;
struct held_lock *prev;
struct held_lock *nest = NULL;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
prev = curr->held_locks + i;
if (prev->instance == next->nest_lock)
nest = prev;
if (hlock_class(prev) != hlock_class(next))
continue;
/*
* Allow read-after-read recursion of the same
* lock class (i.e. read_lock(lock)+read_lock(lock)):
*/
if ((next->read == 2) && prev->read)
continue;
class = hlock_class(prev);
if (class->cmp_fn &&
class->cmp_fn(prev->instance, next->instance) < 0)
continue;
/*
* We're holding the nest_lock, which serializes this lock's
* nesting behaviour.
*/
if (nest)
return 2;
print_deadlock_bug(curr, prev, next);
return 0;
}
return 1;
}
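/*
* E.g. read_lock(&lock); read_lock(&lock); is accepted by the loop above
* because the inner acquisition is a recursive read (next->read == 2)
* while the lock is already held for read, whereas a second lock(&lock)
* of the same class without a nest_lock triggers print_deadlock_bug().
*/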
/*
* There was a chain-cache miss, and we are about to add a new dependency
* to a previous lock. We validate the following rules:
*
* - would the adding of the <prev> -> <next> dependency create a
* circular dependency in the graph? [== circular deadlock]
*
* - does the new prev->next dependency connect any hardirq-safe lock
* (in the full backwards-subgraph starting at <prev>) with any
* hardirq-unsafe lock (in the full forwards-subgraph starting at
* <next>)? [== illegal lock inversion with hardirq contexts]
*
* - does the new prev->next dependency connect any softirq-safe lock
* (in the full backwards-subgraph starting at <prev>) with any
* softirq-unsafe lock (in the full forwards-subgraph starting at
* <next>)? [== illegal lock inversion with softirq contexts]
*
* any of these scenarios could lead to a deadlock.
*
* Then if all the validations pass, we add the forwards and backwards
* dependency.
*/
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next, u16 distance,
struct lock_trace **const trace)
{
struct lock_list *entry;
enum bfs_result ret;
if (!hlock_class(prev)->key || !hlock_class(next)->key) {
/*
* The warning statements below may trigger a use-after-free
* of the class name. It is better to trigger a use-after-free
* and to have the class name most of the time instead of not
* having the class name available.
*/
WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
"Detected use-after-free of lock class %px/%s\n",
hlock_class(prev),
hlock_class(prev)->name);
WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
"Detected use-after-free of lock class %px/%s\n",
hlock_class(next),
hlock_class(next)->name);
return 2;
}
if (prev->class_idx == next->class_idx) {
struct lock_class *class = hlock_class(prev);
if (class->cmp_fn &&
class->cmp_fn(prev->instance, next->instance) < 0)
return 2;
}
/*
* Prove that the new <prev> -> <next> dependency would not
* create a circular dependency in the graph. (We do this by
* a breadth-first search into the graph starting at <next>,
* and check whether we can reach <prev>.)
*
* The search is limited by the size of the circular queue (i.e.,
* MAX_CIRCULAR_QUEUE_SIZE), which holds the current frontier of nodes
* in the graph whose neighbours are yet to be checked.
*/
ret = check_noncircular(next, prev, trace);
if (unlikely(bfs_error(ret) || ret == BFS_RMATCH))
return 0;
if (!check_irq_usage(curr, prev, next))
return 0;
/*
* Is the <prev> -> <next> dependency already present?
*
* (this may occur even though this is a new chain: consider
* e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
* chains - the second one will be new, but L1 already has
* L2 added to its dependency list, due to the first chain.)
*/
list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
if (entry->class == hlock_class(next)) {
if (distance == 1)
entry->distance = 1;
entry->dep |= calc_dep(prev, next);
/*
* Also, update the reverse dependency in @next's
* ->locks_before list.
*
* Here we reuse @entry as the cursor, which is fine
* because we won't go to the next iteration of the
* outer loop:
*
* For normal cases, we return in the inner loop.
*
* If we fail to return, we have inconsistency, i.e.
* <prev>::locks_after contains <next> while
* <next>::locks_before doesn't contain <prev>. In
* that case, we return after the inner and indicate
* something is wrong.
*/
list_for_each_entry(entry, &hlock_class(next)->locks_before, entry) {
if (entry->class == hlock_class(prev)) {
if (distance == 1)
entry->distance = 1;
entry->dep |= calc_depb(prev, next);
return 1;
}
}
/* <prev> is not found in <next>::locks_before */
return 0;
}
}
/*
* Is the <prev> -> <next> link redundant?
*/
ret = check_redundant(prev, next);
if (bfs_error(ret))
return 0;
else if (ret == BFS_RMATCH)
return 2;
if (!*trace) {
*trace = save_trace();
if (!*trace)
return 0;
}
/*
* Ok, all validations passed, add the new lock
* to the previous lock's dependency list:
*/
ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
&hlock_class(prev)->locks_after, distance,
calc_dep(prev, next), *trace);
if (!ret)
return 0;
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
&hlock_class(next)->locks_before, distance,
calc_depb(prev, next), *trace);
if (!ret)
return 0;
return 2;
}
/*
* Add the dependency to all directly-previous locks that are 'relevant'.
* The ones that are relevant are (in increasing distance from curr):
* all consecutive trylock entries and the final non-trylock entry - or
* the end of this context's lock-chain - whichever comes first.
*/
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
struct lock_trace *trace = NULL;
int depth = curr->lockdep_depth;
struct held_lock *hlock;
/*
* Debugging checks.
*
* Depth must not be zero for a non-head lock:
*/
if (!depth)
goto out_bug;
/*
* At least two relevant locks must exist for this
* to be a head:
*/
if (curr->held_locks[depth].irq_context !=
curr->held_locks[depth-1].irq_context)
goto out_bug;
for (;;) {
u16 distance = curr->lockdep_depth - depth + 1;
hlock = curr->held_locks + depth - 1;
if (hlock->check) {
int ret = check_prev_add(curr, hlock, next, distance, &trace);
if (!ret)
return 0;
/*
* Stop after the first non-trylock entry,
* as non-trylock entries have added their
* own direct dependencies already, so this
* lock is connected to them indirectly:
*/
if (!hlock->trylock)
break;
}
depth--;
/*
* End of lock-stack?
*/
if (!depth)
break;
/*
* Stop the search if we cross into another context:
*/
if (curr->held_locks[depth].irq_context !=
curr->held_locks[depth-1].irq_context)
break;
}
return 1;
out_bug:
if (!debug_locks_off_graph_unlock())
return 0;
/*
* Clearly we all shouldn't be here, but since we made it we
* can reliably say we messed up our state. See the above two
* gotos for reasons why we could possibly end up here.
*/
WARN_ON(1);
return 0;
}
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
unsigned long nr_zapped_lock_chains;
unsigned int nr_free_chain_hlocks; /* Free chain_hlocks in buckets */
unsigned int nr_lost_chain_hlocks; /* Lost chain_hlocks */
unsigned int nr_large_chain_blocks; /* size > MAX_CHAIN_BUCKETS */
/*
* The first 2 chain_hlocks entries in the chain block in the bucket
* list contain the following metadata:
*
* entry[0]:
* Bit 15 - always set to 1 (it is not a class index)
* Bits 0-14 - upper 15 bits of the next block index
* entry[1] - lower 16 bits of next block index
*
* A next block index of all 1 bits means it is the end of the list.
*
* On the unsized bucket (bucket-0), the 3rd and 4th entries contain
* the chain block size:
*
* entry[2] - upper 16 bits of the chain block size
* entry[3] - lower 16 bits of the chain block size
*/
#define MAX_CHAIN_BUCKETS 16
#define CHAIN_BLK_FLAG (1U << 15)
#define CHAIN_BLK_LIST_END 0xFFFFU
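/*
 * Illustrative example (not part of the original code): a free block
 * in bucket-0 whose successor lives at index 0x12345 and whose size is
 * 20 would be encoded by init_chain_block() below as:
 *
 *	entry[0] = (0x12345 >> 16) | CHAIN_BLK_FLAG	== 0x8001
 *	entry[1] = 0x12345 & 0xffff			== 0x2345
 *	entry[2] = 20 >> 16				== 0x0000
 *	entry[3] = 20 & 0xffff				== 0x0014
 *
 * chain_block_next() reverses the first two steps:
 * ((0x8001 & ~CHAIN_BLK_FLAG) << 16) | 0x2345 == 0x12345.
 */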
static int chain_block_buckets[MAX_CHAIN_BUCKETS];
static inline int size_to_bucket(int size)
{
if (size > MAX_CHAIN_BUCKETS)
return 0;
return size - 1;
}
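/*
 * Example mapping (illustrative): size 2 -> bucket 1, size 16 ->
 * bucket 15, size 17 and above -> bucket 0, the variable-size bucket.
 * A size of 1 also maps to bucket 0 by the formula, but single entries
 * are never stored there: add_chain_block() leaks them, because two
 * u16 entries are needed to encode a freelist pointer.
 */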
/*
* Iterate all the chain blocks in a bucket.
*/
#define for_each_chain_block(bucket, prev, curr) \
for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \
(curr) >= 0; \
(prev) = (curr), (curr) = chain_block_next(curr))
/*
* next block or -1
*/
static inline int chain_block_next(int offset)
{
int next = chain_hlocks[offset];
WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG));
if (next == CHAIN_BLK_LIST_END)
return -1;
next &= ~CHAIN_BLK_FLAG;
next <<= 16;
next |= chain_hlocks[offset + 1];
return next;
}
/*
* bucket-0 only
*/
static inline int chain_block_size(int offset)
{
return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3];
}
static inline void init_chain_block(int offset, int next, int bucket, int size)
{
chain_hlocks[offset] = (next >> 16) | CHAIN_BLK_FLAG;
chain_hlocks[offset + 1] = (u16)next;
if (size && !bucket) {
chain_hlocks[offset + 2] = size >> 16;
chain_hlocks[offset + 3] = (u16)size;
}
}
static inline void add_chain_block(int offset, int size)
{
int bucket = size_to_bucket(size);
int next = chain_block_buckets[bucket];
int prev, curr;
if (unlikely(size < 2)) {
/*
* We can't store single entries on the freelist. Leak them.
*
* One possible way out would be to uniquely mark them, other
* than with CHAIN_BLK_FLAG, such that we can recover them when
* the block before it is re-added.
*/
if (size)
nr_lost_chain_hlocks++;
return;
}
nr_free_chain_hlocks += size;
if (!bucket) {
nr_large_chain_blocks++;
/*
* Variable sized, sort large to small.
*/
for_each_chain_block(0, prev, curr) {
if (size >= chain_block_size(curr))
break;
}
init_chain_block(offset, curr, 0, size);
if (prev < 0)
chain_block_buckets[0] = offset;
else
init_chain_block(prev, offset, 0, 0);
return;
}
/*
* Fixed size, add to head.
*/
init_chain_block(offset, next, bucket, size);
chain_block_buckets[bucket] = offset;
}
/*
* Only the first block in the list can be deleted.
*
* For the variable size bucket[0], the first block (the largest one) is
* returned, broken up and put back into the pool. So if a chain block of
* length > MAX_CHAIN_BUCKETS is ever used and zapped, it will just be
* queued up after the primordial chain block and never be used until the
 * hlock entries in the primordial chain block are almost used up. That
 * causes fragmentation and reduces allocation efficiency. That can be
* monitored by looking at the "large chain blocks" number in lockdep_stats.
*/
static inline void del_chain_block(int bucket, int size, int next)
{
nr_free_chain_hlocks -= size;
chain_block_buckets[bucket] = next;
if (!bucket)
nr_large_chain_blocks--;
}
static void init_chain_block_buckets(void)
{
int i;
for (i = 0; i < MAX_CHAIN_BUCKETS; i++)
chain_block_buckets[i] = -1;
add_chain_block(0, ARRAY_SIZE(chain_hlocks));
}
/*
* Return offset of a chain block of the right size or -1 if not found.
*
 * Fairly simple worst-fit allocator with the addition of a number of
 * size-specific free lists.
*/
static int alloc_chain_hlocks(int req)
{
int bucket, curr, size;
/*
* We rely on the MSB to act as an escape bit to denote freelist
* pointers. Make sure this bit isn't set in 'normal' class_idx usage.
*/
BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG);
init_data_structures_once();
if (nr_free_chain_hlocks < req)
return -1;
/*
* We require a minimum of 2 (u16) entries to encode a freelist
* 'pointer'.
*/
req = max(req, 2);
bucket = size_to_bucket(req);
curr = chain_block_buckets[bucket];
if (bucket) {
if (curr >= 0) {
del_chain_block(bucket, req, chain_block_next(curr));
return curr;
}
/* Try bucket 0 */
curr = chain_block_buckets[0];
}
/*
* The variable sized freelist is sorted by size; the first entry is
* the largest. Use it if it fits.
*/
if (curr >= 0) {
size = chain_block_size(curr);
if (likely(size >= req)) {
del_chain_block(0, size, chain_block_next(curr));
add_chain_block(curr + req, size - req);
return curr;
}
}
/*
* Last resort, split a block in a larger sized bucket.
*/
for (size = MAX_CHAIN_BUCKETS; size > req; size--) {
bucket = size_to_bucket(size);
curr = chain_block_buckets[bucket];
if (curr < 0)
continue;
del_chain_block(bucket, size, chain_block_next(curr));
add_chain_block(curr + req, size - req);
return curr;
}
return -1;
}
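/*
 * Worked example (illustrative): alloc_chain_hlocks(3) first tries
 * bucket 2 (exact size). If that is empty, it tries the largest block
 * in bucket 0; a 100-entry block would be split into the 3 requested
 * entries plus a 97-entry remainder that is re-added to bucket 0. As a
 * last resort the fixed-size buckets are scanned from size 16 down to
 * size 4, splitting e.g. an 8-entry block into 3 + 5, with the 5-entry
 * remainder going to bucket 4.
 */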
static inline void free_chain_hlocks(int base, int size)
{
add_chain_block(base, max(size, 2));
}
struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
u16 chain_hlock = chain_hlocks[chain->base + i];
unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
return lock_classes + class_idx;
}
/*
* Returns the index of the first held_lock of the current chain
*/
static inline int get_first_held_lock(struct task_struct *curr,
struct held_lock *hlock)
{
int i;
struct held_lock *hlock_curr;
for (i = curr->lockdep_depth - 1; i >= 0; i--) {
hlock_curr = curr->held_locks + i;
if (hlock_curr->irq_context != hlock->irq_context)
break;
}
return ++i;
}
#ifdef CONFIG_DEBUG_LOCKDEP
/*
* Returns the next chain_key iteration
*/
static u64 print_chain_key_iteration(u16 hlock_id, u64 chain_key)
{
u64 new_chain_key = iterate_chain_key(chain_key, hlock_id);
printk(" hlock_id:%d -> chain_key:%016Lx",
(unsigned int)hlock_id,
(unsigned long long)new_chain_key);
return new_chain_key;
}
static void
print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
{
struct held_lock *hlock;
u64 chain_key = INITIAL_CHAIN_KEY;
int depth = curr->lockdep_depth;
int i = get_first_held_lock(curr, hlock_next);
printk("depth: %u (irq_context %u)\n", depth - i + 1,
hlock_next->irq_context);
for (; i < depth; i++) {
hlock = curr->held_locks + i;
chain_key = print_chain_key_iteration(hlock_id(hlock), chain_key);
print_lock(hlock);
}
print_chain_key_iteration(hlock_id(hlock_next), chain_key);
print_lock(hlock_next);
}
static void print_chain_keys_chain(struct lock_chain *chain)
{
int i;
u64 chain_key = INITIAL_CHAIN_KEY;
u16 hlock_id;
printk("depth: %u\n", chain->depth);
for (i = 0; i < chain->depth; i++) {
hlock_id = chain_hlocks[chain->base + i];
chain_key = print_chain_key_iteration(hlock_id, chain_key);
print_lock_name(NULL, lock_classes + chain_hlock_class_idx(hlock_id));
printk("\n");
}
}
static void print_collision(struct task_struct *curr,
struct held_lock *hlock_next,
struct lock_chain *chain)
{
pr_warn("\n");
pr_warn("============================\n");
pr_warn("WARNING: chain_key collision\n");
print_kernel_ident();
pr_warn("----------------------------\n");
pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
pr_warn("Hash chain already cached but the contents don't match!\n");
pr_warn("Held locks:");
print_chain_keys_held_locks(curr, hlock_next);
pr_warn("Locks in cached chain:");
print_chain_keys_chain(chain);
pr_warn("\nstack backtrace:\n");
dump_stack();
}
#endif
/*
* Checks whether the chain and the current held locks are consistent
 * in depth and also in content. If they are not, it most likely means
* that there was a collision during the calculation of the chain_key.
* Returns: 0 not passed, 1 passed
*/
static int check_no_collision(struct task_struct *curr,
struct held_lock *hlock,
struct lock_chain *chain)
{
#ifdef CONFIG_DEBUG_LOCKDEP
int i, j, id;
i = get_first_held_lock(curr, hlock);
if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
print_collision(curr, hlock, chain);
return 0;
}
for (j = 0; j < chain->depth - 1; j++, i++) {
id = hlock_id(&curr->held_locks[i]);
if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
print_collision(curr, hlock, chain);
return 0;
}
}
#endif
return 1;
}
/*
* Given an index that is >= -1, return the index of the next lock chain.
* Return -2 if there is no next lock chain.
*/
long lockdep_next_lockchain(long i)
{
i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
return i < ARRAY_SIZE(lock_chains) ? i : -2;
}
unsigned long lock_chain_count(void)
{
return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
}
/* Must be called with the graph lock held. */
static struct lock_chain *alloc_lock_chain(void)
{
int idx = find_first_zero_bit(lock_chains_in_use,
ARRAY_SIZE(lock_chains));
if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
return NULL;
__set_bit(idx, lock_chains_in_use);
return lock_chains + idx;
}
/*
 * Adds a dependency chain into the chain hashtable. Must be called with
* graph_lock held.
*
 * Return 0 on failure, with graph_lock released.
 * Return 1 on success, with graph_lock held.
*/
static inline int add_chain_cache(struct task_struct *curr,
struct held_lock *hlock,
u64 chain_key)
{
struct hlist_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
int i, j;
/*
	 * The caller must hold the graph lock and ensure we've got IRQs
	 * disabled to make this an IRQ-safe lock. For recursion reasons,
	 * lockdep won't complain about its own locking errors.
*/
if (lockdep_assert_locked())
return 0;
chain = alloc_lock_chain();
if (!chain) {
if (!debug_locks_off_graph_unlock())
return 0;
print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
dump_stack();
return 0;
}
chain->chain_key = chain_key;
chain->irq_context = hlock->irq_context;
i = get_first_held_lock(curr, hlock);
chain->depth = curr->lockdep_depth + 1 - i;
BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
j = alloc_chain_hlocks(chain->depth);
if (j < 0) {
if (!debug_locks_off_graph_unlock())
return 0;
print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
dump_stack();
return 0;
}
chain->base = j;
for (j = 0; j < chain->depth - 1; j++, i++) {
int lock_id = hlock_id(curr->held_locks + i);
chain_hlocks[chain->base + j] = lock_id;
}
chain_hlocks[chain->base + j] = hlock_id(hlock);
hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
inc_chains(chain->irq_context);
return 1;
}
/*
* Look up a dependency chain. Must be called with either the graph lock or
* the RCU read lock held.
*/
static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
{
struct hlist_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
hlist_for_each_entry_rcu(chain, hash_head, entry) {
if (READ_ONCE(chain->chain_key) == chain_key) {
debug_atomic_inc(chain_lookup_hits);
return chain;
}
}
return NULL;
}
/*
 * If the key is not present yet in the dependency chain cache then
 * add it and return 1 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 0.
 * (On a return value of 1, graph_lock is held.)
*/
static inline int lookup_chain_cache_add(struct task_struct *curr,
struct held_lock *hlock,
u64 chain_key)
{
struct lock_class *class = hlock_class(hlock);
struct lock_chain *chain = lookup_chain_cache(chain_key);
if (chain) {
cache_hit:
if (!check_no_collision(curr, hlock, chain))
return 0;
if (very_verbose(class)) {
printk("\nhash chain already cached, key: "
"%016Lx tail class: [%px] %s\n",
(unsigned long long)chain_key,
class->key, class->name);
}
return 0;
}
if (very_verbose(class)) {
printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
(unsigned long long)chain_key, class->key, class->name);
}
if (!graph_lock())
return 0;
/*
* We have to walk the chain again locked - to avoid duplicates:
*/
chain = lookup_chain_cache(chain_key);
if (chain) {
graph_unlock();
goto cache_hit;
}
if (!add_chain_cache(curr, hlock, chain_key))
return 0;
return 1;
}
static int validate_chain(struct task_struct *curr,
struct held_lock *hlock,
int chain_head, u64 chain_key)
{
/*
* Trylock needs to maintain the stack of held locks, but it
* does not add new dependencies, because trylock can be done
* in any order.
*
* We look up the chain_key and do the O(N^2) check and update of
* the dependencies only if this is a new dependency chain.
	 * (If lookup_chain_cache_add() returns 1, it acquires
	 * graph_lock for us.)
*/
if (!hlock->trylock && hlock->check &&
lookup_chain_cache_add(curr, hlock, chain_key)) {
/*
* Check whether last held lock:
*
* - is irq-safe, if this lock is irq-unsafe
* - is softirq-safe, if this lock is hardirq-unsafe
*
* And check whether the new lock's dependency graph
* could lead back to the previous lock:
*
* - within the current held-lock stack
* - across our accumulated lock dependency records
*
* any of these scenarios could lead to a deadlock.
*/
/*
		 * The simple case: does the current task hold the same
		 * lock already?
*/
int ret = check_deadlock(curr, hlock);
if (!ret)
return 0;
/*
* Add dependency only if this lock is not the head
* of the chain, and if the new lock introduces no more
* lock dependency (because we already hold a lock with the
* same lock class) nor deadlock (because the nest_lock
* serializes nesting locks), see the comments for
* check_deadlock().
*/
if (!chain_head && ret != 2) {
if (!check_prevs_add(curr, hlock))
return 0;
}
graph_unlock();
} else {
/* after lookup_chain_cache_add(): */
if (unlikely(!debug_locks))
return 0;
}
return 1;
}
#else
static inline int validate_chain(struct task_struct *curr,
struct held_lock *hlock,
int chain_head, u64 chain_key)
{
return 1;
}
static void init_chain_block_buckets(void) { }
#endif /* CONFIG_PROVE_LOCKING */
/*
* We are building curr_chain_key incrementally, so double-check
* it from scratch, to make sure that it's done correctly:
*/
static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
struct held_lock *hlock, *prev_hlock = NULL;
unsigned int i;
u64 chain_key = INITIAL_CHAIN_KEY;
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
if (chain_key != hlock->prev_chain_key) {
debug_locks_off();
/*
* We got mighty confused, our chain keys don't match
			 * with what we expect; did someone trample on our task state?
*/
WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
(unsigned long long)hlock->prev_chain_key);
return;
}
/*
* hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
		 * it a registered lock class index?
*/
if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
return;
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
chain_key = INITIAL_CHAIN_KEY;
chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
prev_hlock = hlock;
}
if (chain_key != curr->curr_chain_key) {
debug_locks_off();
/*
* More smoking hash instead of calculating it, damn see these
* numbers float.. I bet that a pink elephant stepped on my memory.
*/
WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
(unsigned long long)curr->curr_chain_key);
}
#endif
}
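/*
 * Illustrative sketch (assuming the iterate_chain_key() helper defined
 * earlier in this file): for a stack of held locks L1, L2, L3 in one
 * irq context, the incremental key is built as
 *
 *	key0 = INITIAL_CHAIN_KEY;
 *	key1 = iterate_chain_key(key0, hlock_id(L1));
 *	key2 = iterate_chain_key(key1, hlock_id(L2));
 *	key3 = iterate_chain_key(key2, hlock_id(L3));
 *
 * with hlock->prev_chain_key recording the key *before* each lock was
 * folded in, and the key resetting to INITIAL_CHAIN_KEY whenever the
 * irq_context changes, exactly as check_chain_key() recomputes above.
 */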
#ifdef CONFIG_PROVE_LOCKING
static int mark_lock(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit);
static void print_usage_bug_scenario(struct held_lock *lock)
{
struct lock_class *class = hlock_class(lock);
printk(" Possible unsafe locking scenario:\n\n");
printk(" CPU0\n");
printk(" ----\n");
printk(" lock(");
__print_lock_name(lock, class);
printk(KERN_CONT ");\n");
printk(" <Interrupt>\n");
printk(" lock(");
__print_lock_name(lock, class);
printk(KERN_CONT ");\n");
printk("\n *** DEADLOCK ***\n\n");
}
static void
print_usage_bug(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
if (!debug_locks_off() || debug_locks_silent)
return;
pr_warn("\n");
pr_warn("================================\n");
pr_warn("WARNING: inconsistent lock state\n");
print_kernel_ident();
pr_warn("--------------------------------\n");
pr_warn("inconsistent {%s} -> {%s} usage.\n",
usage_str[prev_bit], usage_str[new_bit]);
pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
curr->comm, task_pid_nr(curr),
lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
lockdep_hardirqs_enabled(),
lockdep_softirqs_enabled(curr));
print_lock(this);
pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
print_irqtrace_events(curr);
pr_warn("\nother info that might help us debug this:\n");
print_usage_bug_scenario(this);
lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
}
/*
* Print out an error if an invalid bit is set:
*/
static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
graph_unlock();
print_usage_bug(curr, this, bad_bit, new_bit);
return 0;
}
return 1;
}
/*
* print irq inversion bug:
*/
static void
print_irq_inversion_bug(struct task_struct *curr,
struct lock_list *root, struct lock_list *other,
struct held_lock *this, int forwards,
const char *irqclass)
{
struct lock_list *entry = other;
struct lock_list *middle = NULL;
int depth;
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return;
pr_warn("\n");
pr_warn("========================================================\n");
pr_warn("WARNING: possible irq lock inversion dependency detected\n");
print_kernel_ident();
pr_warn("--------------------------------------------------------\n");
pr_warn("%s/%d just changed the state of lock:\n",
curr->comm, task_pid_nr(curr));
print_lock(this);
if (forwards)
pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
else
pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
print_lock_name(NULL, other->class);
pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
pr_warn("\nother info that might help us debug this:\n");
/* Find a middle lock (if one exists) */
depth = get_lock_depth(other);
do {
if (depth == 0 && (entry != root)) {
pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
break;
}
middle = entry;
entry = get_lock_parent(entry);
depth--;
} while (entry && entry != root && (depth >= 0));
if (forwards)
print_irq_lock_scenario(root, other,
middle ? middle->class : root->class, other->class);
else
print_irq_lock_scenario(other, root,
middle ? middle->class : other->class, root->class);
lockdep_print_held_locks(curr);
pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
root->trace = save_trace();
if (!root->trace)
return;
print_shortest_lock_dependencies(other, root);
pr_warn("\nstack backtrace:\n");
dump_stack();
}
/*
* Prove that in the forwards-direction subgraph starting at <this>
* there is no lock matching <mask>:
*/
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit bit)
{
enum bfs_result ret;
struct lock_list root;
struct lock_list *target_entry;
enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
bfs_init_root(&root, this);
ret = find_usage_forwards(&root, usage_mask, &target_entry);
if (bfs_error(ret)) {
print_bfs_bug(ret);
return 0;
}
if (ret == BFS_RNOMATCH)
return 1;
/* Check whether write or read usage is the match */
if (target_entry->class->usage_mask & lock_flag(bit)) {
print_irq_inversion_bug(curr, &root, target_entry,
this, 1, state_name(bit));
} else {
print_irq_inversion_bug(curr, &root, target_entry,
this, 1, state_name(read_bit));
}
return 0;
}
/*
* Prove that in the backwards-direction subgraph starting at <this>
* there is no lock matching <mask>:
*/
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit bit)
{
enum bfs_result ret;
struct lock_list root;
struct lock_list *target_entry;
enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
bfs_init_rootb(&root, this);
ret = find_usage_backwards(&root, usage_mask, &target_entry);
if (bfs_error(ret)) {
print_bfs_bug(ret);
return 0;
}
if (ret == BFS_RNOMATCH)
return 1;
/* Check whether write or read usage is the match */
if (target_entry->class->usage_mask & lock_flag(bit)) {
print_irq_inversion_bug(curr, &root, target_entry,
this, 0, state_name(bit));
} else {
print_irq_inversion_bug(curr, &root, target_entry,
this, 0, state_name(read_bit));
}
return 0;
}
void print_irqtrace_events(struct task_struct *curr)
{
const struct irqtrace_events *trace = &curr->irqtrace;
printk("irq event stamp: %u\n", trace->irq_events);
printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
(void *)trace->hardirq_enable_ip);
printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
(void *)trace->hardirq_disable_ip);
printk("softirqs last enabled at (%u): [<%px>] %pS\n",
trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
(void *)trace->softirq_enable_ip);
printk("softirqs last disabled at (%u): [<%px>] %pS\n",
trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
(void *)trace->softirq_disable_ip);
}
static int HARDIRQ_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
return class_filter(class);
#endif
return 0;
}
static int SOFTIRQ_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
return class_filter(class);
#endif
return 0;
}
static int (*state_verbose_f[])(struct lock_class *class) = {
#define LOCKDEP_STATE(__STATE) \
__STATE##_verbose,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};
static inline int state_verbose(enum lock_usage_bit bit,
struct lock_class *class)
{
return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
}
typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
enum lock_usage_bit bit, const char *name);
static int
mark_lock_irq(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit)
{
int excl_bit = exclusive_bit(new_bit);
int read = new_bit & LOCK_USAGE_READ_MASK;
int dir = new_bit & LOCK_USAGE_DIR_MASK;
/*
* Validate that this particular lock does not have conflicting
* usage states.
*/
if (!valid_state(curr, this, new_bit, excl_bit))
return 0;
/*
* Check for read in write conflicts
*/
if (!read && !valid_state(curr, this, new_bit,
excl_bit + LOCK_USAGE_READ_MASK))
return 0;
/*
* Validate that the lock dependencies don't have conflicting usage
* states.
*/
if (dir) {
/*
* mark ENABLED has to look backwards -- to ensure no dependee
* has USED_IN state, which, again, would allow recursion deadlocks.
*/
if (!check_usage_backwards(curr, this, excl_bit))
return 0;
} else {
/*
* mark USED_IN has to look forwards -- to ensure no dependency
* has ENABLED state, which would allow recursion deadlocks.
*/
if (!check_usage_forwards(curr, this, excl_bit))
return 0;
}
if (state_verbose(new_bit, hlock_class(this)))
return 2;
return 1;
}
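/*
 * Worked example (illustrative): marking a lock LOCK_ENABLED_HARDIRQ
 * (the DIR bit is set) walks the graph backwards over its predecessors
 * to prove none of them is LOCK_USED_IN_HARDIRQ; marking a lock
 * LOCK_USED_IN_HARDIRQ walks forwards over everything that depends on
 * it to prove nothing there is LOCK_ENABLED_HARDIRQ. A match in either
 * direction is the hardirq inversion reported by
 * print_irq_inversion_bug() above.
 */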
/*
* Mark all held locks with a usage bit:
*/
static int
mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
{
struct held_lock *hlock;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
enum lock_usage_bit hlock_bit = base_bit;
hlock = curr->held_locks + i;
if (hlock->read)
hlock_bit += LOCK_USAGE_READ_MASK;
BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
if (!hlock->check)
continue;
if (!mark_lock(curr, hlock, hlock_bit))
return 0;
}
return 1;
}
/*
* Hardirqs will be enabled:
*/
static void __trace_hardirqs_on_caller(void)
{
struct task_struct *curr = current;
/*
* We are going to turn hardirqs on, so set the
* usage bit for all held locks:
*/
if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
return;
/*
* If we have softirqs enabled, then set the usage
* bit for all held locks. (disabled hardirqs prevented
* this bit from being set before)
*/
if (curr->softirqs_enabled)
mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
}
/**
* lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
*
* Invoked before a possible transition to RCU idle from exit to user or
* guest mode. This ensures that all RCU operations are done before RCU
* stops watching. After the RCU transition lockdep_hardirqs_on() has to be
* invoked to set the final state.
*/
void lockdep_hardirqs_on_prepare(void)
{
if (unlikely(!debug_locks))
return;
/*
* NMIs do not (and cannot) track lock dependencies, nothing to do.
*/
if (unlikely(in_nmi()))
return;
if (unlikely(this_cpu_read(lockdep_recursion)))
return;
if (unlikely(lockdep_hardirqs_enabled())) {
/*
* Neither irq nor preemption are disabled here
* so this is racy by nature but losing one hit
* in a stat is not a big deal.
*/
__debug_atomic_inc(redundant_hardirqs_on);
return;
}
/*
* We're enabling irqs and according to our state above irqs weren't
* already enabled, yet we find the hardware thinks they are in fact
* enabled.. someone messed up their IRQ state tracing.
*/
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
/*
* See the fine text that goes along with this variable definition.
*/
if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
return;
/*
* Can't allow enabling interrupts while in an interrupt handler,
	 * that's generally bad form and such. Recursion, limited stack etc.
*/
if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
return;
current->hardirq_chain_key = current->curr_chain_key;
lockdep_recursion_inc();
__trace_hardirqs_on_caller();
lockdep_recursion_finish();
}
EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
void noinstr lockdep_hardirqs_on(unsigned long ip)
{
	struct irqtrace_events *trace = &current->irqtrace;
if (unlikely(!debug_locks))
return;
/*
* NMIs can happen in the middle of local_irq_{en,dis}able() where the
* tracking state and hardware state are out of sync.
*
* NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
* and not rely on hardware state like normal interrupts.
*/
if (unlikely(in_nmi())) {
if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
return;
/*
* Skip:
* - recursion check, because NMI can hit lockdep;
* - hardware state check, because above;
* - chain_key check, see lockdep_hardirqs_on_prepare().
*/
goto skip_checks;
}
if (unlikely(this_cpu_read(lockdep_recursion)))
return;
if (lockdep_hardirqs_enabled()) {
/*
* Neither irq nor preemption are disabled here
* so this is racy by nature but losing one hit
* in a stat is not a big deal.
*/
__debug_atomic_inc(redundant_hardirqs_on);
return;
}
/*
* We're enabling irqs and according to our state above irqs weren't
* already enabled, yet we find the hardware thinks they are in fact
* enabled.. someone messed up their IRQ state tracing.
*/
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
/*
* Ensure the lock stack remained unchanged between
* lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
*/
DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
current->curr_chain_key);
skip_checks:
/* we'll do an OFF -> ON transition: */
__this_cpu_write(hardirqs_enabled, 1);
trace->hardirq_enable_ip = ip;
trace->hardirq_enable_event = ++trace->irq_events;
debug_atomic_inc(hardirqs_on_events);
}
EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
/*
* Hardirqs were disabled:
*/
void noinstr lockdep_hardirqs_off(unsigned long ip)
{
if (unlikely(!debug_locks))
return;
/*
* Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
* they will restore the software state. This ensures the software
* state is consistent inside NMIs as well.
*/
if (in_nmi()) {
if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
return;
} else if (__this_cpu_read(lockdep_recursion))
return;
/*
* So we're supposed to get called after you mask local IRQs, but for
* some reason the hardware doesn't quite think you did a proper job.
*/
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (lockdep_hardirqs_enabled()) {
		struct irqtrace_events *trace = &current->irqtrace;
/*
* We have done an ON -> OFF transition:
*/
__this_cpu_write(hardirqs_enabled, 0);
trace->hardirq_disable_ip = ip;
trace->hardirq_disable_event = ++trace->irq_events;
debug_atomic_inc(hardirqs_off_events);
} else {
debug_atomic_inc(redundant_hardirqs_off);
}
}
EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
/*
* Softirqs will be enabled:
*/
void lockdep_softirqs_on(unsigned long ip)
{
	struct irqtrace_events *trace = &current->irqtrace;
if (unlikely(!lockdep_enabled()))
return;
/*
* We fancy IRQs being disabled here, see softirq.c, avoids
* funny state and nesting things.
*/
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (current->softirqs_enabled) {
debug_atomic_inc(redundant_softirqs_on);
return;
}
lockdep_recursion_inc();
/*
* We'll do an OFF -> ON transition:
*/
current->softirqs_enabled = 1;
trace->softirq_enable_ip = ip;
trace->softirq_enable_event = ++trace->irq_events;
debug_atomic_inc(softirqs_on_events);
/*
* We are going to turn softirqs on, so set the
* usage bit for all held locks, if hardirqs are
* enabled too:
*/
if (lockdep_hardirqs_enabled())
mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
lockdep_recursion_finish();
}
/*
* Softirqs were disabled:
*/
void lockdep_softirqs_off(unsigned long ip)
{
if (unlikely(!lockdep_enabled()))
return;
/*
* We fancy IRQs being disabled here, see softirq.c
*/
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (current->softirqs_enabled) {
		struct irqtrace_events *trace = &current->irqtrace;
/*
* We have done an ON -> OFF transition:
*/
current->softirqs_enabled = 0;
trace->softirq_disable_ip = ip;
trace->softirq_disable_event = ++trace->irq_events;
debug_atomic_inc(softirqs_off_events);
/*
* Whoops, we wanted softirqs off, so why aren't they?
*/
DEBUG_LOCKS_WARN_ON(!softirq_count());
} else
debug_atomic_inc(redundant_softirqs_off);
}
static int
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
if (!check)
goto lock_used;
/*
	 * For a non-trylock acquisition in a hardirq or softirq context,
	 * mark the lock as used in these contexts:
*/
if (!hlock->trylock) {
if (hlock->read) {
if (lockdep_hardirq_context())
if (!mark_lock(curr, hlock,
LOCK_USED_IN_HARDIRQ_READ))
return 0;
if (curr->softirq_context)
if (!mark_lock(curr, hlock,
LOCK_USED_IN_SOFTIRQ_READ))
return 0;
} else {
if (lockdep_hardirq_context())
if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
return 0;
if (curr->softirq_context)
if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
return 0;
}
}
/*
* For lock_sync(), don't mark the ENABLED usage, since lock_sync()
* creates no critical section and no extra dependency can be introduced
	 * by interrupts.
*/
if (!hlock->hardirqs_off && !hlock->sync) {
if (hlock->read) {
if (!mark_lock(curr, hlock,
LOCK_ENABLED_HARDIRQ_READ))
return 0;
if (curr->softirqs_enabled)
if (!mark_lock(curr, hlock,
LOCK_ENABLED_SOFTIRQ_READ))
return 0;
} else {
if (!mark_lock(curr, hlock,
LOCK_ENABLED_HARDIRQ))
return 0;
if (curr->softirqs_enabled)
if (!mark_lock(curr, hlock,
LOCK_ENABLED_SOFTIRQ))
return 0;
}
}
lock_used:
/* mark it as used: */
if (!mark_lock(curr, hlock, LOCK_USED))
return 0;
return 1;
}
static inline unsigned int task_irq_context(struct task_struct *task)
{
return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
}
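/*
 * Example values (illustrative): in plain process context this returns
 * 0; inside a softirq it returns LOCK_CHAIN_SOFTIRQ_CONTEXT; inside a
 * hardirq it additionally contributes LOCK_CHAIN_HARDIRQ_CONTEXT, so a
 * hardirq that interrupted a softirq yields the sum of both values.
 */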
static int separate_irq_context(struct task_struct *curr,
struct held_lock *hlock)
{
unsigned int depth = curr->lockdep_depth;
/*
* Keep track of points where we cross into an interrupt context:
*/
if (depth) {
struct held_lock *prev_hlock;
prev_hlock = curr->held_locks + depth-1;
/*
* If we cross into another context, reset the
* hash key (this also prevents the checking and the
* adding of the dependency to 'prev'):
*/
if (prev_hlock->irq_context != hlock->irq_context)
return 1;
}
return 0;
}
/*
* Mark a lock with a usage bit, and validate the state transition:
*/
static int mark_lock(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit)
{
unsigned int new_mask, ret = 1;
if (new_bit >= LOCK_USAGE_STATES) {
DEBUG_LOCKS_WARN_ON(1);
return 0;
}
if (new_bit == LOCK_USED && this->read)
new_bit = LOCK_USED_READ;
new_mask = 1 << new_bit;
/*
* If already set then do not dirty the cacheline,
* nor do any checks:
*/
if (likely(hlock_class(this)->usage_mask & new_mask))
return 1;
if (!graph_lock())
return 0;
/*
* Make sure we didn't race:
*/
if (unlikely(hlock_class(this)->usage_mask & new_mask))
goto unlock;
if (!hlock_class(this)->usage_mask)
debug_atomic_dec(nr_unused_locks);
hlock_class(this)->usage_mask |= new_mask;
if (new_bit < LOCK_TRACE_STATES) {
if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
return 0;
}
if (new_bit < LOCK_USED) {
ret = mark_lock_irq(curr, this, new_bit);
if (!ret)
return 0;
}
unlock:
graph_unlock();
/*
* We must printk outside of the graph_lock:
*/
if (ret == 2) {
printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
print_lock(this);
print_irqtrace_events(curr);
dump_stack();
}
return ret;
}
static inline short task_wait_context(struct task_struct *curr)
{
/*
* Set appropriate wait type for the context; for IRQs we have to take
* into account force_irqthread as that is implied by PREEMPT_RT.
*/
if (lockdep_hardirq_context()) {
/*
* Check if force_irqthreads will run us threaded.
*/
if (curr->hardirq_threaded || curr->irq_config)
return LD_WAIT_CONFIG;
return LD_WAIT_SPIN;
} else if (curr->softirq_context) {
/*
* Softirqs are always threaded.
*/
return LD_WAIT_CONFIG;
}
return LD_WAIT_MAX;
}
static int
print_lock_invalid_wait_context(struct task_struct *curr,
struct held_lock *hlock)
{
short curr_inner;
if (!debug_locks_off())
return 0;
if (debug_locks_silent)
return 0;
pr_warn("\n");
pr_warn("=============================\n");
pr_warn("[ BUG: Invalid wait context ]\n");
print_kernel_ident();
pr_warn("-----------------------------\n");
pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
print_lock(hlock);
pr_warn("other info that might help us debug this:\n");
curr_inner = task_wait_context(curr);
pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
lockdep_print_held_locks(curr);
pr_warn("stack backtrace:\n");
dump_stack();
return 0;
}
/*
* Verify the wait_type context.
*
 * This check validates we take locks in the right wait-type order; that is,
 * it ensures that we do not take mutexes inside spinlocks and do not attempt
 * to acquire spinlocks inside raw_spinlocks and the like.
*
 * The entire thing is slightly more complex because of RCU: RCU is a lock
 * that can be taken from (pretty much) any context but also has constraints.
 * However, when taken in a stricter environment the RCU lock does not loosen
 * the constraints.
*
* Therefore we must look for the strictest environment in the lock stack and
* compare that to the lock we're trying to acquire.
*/
static int check_wait_context(struct task_struct *curr, struct held_lock *next)
{
u8 next_inner = hlock_class(next)->wait_type_inner;
u8 next_outer = hlock_class(next)->wait_type_outer;
u8 curr_inner;
int depth;
if (!next_inner || next->trylock)
return 0;
if (!next_outer)
next_outer = next_inner;
/*
* Find start of current irq_context..
*/
for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) {
struct held_lock *prev = curr->held_locks + depth;
if (prev->irq_context != next->irq_context)
break;
}
depth++;
curr_inner = task_wait_context(curr);
for (; depth < curr->lockdep_depth; depth++) {
struct held_lock *prev = curr->held_locks + depth;
struct lock_class *class = hlock_class(prev);
u8 prev_inner = class->wait_type_inner;
if (prev_inner) {
/*
* We can have a bigger inner than a previous one
* when outer is smaller than inner, as with RCU.
*
* Also due to trylocks.
*/
curr_inner = min(curr_inner, prev_inner);
/*
* Allow override for annotations -- this is typically
* only valid/needed for code that only exists when
* CONFIG_PREEMPT_RT=n.
*/
if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
curr_inner = prev_inner;
}
}
if (next_outer > curr_inner)
return print_lock_invalid_wait_context(curr, next);
return 0;
}
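/*
 * Worked example (illustrative, assuming the LD_WAIT_* ordering from
 * <linux/lockdep_types.h>, where LD_WAIT_SPIN < LD_WAIT_CONFIG <
 * LD_WAIT_SLEEP): while holding a raw_spinlock (wait_type_inner ==
 * LD_WAIT_SPIN), curr_inner is LD_WAIT_SPIN; attempting to acquire a
 * mutex (wait_type_inner == LD_WAIT_SLEEP) then gives
 * next_outer > curr_inner and the invalid wait context is reported.
 * Acquiring another raw_spinlock instead would pass the check.
 */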
#else /* CONFIG_PROVE_LOCKING */
static inline int
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
return 1;
}
static inline unsigned int task_irq_context(struct task_struct *task)
{
return 0;
}
static inline int separate_irq_context(struct task_struct *curr,
struct held_lock *hlock)
{
return 0;
}
static inline int check_wait_context(struct task_struct *curr,
struct held_lock *next)
{
return 0;
}
#endif /* CONFIG_PROVE_LOCKING */
/*
* Initialize a lock instance's lock-class mapping info:
*/
void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass,
u8 inner, u8 outer, u8 lock_type)
{
int i;
for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
lock->class_cache[i] = NULL;
#ifdef CONFIG_LOCK_STAT
lock->cpu = raw_smp_processor_id();
#endif
/*
* Can't be having no nameless bastards around this place!
*/
if (DEBUG_LOCKS_WARN_ON(!name)) {
lock->name = "NULL";
return;
}
lock->name = name;
lock->wait_type_outer = outer;
lock->wait_type_inner = inner;
lock->lock_type = lock_type;
/*
* No key, no joy, we need to hash something.
*/
if (DEBUG_LOCKS_WARN_ON(!key))
return;
/*
* Sanity check, the lock-class key must either have been allocated
* statically or must have been registered as a dynamic key.
*/
if (!static_obj(key) && !is_dynamic_key(key)) {
if (debug_locks)
printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
DEBUG_LOCKS_WARN_ON(1);
return;
}
lock->key = key;
if (unlikely(!debug_locks))
return;
if (subclass) {
unsigned long flags;
if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
lockdep_recursion_inc();
register_lock_class(lock, subclass, 1);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
}
EXPORT_SYMBOL_GPL(lockdep_init_map_type);
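/*
 * Usage sketch (hypothetical caller, not part of this file): a lock
 * implementation embeds a lockdep_map and initializes it once with a
 * static key, e.g.:
 *
 *	static struct lock_class_key my_key;
 *	struct lockdep_map my_map;
 *
 *	lockdep_init_map_type(&my_map, "my_lock", &my_key, 0,
 *			      LD_WAIT_SPIN, LD_WAIT_INV, LD_LOCK_NORMAL);
 *
 * The "my_*" names are made up for illustration; the LD_* constants
 * are the ones used by the wait-type checks above.
 */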
struct lock_class_key __lockdep_no_validate__;
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
#ifdef CONFIG_PROVE_LOCKING
void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn cmp_fn,
lock_print_fn print_fn)
{
struct lock_class *class = lock->class_cache[0];
unsigned long flags;
raw_local_irq_save(flags);
lockdep_recursion_inc();
if (!class)
class = register_lock_class(lock, 0, 0);
if (class) {
WARN_ON(class->cmp_fn && class->cmp_fn != cmp_fn);
WARN_ON(class->print_fn && class->print_fn != print_fn);
class->cmp_fn = cmp_fn;
class->print_fn = print_fn;
}
lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lockdep_set_lock_cmp_fn);
#endif
static void
print_lock_nested_lock_not_held(struct task_struct *curr,
struct held_lock *hlock)
{
if (!debug_locks_off())
return;
if (debug_locks_silent)
return;
pr_warn("\n");
pr_warn("==================================\n");
pr_warn("WARNING: Nested lock was not taken\n");
print_kernel_ident();
pr_warn("----------------------------------\n");
pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
print_lock(hlock);
pr_warn("\nbut this task is not holding:\n");
pr_warn("%s\n", hlock->nest_lock->name);
pr_warn("\nstack backtrace:\n");
dump_stack();
pr_warn("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
}
static int __lock_is_held(const struct lockdep_map *lock, int read);
/*
* This gets called for every mutex_lock*()/spin_lock*() operation.
* We maintain the dependency maps and validate the locking attempt:
*
* The callers must make sure that IRQs are disabled before calling it,
* otherwise we could get an interrupt which would want to take locks,
* which would end up in lockdep again.
*/
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, int hardirqs_off,
struct lockdep_map *nest_lock, unsigned long ip,
int references, int pin_count, int sync)
{
struct task_struct *curr = current;
struct lock_class *class = NULL;
struct held_lock *hlock;
unsigned int depth;
int chain_head = 0;
int class_idx;
u64 chain_key;
if (unlikely(!debug_locks))
return 0;
if (!prove_locking || lock->key == &__lockdep_no_validate__)
check = 0;
if (subclass < NR_LOCKDEP_CACHING_CLASSES)
class = lock->class_cache[subclass];
/*
* Not cached?
*/
if (unlikely(!class)) {
class = register_lock_class(lock, subclass, 0);
if (!class)
return 0;
}
debug_class_ops_inc(class);
if (very_verbose(class)) {
printk("\nacquire class [%px] %s", class->key, class->name);
if (class->name_version > 1)
printk(KERN_CONT "#%d", class->name_version);
printk(KERN_CONT "\n");
dump_stack();
}
/*
* Add the lock to the list of currently held locks.
	 * (we don't increase the depth just yet, up until the
* dependency checks are done)
*/
depth = curr->lockdep_depth;
/*
	 * Ran out of static storage for our per-task lock stack again, have we?
*/
if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
return 0;
class_idx = class - lock_classes;
if (depth && !sync) {
/* we're holding locks and the new held lock is not a sync */
hlock = curr->held_locks + depth - 1;
if (hlock->class_idx == class_idx && nest_lock) {
if (!references)
references++;
if (!hlock->references)
hlock->references++;
hlock->references += references;
/* Overflow */
if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
return 0;
return 2;
}
}
hlock = curr->held_locks + depth;
/*
* Plain impossible, we just registered it and checked it weren't no
* NULL like.. I bet this mushroom I ate was good!
*/
if (DEBUG_LOCKS_WARN_ON(!class))
return 0;
hlock->class_idx = class_idx;
hlock->acquire_ip = ip;
hlock->instance = lock;
hlock->nest_lock = nest_lock;
hlock->irq_context = task_irq_context(curr);
hlock->trylock = trylock;
hlock->read = read;
hlock->check = check;
hlock->sync = !!sync;
hlock->hardirqs_off = !!hardirqs_off;
hlock->references = references;
#ifdef CONFIG_LOCK_STAT
hlock->waittime_stamp = 0;
hlock->holdtime_stamp = lockstat_clock();
#endif
hlock->pin_count = pin_count;
if (check_wait_context(curr, hlock))
return 0;
/* Initialize the lock usage bit */
if (!mark_usage(curr, hlock, check))
return 0;
/*
* Calculate the chain hash: it's the combined hash of all the
* lock keys along the dependency chain. We save the hash value
* at every step so that we can get the current hash easily
* after unlock. The chain hash is then used to cache dependency
* results.
*
* The 'key ID' is what is the most compact key value to drive
* the hash, not class->key.
*/
/*
* Whoops, we did it again.. class_idx is invalid.
*/
if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
return 0;
chain_key = curr->curr_chain_key;
if (!depth) {
/*
* How can we have a chain hash when we ain't got no keys?!
*/
if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
return 0;
chain_head = 1;
}
hlock->prev_chain_key = chain_key;
if (separate_irq_context(curr, hlock)) {
chain_key = INITIAL_CHAIN_KEY;
chain_head = 1;
}
chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
if (nest_lock && !__lock_is_held(nest_lock, -1)) {
print_lock_nested_lock_not_held(curr, hlock);
return 0;
}
if (!debug_locks_silent) {
WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
WARN_ON_ONCE(!hlock_class(hlock)->key);
}
if (!validate_chain(curr, hlock, chain_head, chain_key))
return 0;
/* For lock_sync(), we are done here since no actual critical section */
if (hlock->sync)
return 1;
curr->curr_chain_key = chain_key;
curr->lockdep_depth++;
check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
if (unlikely(!debug_locks))
return 0;
#endif
if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
debug_locks_off();
print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
printk(KERN_DEBUG "depth: %i max: %lu!\n",
curr->lockdep_depth, MAX_LOCK_DEPTH);
lockdep_print_held_locks(current);
debug_show_all_locks();
dump_stack();
return 0;
}
if (unlikely(curr->lockdep_depth > max_lockdep_depth))
max_lockdep_depth = curr->lockdep_depth;
return 1;
}
static void print_unlock_imbalance_bug(struct task_struct *curr,
struct lockdep_map *lock,
unsigned long ip)
{
if (!debug_locks_off())
return;
if (debug_locks_silent)
return;
pr_warn("\n");
pr_warn("=====================================\n");
pr_warn("WARNING: bad unlock balance detected!\n");
print_kernel_ident();
pr_warn("-------------------------------------\n");
pr_warn("%s/%d is trying to release lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
pr_cont(") at:\n");
print_ip_sym(KERN_WARNING, ip);
pr_warn("but there are no more locks to release!\n");
pr_warn("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
}
static noinstr int match_held_lock(const struct held_lock *hlock,
const struct lockdep_map *lock)
{
if (hlock->instance == lock)
return 1;
if (hlock->references) {
const struct lock_class *class = lock->class_cache[0];
if (!class)
class = look_up_lock_class(lock, 0);
/*
* If look_up_lock_class() failed to find a class, we're trying
* to test if we hold a lock that has never yet been acquired.
* Clearly if the lock hasn't been acquired _ever_, we're not
* holding it either, so report failure.
*/
if (!class)
return 0;
/*
* References, but not a lock we're actually ref-counting?
* State got messed up, follow the sites that change ->references
* and try to make sense of it.
*/
if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
return 0;
if (hlock->class_idx == class - lock_classes)
return 1;
}
return 0;
}
/* @depth must not be zero */
static struct held_lock *find_held_lock(struct task_struct *curr,
struct lockdep_map *lock,
unsigned int depth, int *idx)
{
struct held_lock *ret, *hlock, *prev_hlock;
int i;
i = depth - 1;
hlock = curr->held_locks + i;
ret = hlock;
if (match_held_lock(hlock, lock))
goto out;
ret = NULL;
for (i--, prev_hlock = hlock--;
i >= 0;
i--, prev_hlock = hlock--) {
/*
* We must not cross into another context:
*/
if (prev_hlock->irq_context != hlock->irq_context) {
ret = NULL;
break;
}
if (match_held_lock(hlock, lock)) {
ret = hlock;
break;
}
}
out:
*idx = i;
return ret;
}
static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
int idx, unsigned int *merged)
{
struct held_lock *hlock;
int first_idx = idx;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
switch (__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass,
hlock->trylock,
hlock->read, hlock->check,
hlock->hardirqs_off,
hlock->nest_lock, hlock->acquire_ip,
hlock->references, hlock->pin_count, 0)) {
case 0:
return 1;
case 1:
break;
case 2:
*merged += (idx == first_idx);
break;
default:
WARN_ON(1);
return 0;
}
}
return 0;
}
static int
__lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip)
{
struct task_struct *curr = current;
unsigned int depth, merged = 0;
struct held_lock *hlock;
struct lock_class *class;
int i;
if (unlikely(!debug_locks))
return 0;
depth = curr->lockdep_depth;
/*
* This function is about (re)setting the class of a held lock,
* yet we're not actually holding any locks. Naughty user!
*/
if (DEBUG_LOCKS_WARN_ON(!depth))
return 0;
hlock = find_held_lock(curr, lock, depth, &i);
if (!hlock) {
print_unlock_imbalance_bug(curr, lock, ip);
return 0;
}
lockdep_init_map_type(lock, name, key, 0,
lock->wait_type_inner,
lock->wait_type_outer,
lock->lock_type);
class = register_lock_class(lock, subclass, 0);
hlock->class_idx = class - lock_classes;
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
if (reacquire_held_locks(curr, depth, i, &merged))
return 0;
/*
* I took it apart and put it back together again, except now I have
* these 'spare' parts.. where shall I put them.
*/
if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
return 0;
return 1;
}
static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
unsigned int depth, merged = 0;
struct held_lock *hlock;
int i;
if (unlikely(!debug_locks))
return 0;
depth = curr->lockdep_depth;
/*
* This function is about (re)setting the class of a held lock,
* yet we're not actually holding any locks. Naughty user!
*/
if (DEBUG_LOCKS_WARN_ON(!depth))
return 0;
hlock = find_held_lock(curr, lock, depth, &i);
if (!hlock) {
print_unlock_imbalance_bug(curr, lock, ip);
return 0;
}
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
WARN(hlock->read, "downgrading a read lock");
hlock->read = 1;
hlock->acquire_ip = ip;
if (reacquire_held_locks(curr, depth, i, &merged))
return 0;
/* Merging can't happen with unchanged classes.. */
if (DEBUG_LOCKS_WARN_ON(merged))
return 0;
/*
* I took it apart and put it back together again, except now I have
* these 'spare' parts.. where shall I put them.
*/
if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
return 0;
return 1;
}
/*
* Remove the lock from the list of currently held locks - this gets
* called on mutex_unlock()/spin_unlock*() (or on a failed
* mutex_lock_interruptible()).
*/
static int
__lock_release(struct lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
unsigned int depth, merged = 1;
struct held_lock *hlock;
int i;
if (unlikely(!debug_locks))
return 0;
depth = curr->lockdep_depth;
/*
* So we're all set to release this lock.. wait what lock? We don't
* own any locks, you've been drinking again?
*/
if (depth <= 0) {
print_unlock_imbalance_bug(curr, lock, ip);
return 0;
}
/*
* Check whether the lock exists in the current stack
* of held locks:
*/
hlock = find_held_lock(curr, lock, depth, &i);
if (!hlock) {
print_unlock_imbalance_bug(curr, lock, ip);
return 0;
}
if (hlock->instance == lock)
lock_release_holdtime(hlock);
WARN(hlock->pin_count, "releasing a pinned lock\n");
if (hlock->references) {
hlock->references--;
if (hlock->references) {
/*
* We had, and after removing one, still have
* references, the current lock stack is still
* valid. We're done!
*/
return 1;
}
}
/*
* We have the right lock to unlock, 'hlock' points to it.
* Now we remove it from the stack, and add back the other
* entries (if any), recalculating the hash along the way:
*/
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
/*
* The most likely case is when the unlock is on the innermost
* lock. In this case, we are done!
*/
if (i == depth-1)
return 1;
if (reacquire_held_locks(curr, depth, i + 1, &merged))
return 0;
/*
* We had N bottles of beer on the wall, we drank one, but now
* there's not N-1 bottles of beer left on the wall...
* Pouring two of the bottles together is acceptable.
*/
DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
/*
* Since reacquire_held_locks() would have called check_chain_key()
* indirectly via __lock_acquire(), we don't need to do it again
* on return.
*/
return 0;
}
static __always_inline
int __lock_is_held(const struct lockdep_map *lock, int read)
{
struct task_struct *curr = current;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
struct held_lock *hlock = curr->held_locks + i;
if (match_held_lock(hlock, lock)) {
if (read == -1 || !!hlock->read == read)
return LOCK_STATE_HELD;
return LOCK_STATE_NOT_HELD;
}
}
return LOCK_STATE_NOT_HELD;
}
static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
{
struct pin_cookie cookie = NIL_COOKIE;
struct task_struct *curr = current;
int i;
if (unlikely(!debug_locks))
return cookie;
for (i = 0; i < curr->lockdep_depth; i++) {
struct held_lock *hlock = curr->held_locks + i;
if (match_held_lock(hlock, lock)) {
/*
* Grab 16bits of randomness; this is sufficient to not
* be guessable and still allows some pin nesting in
* our u32 pin_count.
*/
cookie.val = 1 + (sched_clock() & 0xffff);
hlock->pin_count += cookie.val;
return cookie;
}
}
WARN(1, "pinning an unheld lock\n");
return cookie;
}
static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
struct task_struct *curr = current;
int i;
if (unlikely(!debug_locks))
return;
for (i = 0; i < curr->lockdep_depth; i++) {
struct held_lock *hlock = curr->held_locks + i;
if (match_held_lock(hlock, lock)) {
hlock->pin_count += cookie.val;
return;
}
}
WARN(1, "pinning an unheld lock\n");
}
static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
struct task_struct *curr = current;
int i;
if (unlikely(!debug_locks))
return;
for (i = 0; i < curr->lockdep_depth; i++) {
struct held_lock *hlock = curr->held_locks + i;
if (match_held_lock(hlock, lock)) {
if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
return;
hlock->pin_count -= cookie.val;
if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
hlock->pin_count = 0;
return;
}
}
WARN(1, "unpinning an unheld lock\n");
}
/*
* Check whether we follow the irq-flags state precisely:
*/
static noinstr void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
if (!debug_locks)
return;
/* Get the warning out.. */
instrumentation_begin();
if (irqs_disabled_flags(flags)) {
if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
printk("possible reason: unannotated irqs-off.\n");
}
} else {
if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
printk("possible reason: unannotated irqs-on.\n");
}
}
#ifndef CONFIG_PREEMPT_RT
/*
	 * We don't accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
* check if not in hardirq contexts:
*/
if (!hardirq_count()) {
if (softirq_count()) {
/* like the above, but with softirqs */
DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
} else {
/* lick the above, does it taste good? */
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
#endif
if (!debug_locks)
print_irqtrace_events(current);
instrumentation_end();
#endif
}
void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip)
{
unsigned long flags;
if (unlikely(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
lockdep_recursion_inc();
check_flags(flags);
if (__lock_set_class(lock, name, key, subclass, ip))
check_chain_key(current);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_set_class);
void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
if (unlikely(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
lockdep_recursion_inc();
check_flags(flags);
if (__lock_downgrade(lock, ip))
check_chain_key(current);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_downgrade);
/* NMI context !!! */
static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass)
{
#ifdef CONFIG_PROVE_LOCKING
struct lock_class *class = look_up_lock_class(lock, subclass);
unsigned long mask = LOCKF_USED;
/* if it doesn't have a class (yet), it certainly hasn't been used yet */
if (!class)
return;
/*
* READ locks only conflict with USED, such that if we only ever use
* READ locks, there is no deadlock possible -- RCU.
*/
if (!hlock->read)
mask |= LOCKF_USED_READ;
if (!(class->usage_mask & mask))
return;
hlock->class_idx = class - lock_classes;
print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES);
#endif
}
static bool lockdep_nmi(void)
{
if (raw_cpu_read(lockdep_recursion))
return false;
if (!in_nmi())
return false;
return true;
}
/*
* read_lock() is recursive if:
 * 1. We force lockdep to think this way in selftests, or
 * 2. The implementation is not a queued read/write lock, or
 * 3. The locker is in an in_interrupt() context.
*/
bool read_lock_is_recursive(void)
{
return force_read_lock_recursive ||
!IS_ENABLED(CONFIG_QUEUED_RWLOCKS) ||
in_interrupt();
}
EXPORT_SYMBOL_GPL(read_lock_is_recursive);
/*
* We are not always called with irqs disabled - do that here,
* and also avoid lockdep recursion:
*/
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *nest_lock, unsigned long ip)
{
unsigned long flags;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
if (!debug_locks)
return;
if (unlikely(!lockdep_enabled())) {
/* XXX allow trylock from NMI ?!? */
if (lockdep_nmi() && !trylock) {
struct held_lock hlock;
hlock.acquire_ip = ip;
hlock.instance = lock;
hlock.nest_lock = nest_lock;
hlock.irq_context = 2; // XXX
hlock.trylock = trylock;
hlock.read = read;
hlock.check = check;
hlock.hardirqs_off = true;
hlock.references = 0;
verify_lock_unused(lock, &hlock, subclass);
}
return;
}
raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
__lock_acquire(lock, subclass, trylock, read, check,
irqs_disabled_flags(flags), nest_lock, ip, 0, 0, 0);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
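/*
 * Usage sketch (illustrative): the locking primitives call this through
 * wrappers in <linux/lockdep.h>; spin_acquire(), for instance, expands
 * to roughly
 *
 *	lock_acquire(&lock->dep_map, subclass, trylock, 0, 1, NULL, ip);
 *
 * i.e. an exclusive (read == 0), fully checked (check == 1) acquisition
 * with no nest_lock.
 */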
void lock_release(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
trace_lock_release(lock, ip);
if (unlikely(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
if (__lock_release(lock, ip))
check_chain_key(current);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);
/*
 * lock_sync() - A special annotation for synchronize_{s,}rcu()-like APIs.
 *
 * No actual critical section is created by the APIs annotated with this: these
 * APIs are used to wait for one or multiple critical sections (on other CPUs
 * or threads), which means that calling these APIs from inside one of those
 * critical sections is a potential deadlock.
*/
void lock_sync(struct lockdep_map *lock, unsigned subclass, int read,
int check, struct lockdep_map *nest_lock, unsigned long ip)
{
unsigned long flags;
if (unlikely(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
__lock_acquire(lock, subclass, 0, read, check,
irqs_disabled_flags(flags), nest_lock, ip, 0, 0, 1);
check_chain_key(current);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_sync);
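/*
 * Illustrative sketch (not part of this file): how a synchronize_rcu()-like
 * wait primitive might carry this annotation. "my_sync_map" and
 * "my_synchronize()" are hypothetical names, and the argument choices are
 * illustrative only -- real users pass their own dep_map and match the
 * read/check values used by their read-side annotations.
 */
#if 0 /* illustration only */
static struct lockdep_map my_sync_map;	/* hypothetical */

static void my_synchronize(void)	/* hypothetical wait primitive */
{
	/*
	 * Momentary acquire+release: flags a deadlock if we are called
	 * from inside a critical section annotated with the same map.
	 */
	lock_sync(&my_sync_map, 0, 0, 1, NULL, _THIS_IP_);

	/* ... actually wait for all outstanding read-side sections ... */
}
#endif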
noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
{
unsigned long flags;
int ret = LOCK_STATE_NOT_HELD;
/*
* Avoid false negative lockdep_assert_held() and
* lockdep_assert_not_held().
*/
if (unlikely(!lockdep_enabled()))
return LOCK_STATE_UNKNOWN;
raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
ret = __lock_is_held(lock, read);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL_GPL(lock_is_held_type);
NOKPROBE_SYMBOL(lock_is_held_type);
struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
{
struct pin_cookie cookie = NIL_COOKIE;
unsigned long flags;
if (unlikely(!lockdep_enabled()))
return cookie;
raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
cookie = __lock_pin_lock(lock);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
return cookie;
}
EXPORT_SYMBOL_GPL(lock_pin_lock);
void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
unsigned long flags;
if (unlikely(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
__lock_repin_lock(lock, cookie);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_repin_lock);
void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
unsigned long flags;
if (unlikely(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
__lock_unpin_lock(lock, cookie);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_unpin_lock);
#ifdef CONFIG_LOCK_STAT
static void print_lock_contention_bug(struct task_struct *curr,
struct lockdep_map *lock,
unsigned long ip)
{
if (!debug_locks_off())
return;
if (debug_locks_silent)
return;
pr_warn("\n");
pr_warn("=================================\n");
pr_warn("WARNING: bad contention detected!\n");
print_kernel_ident();
pr_warn("---------------------------------\n");
pr_warn("%s/%d is trying to contend lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
pr_cont(") at:\n");
print_ip_sym(KERN_WARNING, ip);
pr_warn("but there are no locks held!\n");
pr_warn("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
}
static void
__lock_contended(struct lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
struct held_lock *hlock;
struct lock_class_stats *stats;
unsigned int depth;
int i, contention_point, contending_point;
depth = curr->lockdep_depth;
/*
* Whee, we contended on this lock, except it seems we're not
* actually trying to acquire anything much at all..
*/
if (DEBUG_LOCKS_WARN_ON(!depth))
return;
hlock = find_held_lock(curr, lock, depth, &i);
if (!hlock) {
print_lock_contention_bug(curr, lock, ip);
return;
}
if (hlock->instance != lock)
return;
hlock->waittime_stamp = lockstat_clock();
contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
contending_point = lock_point(hlock_class(hlock)->contending_point,
lock->ip);
stats = get_lock_stats(hlock_class(hlock));
if (contention_point < LOCKSTAT_POINTS)
stats->contention_point[contention_point]++;
if (contending_point < LOCKSTAT_POINTS)
stats->contending_point[contending_point]++;
if (lock->cpu != smp_processor_id())
stats->bounces[bounce_contended + !!hlock->read]++;
}
static void
__lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
struct held_lock *hlock;
struct lock_class_stats *stats;
unsigned int depth;
u64 now, waittime = 0;
int i, cpu;
depth = curr->lockdep_depth;
/*
* Yay, we acquired ownership of this lock we didn't try to
* acquire, how the heck did that happen?
*/
if (DEBUG_LOCKS_WARN_ON(!depth))
return;
hlock = find_held_lock(curr, lock, depth, &i);
if (!hlock) {
print_lock_contention_bug(curr, lock, _RET_IP_);
return;
}
if (hlock->instance != lock)
return;
cpu = smp_processor_id();
if (hlock->waittime_stamp) {
now = lockstat_clock();
waittime = now - hlock->waittime_stamp;
hlock->holdtime_stamp = now;
}
stats = get_lock_stats(hlock_class(hlock));
if (waittime) {
if (hlock->read)
lock_time_inc(&stats->read_waittime, waittime);
else
lock_time_inc(&stats->write_waittime, waittime);
}
if (lock->cpu != cpu)
stats->bounces[bounce_acquired + !!hlock->read]++;
lock->cpu = cpu;
lock->ip = ip;
}
void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
trace_lock_contended(lock, ip);
if (unlikely(!lock_stat || !lockdep_enabled()))
return;
raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
__lock_contended(lock, ip);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);
void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
trace_lock_acquired(lock, ip);
if (unlikely(!lock_stat || !lockdep_enabled()))
return;
raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
__lock_acquired(lock, ip);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
#endif
/*
* Used by the testsuite, sanitize the validator state
* after a simulated failure:
*/
void lockdep_reset(void)
{
unsigned long flags;
int i;
raw_local_irq_save(flags);
lockdep_init_task(current);
memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
nr_hardirq_chains = 0;
nr_softirq_chains = 0;
nr_process_chains = 0;
debug_locks = 1;
for (i = 0; i < CHAINHASH_SIZE; i++)
INIT_HLIST_HEAD(chainhash_table + i);
raw_local_irq_restore(flags);
}
/* Remove a class from a lock chain. Must be called with the graph lock held. */
static void remove_class_from_lock_chain(struct pending_free *pf,
struct lock_chain *chain,
struct lock_class *class)
{
#ifdef CONFIG_PROVE_LOCKING
int i;
for (i = chain->base; i < chain->base + chain->depth; i++) {
if (chain_hlock_class_idx(chain_hlocks[i]) != class - lock_classes)
continue;
/*
		 * Each lock class occurs at most once in a lock chain, so once
		 * we find a match we can break out of this loop.
*/
goto free_lock_chain;
}
/* Since the chain has not been modified, return. */
return;
free_lock_chain:
free_chain_hlocks(chain->base, chain->depth);
/* Overwrite the chain key for concurrent RCU readers. */
WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
dec_chains(chain->irq_context);
/*
* Note: calling hlist_del_rcu() from inside a
* hlist_for_each_entry_rcu() loop is safe.
*/
hlist_del_rcu(&chain->entry);
__set_bit(chain - lock_chains, pf->lock_chains_being_freed);
nr_zapped_lock_chains++;
#endif
}
/* Must be called with the graph lock held. */
static void remove_class_from_lock_chains(struct pending_free *pf,
struct lock_class *class)
{
struct lock_chain *chain;
struct hlist_head *head;
int i;
for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
head = chainhash_table + i;
hlist_for_each_entry_rcu(chain, head, entry) {
remove_class_from_lock_chain(pf, chain, class);
}
}
}
/*
* Remove all references to a lock class. The caller must hold the graph lock.
*/
static void zap_class(struct pending_free *pf, struct lock_class *class)
{
struct lock_list *entry;
int i;
WARN_ON_ONCE(!class->key);
/*
* Remove all dependencies this lock is
* involved in:
*/
for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
entry = list_entries + i;
if (entry->class != class && entry->links_to != class)
continue;
__clear_bit(i, list_entries_in_use);
nr_list_entries--;
list_del_rcu(&entry->entry);
}
if (list_empty(&class->locks_after) &&
list_empty(&class->locks_before)) {
list_move_tail(&class->lock_entry, &pf->zapped);
hlist_del_rcu(&class->hash_entry);
WRITE_ONCE(class->key, NULL);
WRITE_ONCE(class->name, NULL);
nr_lock_classes--;
__clear_bit(class - lock_classes, lock_classes_in_use);
if (class - lock_classes == max_lock_class_idx)
max_lock_class_idx--;
} else {
WARN_ONCE(true, "%s() failed for class %s\n", __func__,
class->name);
}
remove_class_from_lock_chains(pf, class);
nr_zapped_classes++;
}
static void reinit_class(struct lock_class *class)
{
WARN_ON_ONCE(!class->lock_entry.next);
WARN_ON_ONCE(!list_empty(&class->locks_after));
WARN_ON_ONCE(!list_empty(&class->locks_before));
memset_startat(class, 0, key);
WARN_ON_ONCE(!class->lock_entry.next);
WARN_ON_ONCE(!list_empty(&class->locks_after));
WARN_ON_ONCE(!list_empty(&class->locks_before));
}
static inline int within(const void *addr, void *start, unsigned long size)
{
return addr >= start && addr < start + size;
}
static bool inside_selftest(void)
{
return current == lockdep_selftest_task_struct;
}
/* The caller must hold the graph lock. */
static struct pending_free *get_pending_free(void)
{
return delayed_free.pf + delayed_free.index;
}
static void free_zapped_rcu(struct rcu_head *cb);
/*
* Schedule an RCU callback if no RCU callback is pending. Must be called with
* the graph lock held.
*/
static void call_rcu_zapped(struct pending_free *pf)
{
WARN_ON_ONCE(inside_selftest());
if (list_empty(&pf->zapped))
return;
if (delayed_free.scheduled)
return;
delayed_free.scheduled = true;
WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
delayed_free.index ^= 1;
call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
}
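/*
 * Note on the index toggle above: delayed_free.pf[] holds two pending_free
 * lists. delayed_free.index selects the "open" list that zap_class() appends
 * to; "index ^= 1" closes that list (handing it to the RCU callback) and
 * makes the other entry the new open list. free_zapped_rcu() below therefore
 * always processes delayed_free.pf[index ^ 1], the list this toggle closed.
 */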
/* The caller must hold the graph lock. May be called from RCU context. */
static void __free_zapped_classes(struct pending_free *pf)
{
struct lock_class *class;
check_data_structures();
list_for_each_entry(class, &pf->zapped, lock_entry)
reinit_class(class);
list_splice_init(&pf->zapped, &free_lock_classes);
#ifdef CONFIG_PROVE_LOCKING
bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
#endif
}
static void free_zapped_rcu(struct rcu_head *ch)
{
struct pending_free *pf;
unsigned long flags;
if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
return;
raw_local_irq_save(flags);
lockdep_lock();
	/* Process the closed list (index ^ 1); its RCU grace period has elapsed. */
pf = delayed_free.pf + (delayed_free.index ^ 1);
__free_zapped_classes(pf);
delayed_free.scheduled = false;
/*
* If there's anything on the open list, close and start a new callback.
*/
call_rcu_zapped(delayed_free.pf + delayed_free.index);
lockdep_unlock();
raw_local_irq_restore(flags);
}
/*
* Remove all lock classes from the class hash table and from the
* all_lock_classes list whose key or name is in the address range [start,
* start + size). Move these lock classes to the zapped_classes list. Must
* be called with the graph lock held.
*/
static void __lockdep_free_key_range(struct pending_free *pf, void *start,
unsigned long size)
{
struct lock_class *class;
struct hlist_head *head;
int i;
/* Unhash all classes that were created by a module. */
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
hlist_for_each_entry_rcu(class, head, hash_entry) {
if (!within(class->key, start, size) &&
!within(class->name, start, size))
continue;
zap_class(pf, class);
}
}
}
/*
* Used in module.c to remove lock classes from memory that is going to be
* freed; and possibly re-used by other modules.
*
* We will have had one synchronize_rcu() before getting here, so we're
* guaranteed nobody will look up these exact classes -- they're properly dead
* but still allocated.
*/
static void lockdep_free_key_range_reg(void *start, unsigned long size)
{
struct pending_free *pf;
unsigned long flags;
init_data_structures_once();
raw_local_irq_save(flags);
lockdep_lock();
pf = get_pending_free();
__lockdep_free_key_range(pf, start, size);
call_rcu_zapped(pf);
lockdep_unlock();
raw_local_irq_restore(flags);
/*
* Wait for any possible iterators from look_up_lock_class() to pass
* before continuing to free the memory they refer to.
*/
synchronize_rcu();
}
/*
* Free all lockdep keys in the range [start, start+size). Does not sleep.
* Ignores debug_locks. Must only be used by the lockdep selftests.
*/
static void lockdep_free_key_range_imm(void *start, unsigned long size)
{
struct pending_free *pf = delayed_free.pf;
unsigned long flags;
init_data_structures_once();
raw_local_irq_save(flags);
lockdep_lock();
__lockdep_free_key_range(pf, start, size);
__free_zapped_classes(pf);
lockdep_unlock();
raw_local_irq_restore(flags);
}
void lockdep_free_key_range(void *start, unsigned long size)
{
init_data_structures_once();
if (inside_selftest())
lockdep_free_key_range_imm(start, size);
else
lockdep_free_key_range_reg(start, size);
}
/*
* Check whether any element of the @lock->class_cache[] array refers to a
* registered lock class. The caller must hold either the graph lock or the
* RCU read lock.
*/
static bool lock_class_cache_is_registered(struct lockdep_map *lock)
{
struct lock_class *class;
struct hlist_head *head;
int i, j;
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
hlist_for_each_entry_rcu(class, head, hash_entry) {
for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
if (lock->class_cache[j] == class)
return true;
}
}
return false;
}
/* The caller must hold the graph lock. Does not sleep. */
static void __lockdep_reset_lock(struct pending_free *pf,
struct lockdep_map *lock)
{
struct lock_class *class;
int j;
/*
* Remove all classes this lock might have:
*/
for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
/*
* If the class exists we look it up and zap it:
*/
class = look_up_lock_class(lock, j);
if (class)
zap_class(pf, class);
}
/*
* Debug check: in the end all mapped classes should
* be gone.
*/
if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
debug_locks_off();
}
/*
* Remove all information lockdep has about a lock if debug_locks == 1. Free
* released data structures from RCU context.
*/
static void lockdep_reset_lock_reg(struct lockdep_map *lock)
{
struct pending_free *pf;
unsigned long flags;
int locked;
raw_local_irq_save(flags);
locked = graph_lock();
if (!locked)
goto out_irq;
pf = get_pending_free();
__lockdep_reset_lock(pf, lock);
call_rcu_zapped(pf);
graph_unlock();
out_irq:
raw_local_irq_restore(flags);
}
/*
* Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
* lockdep selftests.
*/
static void lockdep_reset_lock_imm(struct lockdep_map *lock)
{
struct pending_free *pf = delayed_free.pf;
unsigned long flags;
raw_local_irq_save(flags);
lockdep_lock();
__lockdep_reset_lock(pf, lock);
__free_zapped_classes(pf);
lockdep_unlock();
raw_local_irq_restore(flags);
}
void lockdep_reset_lock(struct lockdep_map *lock)
{
init_data_structures_once();
if (inside_selftest())
lockdep_reset_lock_imm(lock);
else
lockdep_reset_lock_reg(lock);
}
/*
* Unregister a dynamically allocated key.
*
 * Unlike lockdep_register_key(), a search is always done to find a matching
 * key irrespective of debug_locks, to avoid a potential invalid access to
 * freed memory in a lock_class entry.
*/
void lockdep_unregister_key(struct lock_class_key *key)
{
struct hlist_head *hash_head = keyhashentry(key);
struct lock_class_key *k;
struct pending_free *pf;
unsigned long flags;
bool found = false;
might_sleep();
if (WARN_ON_ONCE(static_obj(key)))
return;
raw_local_irq_save(flags);
lockdep_lock();
hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
if (k == key) {
hlist_del_rcu(&k->hash_entry);
found = true;
break;
}
}
WARN_ON_ONCE(!found && debug_locks);
if (found) {
pf = get_pending_free();
__lockdep_free_key_range(pf, key, 1);
call_rcu_zapped(pf);
}
lockdep_unlock();
raw_local_irq_restore(flags);
/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(lockdep_unregister_key);
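/*
 * Illustrative usage sketch ("my_obj" and friends are hypothetical, and the
 * kzalloc()/kfree() calls assume <linux/slab.h>): an object that embeds a
 * dynamically registered lock class key must unregister the key before the
 * memory holding it is freed.
 */
#if 0 /* illustration only */
struct my_obj {
	spinlock_t lock;
	struct lock_class_key key;
};

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	lockdep_register_key(&obj->key);
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->key);
	return obj;
}

static void my_obj_destroy(struct my_obj *obj)
{
	lockdep_unregister_key(&obj->key);	/* may sleep */
	kfree(obj);
}
#endif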
void __init lockdep_init(void)
{
printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
printk(" memory used by lock dependency info: %zu kB\n",
(sizeof(lock_classes) +
sizeof(lock_classes_in_use) +
sizeof(classhash_table) +
sizeof(list_entries) +
sizeof(list_entries_in_use) +
sizeof(chainhash_table) +
sizeof(delayed_free)
#ifdef CONFIG_PROVE_LOCKING
+ sizeof(lock_cq)
+ sizeof(lock_chains)
+ sizeof(lock_chains_in_use)
+ sizeof(chain_hlocks)
#endif
) / 1024
);
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
printk(" memory used for stack traces: %zu kB\n",
(sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
);
#endif
printk(" per task-struct memory footprint: %zu bytes\n",
sizeof(((struct task_struct *)NULL)->held_locks));
}
static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
const void *mem_to, struct held_lock *hlock)
{
if (!debug_locks_off())
return;
if (debug_locks_silent)
return;
pr_warn("\n");
pr_warn("=========================\n");
pr_warn("WARNING: held lock freed!\n");
print_kernel_ident();
pr_warn("-------------------------\n");
pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
print_lock(hlock);
lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
}
static inline int not_in_range(const void* mem_from, unsigned long mem_len,
const void* lock_from, unsigned long lock_len)
{
return lock_from + lock_len <= mem_from ||
mem_from + mem_len <= lock_from;
}
/*
* Called when kernel memory is freed (or unmapped), or if a lock
* is destroyed or reinitialized - this code checks whether there is
* any held lock in the memory range of <from> to <to>:
*/
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
struct task_struct *curr = current;
struct held_lock *hlock;
unsigned long flags;
int i;
if (unlikely(!debug_locks))
return;
raw_local_irq_save(flags);
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
if (not_in_range(mem_from, mem_len, hlock->instance,
sizeof(*hlock->instance)))
continue;
print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
break;
}
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
static void print_held_locks_bug(void)
{
if (!debug_locks_off())
return;
if (debug_locks_silent)
return;
pr_warn("\n");
pr_warn("====================================\n");
pr_warn("WARNING: %s/%d still has locks held!\n",
current->comm, task_pid_nr(current));
print_kernel_ident();
pr_warn("------------------------------------\n");
lockdep_print_held_locks(current);
pr_warn("\nstack backtrace:\n");
dump_stack();
}
void debug_check_no_locks_held(void)
{
if (unlikely(current->lockdep_depth > 0))
print_held_locks_bug();
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
#ifdef __KERNEL__
void debug_show_all_locks(void)
{
struct task_struct *g, *p;
if (unlikely(!debug_locks)) {
pr_warn("INFO: lockdep is turned off.\n");
return;
}
pr_warn("\nShowing all locks held in the system:\n");
rcu_read_lock();
for_each_process_thread(g, p) {
if (!p->lockdep_depth)
continue;
lockdep_print_held_locks(p);
touch_nmi_watchdog();
touch_all_softlockup_watchdogs();
}
rcu_read_unlock();
pr_warn("\n");
pr_warn("=============================================\n\n");
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);
#endif
/*
* Careful: only use this function if you are sure that
* the task cannot run in parallel!
*/
void debug_show_held_locks(struct task_struct *task)
{
if (unlikely(!debug_locks)) {
printk("INFO: lockdep is turned off.\n");
return;
}
lockdep_print_held_locks(task);
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);
asmlinkage __visible void lockdep_sys_exit(void)
{
struct task_struct *curr = current;
if (unlikely(curr->lockdep_depth)) {
if (!debug_locks_off())
return;
pr_warn("\n");
pr_warn("================================================\n");
pr_warn("WARNING: lock held when returning to user space!\n");
print_kernel_ident();
pr_warn("------------------------------------------------\n");
pr_warn("%s/%d is leaving the kernel with locks still held!\n",
curr->comm, curr->pid);
lockdep_print_held_locks(curr);
}
/*
* The lock history for each syscall should be independent. So wipe the
* slate clean on return to userspace.
*/
lockdep_invariant_state(false);
}
void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
struct task_struct *curr = current;
int dl = READ_ONCE(debug_locks);
bool rcu = warn_rcu_enter();
/* Note: the following can be executed concurrently, so be careful. */
pr_warn("\n");
pr_warn("=============================\n");
pr_warn("WARNING: suspicious RCU usage\n");
print_kernel_ident();
pr_warn("-----------------------------\n");
pr_warn("%s:%d %s!\n", file, line, s);
pr_warn("\nother info that might help us debug this:\n\n");
pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n%s",
!rcu_lockdep_current_cpu_online()
? "RCU used illegally from offline CPU!\n"
: "",
rcu_scheduler_active, dl,
dl ? "" : "Possible false positive due to lockdep disabling via debug_locks = 0\n");
/*
* If a CPU is in the RCU-free window in idle (ie: in the section
* between ct_idle_enter() and ct_idle_exit(), then RCU
* considers that CPU to be in an "extended quiescent state",
* which means that RCU will be completely ignoring that CPU.
* Therefore, rcu_read_lock() and friends have absolutely no
* effect on a CPU running in that state. In other words, even if
* such an RCU-idle CPU has called rcu_read_lock(), RCU might well
* delete data structures out from under it. RCU really has no
* choice here: we need to keep an RCU-free window in idle where
* the CPU may possibly enter into low power mode. This way we can
 * report an extended quiescent state to other CPUs that have started a
 * grace period. Otherwise we would delay any grace period as long as we
 * run in the idle task.
*
* So complain bitterly if someone does call rcu_read_lock(),
* rcu_read_lock_bh() and so on from extended quiescent states.
*/
if (!rcu_is_watching())
pr_warn("RCU used illegally from extended quiescent state!\n");
lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
warn_rcu_exit(rcu);
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
| linux-master | kernel/locking/lockdep.c |
// SPDX-License-Identifier: GPL-2.0
/*
* kernel/lockdep_proc.c
*
* Runtime locking correctness validator
*
* Started by Ingo Molnar:
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* Code for /proc/lockdep and /proc/lockdep_stats:
*
*/
#include <linux/export.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
#include <asm/div64.h>
#include "lockdep_internals.h"
/*
* Since iteration of lock_classes is done without holding the lockdep lock,
* it is not safe to iterate all_lock_classes list directly as the iteration
* may branch off to free_lock_classes or the zapped list. Iteration is done
* directly on the lock_classes array by checking the lock_classes_in_use
* bitmap and max_lock_class_idx.
*/
#define iterate_lock_classes(idx, class) \
for (idx = 0, class = lock_classes; idx <= max_lock_class_idx; \
idx++, class++)
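/*
 * Typical use (see lockdep_stats_show() and lock_stat_open() below): freed
 * slots stay in the array, so callers skip indices whose bit is clear in
 * lock_classes_in_use:
 *
 *	iterate_lock_classes(idx, class) {
 *		if (!test_bit(idx, lock_classes_in_use))
 *			continue;
 *		...
 *	}
 */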
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
struct lock_class *class = v;
++class;
*pos = class - lock_classes;
return (*pos > max_lock_class_idx) ? NULL : class;
}
static void *l_start(struct seq_file *m, loff_t *pos)
{
unsigned long idx = *pos;
if (idx > max_lock_class_idx)
return NULL;
return lock_classes + idx;
}
static void l_stop(struct seq_file *m, void *v)
{
}
static void print_name(struct seq_file *m, struct lock_class *class)
{
char str[KSYM_NAME_LEN];
const char *name = class->name;
if (!name) {
name = __get_key_name(class->key, str);
seq_printf(m, "%s", name);
	} else {
seq_printf(m, "%s", name);
if (class->name_version > 1)
seq_printf(m, "#%d", class->name_version);
if (class->subclass)
seq_printf(m, "/%d", class->subclass);
}
}
static int l_show(struct seq_file *m, void *v)
{
struct lock_class *class = v;
struct lock_list *entry;
char usage[LOCK_USAGE_CHARS];
int idx = class - lock_classes;
if (v == lock_classes)
seq_printf(m, "all lock classes:\n");
if (!test_bit(idx, lock_classes_in_use))
return 0;
seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
#endif
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
get_usage_chars(class, usage);
seq_printf(m, " %s", usage);
}
seq_printf(m, ": ");
print_name(m, class);
seq_puts(m, "\n");
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
list_for_each_entry(entry, &class->locks_after, entry) {
if (entry->distance == 1) {
seq_printf(m, " -> [%p] ", entry->class->key);
print_name(m, entry->class);
seq_puts(m, "\n");
}
}
seq_puts(m, "\n");
}
return 0;
}
static const struct seq_operations lockdep_ops = {
.start = l_start,
.next = l_next,
.stop = l_stop,
.show = l_show,
};
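/*
 * Resulting /proc/lockdep format, derived from l_show() above (key address,
 * optional OPS/FD/BD counters, usage characters, name, then distance-1
 * forward dependencies; the values here are made up for illustration):
 *
 *	ffffffff12345678 OPS:    4321 FD:    3 BD:    1 ......: &rq->__lock
 *	 -> [ffffffff9abcdef0] &p->pi_lock
 */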
#ifdef CONFIG_PROVE_LOCKING
static void *lc_start(struct seq_file *m, loff_t *pos)
{
if (*pos < 0)
return NULL;
if (*pos == 0)
return SEQ_START_TOKEN;
return lock_chains + (*pos - 1);
}
static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
{
*pos = lockdep_next_lockchain(*pos - 1) + 1;
return lc_start(m, pos);
}
static void lc_stop(struct seq_file *m, void *v)
{
}
static int lc_show(struct seq_file *m, void *v)
{
struct lock_chain *chain = v;
struct lock_class *class;
int i;
static const char * const irq_strs[] = {
[0] = "0",
[LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq",
[LOCK_CHAIN_SOFTIRQ_CONTEXT] = "softirq",
[LOCK_CHAIN_SOFTIRQ_CONTEXT|
LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq|softirq",
};
if (v == SEQ_START_TOKEN) {
if (!nr_free_chain_hlocks)
seq_printf(m, "(buggered) ");
seq_printf(m, "all lock chains:\n");
return 0;
}
seq_printf(m, "irq_context: %s\n", irq_strs[chain->irq_context]);
for (i = 0; i < chain->depth; i++) {
class = lock_chain_get_class(chain, i);
if (!class->key)
continue;
seq_printf(m, "[%p] ", class->key);
print_name(m, class);
seq_puts(m, "\n");
}
seq_puts(m, "\n");
return 0;
}
static const struct seq_operations lockdep_chains_ops = {
.start = lc_start,
.next = lc_next,
.stop = lc_stop,
.show = lc_show,
};
#endif /* CONFIG_PROVE_LOCKING */
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
hi2 = debug_atomic_read(hardirqs_off_events),
hr1 = debug_atomic_read(redundant_hardirqs_on),
hr2 = debug_atomic_read(redundant_hardirqs_off),
si1 = debug_atomic_read(softirqs_on_events),
si2 = debug_atomic_read(softirqs_off_events),
sr1 = debug_atomic_read(redundant_softirqs_on),
sr2 = debug_atomic_read(redundant_softirqs_off);
seq_printf(m, " chain lookup misses: %11llu\n",
debug_atomic_read(chain_lookup_misses));
seq_printf(m, " chain lookup hits: %11llu\n",
debug_atomic_read(chain_lookup_hits));
seq_printf(m, " cyclic checks: %11llu\n",
debug_atomic_read(nr_cyclic_checks));
seq_printf(m, " redundant checks: %11llu\n",
debug_atomic_read(nr_redundant_checks));
seq_printf(m, " redundant links: %11llu\n",
debug_atomic_read(nr_redundant));
seq_printf(m, " find-mask forwards checks: %11llu\n",
debug_atomic_read(nr_find_usage_forwards_checks));
seq_printf(m, " find-mask backwards checks: %11llu\n",
debug_atomic_read(nr_find_usage_backwards_checks));
seq_printf(m, " hardirq on events: %11llu\n", hi1);
seq_printf(m, " hardirq off events: %11llu\n", hi2);
seq_printf(m, " redundant hardirq ons: %11llu\n", hr1);
seq_printf(m, " redundant hardirq offs: %11llu\n", hr2);
seq_printf(m, " softirq on events: %11llu\n", si1);
seq_printf(m, " softirq off events: %11llu\n", si2);
seq_printf(m, " redundant softirq ons: %11llu\n", sr1);
seq_printf(m, " redundant softirq offs: %11llu\n", sr2);
#endif
}
static int lockdep_stats_show(struct seq_file *m, void *v)
{
unsigned long nr_unused = 0, nr_uncategorized = 0,
nr_irq_safe = 0, nr_irq_unsafe = 0,
nr_softirq_safe = 0, nr_softirq_unsafe = 0,
nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
sum_forward_deps = 0;
#ifdef CONFIG_PROVE_LOCKING
struct lock_class *class;
unsigned long idx;
iterate_lock_classes(idx, class) {
if (!test_bit(idx, lock_classes_in_use))
continue;
if (class->usage_mask == 0)
nr_unused++;
if (class->usage_mask == LOCKF_USED)
nr_uncategorized++;
if (class->usage_mask & LOCKF_USED_IN_IRQ)
nr_irq_safe++;
if (class->usage_mask & LOCKF_ENABLED_IRQ)
nr_irq_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
nr_softirq_safe++;
if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
nr_softirq_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
nr_hardirq_safe++;
if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
nr_hardirq_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
nr_irq_read_safe++;
if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
nr_irq_read_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
nr_softirq_read_safe++;
if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
nr_softirq_read_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
nr_hardirq_read_safe++;
if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
nr_hardirq_read_unsafe++;
sum_forward_deps += lockdep_count_forward_deps(class);
}
#ifdef CONFIG_DEBUG_LOCKDEP
DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
#endif
#endif
seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
nr_lock_classes, MAX_LOCKDEP_KEYS);
seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
nr_list_entries, MAX_LOCKDEP_ENTRIES);
seq_printf(m, " indirect dependencies: %11lu\n",
sum_forward_deps);
/*
* Total number of dependencies:
*
* All irq-safe locks may nest inside irq-unsafe locks,
* plus all the other known dependencies:
*/
seq_printf(m, " all direct dependencies: %11lu\n",
nr_irq_unsafe * nr_irq_safe +
nr_hardirq_unsafe * nr_hardirq_safe +
nr_list_entries);
#ifdef CONFIG_PROVE_LOCKING
seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
lock_chain_count(), MAX_LOCKDEP_CHAINS);
seq_printf(m, " dependency chain hlocks used: %11lu [max: %lu]\n",
MAX_LOCKDEP_CHAIN_HLOCKS -
(nr_free_chain_hlocks + nr_lost_chain_hlocks),
MAX_LOCKDEP_CHAIN_HLOCKS);
seq_printf(m, " dependency chain hlocks lost: %11u\n",
nr_lost_chain_hlocks);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
seq_printf(m, " in-hardirq chains: %11u\n",
nr_hardirq_chains);
seq_printf(m, " in-softirq chains: %11u\n",
nr_softirq_chains);
#endif
seq_printf(m, " in-process chains: %11u\n",
nr_process_chains);
seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
seq_printf(m, " number of stack traces: %11llu\n",
lockdep_stack_trace_count());
seq_printf(m, " number of stack hash chains: %11llu\n",
lockdep_stack_hash_count());
#endif
seq_printf(m, " combined max dependencies: %11u\n",
(nr_hardirq_chains + 1) *
(nr_softirq_chains + 1) *
(nr_process_chains + 1)
);
seq_printf(m, " hardirq-safe locks: %11lu\n",
nr_hardirq_safe);
seq_printf(m, " hardirq-unsafe locks: %11lu\n",
nr_hardirq_unsafe);
seq_printf(m, " softirq-safe locks: %11lu\n",
nr_softirq_safe);
seq_printf(m, " softirq-unsafe locks: %11lu\n",
nr_softirq_unsafe);
seq_printf(m, " irq-safe locks: %11lu\n",
nr_irq_safe);
seq_printf(m, " irq-unsafe locks: %11lu\n",
nr_irq_unsafe);
seq_printf(m, " hardirq-read-safe locks: %11lu\n",
nr_hardirq_read_safe);
seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
nr_hardirq_read_unsafe);
seq_printf(m, " softirq-read-safe locks: %11lu\n",
nr_softirq_read_safe);
seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
nr_softirq_read_unsafe);
seq_printf(m, " irq-read-safe locks: %11lu\n",
nr_irq_read_safe);
seq_printf(m, " irq-read-unsafe locks: %11lu\n",
nr_irq_read_unsafe);
seq_printf(m, " uncategorized locks: %11lu\n",
nr_uncategorized);
seq_printf(m, " unused locks: %11lu\n",
nr_unused);
seq_printf(m, " max locking depth: %11u\n",
max_lockdep_depth);
#ifdef CONFIG_PROVE_LOCKING
seq_printf(m, " max bfs queue depth: %11u\n",
max_bfs_queue_depth);
#endif
seq_printf(m, " max lock class index: %11lu\n",
max_lock_class_idx);
lockdep_stats_debug_show(m);
seq_printf(m, " debug_locks: %11u\n",
debug_locks);
/*
* Zapped classes and lockdep data buffers reuse statistics.
*/
seq_puts(m, "\n");
seq_printf(m, " zapped classes: %11lu\n",
nr_zapped_classes);
#ifdef CONFIG_PROVE_LOCKING
seq_printf(m, " zapped lock chains: %11lu\n",
nr_zapped_lock_chains);
seq_printf(m, " large chain blocks: %11u\n",
nr_large_chain_blocks);
#endif
return 0;
}
#ifdef CONFIG_LOCK_STAT
struct lock_stat_data {
struct lock_class *class;
struct lock_class_stats stats;
};
struct lock_stat_seq {
struct lock_stat_data *iter_end;
struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};
/*
* sort on absolute number of contentions
*/
static int lock_stat_cmp(const void *l, const void *r)
{
const struct lock_stat_data *dl = l, *dr = r;
unsigned long nl, nr;
nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
return nr - nl;
}
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
int i;
for (i = 0; i < offset; i++)
seq_puts(m, " ");
for (i = 0; i < length; i++)
seq_printf(m, "%c", c);
seq_puts(m, "\n");
}
static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{
s64 div;
s32 rem;
nr += 5; /* for display rounding */
div = div_s64_rem(nr, 1000, &rem);
snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
}
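/*
 * Worked example: for nr == 1234567, the rounding add gives 1234572,
 * div_s64_rem() yields div == 1234 and rem == 572, and the buffer gets
 * "1234.57" -- the raw value scaled down by 1000 with two decimal places.
 */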
static void seq_time(struct seq_file *m, s64 time)
{
char num[15];
snprint_time(num, sizeof(num), time);
seq_printf(m, " %14s", num);
}
static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
{
seq_printf(m, "%14lu", lt->nr);
seq_time(m, lt->min);
seq_time(m, lt->max);
seq_time(m, lt->total);
seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
}
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
{
const struct lockdep_subclass_key *ckey;
struct lock_class_stats *stats;
struct lock_class *class;
const char *cname;
int i, namelen;
char name[39];
class = data->class;
stats = &data->stats;
namelen = 38;
if (class->name_version > 1)
namelen -= 2; /* XXX truncates versions > 9 */
if (class->subclass)
namelen -= 2;
rcu_read_lock_sched();
cname = rcu_dereference_sched(class->name);
ckey = rcu_dereference_sched(class->key);
if (!cname && !ckey) {
rcu_read_unlock_sched();
return;
} else if (!cname) {
char str[KSYM_NAME_LEN];
const char *key_name;
key_name = __get_key_name(ckey, str);
snprintf(name, namelen, "%s", key_name);
} else {
snprintf(name, namelen, "%s", cname);
}
rcu_read_unlock_sched();
namelen = strlen(name);
if (class->name_version > 1) {
snprintf(name+namelen, 3, "#%d", class->name_version);
namelen += 2;
}
if (class->subclass) {
snprintf(name+namelen, 3, "/%d", class->subclass);
namelen += 2;
}
if (stats->write_holdtime.nr) {
if (stats->read_holdtime.nr)
seq_printf(m, "%38s-W:", name);
else
seq_printf(m, "%40s:", name);
seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
seq_lock_time(m, &stats->write_waittime);
seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
seq_lock_time(m, &stats->write_holdtime);
seq_puts(m, "\n");
}
if (stats->read_holdtime.nr) {
seq_printf(m, "%38s-R:", name);
seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
seq_lock_time(m, &stats->read_waittime);
seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
seq_lock_time(m, &stats->read_holdtime);
seq_puts(m, "\n");
}
if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
return;
if (stats->read_holdtime.nr)
namelen += 2;
for (i = 0; i < LOCKSTAT_POINTS; i++) {
char ip[32];
if (class->contention_point[i] == 0)
break;
if (!i)
seq_line(m, '-', 40-namelen, namelen);
snprintf(ip, sizeof(ip), "[<%p>]",
(void *)class->contention_point[i]);
seq_printf(m, "%40s %14lu %29s %pS\n",
name, stats->contention_point[i],
ip, (void *)class->contention_point[i]);
}
for (i = 0; i < LOCKSTAT_POINTS; i++) {
char ip[32];
if (class->contending_point[i] == 0)
break;
if (!i)
seq_line(m, '-', 40-namelen, namelen);
snprintf(ip, sizeof(ip), "[<%p>]",
(void *)class->contending_point[i]);
seq_printf(m, "%40s %14lu %29s %pS\n",
name, stats->contending_point[i],
ip, (void *)class->contending_point[i]);
}
if (i) {
seq_puts(m, "\n");
seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
seq_puts(m, "\n");
}
}
static void seq_header(struct seq_file *m)
{
seq_puts(m, "lock_stat version 0.4\n");
if (unlikely(!debug_locks))
seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
"%14s %14s\n",
"class name",
"con-bounces",
"contentions",
"waittime-min",
"waittime-max",
"waittime-total",
"waittime-avg",
"acq-bounces",
"acquisitions",
"holdtime-min",
"holdtime-max",
"holdtime-total",
"holdtime-avg");
seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
seq_printf(m, "\n");
}
static void *ls_start(struct seq_file *m, loff_t *pos)
{
struct lock_stat_seq *data = m->private;
struct lock_stat_data *iter;
if (*pos == 0)
return SEQ_START_TOKEN;
iter = data->stats + (*pos - 1);
if (iter >= data->iter_end)
iter = NULL;
return iter;
}
static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return ls_start(m, pos);
}
static void ls_stop(struct seq_file *m, void *v)
{
}
static int ls_show(struct seq_file *m, void *v)
{
if (v == SEQ_START_TOKEN)
seq_header(m);
else
seq_stats(m, v);
return 0;
}
static const struct seq_operations lockstat_ops = {
.start = ls_start,
.next = ls_next,
.stop = ls_stop,
.show = ls_show,
};
static int lock_stat_open(struct inode *inode, struct file *file)
{
int res;
struct lock_class *class;
struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));
if (!data)
return -ENOMEM;
res = seq_open(file, &lockstat_ops);
if (!res) {
struct lock_stat_data *iter = data->stats;
struct seq_file *m = file->private_data;
unsigned long idx;
iterate_lock_classes(idx, class) {
if (!test_bit(idx, lock_classes_in_use))
continue;
iter->class = class;
iter->stats = lock_stats(class);
iter++;
}
data->iter_end = iter;
sort(data->stats, data->iter_end - data->stats,
sizeof(struct lock_stat_data),
lock_stat_cmp, NULL);
m->private = data;
} else
vfree(data);
return res;
}
static ssize_t lock_stat_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct lock_class *class;
unsigned long idx;
char c;
if (count) {
if (get_user(c, buf))
return -EFAULT;
if (c != '0')
return count;
iterate_lock_classes(idx, class) {
if (!test_bit(idx, lock_classes_in_use))
continue;
clear_lock_stats(class);
}
}
return count;
}
static int lock_stat_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
vfree(seq->private);
return seq_release(inode, file);
}
static const struct proc_ops lock_stat_proc_ops = {
.proc_open = lock_stat_open,
.proc_write = lock_stat_write,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = lock_stat_release,
};
#endif /* CONFIG_LOCK_STAT */
static int __init lockdep_proc_init(void)
{
proc_create_seq("lockdep", S_IRUSR, NULL, &lockdep_ops);
#ifdef CONFIG_PROVE_LOCKING
proc_create_seq("lockdep_chains", S_IRUSR, NULL, &lockdep_chains_ops);
#endif
proc_create_single("lockdep_stats", S_IRUSR, NULL, lockdep_stats_show);
#ifdef CONFIG_LOCK_STAT
proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL, &lock_stat_proc_ops);
#endif
return 0;
}
__initcall(lockdep_proc_init);
| linux-master | kernel/locking/lockdep_proc.c |
/*
* Debugging code for mutexes
*
* Started by Ingo Molnar:
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <[email protected]>
*
* lock debugging, locking tree, deadlock detection started by:
*
* Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
* Released under the General Public License (GPL).
*/
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mutex.h"
/*
* Must be called with lock->wait_lock held.
*/
void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
{
memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
waiter->magic = waiter;
INIT_LIST_HEAD(&waiter->list);
waiter->ww_ctx = MUTEX_POISON_WW_CTX;
}
void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
lockdep_assert_held(&lock->wait_lock);
DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
}
void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
DEBUG_LOCKS_WARN_ON(!list_empty(&waiter->list));
memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
}
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
lockdep_assert_held(&lock->wait_lock);
/* Mark the current thread as blocked on the lock: */
task->blocked_on = waiter;
}
void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
DEBUG_LOCKS_WARN_ON(waiter->task != task);
DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
task->blocked_on = NULL;
INIT_LIST_HEAD(&waiter->list);
waiter->task = NULL;
}
void debug_mutex_unlock(struct mutex *lock)
{
if (likely(debug_locks)) {
DEBUG_LOCKS_WARN_ON(lock->magic != lock);
DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
}
}
void debug_mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
lock->magic = lock;
}
/**
* mutex_destroy - mark a mutex unusable
* @lock: the mutex to be destroyed
*
* This function marks the mutex uninitialized, and any subsequent
* use of the mutex is forbidden. The mutex must not be locked when
* this function is called.
*/
void mutex_destroy(struct mutex *lock)
{
DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
lock->magic = NULL;
}
EXPORT_SYMBOL_GPL(mutex_destroy);
| linux-master | kernel/locking/mutex-debug.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PREEMPT_RT substitution for spin/rw_locks
*
* spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
* resemble the non RT semantics:
*
* - Contrary to plain rtmutexes, spinlocks and rwlocks are state
* preserving. The task state is saved before blocking on the underlying
* rtmutex, and restored when the lock has been acquired. Regular wakeups
* during that time are redirected to the saved state so no wake up is
* missed.
*
 * - Non RT spin/rwlocks disable preemption and possibly also interrupts.
* Disabling preemption has the side effect of disabling migration and
* preventing RCU grace periods.
*
* The RT substitutions explicitly disable migration and take
* rcu_read_lock() across the lock held section.
*/
#include <linux/spinlock.h>
#include <linux/export.h>
#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"
/*
* __might_resched() skips the state check as rtlocks are state
* preserving. Take RCU nesting into account as spin/read/write_lock() can
* legitimately nest into an RCU read side critical section.
*/
#define RTLOCK_RESCHED_OFFSETS \
(rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)
#define rtlock_might_resched() \
__might_resched(__FILE__, __LINE__, RTLOCK_RESCHED_OFFSETS)
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
rtlock_slowlock(rtm);
}
static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
rtlock_might_resched();
rtlock_lock(&lock->lock);
rcu_read_lock();
migrate_disable();
}
void __sched rt_spin_lock(spinlock_t *lock)
{
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);
void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
struct lockdep_map *nest_lock)
{
spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif
void __sched rt_spin_unlock(spinlock_t *lock)
{
spin_release(&lock->dep_map, _RET_IP_);
migrate_enable();
rcu_read_unlock();
if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);
/*
* Wait for the lock to get unlocked: instead of polling for an unlock
* (like raw spinlocks do), lock and unlock, to force the kernel to
* schedule if there's contention:
*/
void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
spin_lock(lock);
spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);
static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{
int ret = 1;
if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
ret = rt_mutex_slowtrylock(&lock->lock);
if (ret) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
rcu_read_lock();
migrate_disable();
}
return ret;
}
int __sched rt_spin_trylock(spinlock_t *lock)
{
return __rt_spin_trylock(lock);
}
EXPORT_SYMBOL(rt_spin_trylock);
int __sched rt_spin_trylock_bh(spinlock_t *lock)
{
int ret;
local_bh_disable();
ret = __rt_spin_trylock(lock);
if (!ret)
local_bh_enable();
return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
struct lock_class_key *key, bool percpu)
{
u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
LD_WAIT_INV, type);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif
/*
* RT-specific reader/writer locks
*/
#define rwbase_set_and_save_current_state(state) \
current_save_and_set_rtlock_wait_state()
#define rwbase_restore_current_state() \
current_restore_rtlock_saved_state()
static __always_inline int
rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
{
if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
rtlock_slowlock(rtm);
return 0;
}
static __always_inline int
rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
{
rtlock_slowlock_locked(rtm);
return 0;
}
static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
{
if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
return;
rt_mutex_slowunlock(rtm);
}
static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
{
if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
return 1;
return rt_mutex_slowtrylock(rtm);
}
#define rwbase_signal_pending_state(state, current) (0)
#define rwbase_schedule() \
schedule_rtlock()
#include "rwbase_rt.c"
/*
* The common functions which get wrapped into the rwlock API.
*/
int __sched rt_read_trylock(rwlock_t *rwlock)
{
int ret;
ret = rwbase_read_trylock(&rwlock->rwbase);
if (ret) {
rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
rcu_read_lock();
migrate_disable();
}
return ret;
}
EXPORT_SYMBOL(rt_read_trylock);
int __sched rt_write_trylock(rwlock_t *rwlock)
{
int ret;
ret = rwbase_write_trylock(&rwlock->rwbase);
if (ret) {
rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
rcu_read_lock();
migrate_disable();
}
return ret;
}
EXPORT_SYMBOL(rt_write_trylock);
void __sched rt_read_lock(rwlock_t *rwlock)
{
rtlock_might_resched();
rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
rcu_read_lock();
migrate_disable();
}
EXPORT_SYMBOL(rt_read_lock);
void __sched rt_write_lock(rwlock_t *rwlock)
{
rtlock_might_resched();
rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
rcu_read_lock();
migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
{
rtlock_might_resched();
rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
rcu_read_lock();
migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock_nested);
#endif
void __sched rt_read_unlock(rwlock_t *rwlock)
{
rwlock_release(&rwlock->dep_map, _RET_IP_);
migrate_enable();
rcu_read_unlock();
rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
}
EXPORT_SYMBOL(rt_read_unlock);
void __sched rt_write_unlock(rwlock_t *rwlock)
{
rwlock_release(&rwlock->dep_map, _RET_IP_);
rcu_read_unlock();
migrate_enable();
rwbase_write_unlock(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_write_unlock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
struct lock_class_key *key)
{
debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif
| linux-master | kernel/locking/spinlock_rt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Queued read/write locks
*
* (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
*
* Authors: Waiman Long <[email protected]>
*/
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <trace/events/lock.h>
/**
* queued_read_lock_slowpath - acquire read lock of a queued rwlock
* @lock: Pointer to queued rwlock structure
*/
void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock)
{
/*
* Readers come here when they cannot get the lock without waiting
*/
if (unlikely(in_interrupt())) {
/*
* Readers in interrupt context will get the lock immediately
* if the writer is just waiting (not holding the lock yet),
* so spin with ACQUIRE semantics until the lock is available
* without waiting in the queue.
*/
atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
return;
}
atomic_sub(_QR_BIAS, &lock->cnts);
trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);
/*
* Put the reader into the wait queue
*/
arch_spin_lock(&lock->wait_lock);
atomic_add(_QR_BIAS, &lock->cnts);
/*
* The ACQUIRE semantics of the following spinning code ensure
* that accesses can't leak upwards out of our subsequent critical
* section in the case that the lock is currently held for write.
*/
atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
/*
* Signal the next one in queue to become queue head
*/
arch_spin_unlock(&lock->wait_lock);
trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
/**
* queued_write_lock_slowpath - acquire write lock of a queued rwlock
* @lock : Pointer to queued rwlock structure
*/
void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock)
{
int cnts;
trace_contention_begin(lock, LCB_F_SPIN | LCB_F_WRITE);
/* Put the writer into the wait queue */
arch_spin_lock(&lock->wait_lock);
/* Try to acquire the lock directly if no reader is present */
if (!(cnts = atomic_read(&lock->cnts)) &&
atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))
goto unlock;
/* Set the waiting flag to notify readers that a writer is pending */
atomic_or(_QW_WAITING, &lock->cnts);
/* When no more readers or writers, set the locked flag */
do {
cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
arch_spin_unlock(&lock->wait_lock);
trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
| linux-master | kernel/locking/qrwlock.c |
// SPDX-License-Identifier: GPL-2.0
/* kernel/rwsem.c: R/W semaphores, public implementation
*
* Written by David Howells ([email protected]).
* Derived from asm-i386/semaphore.h
*
* Writer lock-stealing by Alex Shi <[email protected]>
* and Michel Lespinasse <[email protected]>
*
* Optimistic spinning by Tim Chen <[email protected]>
* and Davidlohr Bueso <[email protected]>. Based on mutexes.
*
* Rwsem count bit fields re-definition and rwsem rearchitecture by
* Waiman Long <[email protected]> and
* Peter Zijlstra <[email protected]>.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <trace/events/lock.h>
#ifndef CONFIG_PREEMPT_RT
#include "lock_events.h"
/*
* The least significant 2 bits of the owner value has the following
* meanings when set.
* - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
* - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
*
* When the rwsem is reader-owned and a spinning writer has timed out,
* the nonspinnable bit will be set to disable optimistic spinning.
* When a writer acquires a rwsem, it puts its task_struct pointer
* into the owner field. It is cleared after an unlock.
*
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
 * On unlock, the owner field will largely be left untouched. So
 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
*
* That information may be helpful in debugging cases where the system
* seems to hang on a reader owned rwsem especially if only one reader
* is involved. Ideally we would like to track all the readers that own
* a rwsem, but the overhead is simply too big.
*
 * Fast-path optimistic reader lock stealing is supported when the rwsem
 * was previously owned by a writer and the following conditions are met:
* - rwsem is not currently writer owned
* - the handoff isn't set.
*/
#define RWSEM_READER_OWNED (1UL << 0)
#define RWSEM_NONSPINNABLE (1UL << 1)
#define RWSEM_OWNER_FLAGS_MASK (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
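/*
 * Illustrative decode of the owner word (a sketch; "owner_task_sketch" is a
 * hypothetical name): mask off the low flag bits to recover the task_struct
 * pointer, and test the flags separately via rwsem_test_oflags() below.
 */
#if 0 /* illustration only */
static inline struct task_struct *owner_task_sketch(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}
#endif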
#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
if (!debug_locks_silent && \
WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
#c, atomic_long_read(&(sem)->count), \
(unsigned long) sem->magic, \
atomic_long_read(&(sem)->owner), (long)current, \
list_empty(&(sem)->wait_list) ? "" : "not ")) \
debug_locks_off(); \
} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif
/*
* On 64-bit architectures, the bit definitions of the count are:
*
* Bit 0 - writer locked bit
* Bit 1 - waiters present bit
* Bit 2 - lock handoff bit
* Bits 3-7 - reserved
* Bits 8-62 - 55-bit reader count
* Bit 63 - read fail bit
*
* On 32-bit architectures, the bit definitions of the count are:
*
* Bit 0 - writer locked bit
* Bit 1 - waiters present bit
* Bit 2 - lock handoff bit
* Bits 3-7 - reserved
* Bits 8-30 - 23-bit reader count
* Bit 31 - read fail bit
*
* It is not likely that the most significant bit (read fail bit) will ever
* be set. This guard bit is still checked anyway in the down_read() fastpath
 * just in case we need to use up more of the reader bits for other purposes
 * in the future.
*
* atomic_long_fetch_add() is used to obtain reader lock, whereas
* atomic_long_cmpxchg() will be used to obtain writer lock.
*
* There are three places where the lock handoff bit may be set or cleared.
* 1) rwsem_mark_wake() for readers -- set, clear
* 2) rwsem_try_write_lock() for writers -- set, clear
* 3) rwsem_del_waiter() -- clear
*
* For all the above cases, wait_lock will be held. A writer must also
* be the first one in the wait_list to be eligible for setting the handoff
* bit. So concurrent setting/clearing of handoff bit is not possible.
*/
#define RWSEM_WRITER_LOCKED (1UL << 0)
#define RWSEM_FLAG_WAITERS (1UL << 1)
#define RWSEM_FLAG_HANDOFF (1UL << 2)
#define RWSEM_FLAG_READFAIL (1UL << (BITS_PER_LONG - 1))
#define RWSEM_READER_SHIFT 8
#define RWSEM_READER_BIAS (1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK (~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
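/*
 * Editor's worked example, not part of the original file, using the 64-bit
 * layout above: one reader holding the lock gives count == RWSEM_READER_BIAS
 * == 0x100 and two readers give 0x200; a write lock gives count ==
 * RWSEM_WRITER_LOCKED == 0x1; a write lock with a sleeping waiter gives 0x3
 * (RWSEM_WRITER_LOCKED | RWSEM_FLAG_WAITERS).
 */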
/*
* All writes to owner are protected by WRITE_ONCE() to make sure that
* store tearing can't happen as optimistic spinners may read and use
* the owner value concurrently without lock. Read from owner, however,
* may not need READ_ONCE() as long as the pointer value is only used
* for comparison and isn't being dereferenced.
*
* Both rwsem_{set,clear}_owner() functions should be in the same
* preempt disable section as the atomic op that changes sem->count.
*/
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
lockdep_assert_preemption_disabled();
atomic_long_set(&sem->owner, (long)current);
}
static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
lockdep_assert_preemption_disabled();
atomic_long_set(&sem->owner, 0);
}
/*
* Test the flags in the owner field.
*/
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
{
return atomic_long_read(&sem->owner) & flags;
}
/*
* The task_struct pointer of the last owning reader will be left in
* the owner field.
*
* Note that the owner value just indicates the task has owned the rwsem
 * previously; it may not be the real owner or one of the real owners
* anymore when that field is examined, so take it with a grain of salt.
*
* The reader non-spinnable bit is preserved.
*/
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
struct task_struct *owner)
{
unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
(atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);
atomic_long_set(&sem->owner, val);
}
static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
__rwsem_set_reader_owned(sem, current);
}
/*
* Return true if the rwsem is owned by a reader.
*/
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
#ifdef CONFIG_DEBUG_RWSEMS
/*
* Check the count to see if it is write-locked.
*/
long count = atomic_long_read(&sem->count);
if (count & RWSEM_WRITER_MASK)
return false;
#endif
return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}
#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in the owner field of a reader-owned rwsem, it is
 * the real owner or one of the real owners. The only exception is when
 * the unlock is done by up_read_non_owner().
*/
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
unsigned long val = atomic_long_read(&sem->owner);
while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
if (atomic_long_try_cmpxchg(&sem->owner, &val,
val & RWSEM_OWNER_FLAGS_MASK))
return;
}
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif
/*
 * Set the RWSEM_NONSPINNABLE bit if the RWSEM_READER_OWNED flag
* remains set. Otherwise, the operation will be aborted.
*/
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
unsigned long owner = atomic_long_read(&sem->owner);
do {
if (!(owner & RWSEM_READER_OWNED))
break;
if (owner & RWSEM_NONSPINNABLE)
break;
} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
owner | RWSEM_NONSPINNABLE));
}
static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
{
*cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
if (WARN_ON_ONCE(*cntp < 0))
rwsem_set_nonspinnable(sem);
if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
rwsem_set_reader_owned(sem);
return true;
}
return false;
}
static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
{
long tmp = RWSEM_UNLOCKED_VALUE;
if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
rwsem_set_owner(sem);
return true;
}
return false;
}
/*
* Return just the real task structure pointer of the owner
*/
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
{
return (struct task_struct *)
(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
}
/*
* Return the real task structure pointer of the owner and the embedded
* flags in the owner. pflags must be non-NULL.
*/
static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
{
unsigned long owner = atomic_long_read(&sem->owner);
*pflags = owner & RWSEM_OWNER_FLAGS_MASK;
return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}
/*
* Guide to the rw_semaphore's count field.
*
* When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
* by a writer.
*
* The lock is owned by readers when
* (1) the RWSEM_WRITER_LOCKED isn't set in count,
* (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
* lock as the readers may be in the process of backing out from the count
* and a writer has just released the lock. So another writer may steal
* the lock immediately after that.
*/
/*
* Initialize an rwsem:
*/
void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held semaphore:
*/
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
#ifdef CONFIG_DEBUG_RWSEMS
sem->magic = sem;
#endif
atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
osq_lock_init(&sem->osq);
#endif
}
EXPORT_SYMBOL(__init_rwsem);
enum rwsem_waiter_type {
RWSEM_WAITING_FOR_WRITE,
RWSEM_WAITING_FOR_READ
};
struct rwsem_waiter {
struct list_head list;
struct task_struct *task;
enum rwsem_waiter_type type;
unsigned long timeout;
bool handoff_set;
};
#define rwsem_first_waiter(sem) \
list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
enum rwsem_wake_type {
RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */
RWSEM_WAKE_READERS, /* Wake readers only */
RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
};
/*
* The typical HZ value is either 250 or 1000. So set the minimum waiting
* time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
* queue before initiating the handoff protocol.
*/
#define RWSEM_WAIT_TIMEOUT DIV_ROUND_UP(HZ, 250)
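/*
 * Editor's worked arithmetic, not part of the original file: with HZ=1000,
 * DIV_ROUND_UP(1000, 250) == 4 jiffies == 4ms; with HZ=250 it is 1 jiffy ==
 * 4ms; with HZ=100 it rounds up to 1 jiffy == 10ms. So the timeout is at
 * least 4ms, but never less than one jiffy.
 */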
/*
* Magic number to batch-wakeup waiting readers, even when writers are
* also present in the queue. This both limits the amount of work the
* waking thread must do and also prevents any potential counter overflow,
* however unlikely.
*/
#define MAX_READERS_WAKEUP 0x100
static inline void
rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
lockdep_assert_held(&sem->wait_lock);
list_add_tail(&waiter->list, &sem->wait_list);
/* caller will set RWSEM_FLAG_WAITERS */
}
/*
* Remove a waiter from the wait_list and clear flags.
*
* Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
* this function. Modify with care.
*
* Return: true if wait_list isn't empty and false otherwise
*/
static inline bool
rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
lockdep_assert_held(&sem->wait_lock);
list_del(&waiter->list);
if (likely(!list_empty(&sem->wait_list)))
return true;
atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
return false;
}
/*
 * handle the lock release when processes blocked on it can now run
* - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
* have been set.
* - there must be someone on the queue
* - the wait_lock must be held by the caller
* - tasks are marked for wakeup, the caller must later invoke wake_up_q()
* to actually wakeup the blocked task(s) and drop the reference count,
* preferably when the wait_lock is released
* - woken process blocks are discarded from the list after having task zeroed
* - writers are only marked woken if downgrading is false
*
* Implies rwsem_del_waiter() for all woken readers.
*/
static void rwsem_mark_wake(struct rw_semaphore *sem,
enum rwsem_wake_type wake_type,
struct wake_q_head *wake_q)
{
struct rwsem_waiter *waiter, *tmp;
long oldcount, woken = 0, adjustment = 0;
struct list_head wlist;
lockdep_assert_held(&sem->wait_lock);
/*
* Take a peek at the queue head waiter such that we can determine
* the wakeup(s) to perform.
*/
waiter = rwsem_first_waiter(sem);
if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
if (wake_type == RWSEM_WAKE_ANY) {
/*
* Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by
* the caller, other writers are able to steal it.
* Readers, on the other hand, will block as they
* will notice the queued writer.
*/
wake_q_add(wake_q, waiter->task);
lockevent_inc(rwsem_wake_writer);
}
return;
}
/*
* No reader wakeup if there are too many of them already.
*/
if (unlikely(atomic_long_read(&sem->count) < 0))
return;
/*
* Writers might steal the lock before we grant it to the next reader.
* We prefer to do the first reader grant before counting readers
* so we can bail out early if a writer stole the lock.
*/
if (wake_type != RWSEM_WAKE_READ_OWNED) {
struct task_struct *owner;
adjustment = RWSEM_READER_BIAS;
oldcount = atomic_long_fetch_add(adjustment, &sem->count);
if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
/*
* When we've been waiting "too" long (for writers
* to give up the lock), request a HANDOFF to
* force the issue.
*/
if (time_after(jiffies, waiter->timeout)) {
if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
adjustment -= RWSEM_FLAG_HANDOFF;
lockevent_inc(rwsem_rlock_handoff);
}
waiter->handoff_set = true;
}
atomic_long_add(-adjustment, &sem->count);
return;
}
/*
* Set it to reader-owned to give spinners an early
* indication that readers now have the lock.
* The reader nonspinnable bit seen at slowpath entry of
* the reader is copied over.
*/
owner = waiter->task;
__rwsem_set_reader_owned(sem, owner);
}
/*
* Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
	 * queue. We know that the woken count will be at least 1, as we
	 * accounted for one reader above. Note we increment the 'active
	 * part' of the count by the number of readers before waking any
	 * processes up.
*
* This is an adaptation of the phase-fair R/W locks where at the
* reader phase (first waiter is a reader), all readers are eligible
* to acquire the lock at the same time irrespective of their order
* in the queue. The writers acquire the lock according to their
* order in the queue.
*
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented:
	 * the to-be-woken waiter may not have slept yet, so it may see
	 * waiter->task cleared, finish its critical section and do an
	 * unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put it into wake_q to be woken up later.
*/
INIT_LIST_HEAD(&wlist);
list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
if (waiter->type == RWSEM_WAITING_FOR_WRITE)
continue;
woken++;
list_move_tail(&waiter->list, &wlist);
/*
* Limit # of readers that can be woken up per wakeup call.
*/
if (unlikely(woken >= MAX_READERS_WAKEUP))
break;
}
adjustment = woken * RWSEM_READER_BIAS - adjustment;
lockevent_cond_inc(rwsem_wake_reader, woken);
oldcount = atomic_long_read(&sem->count);
if (list_empty(&sem->wait_list)) {
/*
* Combined with list_move_tail() above, this implies
* rwsem_del_waiter().
*/
adjustment -= RWSEM_FLAG_WAITERS;
if (oldcount & RWSEM_FLAG_HANDOFF)
adjustment -= RWSEM_FLAG_HANDOFF;
} else if (woken) {
/*
* When we've woken a reader, we no longer need to force
* writers to give up the lock and we can clear HANDOFF.
*/
if (oldcount & RWSEM_FLAG_HANDOFF)
adjustment -= RWSEM_FLAG_HANDOFF;
}
if (adjustment)
atomic_long_add(adjustment, &sem->count);
/* 2nd pass */
list_for_each_entry_safe(waiter, tmp, &wlist, list) {
struct task_struct *tsk;
tsk = waiter->task;
get_task_struct(tsk);
/*
* Ensure calling get_task_struct() before setting the reader
* waiter to nil such that rwsem_down_read_slowpath() cannot
* race with do_exit() by always holding a reference count
* to the task to wakeup.
*/
smp_store_release(&waiter->task, NULL);
/*
* Ensure issuing the wakeup (either by us or someone else)
* after setting the reader waiter to nil.
*/
wake_q_add_safe(wake_q, tsk);
}
}
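/*
 * Editor's sketch of the race that the two-pass wakeup above avoids, not
 * part of the original file. If a single pass cleared waiter->task before
 * the reader count was raised, a not-yet-slept waiter could run ahead:
 *
 *	waiter: sees waiter.task == NULL, takes the read lock as granted
 *	waiter: finishes its critical section, calls up_read() // count -= BIAS
 *	waker:  atomic_long_add(woken * RWSEM_READER_BIAS, ..) // too late
 *
 * transiently corrupting the reader count. Raising the count for all woken
 * readers first (pass 1) before clearing waiter->task (pass 2) guarantees
 * every decrement follows its increment.
 */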
/*
* Remove a waiter and try to wake up other waiters in the wait queue
* This function is called from the out_nolock path of both the reader and
* writer slowpaths with wait_lock held. It releases the wait_lock and
 * optionally wakes up waiters before it returns.
*/
static inline void
rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
struct wake_q_head *wake_q)
__releases(&sem->wait_lock)
{
bool first = rwsem_first_waiter(sem) == waiter;
wake_q_init(wake_q);
/*
* If the wait_list isn't empty and the waiter to be deleted is
* the first waiter, we wake up the remaining waiters as they may
* be eligible to acquire or spin on the lock.
*/
if (rwsem_del_waiter(sem, waiter) && first)
rwsem_mark_wake(sem, RWSEM_WAKE_ANY, wake_q);
raw_spin_unlock_irq(&sem->wait_lock);
if (!wake_q_empty(wake_q))
wake_up_q(wake_q);
}
/*
* This function must be called with the sem->wait_lock held to prevent
* race conditions between checking the rwsem wait list and setting the
* sem->count accordingly.
*
* Implies rwsem_del_waiter() on success.
*/
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
struct rwsem_waiter *waiter)
{
struct rwsem_waiter *first = rwsem_first_waiter(sem);
long count, new;
lockdep_assert_held(&sem->wait_lock);
count = atomic_long_read(&sem->count);
do {
bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
if (has_handoff) {
/*
* Honor handoff bit and yield only when the first
			 * waiter is the one that set it. Otherwise, we
* still try to acquire the rwsem.
*/
if (first->handoff_set && (waiter != first))
return false;
}
new = count;
if (count & RWSEM_LOCK_MASK) {
/*
* A waiter (first or not) can set the handoff bit
			 * if it is an RT task or has waited in the wait
			 * queue for too long.
*/
if (has_handoff || (!rt_task(waiter->task) &&
!time_after(jiffies, waiter->timeout)))
return false;
new |= RWSEM_FLAG_HANDOFF;
} else {
new |= RWSEM_WRITER_LOCKED;
new &= ~RWSEM_FLAG_HANDOFF;
if (list_is_singular(&sem->wait_list))
new &= ~RWSEM_FLAG_WAITERS;
}
} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
/*
* We have either acquired the lock with handoff bit cleared or set
* the handoff bit. Only the first waiter can have its handoff_set
	 * set here to enable optimistic spinning in the slowpath loop.
*/
if (new & RWSEM_FLAG_HANDOFF) {
first->handoff_set = true;
lockevent_inc(rwsem_wlock_handoff);
return false;
}
/*
* Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
* success.
*/
list_del(&waiter->list);
rwsem_set_owner(sem);
return true;
}
/*
* The rwsem_spin_on_owner() function returns the following 4 values
* depending on the lock owner state.
* OWNER_NULL : owner is currently NULL
* OWNER_WRITER: when owner changes and is a writer
* OWNER_READER: when owner changes and the new owner may be a reader.
* OWNER_NONSPINNABLE:
* when optimistic spinning has to stop because either the
* owner stops running, is unknown, or its timeslice has
* been used up.
*/
enum owner_state {
OWNER_NULL = 1 << 0,
OWNER_WRITER = 1 << 1,
OWNER_READER = 1 << 2,
OWNER_NONSPINNABLE = 1 << 3,
};
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
* Try to acquire write lock before the writer has been put on wait queue.
*/
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
long count = atomic_long_read(&sem->count);
while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
count | RWSEM_WRITER_LOCKED)) {
rwsem_set_owner(sem);
lockevent_inc(rwsem_opt_lock);
return true;
}
}
return false;
}
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
struct task_struct *owner;
unsigned long flags;
bool ret = true;
if (need_resched()) {
lockevent_inc(rwsem_opt_fail);
return false;
}
/*
	 * Disabled preemption is equivalent to an RCU read-side critical
	 * section, thus the task_struct structure won't go away.
*/
owner = rwsem_owner_flags(sem, &flags);
/*
* Don't check the read-owner as the entry may be stale.
*/
if ((flags & RWSEM_NONSPINNABLE) ||
(owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
ret = false;
lockevent_cond_inc(rwsem_opt_fail, !ret);
return ret;
}
#define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER | OWNER_READER)
static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags)
{
if (flags & RWSEM_NONSPINNABLE)
return OWNER_NONSPINNABLE;
if (flags & RWSEM_READER_OWNED)
return OWNER_READER;
return owner ? OWNER_WRITER : OWNER_NULL;
}
static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
struct task_struct *new, *owner;
unsigned long flags, new_flags;
enum owner_state state;
lockdep_assert_preemption_disabled();
owner = rwsem_owner_flags(sem, &flags);
state = rwsem_owner_state(owner, flags);
if (state != OWNER_WRITER)
return state;
for (;;) {
/*
		 * When a waiting writer sets the handoff flag, it may spin
* on the owner as well. Once that writer acquires the lock,
* we can spin on it. So we don't need to quit even when the
* handoff bit is set.
*/
new = rwsem_owner_flags(sem, &new_flags);
if ((new != owner) || (new_flags != flags)) {
state = rwsem_owner_state(new, new_flags);
break;
}
/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * fails, owner might point to free()d memory. If it still
		 * matches, our spinning context has already disabled
		 * preemption, which is equivalent to an RCU read-side
		 * critical section and ensures the memory stays valid.
*/
barrier();
if (need_resched() || !owner_on_cpu(owner)) {
state = OWNER_NONSPINNABLE;
break;
}
cpu_relax();
}
return state;
}
/*
* Calculate reader-owned rwsem spinning threshold for writer
*
* The more readers own the rwsem, the longer it will take for them to
* wind down and free the rwsem. So the empirical formula used to
* determine the actual spinning time limit here is:
*
* Spinning threshold = (10 + nr_readers/2)us
*
* The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is subject to change in the future.
*/
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
{
long count = atomic_long_read(&sem->count);
int readers = count >> RWSEM_READER_SHIFT;
u64 delta;
if (readers > 30)
readers = 30;
delta = (20 + readers) * NSEC_PER_USEC / 2;
return sched_clock() + delta;
}
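/*
 * Editor's worked example, not part of the original file: with 10 readers,
 * delta = (20 + 10) * NSEC_PER_USEC / 2 = 15us, matching the formula
 * (10 + nr_readers/2)us above; with 30 or more readers the cap yields
 * (20 + 30) / 2 = 25us.
 */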
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
bool taken = false;
int prev_owner_state = OWNER_NULL;
int loop = 0;
u64 rspin_threshold = 0;
/* sem->wait_lock should not be held when doing optimistic spinning */
if (!osq_lock(&sem->osq))
goto done;
/*
* Optimistically spin on the owner field and attempt to acquire the
* lock whenever the owner changes. Spinning will be stopped when:
* 1) the owning writer isn't running; or
* 2) readers own the lock and spinning time has exceeded limit.
*/
for (;;) {
enum owner_state owner_state;
owner_state = rwsem_spin_on_owner(sem);
if (!(owner_state & OWNER_SPINNABLE))
break;
/*
* Try to acquire the lock
*/
taken = rwsem_try_write_lock_unqueued(sem);
if (taken)
break;
/*
* Time-based reader-owned rwsem optimistic spinning
*/
if (owner_state == OWNER_READER) {
/*
			 * Re-initialize rspin_threshold every time
* the owner state changes from non-reader to reader.
* This allows a writer to steal the lock in between
* 2 reader phases and have the threshold reset at
* the beginning of the 2nd reader phase.
*/
if (prev_owner_state != OWNER_READER) {
if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
break;
rspin_threshold = rwsem_rspin_threshold(sem);
loop = 0;
}
/*
* Check time threshold once every 16 iterations to
* avoid calling sched_clock() too frequently so
* as to reduce the average latency between the times
* when the lock becomes free and when the spinner
* is ready to do a trylock.
*/
else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
rwsem_set_nonspinnable(sem);
lockevent_inc(rwsem_opt_nospin);
break;
}
}
/*
* An RT task cannot do optimistic spinning if it cannot
* be sure the lock holder is running or live-lock may
* happen if the current task and the lock holder happen
* to run in the same CPU. However, aborting optimistic
		 * spinning whenever a NULL owner is detected may miss some
		 * opportunities where spinning can continue without causing
		 * a problem.
*
* There are 2 possible cases where an RT task may be able
* to continue spinning.
*
* 1) The lock owner is in the process of releasing the
* lock, sem->owner is cleared but the lock has not
* been released yet.
		 * 2) The lock was free and owner cleared, but another
		 *    task just came in and acquired the lock before
		 *    we tried to get it. The new owner may be a spinnable
		 *    writer.
		 *
		 * To take advantage of the two scenarios listed above, the RT
* task is made to retry one more time to see if it can
* acquire the lock or continue spinning on the new owning
* writer. Of course, if the time lag is long enough or the
* new owner is not a writer or spinnable, the RT task will
* quit spinning.
*
* If the owner is a writer, the need_resched() check is
* done inside rwsem_spin_on_owner(). If the owner is not
* a writer, need_resched() check needs to be done here.
*/
if (owner_state != OWNER_WRITER) {
if (need_resched())
break;
if (rt_task(current) &&
(prev_owner_state != OWNER_WRITER))
break;
}
prev_owner_state = owner_state;
/*
* The cpu_relax() call is a compiler barrier which forces
* everything in this loop to be re-loaded. We don't need
* memory barriers as we'll eventually observe the right
* values at the cost of a few extra spins.
*/
cpu_relax();
}
osq_unlock(&sem->osq);
done:
lockevent_cond_inc(rwsem_opt_fail, !taken);
return taken;
}
/*
* Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
* only be called when the reader count reaches 0.
*/
static inline void clear_nonspinnable(struct rw_semaphore *sem)
{
if (unlikely(rwsem_test_oflags(sem, RWSEM_NONSPINNABLE)))
atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
}
#else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
return false;
}
static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
return false;
}
static inline void clear_nonspinnable(struct rw_semaphore *sem) { }
static inline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
return OWNER_NONSPINNABLE;
}
#endif
/*
* Prepare to wake up waiter(s) in the wait queue by putting them into the
* given wake_q if the rwsem lock owner isn't a writer. If rwsem is likely
 * reader-owned, wake up the read lock waiters at the queue front;
 * otherwise, wake up any front waiter.
* This is being called from both reader and writer slow paths.
*/
static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count,
struct wake_q_head *wake_q)
{
enum rwsem_wake_type wake_type;
if (count & RWSEM_WRITER_MASK)
return;
if (count & RWSEM_READER_MASK) {
wake_type = RWSEM_WAKE_READERS;
} else {
wake_type = RWSEM_WAKE_ANY;
clear_nonspinnable(sem);
}
rwsem_mark_wake(sem, wake_type, wake_q);
}
/*
* Wait for the read lock to be granted
*/
static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
{
long adjustment = -RWSEM_READER_BIAS;
long rcnt = (count >> RWSEM_READER_SHIFT);
struct rwsem_waiter waiter;
DEFINE_WAKE_Q(wake_q);
/*
* To prevent a constant stream of readers from starving a sleeping
* waiter, don't attempt optimistic lock stealing if the lock is
* currently owned by readers.
*/
if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
(rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
goto queue;
/*
* Reader optimistic lock stealing.
*/
if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
rwsem_set_reader_owned(sem);
lockevent_inc(rwsem_rlock_steal);
/*
* Wake up other readers in the wait queue if it is
* the first reader.
*/
if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
raw_spin_lock_irq(&sem->wait_lock);
if (!list_empty(&sem->wait_list))
rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
&wake_q);
raw_spin_unlock_irq(&sem->wait_lock);
wake_up_q(&wake_q);
}
return sem;
}
queue:
waiter.task = current;
waiter.type = RWSEM_WAITING_FOR_READ;
waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
waiter.handoff_set = false;
raw_spin_lock_irq(&sem->wait_lock);
if (list_empty(&sem->wait_list)) {
/*
* In case the wait queue is empty and the lock isn't owned
* by a writer, this reader can exit the slowpath and return
* immediately as its RWSEM_READER_BIAS has already been set
* in the count.
*/
if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
/* Provide lock ACQUIRE */
smp_acquire__after_ctrl_dep();
raw_spin_unlock_irq(&sem->wait_lock);
rwsem_set_reader_owned(sem);
lockevent_inc(rwsem_rlock_fast);
return sem;
}
adjustment += RWSEM_FLAG_WAITERS;
}
rwsem_add_waiter(sem, &waiter);
/* we're now waiting on the lock, but no longer actively locking */
count = atomic_long_add_return(adjustment, &sem->count);
rwsem_cond_wake_waiter(sem, count, &wake_q);
raw_spin_unlock_irq(&sem->wait_lock);
if (!wake_q_empty(&wake_q))
wake_up_q(&wake_q);
trace_contention_begin(sem, LCB_F_READ);
/* wait to be given the lock */
for (;;) {
set_current_state(state);
if (!smp_load_acquire(&waiter.task)) {
/* Matches rwsem_mark_wake()'s smp_store_release(). */
break;
}
if (signal_pending_state(state, current)) {
raw_spin_lock_irq(&sem->wait_lock);
if (waiter.task)
goto out_nolock;
raw_spin_unlock_irq(&sem->wait_lock);
/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
break;
}
schedule_preempt_disabled();
lockevent_inc(rwsem_sleep_reader);
}
__set_current_state(TASK_RUNNING);
lockevent_inc(rwsem_rlock);
trace_contention_end(sem, 0);
return sem;
out_nolock:
rwsem_del_wake_waiter(sem, &waiter, &wake_q);
__set_current_state(TASK_RUNNING);
lockevent_inc(rwsem_rlock_fail);
trace_contention_end(sem, -EINTR);
return ERR_PTR(-EINTR);
}
/*
* Wait until we successfully acquire the write lock
*/
static struct rw_semaphore __sched *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
struct rwsem_waiter waiter;
DEFINE_WAKE_Q(wake_q);
/* do optimistic spinning and steal lock if possible */
if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
/* rwsem_optimistic_spin() implies ACQUIRE on success */
return sem;
}
/*
* Optimistic spinning failed, proceed to the slowpath
* and block until we can acquire the sem.
*/
waiter.task = current;
waiter.type = RWSEM_WAITING_FOR_WRITE;
waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
waiter.handoff_set = false;
raw_spin_lock_irq(&sem->wait_lock);
rwsem_add_waiter(sem, &waiter);
/* we're now waiting on the lock */
if (rwsem_first_waiter(sem) != &waiter) {
rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count),
&wake_q);
if (!wake_q_empty(&wake_q)) {
/*
* We want to minimize wait_lock hold time especially
* when a large number of readers are to be woken up.
*/
raw_spin_unlock_irq(&sem->wait_lock);
wake_up_q(&wake_q);
raw_spin_lock_irq(&sem->wait_lock);
}
} else {
atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
}
/* wait until we successfully acquire the lock */
set_current_state(state);
trace_contention_begin(sem, LCB_F_WRITE);
for (;;) {
if (rwsem_try_write_lock(sem, &waiter)) {
/* rwsem_try_write_lock() implies ACQUIRE on success */
break;
}
raw_spin_unlock_irq(&sem->wait_lock);
if (signal_pending_state(state, current))
goto out_nolock;
/*
* After setting the handoff bit and failing to acquire
		 * the lock, attempt to spin on the owner to accelerate lock
		 * transfer. If the previous owner is an on-cpu writer and it
* has just released the lock, OWNER_NULL will be returned.
* In this case, we attempt to acquire the lock again
* without sleeping.
*/
if (waiter.handoff_set) {
enum owner_state owner_state;
owner_state = rwsem_spin_on_owner(sem);
if (owner_state == OWNER_NULL)
goto trylock_again;
}
schedule_preempt_disabled();
lockevent_inc(rwsem_sleep_writer);
set_current_state(state);
trylock_again:
raw_spin_lock_irq(&sem->wait_lock);
}
__set_current_state(TASK_RUNNING);
raw_spin_unlock_irq(&sem->wait_lock);
lockevent_inc(rwsem_wlock);
trace_contention_end(sem, 0);
return sem;
out_nolock:
__set_current_state(TASK_RUNNING);
raw_spin_lock_irq(&sem->wait_lock);
rwsem_del_wake_waiter(sem, &waiter, &wake_q);
lockevent_inc(rwsem_wlock_fail);
trace_contention_end(sem, -EINTR);
return ERR_PTR(-EINTR);
}
/*
* handle waking up a waiter on the semaphore
* - up_read/up_write has decremented the active part of count if we come here
*/
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
DEFINE_WAKE_Q(wake_q);
raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (!list_empty(&sem->wait_list))
rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
wake_up_q(&wake_q);
return sem;
}
/*
* downgrade a write lock into a read lock
* - caller incremented waiting part of count and discovered it still negative
* - just wake up any readers at the front of the queue
*/
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
DEFINE_WAKE_Q(wake_q);
raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (!list_empty(&sem->wait_list))
rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
wake_up_q(&wake_q);
return sem;
}
/*
* lock for reading
*/
static __always_inline int __down_read_common(struct rw_semaphore *sem, int state)
{
int ret = 0;
long count;
preempt_disable();
if (!rwsem_read_trylock(sem, &count)) {
if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
ret = -EINTR;
goto out;
}
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
}
out:
preempt_enable();
return ret;
}
static __always_inline void __down_read(struct rw_semaphore *sem)
{
__down_read_common(sem, TASK_UNINTERRUPTIBLE);
}
static __always_inline int __down_read_interruptible(struct rw_semaphore *sem)
{
return __down_read_common(sem, TASK_INTERRUPTIBLE);
}
static __always_inline int __down_read_killable(struct rw_semaphore *sem)
{
return __down_read_common(sem, TASK_KILLABLE);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
int ret = 0;
long tmp;
DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
preempt_disable();
tmp = atomic_long_read(&sem->count);
while (!(tmp & RWSEM_READ_FAILED_MASK)) {
if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
tmp + RWSEM_READER_BIAS)) {
rwsem_set_reader_owned(sem);
ret = 1;
break;
}
}
preempt_enable();
return ret;
}
/*
* lock for writing
*/
static inline int __down_write_common(struct rw_semaphore *sem, int state)
{
int ret = 0;
preempt_disable();
if (unlikely(!rwsem_write_trylock(sem))) {
if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
ret = -EINTR;
}
preempt_enable();
return ret;
}
static inline void __down_write(struct rw_semaphore *sem)
{
__down_write_common(sem, TASK_UNINTERRUPTIBLE);
}
static inline int __down_write_killable(struct rw_semaphore *sem)
{
return __down_write_common(sem, TASK_KILLABLE);
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
int ret;
preempt_disable();
DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
ret = rwsem_write_trylock(sem);
preempt_enable();
return ret;
}
/*
* unlock after reading
*/
static inline void __up_read(struct rw_semaphore *sem)
{
long tmp;
DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
preempt_disable();
rwsem_clear_reader_owned(sem);
tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
RWSEM_FLAG_WAITERS)) {
clear_nonspinnable(sem);
rwsem_wake(sem);
}
preempt_enable();
}
/*
* unlock after writing
*/
static inline void __up_write(struct rw_semaphore *sem)
{
long tmp;
DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
/*
* sem->owner may differ from current if the ownership is transferred
* to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
*/
DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
!rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
preempt_disable();
rwsem_clear_owner(sem);
tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
if (unlikely(tmp & RWSEM_FLAG_WAITERS))
rwsem_wake(sem);
preempt_enable();
}
/*
* downgrade write lock to read lock
*/
static inline void __downgrade_write(struct rw_semaphore *sem)
{
long tmp;
/*
* When downgrading from exclusive to shared ownership,
* anything inside the write-locked region cannot leak
* into the read side. In contrast, anything in the
* read-locked region is ok to be re-ordered into the
* write side. As such, rely on RELEASE semantics.
*/
DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
preempt_disable();
tmp = atomic_long_fetch_add_release(
-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
rwsem_set_reader_owned(sem);
if (tmp & RWSEM_FLAG_WAITERS)
rwsem_downgrade_wake(sem);
preempt_enable();
}
#else /* !CONFIG_PREEMPT_RT */
#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"
#define rwbase_set_and_save_current_state(state) \
set_current_state(state)
#define rwbase_restore_current_state() \
__set_current_state(TASK_RUNNING)
#define rwbase_rtmutex_lock_state(rtm, state) \
__rt_mutex_lock(rtm, state)
#define rwbase_rtmutex_slowlock_locked(rtm, state) \
__rt_mutex_slowlock_locked(rtm, NULL, state)
#define rwbase_rtmutex_unlock(rtm) \
__rt_mutex_unlock(rtm)
#define rwbase_rtmutex_trylock(rtm) \
__rt_mutex_trylock(rtm)
#define rwbase_signal_pending_state(state, current) \
signal_pending_state(state, current)
#define rwbase_schedule() \
schedule()
#include "rwbase_rt.c"
void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key)
{
init_rwbase_rt(&(sem)->rwbase);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
}
EXPORT_SYMBOL(__init_rwsem);
static inline void __down_read(struct rw_semaphore *sem)
{
rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}
static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
}
static inline int __down_read_killable(struct rw_semaphore *sem)
{
return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
return rwbase_read_trylock(&sem->rwbase);
}
static inline void __up_read(struct rw_semaphore *sem)
{
rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
}
static inline void __sched __down_write(struct rw_semaphore *sem)
{
rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}
static inline int __sched __down_write_killable(struct rw_semaphore *sem)
{
return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
return rwbase_write_trylock(&sem->rwbase);
}
static inline void __up_write(struct rw_semaphore *sem)
{
rwbase_write_unlock(&sem->rwbase);
}
static inline void __downgrade_write(struct rw_semaphore *sem)
{
rwbase_write_downgrade(&sem->rwbase);
}
/* Debug stubs for the common API */
#define DEBUG_RWSEMS_WARN_ON(c, sem)
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
struct task_struct *owner)
{
}
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
int count = atomic_read(&sem->rwbase.readers);
return count < 0 && count != READER_BIAS;
}
#endif /* CONFIG_PREEMPT_RT */
/*
* lock for reading
*/
void __sched down_read(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);
int __sched down_read_interruptible(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
rwsem_release(&sem->dep_map, _RET_IP_);
return -EINTR;
}
return 0;
}
EXPORT_SYMBOL(down_read_interruptible);
int __sched down_read_killable(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
rwsem_release(&sem->dep_map, _RET_IP_);
return -EINTR;
}
return 0;
}
EXPORT_SYMBOL(down_read_killable);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
int down_read_trylock(struct rw_semaphore *sem)
{
int ret = __down_read_trylock(sem);
if (ret == 1)
rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(down_read_trylock);
/*
* lock for writing
*/
void __sched down_write(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);
/*
* lock for writing
*/
int __sched down_write_killable(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
__down_write_killable)) {
rwsem_release(&sem->dep_map, _RET_IP_);
return -EINTR;
}
return 0;
}
EXPORT_SYMBOL(down_write_killable);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
int down_write_trylock(struct rw_semaphore *sem)
{
int ret = __down_write_trylock(sem);
if (ret == 1)
rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(down_write_trylock);
/*
* release a read lock
*/
void up_read(struct rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, _RET_IP_);
__up_read(sem);
}
EXPORT_SYMBOL(up_read);
/*
* release a write lock
*/
void up_write(struct rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, _RET_IP_);
__up_write(sem);
}
EXPORT_SYMBOL(up_write);
/*
* downgrade write lock to read lock
*/
void downgrade_write(struct rw_semaphore *sem)
{
lock_downgrade(&sem->dep_map, _RET_IP_);
__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);
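/*
 * Editor's usage sketch, not part of the original file; everything except
 * the rwsem API itself is hypothetical. A typical writer-then-downgrade
 * sequence looks like:
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_write(&example_sem);	// exclusive phase: update the data
 *	update_data();			// hypothetical helper
 *	downgrade_write(&example_sem);	// keep reading, let other readers in
 *	consume_data();			// hypothetical helper
 *	up_read(&example_sem);		// pairs with the downgrade
 */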
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void down_read_nested(struct rw_semaphore *sem, int subclass)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);
int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
rwsem_release(&sem->dep_map, _RET_IP_);
return -EINTR;
}
return 0;
}
EXPORT_SYMBOL(down_read_killable_nested);
void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
might_sleep();
rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);
void down_read_non_owner(struct rw_semaphore *sem)
{
might_sleep();
__down_read(sem);
/*
	 * The owner value for a reader-owned lock is mostly for debugging
	 * purposes only and is not critical to the correct functioning of
* rwsem. So it is perfectly fine to set it in a preempt-enabled
* context here.
*/
__rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);
void down_write_nested(struct rw_semaphore *sem, int subclass)
{
might_sleep();
rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);
int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
might_sleep();
rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
__down_write_killable)) {
rwsem_release(&sem->dep_map, _RET_IP_);
return -EINTR;
}
return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);
void up_read_non_owner(struct rw_semaphore *sem)
{
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);
#endif
| linux-master | kernel/locking/rwsem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Simple CPU accounting cgroup controller
*/
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
* There are no locks covering percpu hardirq/softirq time.
* They are only modified in vtime_account, on corresponding CPU
* with interrupts disabled. So, writes are safe.
* They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get the old
 * or the new value, with a side effect of accounting a slice of irq time
 * to the wrong task when an irq is in progress while we read rq->clock.
 * That is a worthy compromise in place of having locks on each irq in
 * account_system_time.
*/
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
static int sched_clock_irqtime;
void enable_sched_clock_irqtime(void)
{
sched_clock_irqtime = 1;
}
void disable_sched_clock_irqtime(void)
{
sched_clock_irqtime = 0;
}
static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
enum cpu_usage_stat idx)
{
u64 *cpustat = kcpustat_this_cpu->cpustat;
u64_stats_update_begin(&irqtime->sync);
cpustat[idx] += delta;
irqtime->total += delta;
irqtime->tick_delta += delta;
u64_stats_update_end(&irqtime->sync);
}
/*
* Called after incrementing preempt_count on {soft,}irq_enter
* and before decrementing preempt_count on {soft,}irq_exit.
*/
void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
{
struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
unsigned int pc;
s64 delta;
int cpu;
if (!sched_clock_irqtime)
return;
cpu = smp_processor_id();
delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
irqtime->irq_start_time += delta;
pc = irq_count() - offset;
/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with a
	 * special task that does not consume any time, but still wants to
	 * run.
*/
if (pc & HARDIRQ_MASK)
irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
}
static u64 irqtime_tick_accounted(u64 maxtime)
{
struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
u64 delta;
delta = min(irqtime->tick_delta, maxtime);
irqtime->tick_delta -= delta;
return delta;
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
#define sched_clock_irqtime (0)
static u64 irqtime_tick_accounted(u64 dummy)
{
return 0;
}
#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
static inline void task_group_account_field(struct task_struct *p, int index,
u64 tmp)
{
/*
* Since all updates are sure to touch the root cgroup, we
* get ourselves ahead and touch it first. If the root cgroup
* is the only cgroup, then nothing else should be necessary.
*
*/
__this_cpu_add(kernel_cpustat.cpustat[index], tmp);
cgroup_account_cputime_field(p, index, tmp);
}
/*
* Account user CPU time to a process.
* @p: the process that the CPU time gets accounted to
* @cputime: the CPU time spent in user space since the last update
*/
void account_user_time(struct task_struct *p, u64 cputime)
{
int index;
/* Add user time to process. */
p->utime += cputime;
account_group_user_time(p, cputime);
index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
/* Add user time to cpustat. */
task_group_account_field(p, index, cputime);
/* Account for user time used */
acct_account_cputime(p);
}
/*
* Account guest CPU time to a process.
* @p: the process that the CPU time gets accounted to
* @cputime: the CPU time spent in virtual machine since the last update
*/
void account_guest_time(struct task_struct *p, u64 cputime)
{
u64 *cpustat = kcpustat_this_cpu->cpustat;
/* Add guest time to process. */
p->utime += cputime;
account_group_user_time(p, cputime);
p->gtime += cputime;
/* Add guest time to cpustat. */
if (task_nice(p) > 0) {
task_group_account_field(p, CPUTIME_NICE, cputime);
cpustat[CPUTIME_GUEST_NICE] += cputime;
} else {
task_group_account_field(p, CPUTIME_USER, cputime);
cpustat[CPUTIME_GUEST] += cputime;
}
}
/*
* Account system CPU time to a process and desired cpustat field
* @p: the process that the CPU time gets accounted to
* @cputime: the CPU time spent in kernel space since the last update
 * @index: the cpustat field that has to be updated
*/
void account_system_index_time(struct task_struct *p,
u64 cputime, enum cpu_usage_stat index)
{
/* Add system time to process. */
p->stime += cputime;
account_group_system_time(p, cputime);
/* Add system time to cpustat. */
task_group_account_field(p, index, cputime);
/* Account for system time used */
acct_account_cputime(p);
}
/*
* Account system CPU time to a process.
* @p: the process that the CPU time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
* @cputime: the CPU time spent in kernel space since the last update
*/
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
int index;
if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
account_guest_time(p, cputime);
return;
}
if (hardirq_count() - hardirq_offset)
index = CPUTIME_IRQ;
else if (in_serving_softirq())
index = CPUTIME_SOFTIRQ;
else
index = CPUTIME_SYSTEM;
account_system_index_time(p, cputime, index);
}
/*
* Account for involuntary wait time.
* @cputime: the CPU time spent in involuntary wait
*/
void account_steal_time(u64 cputime)
{
u64 *cpustat = kcpustat_this_cpu->cpustat;
cpustat[CPUTIME_STEAL] += cputime;
}
/*
* Account for idle time.
* @cputime: the CPU time spent in idle wait
*/
void account_idle_time(u64 cputime)
{
u64 *cpustat = kcpustat_this_cpu->cpustat;
struct rq *rq = this_rq();
if (atomic_read(&rq->nr_iowait) > 0)
cpustat[CPUTIME_IOWAIT] += cputime;
else
cpustat[CPUTIME_IDLE] += cputime;
}
#ifdef CONFIG_SCHED_CORE
/*
* Account for forceidle time due to core scheduling.
*
* REQUIRES: schedstat is enabled.
*/
void __account_forceidle_time(struct task_struct *p, u64 delta)
{
__schedstat_add(p->stats.core_forceidle_sum, delta);
task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
}
#endif
/*
* When a guest is interrupted for a longer amount of time, missed clock
* ticks are not redelivered later. Due to that, this function may on
* occasion account more time than the calling functions think elapsed.
*/
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
if (static_key_false(¶virt_steal_enabled)) {
u64 steal;
steal = paravirt_steal_clock(smp_processor_id());
steal -= this_rq()->prev_steal_time;
steal = min(steal, maxtime);
account_steal_time(steal);
this_rq()->prev_steal_time += steal;
return steal;
}
#endif
return 0;
}
/*
* Account how much elapsed time was spent in steal, irq, or softirq time.
*/
static inline u64 account_other_time(u64 max)
{
u64 accounted;
lockdep_assert_irqs_disabled();
accounted = steal_account_process_time(max);
if (accounted < max)
accounted += irqtime_tick_accounted(max - accounted);
return accounted;
}
#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
return t->se.sum_exec_runtime;
}
#else
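/*
 * Editor's note, not part of the original file: on 32-bit kernels a 64-bit
 * load is not atomic and sum_exec_runtime may be updated concurrently by the
 * scheduler, so the task's rq lock is taken to avoid reading a torn value.
 */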
static u64 read_sum_exec_runtime(struct task_struct *t)
{
u64 ns;
struct rq_flags rf;
struct rq *rq;
rq = task_rq_lock(t, &rf);
ns = t->se.sum_exec_runtime;
task_rq_unlock(rq, t, &rf);
return ns;
}
#endif
/*
* Accumulate raw cputime values of dead tasks (sig->[us]time) and live
* tasks (sum on group iteration) belonging to @tsk's group.
*/
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
struct signal_struct *sig = tsk->signal;
u64 utime, stime;
struct task_struct *t;
unsigned int seq, nextseq;
unsigned long flags;
/*
* Update current task runtime to account pending time since last
* scheduler action or thread_group_cputime() call. This thread group
* might have other running tasks on different CPUs, but updating
	 * their runtime can affect syscall performance, so we skip accounting
* those pending times and rely only on values updated on tick or
* other scheduler action.
*/
if (same_thread_group(current, tsk))
(void) task_sched_runtime(current);
rcu_read_lock();
/* Attempt a lockless read on the first round. */
nextseq = 0;
do {
seq = nextseq;
flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
times->utime = sig->utime;
times->stime = sig->stime;
times->sum_exec_runtime = sig->sum_sched_runtime;
for_each_thread(tsk, t) {
task_cputime(t, &utime, &stime);
times->utime += utime;
times->stime += stime;
times->sum_exec_runtime += read_sum_exec_runtime(t);
}
/* If lockless access failed, take the lock. */
nextseq = 1;
} while (need_seqretry(&sig->stats_lock, seq));
done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
rcu_read_unlock();
}
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
* Account a tick to a process and cpustat
* @p: the process that the CPU time gets accounted to
* @user_tick: is the tick from userspace
* @rq: the pointer to rq
*
* Tick demultiplexing follows the order
* - pending hardirq update
* - pending softirq update
* - user_time
* - idle_time
* - system time
* - check for guest_time
* - else account as system_time
*
 * The check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq or
 * softirq time, as those do not count in task exec_runtime any more.
*/
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
int ticks)
{
u64 other, cputime = TICK_NSEC * ticks;
/*
* When returning from idle, many ticks can get accounted at
* once, including some ticks of steal, irq, and softirq time.
* Subtract those ticks from the amount of time accounted to
* idle, or potentially user or system time. Due to rounding,
* other time can exceed ticks occasionally.
*/
other = account_other_time(ULONG_MAX);
if (other >= cputime)
return;
cputime -= other;
if (this_cpu_ksoftirqd() == p) {
/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
* So, we have to handle it separately here.
* Also, p->stime needs to be updated for ksoftirqd.
*/
account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
} else if (user_tick) {
account_user_time(p, cputime);
} else if (p == this_rq()->idle) {
account_idle_time(cputime);
} else if (p->flags & PF_VCPU) { /* System time or guest time */
account_guest_time(p, cputime);
} else {
account_system_index_time(p, cputime, CPUTIME_SYSTEM);
}
}
static void irqtime_account_idle_ticks(int ticks)
{
irqtime_account_process_tick(current, 0, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
int nr_ticks) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
* Use precise platform statistics if available:
*/
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
if (is_idle_task(prev))
vtime_account_idle(prev);
else
vtime_account_kernel(prev);
vtime_flush(prev);
arch_vtime_task_switch(prev);
}
# endif
void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
{
unsigned int pc = irq_count() - offset;
if (pc & HARDIRQ_OFFSET) {
vtime_account_hardirq(tsk);
} else if (pc & SOFTIRQ_OFFSET) {
vtime_account_softirq(tsk);
} else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
is_idle_task(tsk)) {
vtime_account_idle(tsk);
} else {
vtime_account_kernel(tsk);
}
}
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
u64 *ut, u64 *st)
{
*ut = curr->utime;
*st = curr->stime;
}
void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
*ut = p->utime;
*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
struct task_cputime cputime;
thread_group_cputime(p, &cputime);
*ut = cputime.utime;
*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
/*
* Account a single tick of CPU time.
* @p: the process that the CPU time gets accounted to
* @user_tick: indicates if the tick is a user or a system tick
*/
void account_process_tick(struct task_struct *p, int user_tick)
{
u64 cputime, steal;
if (vtime_accounting_enabled_this_cpu())
return;
if (sched_clock_irqtime) {
irqtime_account_process_tick(p, user_tick, 1);
return;
}
cputime = TICK_NSEC;
steal = steal_account_process_time(ULONG_MAX);
if (steal >= cputime)
return;
cputime -= steal;
if (user_tick)
account_user_time(p, cputime);
else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
account_system_time(p, HARDIRQ_OFFSET, cputime);
else
account_idle_time(cputime);
}
/*
* Account multiple ticks of idle time.
* @ticks: number of stolen ticks
*/
void account_idle_ticks(unsigned long ticks)
{
u64 cputime, steal;
if (sched_clock_irqtime) {
irqtime_account_idle_ticks(ticks);
return;
}
cputime = ticks * TICK_NSEC;
steal = steal_account_process_time(ULONG_MAX);
if (steal >= cputime)
return;
cputime -= steal;
account_idle_time(cputime);
}
/*
* Adjust tick based cputime random precision against scheduler runtime
* accounting.
*
 * Tick based cputime accounting depends on whether the random scheduling
 * timeslices of a task are interrupted or not by the timer. Depending on
 * these circumstances, the number of these interrupts may be over- or
 * under-estimated, matching the real user and system cputime with a
 * variable precision.
*
* Fix this by scaling these tick based values against the total runtime
* accounted by the CFS scheduler.
*
* This code provides the following guarantees:
*
* stime + utime == rtime
* stime_i+1 >= stime_i, utime_i+1 >= utime_i
*
* Assuming that rtime_i+1 >= rtime_i.
*/
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
u64 *ut, u64 *st)
{
u64 rtime, stime, utime;
unsigned long flags;
/* Serialize concurrent callers such that we can honour our guarantees */
raw_spin_lock_irqsave(&prev->lock, flags);
rtime = curr->sum_exec_runtime;
/*
* This is possible under two circumstances:
* - rtime isn't monotonic after all (a bug);
* - we got reordered by the lock.
*
* In both cases this acts as a filter such that the rest of the code
* can assume it is monotonic regardless of anything else.
*/
if (prev->stime + prev->utime >= rtime)
goto out;
stime = curr->stime;
utime = curr->utime;
/*
* If either stime or utime are 0, assume all runtime is userspace.
* Once a task gets some ticks, the monotonicity code at 'update:'
* will ensure things converge to the observed ratio.
*/
if (stime == 0) {
utime = rtime;
goto update;
}
if (utime == 0) {
stime = rtime;
goto update;
}
stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
update:
/*
* Make sure stime doesn't go backwards; this preserves monotonicity
* for utime because rtime is monotonic.
*
* utime_i+1 = rtime_i+1 - stime_i
* = rtime_i+1 - (rtime_i - utime_i)
* = (rtime_i+1 - rtime_i) + utime_i
* >= utime_i
*/
if (stime < prev->stime)
stime = prev->stime;
utime = rtime - stime;
/*
* Make sure utime doesn't go backwards; this still preserves
* monotonicity for stime, analogous argument to above.
*/
if (utime < prev->utime) {
utime = prev->utime;
stime = rtime - utime;
}
prev->stime = stime;
prev->utime = utime;
out:
*ut = prev->utime;
*st = prev->stime;
raw_spin_unlock_irqrestore(&prev->lock, flags);
}
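/*
 * Editor's worked example for the scaling above, not part of the original
 * file: with tick samples stime = 2 and utime = 6 but only rtime = 4 of
 * real runtime, stime is scaled to 2 * 4 / (2 + 6) = 1 and utime becomes
 * rtime - stime = 3, preserving stime + utime == rtime while keeping the
 * observed 1:3 ratio.
 */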
void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
struct task_cputime cputime = {
.sum_exec_runtime = p->se.sum_exec_runtime,
};
if (task_cputime(p, &cputime.utime, &cputime.stime))
cputime.sum_exec_runtime = task_sched_runtime(p);
cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
struct task_cputime cputime;
thread_group_cputime(p, &cputime);
cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct vtime *vtime)
{
unsigned long long clock;
clock = sched_clock();
if (clock < vtime->starttime)
return 0;
return clock - vtime->starttime;
}
static u64 get_vtime_delta(struct vtime *vtime)
{
u64 delta = vtime_delta(vtime);
u64 other;
/*
 * Unlike tick based timing, vtime based timing never has lost
 * ticks, so there is no need for steal time accounting to make
 * up for them. Vtime accounts a rounded version of the actual
 * elapsed time. Limit account_other_time to prevent rounding
 * errors from causing elapsed vtime to go negative.
*/
other = account_other_time(delta);
WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
vtime->starttime += delta;
return delta - other;
}
static void vtime_account_system(struct task_struct *tsk,
struct vtime *vtime)
{
vtime->stime += get_vtime_delta(vtime);
if (vtime->stime >= TICK_NSEC) {
account_system_time(tsk, irq_count(), vtime->stime);
vtime->stime = 0;
}
}
static void vtime_account_guest(struct task_struct *tsk,
struct vtime *vtime)
{
vtime->gtime += get_vtime_delta(vtime);
if (vtime->gtime >= TICK_NSEC) {
account_guest_time(tsk, vtime->gtime);
vtime->gtime = 0;
}
}
static void __vtime_account_kernel(struct task_struct *tsk,
struct vtime *vtime)
{
/* We might have scheduled out from guest path */
if (vtime->state == VTIME_GUEST)
vtime_account_guest(tsk, vtime);
else
vtime_account_system(tsk, vtime);
}
void vtime_account_kernel(struct task_struct *tsk)
{
struct vtime *vtime = &tsk->vtime;
if (!vtime_delta(vtime))
return;
write_seqcount_begin(&vtime->seqcount);
__vtime_account_kernel(tsk, vtime);
write_seqcount_end(&vtime->seqcount);
}
void vtime_user_enter(struct task_struct *tsk)
{
struct vtime *vtime = &tsk->vtime;
write_seqcount_begin(&vtime->seqcount);
vtime_account_system(tsk, vtime);
vtime->state = VTIME_USER;
write_seqcount_end(&vtime->seqcount);
}
void vtime_user_exit(struct task_struct *tsk)
{
struct vtime *vtime = &tsk->vtime;
write_seqcount_begin(&vtime->seqcount);
vtime->utime += get_vtime_delta(vtime);
if (vtime->utime >= TICK_NSEC) {
account_user_time(tsk, vtime->utime);
vtime->utime = 0;
}
vtime->state = VTIME_SYS;
write_seqcount_end(&vtime->seqcount);
}
void vtime_guest_enter(struct task_struct *tsk)
{
struct vtime *vtime = &tsk->vtime;
/*
 * The flags must be updated under the lock, together with
 * the vtime_starttime flush and update. That enforces the
 * right ordering and update-sequence synchronization against
 * the reader (task_gtime()), which can thus safely catch up
 * with a tickless delta.
*/
write_seqcount_begin(&vtime->seqcount);
vtime_account_system(tsk, vtime);
tsk->flags |= PF_VCPU;
vtime->state = VTIME_GUEST;
write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);
void vtime_guest_exit(struct task_struct *tsk)
{
struct vtime *vtime = &tsk->vtime;
write_seqcount_begin(&vtime->seqcount);
vtime_account_guest(tsk, vtime);
tsk->flags &= ~PF_VCPU;
vtime->state = VTIME_SYS;
write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
void vtime_account_idle(struct task_struct *tsk)
{
account_idle_time(get_vtime_delta(&tsk->vtime));
}
void vtime_task_switch_generic(struct task_struct *prev)
{
struct vtime *vtime = &prev->vtime;
write_seqcount_begin(&vtime->seqcount);
if (vtime->state == VTIME_IDLE)
vtime_account_idle(prev);
else
__vtime_account_kernel(prev, vtime);
vtime->state = VTIME_INACTIVE;
vtime->cpu = -1;
write_seqcount_end(&vtime->seqcount);
vtime = &current->vtime;
write_seqcount_begin(&vtime->seqcount);
if (is_idle_task(current))
vtime->state = VTIME_IDLE;
else if (current->flags & PF_VCPU)
vtime->state = VTIME_GUEST;
else
vtime->state = VTIME_SYS;
vtime->starttime = sched_clock();
vtime->cpu = smp_processor_id();
write_seqcount_end(&vtime->seqcount);
}
void vtime_init_idle(struct task_struct *t, int cpu)
{
struct vtime *vtime = &t->vtime;
unsigned long flags;
local_irq_save(flags);
write_seqcount_begin(&vtime->seqcount);
vtime->state = VTIME_IDLE;
vtime->starttime = sched_clock();
vtime->cpu = cpu;
write_seqcount_end(&vtime->seqcount);
local_irq_restore(flags);
}
u64 task_gtime(struct task_struct *t)
{
struct vtime *vtime = &t->vtime;
unsigned int seq;
u64 gtime;
if (!vtime_accounting_enabled())
return t->gtime;
do {
seq = read_seqcount_begin(&vtime->seqcount);
gtime = t->gtime;
if (vtime->state == VTIME_GUEST)
gtime += vtime->gtime + vtime_delta(vtime);
} while (read_seqcount_retry(&vtime->seqcount, seq));
return gtime;
}
/*
* Fetch cputime raw values from fields of task_struct and
* add up the pending nohz execution time since the last
* cputime snapshot.
*/
bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
struct vtime *vtime = &t->vtime;
unsigned int seq;
u64 delta;
int ret;
if (!vtime_accounting_enabled()) {
*utime = t->utime;
*stime = t->stime;
return false;
}
do {
ret = false;
seq = read_seqcount_begin(&vtime->seqcount);
*utime = t->utime;
*stime = t->stime;
/* Task is sleeping or idle, nothing to add */
if (vtime->state < VTIME_SYS)
continue;
ret = true;
delta = vtime_delta(vtime);
/*
* Task runs either in user (including guest) or kernel space,
* add pending nohz time to the right place.
*/
if (vtime->state == VTIME_SYS)
*stime += vtime->stime + delta;
else
*utime += vtime->utime + delta;
} while (read_seqcount_retry(&vtime->seqcount, seq));
return ret;
}
static int vtime_state_fetch(struct vtime *vtime, int cpu)
{
int state = READ_ONCE(vtime->state);
/*
* We raced against a context switch, fetch the
* kcpustat task again.
*/
if (vtime->cpu != cpu && vtime->cpu != -1)
return -EAGAIN;
/*
* Two possible things here:
* 1) We are seeing the scheduling out task (prev) or any past one.
* 2) We are seeing the scheduling in task (next) but it hasn't
 * passed through vtime_task_switch() yet, so the pending
* cputime of the prev task may not be flushed yet.
*
* Case 1) is ok but 2) is not. So wait for a safe VTIME state.
*/
if (state == VTIME_INACTIVE)
return -EAGAIN;
return state;
}
static u64 kcpustat_user_vtime(struct vtime *vtime)
{
if (vtime->state == VTIME_USER)
return vtime->utime + vtime_delta(vtime);
else if (vtime->state == VTIME_GUEST)
return vtime->gtime + vtime_delta(vtime);
return 0;
}
static int kcpustat_field_vtime(u64 *cpustat,
struct task_struct *tsk,
enum cpu_usage_stat usage,
int cpu, u64 *val)
{
struct vtime *vtime = &tsk->vtime;
unsigned int seq;
do {
int state;
seq = read_seqcount_begin(&vtime->seqcount);
state = vtime_state_fetch(vtime, cpu);
if (state < 0)
return state;
*val = cpustat[usage];
/*
 * Nice vs unnice cputime accounting may be inaccurate if
 * the nice value has changed since the last vtime update.
 * But a proper fix would involve interrupting the target on nice
 * updates, which is a no-go on nohz_full (although the scheduler
 * may still interrupt the target if rescheduling is needed...)
*/
switch (usage) {
case CPUTIME_SYSTEM:
if (state == VTIME_SYS)
*val += vtime->stime + vtime_delta(vtime);
break;
case CPUTIME_USER:
if (task_nice(tsk) <= 0)
*val += kcpustat_user_vtime(vtime);
break;
case CPUTIME_NICE:
if (task_nice(tsk) > 0)
*val += kcpustat_user_vtime(vtime);
break;
case CPUTIME_GUEST:
if (state == VTIME_GUEST && task_nice(tsk) <= 0)
*val += vtime->gtime + vtime_delta(vtime);
break;
case CPUTIME_GUEST_NICE:
if (state == VTIME_GUEST && task_nice(tsk) > 0)
*val += vtime->gtime + vtime_delta(vtime);
break;
default:
break;
}
} while (read_seqcount_retry(&vtime->seqcount, seq));
return 0;
}
u64 kcpustat_field(struct kernel_cpustat *kcpustat,
enum cpu_usage_stat usage, int cpu)
{
u64 *cpustat = kcpustat->cpustat;
u64 val = cpustat[usage];
struct rq *rq;
int err;
if (!vtime_accounting_enabled_cpu(cpu))
return val;
rq = cpu_rq(cpu);
for (;;) {
struct task_struct *curr;
rcu_read_lock();
curr = rcu_dereference(rq->curr);
if (WARN_ON_ONCE(!curr)) {
rcu_read_unlock();
return cpustat[usage];
}
err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
rcu_read_unlock();
if (!err)
return val;
cpu_relax();
}
}
EXPORT_SYMBOL_GPL(kcpustat_field);
static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
const struct kernel_cpustat *src,
struct task_struct *tsk, int cpu)
{
struct vtime *vtime = &tsk->vtime;
unsigned int seq;
do {
u64 *cpustat;
u64 delta;
int state;
seq = read_seqcount_begin(&vtime->seqcount);
state = vtime_state_fetch(vtime, cpu);
if (state < 0)
return state;
*dst = *src;
cpustat = dst->cpustat;
/* Task is sleeping, dead or idle, nothing to add */
if (state < VTIME_SYS)
continue;
delta = vtime_delta(vtime);
/*
* Task runs either in user (including guest) or kernel space,
* add pending nohz time to the right place.
*/
if (state == VTIME_SYS) {
cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
} else if (state == VTIME_USER) {
if (task_nice(tsk) > 0)
cpustat[CPUTIME_NICE] += vtime->utime + delta;
else
cpustat[CPUTIME_USER] += vtime->utime + delta;
} else {
WARN_ON_ONCE(state != VTIME_GUEST);
if (task_nice(tsk) > 0) {
cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
cpustat[CPUTIME_NICE] += vtime->gtime + delta;
} else {
cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
cpustat[CPUTIME_USER] += vtime->gtime + delta;
}
}
} while (read_seqcount_retry(&vtime->seqcount, seq));
return 0;
}
void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
struct rq *rq;
int err;
if (!vtime_accounting_enabled_cpu(cpu)) {
*dst = *src;
return;
}
rq = cpu_rq(cpu);
for (;;) {
struct task_struct *curr;
rcu_read_lock();
curr = rcu_dereference(rq->curr);
if (WARN_ON_ONCE(!curr)) {
rcu_read_unlock();
*dst = *src;
return;
}
err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
rcu_read_unlock();
if (!err)
return;
cpu_relax();
}
}
EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
| linux-master | kernel/sched/cputime.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* A simple wrapper around refcount. An allocated sched_core_cookie's
* address is used to compute the cookie of the task.
*/
struct sched_core_cookie {
refcount_t refcnt;
};
static unsigned long sched_core_alloc_cookie(void)
{
struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL);
if (!ck)
return 0;
refcount_set(&ck->refcnt, 1);
sched_core_get();
return (unsigned long)ck;
}
static void sched_core_put_cookie(unsigned long cookie)
{
struct sched_core_cookie *ptr = (void *)cookie;
if (ptr && refcount_dec_and_test(&ptr->refcnt)) {
kfree(ptr);
sched_core_put();
}
}
static unsigned long sched_core_get_cookie(unsigned long cookie)
{
struct sched_core_cookie *ptr = (void *)cookie;
if (ptr)
refcount_inc(&ptr->refcnt);
return cookie;
}
/*
* sched_core_update_cookie - replace the cookie on a task
* @p: the task to update
* @cookie: the new cookie
*
* Effectively exchange the task cookie; caller is responsible for lifetimes on
* both ends.
*
* Returns: the old cookie
*/
static unsigned long sched_core_update_cookie(struct task_struct *p,
unsigned long cookie)
{
unsigned long old_cookie;
struct rq_flags rf;
struct rq *rq;
rq = task_rq_lock(p, &rf);
/*
 * Since creating a cookie implies sched_core_get(), and we cannot set
 * a cookie until after we've created it (and, similarly, cannot
 * destroy a cookie until after we've removed it), we must have core
 * scheduling enabled here.
 */
SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq));
if (sched_core_enqueued(p))
sched_core_dequeue(rq, p, DEQUEUE_SAVE);
old_cookie = p->core_cookie;
p->core_cookie = cookie;
/*
* Consider the cases: !prev_cookie and !cookie.
*/
if (cookie && task_on_rq_queued(p))
sched_core_enqueue(rq, p);
/*
* If task is currently running, it may not be compatible anymore after
* the cookie change, so enter the scheduler on its CPU to schedule it
* away.
*
* Note that it is possible that as a result of this cookie change, the
* core has now entered/left forced idle state. Defer accounting to the
* next scheduling edge, rather than always forcing a reschedule here.
*/
if (task_on_cpu(rq, p))
resched_curr(rq);
task_rq_unlock(rq, p, &rf);
return old_cookie;
}
static unsigned long sched_core_clone_cookie(struct task_struct *p)
{
unsigned long cookie, flags;
raw_spin_lock_irqsave(&p->pi_lock, flags);
cookie = sched_core_get_cookie(p->core_cookie);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
return cookie;
}
void sched_core_fork(struct task_struct *p)
{
RB_CLEAR_NODE(&p->core_node);
p->core_cookie = sched_core_clone_cookie(current);
}
void sched_core_free(struct task_struct *p)
{
sched_core_put_cookie(p->core_cookie);
}
static void __sched_core_set(struct task_struct *p, unsigned long cookie)
{
cookie = sched_core_get_cookie(cookie);
cookie = sched_core_update_cookie(p, cookie);
sched_core_put_cookie(cookie);
}
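/*
 * Note the reference choreography above: sched_core_get_cookie() takes a
 * reference on the new cookie before it is installed,
 * sched_core_update_cookie() swaps it in and hands back the old cookie,
 * and sched_core_put_cookie() then drops the reference the task used to
 * hold. The net effect is a clean exchange of cookie ownership.
 */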
/* Called from prctl interface: PR_SCHED_CORE */
int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
unsigned long uaddr)
{
unsigned long cookie = 0, id = 0;
struct task_struct *task, *p;
struct pid *grp;
int err = 0;
if (!static_branch_likely(&sched_smt_present))
return -ENODEV;
BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_THREAD != PIDTYPE_PID);
BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_THREAD_GROUP != PIDTYPE_TGID);
BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_PROCESS_GROUP != PIDTYPE_PGID);
if (type > PIDTYPE_PGID || cmd >= PR_SCHED_CORE_MAX || pid < 0 ||
(cmd != PR_SCHED_CORE_GET && uaddr))
return -EINVAL;
rcu_read_lock();
if (pid == 0) {
task = current;
} else {
task = find_task_by_vpid(pid);
if (!task) {
rcu_read_unlock();
return -ESRCH;
}
}
get_task_struct(task);
rcu_read_unlock();
/*
* Check if this process has the right to modify the specified
* process. Use the regular "ptrace_may_access()" checks.
*/
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
err = -EPERM;
goto out;
}
switch (cmd) {
case PR_SCHED_CORE_GET:
if (type != PIDTYPE_PID || uaddr & 7) {
err = -EINVAL;
goto out;
}
cookie = sched_core_clone_cookie(task);
if (cookie) {
/* XXX improve ? */
ptr_to_hashval((void *)cookie, &id);
}
err = put_user(id, (u64 __user *)uaddr);
goto out;
case PR_SCHED_CORE_CREATE:
cookie = sched_core_alloc_cookie();
if (!cookie) {
err = -ENOMEM;
goto out;
}
break;
case PR_SCHED_CORE_SHARE_TO:
cookie = sched_core_clone_cookie(current);
break;
case PR_SCHED_CORE_SHARE_FROM:
if (type != PIDTYPE_PID) {
err = -EINVAL;
goto out;
}
cookie = sched_core_clone_cookie(task);
__sched_core_set(current, cookie);
goto out;
default:
err = -EINVAL;
goto out;
}
if (type == PIDTYPE_PID) {
__sched_core_set(task, cookie);
goto out;
}
read_lock(&tasklist_lock);
grp = task_pid_type(task, type);
do_each_pid_thread(grp, type, p) {
if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) {
err = -EPERM;
goto out_tasklist;
}
} while_each_pid_thread(grp, type, p);
do_each_pid_thread(grp, type, p) {
__sched_core_set(p, cookie);
} while_each_pid_thread(grp, type, p);
out_tasklist:
read_unlock(&tasklist_lock);
out:
sched_core_put_cookie(cookie);
put_task_struct(task);
return err;
}
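/*
 * Example userspace usage (a sketch, error handling omitted; see
 * Documentation/admin-guide/hw-vuln/core-scheduling.rst):
 *
 *   // Create a fresh cookie for the caller's whole thread group:
 *   prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
 *         PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0);
 *
 *   // Read back an opaque cookie id for the current thread; uaddr must
 *   // point to an 8-byte aligned u64 and the scope must be THREAD:
 *   unsigned long long id;
 *   prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, 0,
 *         PR_SCHED_CORE_SCOPE_THREAD, (unsigned long)&id);
 */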
#ifdef CONFIG_SCHEDSTATS
/* REQUIRES: rq->core's clock recently updated. */
void __sched_core_account_forceidle(struct rq *rq)
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
u64 delta, now = rq_clock(rq->core);
struct rq *rq_i;
struct task_struct *p;
int i;
lockdep_assert_rq_held(rq);
WARN_ON_ONCE(!rq->core->core_forceidle_count);
if (rq->core->core_forceidle_start == 0)
return;
delta = now - rq->core->core_forceidle_start;
if (unlikely((s64)delta <= 0))
return;
rq->core->core_forceidle_start = now;
if (WARN_ON_ONCE(!rq->core->core_forceidle_occupation)) {
/* can't be forced idle without a running task */
} else if (rq->core->core_forceidle_count > 1 ||
rq->core->core_forceidle_occupation > 1) {
/*
* For larger SMT configurations, we need to scale the charged
* forced idle amount since there can be more than one forced
* idle sibling and more than one running cookied task.
*/
delta *= rq->core->core_forceidle_count;
delta = div_u64(delta, rq->core->core_forceidle_occupation);
}
for_each_cpu(i, smt_mask) {
rq_i = cpu_rq(i);
p = rq_i->core_pick ?: rq_i->curr;
if (p == rq_i->idle)
continue;
/*
* Note: this will account forceidle to the current cpu, even
* if it comes from our SMT sibling.
*/
__account_forceidle_time(p, delta);
}
}
void __sched_core_tick(struct rq *rq)
{
if (!rq->core->core_forceidle_count)
return;
if (rq != rq->core)
update_rq_clock(rq->core);
__sched_core_account_forceidle(rq);
}
#endif /* CONFIG_SCHEDSTATS */
| linux-master | kernel/sched/core_sched.c |
// SPDX-License-Identifier: GPL-2.0
/*
* stop-task scheduling class.
*
* The stop task is the highest priority task in the system, it preempts
* everything and will be preempted by nothing.
*
* See kernel/stop_machine.c
*/
#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int flags)
{
return task_cpu(p); /* stop tasks never migrate */
}
static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
return sched_stop_runnable(rq);
}
#endif /* CONFIG_SMP */
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
/* we're never preempted */
}
static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
stop->se.exec_start = rq_clock_task(rq);
}
static struct task_struct *pick_task_stop(struct rq *rq)
{
if (!sched_stop_runnable(rq))
return NULL;
return rq->stop;
}
static struct task_struct *pick_next_task_stop(struct rq *rq)
{
struct task_struct *p = pick_task_stop(rq);
if (p)
set_next_task_stop(rq, p, true);
return p;
}
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
add_nr_running(rq, 1);
}
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
sub_nr_running(rq, 1);
}
static void yield_task_stop(struct rq *rq)
{
BUG(); /* the stop task should never yield, it's pointless. */
}
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
struct task_struct *curr = rq->curr;
u64 now, delta_exec;
now = rq_clock_task(rq);
delta_exec = now - curr->se.exec_start;
if (unlikely((s64)delta_exec < 0))
delta_exec = 0;
schedstat_set(curr->stats.exec_max,
max(curr->stats.exec_max, delta_exec));
update_current_exec_runtime(curr, now, delta_exec);
}
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
BUG(); /* it's impossible to change to this class */
}
static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
BUG(); /* how!?, what priority? */
}
static void update_curr_stop(struct rq *rq)
{
}
/*
* Simple, special scheduling class for the per-CPU stop tasks:
*/
DEFINE_SCHED_CLASS(stop) = {
.enqueue_task = enqueue_task_stop,
.dequeue_task = dequeue_task_stop,
.yield_task = yield_task_stop,
.check_preempt_curr = check_preempt_curr_stop,
.pick_next_task = pick_next_task_stop,
.put_prev_task = put_prev_task_stop,
.set_next_task = set_next_task_stop,
#ifdef CONFIG_SMP
.balance = balance_stop,
.pick_task = pick_task_stop,
.select_task_rq = select_task_rq_stop,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
.task_tick = task_tick_stop,
.prio_changed = prio_changed_stop,
.switched_to = switched_to_stop,
.update_curr = update_curr_stop,
};
| linux-master | kernel/sched/stop_task.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Deadline Scheduling Class (SCHED_DEADLINE)
*
* Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
*
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic, or that try to execute more
 * than their reserved bandwidth, will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
*
* Copyright (C) 2012 Dario Faggioli <[email protected]>,
* Juri Lelli <[email protected]>,
* Michael Trimarchi <[email protected]>,
* Fabio Checconi <[email protected]>
*/
#include <linux/cpuset.h>
/*
 * Default limits for DL period: on the top end we guard against small-util
 * tasks still getting ridiculously long effective runtimes, while on the
 * bottom end we guard against timer DoS.
*/
static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
static unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_dl_sysctls[] = {
{
.procname = "sched_deadline_period_max_us",
.data = &sysctl_sched_dl_period_max,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
.extra1 = (void *)&sysctl_sched_dl_period_min,
},
{
.procname = "sched_deadline_period_min_us",
.data = &sysctl_sched_dl_period_min,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
.extra2 = (void *)&sysctl_sched_dl_period_max,
},
{}
};
static int __init sched_dl_sysctl_init(void)
{
register_sysctl_init("kernel", sched_dl_sysctls);
return 0;
}
late_initcall(sched_dl_sysctl_init);
#endif
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
return container_of(dl_se, struct task_struct, dl);
}
static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
return container_of(dl_rq, struct rq, dl);
}
static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
struct task_struct *p = dl_task_of(dl_se);
struct rq *rq = task_rq(p);
return &rq->dl;
}
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
return !RB_EMPTY_NODE(&dl_se->rb_node);
}
#ifdef CONFIG_RT_MUTEXES
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
return dl_se->pi_se;
}
static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
return pi_of(dl_se) != dl_se;
}
#else
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
return dl_se;
}
static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
return false;
}
#endif
#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
"sched RCU must be held");
return &cpu_rq(i)->rd->dl_bw;
}
static inline int dl_bw_cpus(int i)
{
struct root_domain *rd = cpu_rq(i)->rd;
int cpus;
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
"sched RCU must be held");
if (cpumask_subset(rd->span, cpu_active_mask))
return cpumask_weight(rd->span);
cpus = 0;
for_each_cpu_and(i, rd->span, cpu_active_mask)
cpus++;
return cpus;
}
static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
{
unsigned long cap = 0;
int i;
for_each_cpu_and(i, mask, cpu_active_mask)
cap += capacity_orig_of(i);
return cap;
}
/*
 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against the capacity
 * of the CPU the task is running on rather than rd's \Sum CPU capacity.
*/
static inline unsigned long dl_bw_capacity(int i)
{
if (!sched_asym_cpucap_active() &&
capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
} else {
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
"sched RCU must be held");
return __dl_bw_capacity(cpu_rq(i)->rd->span);
}
}
static inline bool dl_bw_visited(int cpu, u64 gen)
{
struct root_domain *rd = cpu_rq(cpu)->rd;
if (rd->visit_gen == gen)
return true;
rd->visit_gen = gen;
return false;
}
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
int i;
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
"sched RCU must be held");
for_each_cpu_and(i, rd->span, cpu_active_mask) {
struct rq *rq = cpu_rq(i);
rq->dl.extra_bw += bw;
}
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
return &cpu_rq(i)->dl.dl_bw;
}
static inline int dl_bw_cpus(int i)
{
return 1;
}
static inline unsigned long dl_bw_capacity(int i)
{
return SCHED_CAPACITY_SCALE;
}
static inline bool dl_bw_visited(int cpu, u64 gen)
{
return false;
}
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
dl->extra_bw += bw;
}
#endif
static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
dl_b->total_bw -= tsk_bw;
__dl_update(dl_b, (s32)tsk_bw / cpus);
}
static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
dl_b->total_bw += tsk_bw;
__dl_update(dl_b, -((s32)tsk_bw / cpus));
}
static inline bool
__dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
{
return dl_b->bw != -1 &&
cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}
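/*
 * Worked example for __dl_overflow() (illustrative numbers): with the
 * default 95% global limit, dl_b->bw is ~0.95 in BW_SHIFT fixed point.
 * On a root domain with a single full-capacity CPU (cap = 1024 =
 * SCHED_CAPACITY_SCALE), cap_scale(dl_b->bw, cap) is still ~0.95.
 * If total_bw already holds 0.5 of utilization and a new task asks for
 * another 0.5, the right-hand side becomes 1.0 > 0.95, the function
 * returns true and admission control rejects the task.
 */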
static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;
lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
dl_rq->running_bw += dl_bw;
SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
/* kick cpufreq (see the comment in kernel/sched/sched.h). */
cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}
static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;
lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
dl_rq->running_bw -= dl_bw;
SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
if (dl_rq->running_bw > old)
dl_rq->running_bw = 0;
/* kick cpufreq (see the comment in kernel/sched/sched.h). */
cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}
static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;
lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
dl_rq->this_bw += dl_bw;
SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}
static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;
lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
dl_rq->this_bw -= dl_bw;
SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
if (dl_rq->this_bw > old)
dl_rq->this_bw = 0;
SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}
static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
if (!dl_entity_is_special(dl_se))
__add_rq_bw(dl_se->dl_bw, dl_rq);
}
static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
if (!dl_entity_is_special(dl_se))
__sub_rq_bw(dl_se->dl_bw, dl_rq);
}
static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
if (!dl_entity_is_special(dl_se))
__add_running_bw(dl_se->dl_bw, dl_rq);
}
static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
if (!dl_entity_is_special(dl_se))
__sub_running_bw(dl_se->dl_bw, dl_rq);
}
static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
struct rq *rq;
WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
if (task_on_rq_queued(p))
return;
rq = task_rq(p);
if (p->dl.dl_non_contending) {
sub_running_bw(&p->dl, &rq->dl);
p->dl.dl_non_contending = 0;
/*
* If the timer handler is currently running and the
* timer cannot be canceled, inactive_task_timer()
 * will see that dl_non_contending is not set, and
* will not touch the rq's active utilization,
* so we are still safe.
*/
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
}
__sub_rq_bw(p->dl.dl_bw, &rq->dl);
__add_rq_bw(new_bw, &rq->dl);
}
/*
* The utilization of a task cannot be immediately removed from
* the rq active utilization (running_bw) when the task blocks.
* Instead, we have to wait for the so called "0-lag time".
*
* If a task blocks before the "0-lag time", a timer (the inactive
* timer) is armed, and running_bw is decreased when the timer
* fires.
*
* If the task wakes up again before the inactive timer fires,
* the timer is canceled, whereas if the task wakes up after the
* inactive timer fired (and running_bw has been decreased) the
* task's utilization has to be added to running_bw again.
* A flag in the deadline scheduling entity (dl_non_contending)
* is used to avoid race conditions between the inactive timer handler
* and task wakeups.
*
* The following diagram shows how running_bw is updated. A task is
* "ACTIVE" when its utilization contributes to running_bw; an
* "ACTIVE contending" task is in the TASK_RUNNING state, while an
* "ACTIVE non contending" task is a blocked task for which the "0-lag time"
* has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
* time already passed, which does not contribute to running_bw anymore.
* +------------------+
* wakeup | ACTIVE |
* +------------------>+ contending |
* | add_running_bw | |
* | +----+------+------+
* | | ^
* | dequeue | |
* +--------+-------+ | |
* | | t >= 0-lag | | wakeup
* | INACTIVE |<---------------+ |
* | | sub_running_bw | |
* +--------+-------+ | |
* ^ | |
* | t < 0-lag | |
* | | |
* | V |
* | +----+------+------+
* | sub_running_bw | ACTIVE |
* +-------------------+ |
* inactive timer | non contending |
* fired +------------------+
*
* The task_non_contending() function is invoked when a task
* blocks, and checks if the 0-lag time already passed or
* not (in the first case, it directly updates running_bw;
* in the second case, it arms the inactive timer).
*
* The task_contending() function is invoked when a task wakes
* up, and checks if the task is still in the "ACTIVE non contending"
* state or not (in the second case, it updates running_bw).
*/
static void task_non_contending(struct task_struct *p)
{
struct sched_dl_entity *dl_se = &p->dl;
struct hrtimer *timer = &dl_se->inactive_timer;
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
s64 zerolag_time;
/*
* If this is a non-deadline task that has been boosted,
* do nothing
*/
if (dl_se->dl_runtime == 0)
return;
if (dl_entity_is_special(dl_se))
return;
WARN_ON(dl_se->dl_non_contending);
zerolag_time = dl_se->deadline -
div64_long((dl_se->runtime * dl_se->dl_period),
dl_se->dl_runtime);
/*
* Using relative times instead of the absolute "0-lag time"
 * allows us to simplify the code
*/
zerolag_time -= rq_clock(rq);
/*
* If the "0-lag time" already passed, decrease the active
* utilization now, instead of starting a timer
*/
if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
if (dl_task(p))
sub_running_bw(dl_se, dl_rq);
if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
if (READ_ONCE(p->__state) == TASK_DEAD)
sub_rq_bw(&p->dl, &rq->dl);
raw_spin_lock(&dl_b->lock);
__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
raw_spin_unlock(&dl_b->lock);
__dl_clear_params(p);
}
return;
}
dl_se->dl_non_contending = 1;
get_task_struct(p);
hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}
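/*
 * Worked "0-lag" example (illustrative numbers): a task with
 * dl_runtime = 10ms and dl_period = 100ms (utilization 0.1) blocks with
 * 4ms of runtime left. Its 0-lag time is
 *
 *   deadline - (4ms * 100ms) / 10ms = deadline - 40ms
 *
 * If the absolute deadline is only 30ms away, the 0-lag time is already
 * 10ms in the past and running_bw is decreased immediately; if it is
 * 70ms away, the inactive timer is armed to fire in 30ms instead.
 */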
static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
/*
* If this is a non-deadline task that has been boosted,
* do nothing
*/
if (dl_se->dl_runtime == 0)
return;
if (flags & ENQUEUE_MIGRATED)
add_rq_bw(dl_se, dl_rq);
if (dl_se->dl_non_contending) {
dl_se->dl_non_contending = 0;
/*
* If the timer handler is currently running and the
* timer cannot be canceled, inactive_task_timer()
 * will see that dl_non_contending is not set, and
* will not touch the rq's active utilization,
* so we are still safe.
*/
if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
put_task_struct(dl_task_of(dl_se));
} else {
/*
* Since "dl_non_contending" is not set, the
* task's utilization has already been removed from
 * the active utilization (either when the task blocked,
 * or when the "inactive timer" fired).
* So, add it back.
*/
add_running_bw(dl_se, dl_rq);
}
}
static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
struct sched_dl_entity *dl_se = &p->dl;
return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
}
static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
void init_dl_bw(struct dl_bw *dl_b)
{
raw_spin_lock_init(&dl_b->lock);
if (global_rt_runtime() == RUNTIME_INF)
dl_b->bw = -1;
else
dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
dl_b->total_bw = 0;
}
void init_dl_rq(struct dl_rq *dl_rq)
{
dl_rq->root = RB_ROOT_CACHED;
#ifdef CONFIG_SMP
/* zero means no -deadline tasks */
dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
dl_rq->dl_nr_migratory = 0;
dl_rq->overloaded = 0;
dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
init_dl_bw(&dl_rq->dl_bw);
#endif
dl_rq->running_bw = 0;
dl_rq->this_bw = 0;
init_dl_rq_bw_ratio(dl_rq);
}
#ifdef CONFIG_SMP
static inline int dl_overloaded(struct rq *rq)
{
return atomic_read(&rq->rd->dlo_count);
}
static inline void dl_set_overload(struct rq *rq)
{
if (!rq->online)
return;
cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
/*
* Must be visible before the overload count is
* set (as in sched_rt.c).
*
* Matched by the barrier in pull_dl_task().
*/
smp_wmb();
atomic_inc(&rq->rd->dlo_count);
}
static inline void dl_clear_overload(struct rq *rq)
{
if (!rq->online)
return;
atomic_dec(&rq->rd->dlo_count);
cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}
static void update_dl_migration(struct dl_rq *dl_rq)
{
if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
if (!dl_rq->overloaded) {
dl_set_overload(rq_of_dl_rq(dl_rq));
dl_rq->overloaded = 1;
}
} else if (dl_rq->overloaded) {
dl_clear_overload(rq_of_dl_rq(dl_rq));
dl_rq->overloaded = 0;
}
}
static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory++;
update_dl_migration(dl_rq);
}
static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory--;
update_dl_migration(dl_rq);
}
#define __node_2_pdl(node) \
rb_entry((node), struct task_struct, pushable_dl_tasks)
static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
{
return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
}
/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
*/
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
struct rb_node *leftmost;
WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
leftmost = rb_add_cached(&p->pushable_dl_tasks,
&rq->dl.pushable_dl_tasks_root,
__pushable_less);
if (leftmost)
rq->dl.earliest_dl.next = p->dl.deadline;
}
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
struct dl_rq *dl_rq = &rq->dl;
struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
struct rb_node *leftmost;
if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
return;
leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
if (leftmost)
dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
RB_CLEAR_NODE(&p->pushable_dl_tasks);
}
static inline int has_pushable_dl_tasks(struct rq *rq)
{
return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}
static int push_dl_task(struct rq *rq);
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
return rq->online && dl_task(prev);
}
static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);
static inline void deadline_queue_push_tasks(struct rq *rq)
{
if (!has_pushable_dl_tasks(rq))
return;
queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}
static inline void deadline_queue_pull_task(struct rq *rq)
{
queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
struct rq *later_rq = NULL;
struct dl_bw *dl_b;
later_rq = find_lock_later_rq(p, rq);
if (!later_rq) {
int cpu;
/*
* If we cannot preempt any rq, fall back to pick any
* online CPU:
*/
cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
if (cpu >= nr_cpu_ids) {
/*
* Failed to find any suitable CPU.
* The task will never come back!
*/
WARN_ON_ONCE(dl_bandwidth_enabled());
/*
* If admission control is disabled we
* try a little harder to let the task
* run.
*/
cpu = cpumask_any(cpu_active_mask);
}
later_rq = cpu_rq(cpu);
double_lock_balance(rq, later_rq);
}
if (p->dl.dl_non_contending || p->dl.dl_throttled) {
/*
* Inactive timer is armed (or callback is running, but
* waiting for us to release rq locks). In any case, when it
* will fire (or continue), it will see running_bw of this
* task migrated to later_rq (and correctly handle it).
*/
sub_running_bw(&p->dl, &rq->dl);
sub_rq_bw(&p->dl, &rq->dl);
add_rq_bw(&p->dl, &later_rq->dl);
add_running_bw(&p->dl, &later_rq->dl);
} else {
sub_rq_bw(&p->dl, &rq->dl);
add_rq_bw(&p->dl, &later_rq->dl);
}
/*
* And we finally need to fixup root_domain(s) bandwidth accounting,
* since p is still hanging out in the old (now moved to default) root
* domain.
*/
dl_b = &rq->rd->dl_bw;
raw_spin_lock(&dl_b->lock);
__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
raw_spin_unlock(&dl_b->lock);
dl_b = &later_rq->rd->dl_bw;
raw_spin_lock(&dl_b->lock);
__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
raw_spin_unlock(&dl_b->lock);
set_task_cpu(p, later_rq->cpu);
double_unlock_balance(later_rq, rq);
return later_rq;
}
#else
static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}
static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}
static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}
static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}
static inline void deadline_queue_push_tasks(struct rq *rq)
{
}
static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
struct rq *rq)
{
/* for non-boosted task, pi_of(dl_se) == dl_se */
dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
dl_se->runtime = pi_of(dl_se)->dl_runtime;
}
/*
* We are being explicitly informed that a new instance is starting,
* and this means that:
* - the absolute deadline of the entity has to be placed at
* current time + relative deadline;
* - the runtime of the entity has to be set to the maximum value.
*
 * The capability of specifying such an event is useful whenever a -deadline
* entity wants to (try to!) synchronize its behaviour with the scheduler's
* one, and to (try to!) reconcile itself with its own scheduling
* parameters.
*/
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
WARN_ON(is_dl_boosted(dl_se));
WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
/*
* We are racing with the deadline timer. So, do nothing because
* the deadline timer handler will take care of properly recharging
* the runtime and postponing the deadline
*/
if (dl_se->dl_throttled)
return;
/*
* We use the regular wall clock time to set deadlines in the
* future; in fact, we must consider execution overheads (time
* spent on hardirq context, etc.).
*/
replenish_dl_new_period(dl_se, rq);
}
/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setattr().
*/
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);
/*
* This could be the case for a !-dl task that is boosted.
* Just go with full inherited parameters.
*/
if (dl_se->dl_deadline == 0)
replenish_dl_new_period(dl_se, rq);
if (dl_se->dl_yielded && dl_se->runtime > 0)
dl_se->runtime = 0;
/*
* We keep moving the deadline away until we get some
* available runtime for the entity. This ensures correct
* handling of situations where the runtime overrun is
* arbitrary large.
*/
while (dl_se->runtime <= 0) {
dl_se->deadline += pi_of(dl_se)->dl_period;
dl_se->runtime += pi_of(dl_se)->dl_runtime;
}
/*
* At this point, the deadline really should be "in
* the future" with respect to rq->clock. If it's
* not, we are, for some reason, lagging too much!
 * Anyway, after having warned userspace about that,
 * we still try to keep things running by
* resetting the deadline and the budget of the
* entity.
*/
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
printk_deferred_once("sched: DL replenish lagged too much\n");
replenish_dl_new_period(dl_se, rq);
}
if (dl_se->dl_yielded)
dl_se->dl_yielded = 0;
if (dl_se->dl_throttled)
dl_se->dl_throttled = 0;
}
/*
* Here we check if --at time t-- an entity (which is probably being
* [re]activated or, in general, enqueued) can use its remaining runtime
* and its current deadline _without_ exceeding the bandwidth it is
* assigned (function returns true if it can't). We are in fact applying
* one of the CBS rules: when a task wakes up, if the residual runtime
* over residual deadline fits within the allocated bandwidth, then we
* can keep the current (absolute) deadline and residual budget without
* disrupting the schedulability of the system. Otherwise, we should
* refill the runtime and set the deadline a period in the future,
* because keeping the current (absolute) deadline of the task would
* result in breaking guarantees promised to other tasks (refer to
* Documentation/scheduler/sched-deadline.rst for more information).
*
* This function returns true if:
*
* runtime / (deadline - t) > dl_runtime / dl_deadline ,
*
* IOW we can't recycle current parameters.
*
 * Notice that the bandwidth check is done against the deadline. For a
 * task with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
*/
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
{
u64 left, right;
/*
* left and right are the two sides of the equation above,
* after a bit of shuffling to use multiplications instead
* of divisions.
*
* Note that none of the time values involved in the two
* multiplications are absolute: dl_deadline and dl_runtime
* are the relative deadline and the maximum runtime of each
* instance, runtime is the runtime left for the last instance
* and (deadline - t), since t is rq->clock, is the time left
* to the (absolute) deadline. Even if overflowing the u64 type
* is very unlikely to occur in both cases, here we scale down
* as we want to avoid that risk at all. Scaling down by 10
* means that we reduce granularity to 1us. We are fine with it,
* since this is only a true/false check and, anyway, thinking
* of anything below microseconds resolution is actually fiction
* (but still we want to give the user that illusion >;).
*/
left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
right = ((dl_se->deadline - t) >> DL_SCALE) *
(pi_of(dl_se)->dl_runtime >> DL_SCALE);
return dl_time_before(right, left);
}
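/*
 * Worked example for the check above (illustrative numbers): a task with
 * dl_runtime = 10ms and dl_deadline = 100ms has bandwidth 0.1. If it
 * wakes with 5ms of residual runtime and 20ms left to its old absolute
 * deadline, then 5/20 = 0.25 > 0.1 and the function returns true: the
 * old parameters cannot be recycled. With 60ms left instead,
 * 5/60 ~= 0.083 <= 0.1, so the current deadline and runtime are kept.
 */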
/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime so as to avoid the task overrunning
 * its density.
 *
 * Reasoning: a task may overrun the density if:
 * runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 * runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In such a way, runtime will be equal to the maximum density
 * the task can use without breaking any rule.
*
* [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
* bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
*/
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
u64 laxity = dl_se->deadline - rq_clock(rq);
/*
* If the task has deadline < period, and the deadline is in the past,
* it should already be throttled before this check.
*
* See update_dl_entity() comments for further details.
*/
WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
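/*
 * Worked example for the revised rule (illustrative numbers): with
 * dl_runtime = 10ms and dl_deadline = 100ms the density is 0.1. A task
 * waking up with 50ms of laxity (time left to its absolute deadline)
 * gets its runtime clipped to 0.1 * 50ms = 5ms, whatever residual
 * runtime it had, so it can never exceed its declared density.
 */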
/*
* Regarding the deadline, a task with implicit deadline has a relative
* deadline == relative period. A task with constrained deadline has a
* relative deadline <= relative period.
*
* We support constrained deadline tasks. However, there are some restrictions
* applied only for tasks which do not have an implicit deadline. See
* update_dl_entity() to know more about such restrictions.
*
 * dl_is_implicit() returns true if the task has an implicit deadline.
*/
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
return dl_se->dl_deadline == dl_se->dl_period;
}
/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehaviour, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments in the update_dl_revised_wakeup() function to
 * find more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
dl_entity_overflow(dl_se, rq_clock(rq))) {
if (unlikely(!dl_is_implicit(dl_se) &&
!dl_time_before(dl_se->deadline, rq_clock(rq)) &&
!is_dl_boosted(dl_se))) {
update_dl_revised_wakeup(dl_se, rq);
return;
}
replenish_dl_new_period(dl_se, rq);
}
}
static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}
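/*
 * Example (illustrative numbers): a constrained task with
 * dl_deadline = 40ms and dl_period = 100ms whose current absolute
 * deadline is at t = 140ms had its period start at t = 100ms, so
 * dl_next_period() yields 140 - 40 + 100 = 200ms, the start of the
 * next period.
 */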
/*
* If the entity depleted all its runtime, and if we want it to sleep
* while waiting for some new execution time to become available, we
* set the bandwidth replenishment timer to the replenishment instant
* and try to activate it.
*
* Notice that it is important for the caller to know if the timer
* actually started or not (i.e., the replenishment instant is in
* the future or in the past).
*/
static int start_dl_timer(struct task_struct *p)
{
struct sched_dl_entity *dl_se = &p->dl;
struct hrtimer *timer = &dl_se->dl_timer;
struct rq *rq = task_rq(p);
ktime_t now, act;
s64 delta;
lockdep_assert_rq_held(rq);
/*
* We want the timer to fire at the deadline, but considering
* that it is actually coming from rq->clock and not from
* hrtimer's time base reading.
*/
act = ns_to_ktime(dl_next_period(dl_se));
now = hrtimer_cb_get_time(timer);
delta = ktime_to_ns(now) - rq_clock(rq);
act = ktime_add_ns(act, delta);
/*
* If the expiry time already passed, e.g., because the value
* chosen as the deadline is too small, don't even try to
* start the timer in the past!
*/
if (ktime_us_delta(act, now) < 0)
return 0;
/*
* !enqueued will guarantee another callback; even if one is already in
* progress. This ensures a balanced {get,put}_task_struct().
*
* The race against __run_timer() clearing the enqueued state is
* harmless because we're holding task_rq()->lock, therefore the timer
* expiring after we've done the check will wait on its task_rq_lock()
* and observe our state.
*/
if (!hrtimer_is_queued(timer)) {
get_task_struct(p);
hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
}
return 1;
}
/*
* This is the bandwidth enforcement timer callback. If here, we know
* a task is not on its dl_rq, since the fact that the timer was running
* means the task is throttled and needs a runtime replenishment.
*
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
* dequeue_task_dl(). In the former case we must issue the runtime
* replenishment and add the task back to the dl_rq; in the latter, we just
* do nothing but clearing dl_throttled, so that runtime and deadline
* updating (and the queueing back to dl_rq) will be done by the
* next call to enqueue_task_dl().
*/
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
struct sched_dl_entity *dl_se = container_of(timer,
struct sched_dl_entity,
dl_timer);
struct task_struct *p = dl_task_of(dl_se);
struct rq_flags rf;
struct rq *rq;
rq = task_rq_lock(p, &rf);
/*
* The task might have changed its scheduling policy to something
* different than SCHED_DEADLINE (through switched_from_dl()).
*/
if (!dl_task(p))
goto unlock;
/*
* The task might have been boosted by someone else and might be in the
 * boosting/deboosting path; it's not throttled.
*/
if (is_dl_boosted(dl_se))
goto unlock;
/*
* Spurious timer due to start_dl_timer() race; or we already received
* a replenishment from rt_mutex_setprio().
*/
if (!dl_se->dl_throttled)
goto unlock;
sched_clock_tick();
update_rq_clock(rq);
/*
* If the throttle happened during sched-out; like:
*
* schedule()
* deactivate_task()
* dequeue_task_dl()
* update_curr_dl()
* start_dl_timer()
* __dequeue_task_dl()
* prev->on_rq = 0;
*
* We can be both throttled and !queued. Replenish the counter
* but do not enqueue -- wait for our wakeup to do that.
*/
if (!task_on_rq_queued(p)) {
replenish_dl_entity(dl_se);
goto unlock;
}
#ifdef CONFIG_SMP
if (unlikely(!rq->online)) {
/*
* If the runqueue is no longer available, migrate the
* task elsewhere. This necessarily changes rq.
*/
lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
rq = dl_task_offline_migration(rq, p);
rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
update_rq_clock(rq);
/*
* Now that the task has been migrated to the new RQ and we
* have that locked, proceed as normal and enqueue the task
* there.
*/
}
#endif
enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
if (dl_task(rq->curr))
check_preempt_curr_dl(rq, p, 0);
else
resched_curr(rq);
#ifdef CONFIG_SMP
/*
* Queueing this task back might have overloaded rq, check if we need
* to kick someone away.
*/
if (has_pushable_dl_tasks(rq)) {
/*
 * Nothing relies on rq->lock after this, so it's safe to drop
* rq->lock.
*/
rq_unpin_lock(rq, &rf);
push_dl_task(rq);
rq_repin_lock(rq, &rf);
}
#endif
unlock:
task_rq_unlock(rq, p, &rf);
/*
* This can free the task_struct, including this hrtimer, do not touch
* anything related to that after this.
*/
put_task_struct(p);
return HRTIMER_NORESTART;
}
void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
struct hrtimer *timer = &dl_se->dl_timer;
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
timer->function = dl_task_timer;
}
/*
* During the activation, CBS checks if it can reuse the current task's
* runtime and period. If the deadline of the task is in the past, CBS
* cannot use the runtime, and so it replenishes the task. This rule
* works fine for implicit deadline tasks (deadline == period), and the
* CBS was designed for implicit deadline tasks. However, a task with
* constrained deadline (deadline < period) might be awakened after the
* deadline, but before the next period. In this case, replenishing the
* task would allow it to run for runtime / deadline. As in this case
* deadline < period, CBS enables a task to run for more than the
* runtime / period. In a very loaded system, this can cause a domino
* effect, making other tasks miss their deadlines.
*
* To avoid this problem, in the activation of a constrained deadline
* task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
* unless it is boosted.
*/
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
struct task_struct *p = dl_task_of(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
return;
dl_se->dl_throttled = 1;
if (dl_se->runtime > 0)
dl_se->runtime = 0;
}
}
static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
return (dl_se->runtime <= 0);
}
/*
* This function implements the GRUB accounting rule. According to the
* GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
* but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
* where u is the utilization of the task, Umax is the maximum reclaimable
* utilization, Uinact is the (per-runqueue) inactive utilization, computed
* as the difference between the "total runqueue utilization" and the
* "runqueue active utilization", and Uextra is the (per runqueue) extra
* reclaimable utilization.
* Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
* by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
* Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
* Since delta is a 64 bit variable, to have an overflow its value should be
* larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
* not an issue here.
*/
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
u64 u_act;
u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
/*
* Instead of computing max{u, (u_max - u_inact - u_extra)}, we
* compare u_inact + u_extra with u_max - u, because u_inact + u_extra
* can be larger than u_max. So, u_max - u_inact - u_extra would be
* negative leading to wrong results.
*/
if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
u_act = dl_se->dl_bw;
else
u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;
u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
return (delta * u_act) >> BW_SHIFT;
}
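/*
 * Worked GRUB example (illustrative numbers, all in fractions of a CPU):
 * let max_bw (Umax) = 0.95, this_bw = 0.5, running_bw = 0.4 (so
 * u_inact = 0.1), extra_bw = 0.3 and the task's own dl_bw = 0.25.
 * Since u_inact + extra_bw = 0.4 <= max_bw - dl_bw = 0.7, we get
 * u_act = 0.95 - 0.1 - 0.3 = 0.55, scaled by 1/Umax to ~0.58. Each
 * millisecond of execution is then charged as only ~0.58ms of runtime,
 * letting the task reclaim the bandwidth left unused by others.
 */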
/*
* Update the current task's runtime statistics (provided it is still
* a -deadline task and has not been removed from the dl_rq).
*/
static void update_curr_dl(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_dl_entity *dl_se = &curr->dl;
u64 delta_exec, scaled_delta_exec;
int cpu = cpu_of(rq);
u64 now;
if (!dl_task(curr) || !on_dl_rq(dl_se))
return;
/*
* Consumed budget is computed considering the time as
* observed by schedulable tasks (excluding time spent
* in hardirq context, etc.). Deadlines are instead
* computed using hard walltime. This seems to be the more
* natural solution, but the full ramifications of this
* approach need further study.
*/
now = rq_clock_task(rq);
delta_exec = now - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0)) {
if (unlikely(dl_se->dl_yielded))
goto throttle;
return;
}
schedstat_set(curr->stats.exec_max,
max(curr->stats.exec_max, delta_exec));
trace_sched_stat_runtime(curr, delta_exec, 0);
update_current_exec_runtime(curr, now, delta_exec);
if (dl_entity_is_special(dl_se))
return;
/*
* For tasks that participate in GRUB, we implement GRUB-PA: the
* spare reclaimed bandwidth is used to clock down frequency.
*
* For the others, we still need to scale reservation parameters
* according to current frequency and CPU maximum capacity.
*/
if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
scaled_delta_exec = grub_reclaim(delta_exec,
rq,
&curr->dl);
} else {
unsigned long scale_freq = arch_scale_freq_capacity(cpu);
unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
scaled_delta_exec = cap_scale(delta_exec, scale_freq);
scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
}
dl_se->runtime -= scaled_delta_exec;
throttle:
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
dl_se->dl_throttled = 1;
/* If requested, inform the user about runtime overruns. */
if (dl_runtime_exceeded(dl_se) &&
(dl_se->flags & SCHED_FLAG_DL_OVERRUN))
dl_se->dl_overrun = 1;
__dequeue_task_dl(rq, curr, 0);
if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
if (!is_leftmost(curr, &rq->dl))
resched_curr(rq);
}
/*
* Because -- for now -- we share the rt bandwidth, we need to
* account our runtime there too, otherwise actual rt tasks
* would be able to exceed the shared quota.
*
* Account to the root rt group for now.
*
* The solution we're working towards is having the RT groups scheduled
* using deadline servers -- however there's a few nasties to figure
* out before that can happen.
*/
if (rt_bandwidth_enabled()) {
struct rt_rq *rt_rq = &rq->rt;
raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* We'll let actual RT tasks worry about the overflow here, we
* have our own CBS to keep us in line; only account when RT
* bandwidth is relevant.
*/
if (sched_rt_bandwidth_account(rt_rq))
rt_rq->rt_time += delta_exec;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
}
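/*
 * A sketch of the non-reclaiming scaling path above, with invented values.
 * cap_scale(v, s) computes v * s >> SCHED_CAPACITY_SHIFT (i.e. v * s / 1024).
 * Suppose the CPU runs at half its maximum frequency (scale_freq = 512) on
 * a core with 80% of the biggest CPU's capacity (scale_cpu = 819):
 *
 *	scaled = cap_scale(delta_exec, 512);	// delta_exec / 2
 *	scaled = cap_scale(scaled, 819);	// ~0.8 of that, ~0.4 overall
 *
 * so 10ms of wall-clock execution consumes only ~4ms of the reservation,
 * matching the amount of work the task could actually complete.
 */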
static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
struct sched_dl_entity *dl_se = container_of(timer,
struct sched_dl_entity,
inactive_timer);
struct task_struct *p = dl_task_of(dl_se);
struct rq_flags rf;
struct rq *rq;
rq = task_rq_lock(p, &rf);
sched_clock_tick();
update_rq_clock(rq);
if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
dl_se->dl_non_contending = 0;
}
raw_spin_lock(&dl_b->lock);
__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
raw_spin_unlock(&dl_b->lock);
__dl_clear_params(p);
goto unlock;
}
if (dl_se->dl_non_contending == 0)
goto unlock;
sub_running_bw(dl_se, &rq->dl);
dl_se->dl_non_contending = 0;
unlock:
task_rq_unlock(rq, p, &rf);
put_task_struct(p);
return HRTIMER_NORESTART;
}
void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
struct hrtimer *timer = &dl_se->inactive_timer;
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
timer->function = inactive_task_timer;
}
#define __node_2_dle(node) \
rb_entry((node), struct sched_dl_entity, rb_node)
#ifdef CONFIG_SMP
static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
struct rq *rq = rq_of_dl_rq(dl_rq);
if (dl_rq->earliest_dl.curr == 0 ||
dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
if (dl_rq->earliest_dl.curr == 0)
cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
dl_rq->earliest_dl.curr = deadline;
cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
}
}
static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
struct rq *rq = rq_of_dl_rq(dl_rq);
/*
* Since we may have removed our earliest (and/or next earliest)
* task we must recompute them.
*/
if (!dl_rq->dl_nr_running) {
dl_rq->earliest_dl.curr = 0;
dl_rq->earliest_dl.next = 0;
cpudl_clear(&rq->rd->cpudl, rq->cpu);
cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
} else {
struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
struct sched_dl_entity *entry = __node_2_dle(leftmost);
dl_rq->earliest_dl.curr = entry->deadline;
cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
}
}
#else
static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
#endif /* CONFIG_SMP */
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
int prio = dl_task_of(dl_se)->prio;
u64 deadline = dl_se->deadline;
WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
add_nr_running(rq_of_dl_rq(dl_rq), 1);
inc_dl_deadline(dl_rq, deadline);
inc_dl_migration(dl_se, dl_rq);
}
static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
int prio = dl_task_of(dl_se)->prio;
WARN_ON(!dl_prio(prio));
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
sub_nr_running(rq_of_dl_rq(dl_rq), 1);
dec_dl_deadline(dl_rq, dl_se->deadline);
dec_dl_migration(dl_se, dl_rq);
}
static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
{
return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
}
static inline struct sched_statistics *
__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
{
return &dl_task_of(dl_se)->stats;
}
static inline void
update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
struct sched_statistics *stats;
if (!schedstat_enabled())
return;
stats = __schedstats_from_dl_se(dl_se);
__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}
static inline void
update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
struct sched_statistics *stats;
if (!schedstat_enabled())
return;
stats = __schedstats_from_dl_se(dl_se);
__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}
static inline void
update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
struct sched_statistics *stats;
if (!schedstat_enabled())
return;
stats = __schedstats_from_dl_se(dl_se);
__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}
static inline void
update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
int flags)
{
if (!schedstat_enabled())
return;
if (flags & ENQUEUE_WAKEUP)
update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
}
static inline void
update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
int flags)
{
struct task_struct *p = dl_task_of(dl_se);
if (!schedstat_enabled())
return;
if ((flags & DEQUEUE_SLEEP)) {
unsigned int state;
state = READ_ONCE(p->__state);
if (state & TASK_INTERRUPTIBLE)
__schedstat_set(p->stats.sleep_start,
rq_clock(rq_of_dl_rq(dl_rq)));
if (state & TASK_UNINTERRUPTIBLE)
__schedstat_set(p->stats.block_start,
rq_clock(rq_of_dl_rq(dl_rq)));
}
}
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));
rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
inc_dl_tasks(dl_se, dl_rq);
}
static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
if (RB_EMPTY_NODE(&dl_se->rb_node))
return;
rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
RB_CLEAR_NODE(&dl_se->rb_node);
dec_dl_tasks(dl_se, dl_rq);
}
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
WARN_ON_ONCE(on_dl_rq(dl_se));
update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
/*
* If this is a wakeup or a new instance, the scheduling
* parameters of the task might need updating. Otherwise,
* we want a replenishment of its runtime.
*/
if (flags & ENQUEUE_WAKEUP) {
task_contending(dl_se, flags);
update_dl_entity(dl_se);
} else if (flags & ENQUEUE_REPLENISH) {
replenish_dl_entity(dl_se);
} else if ((flags & ENQUEUE_RESTORE) &&
dl_time_before(dl_se->deadline,
rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
setup_new_dl_entity(dl_se);
}
__enqueue_dl_entity(dl_se);
}
static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
__dequeue_dl_entity(dl_se);
}
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
if (is_dl_boosted(&p->dl)) {
/*
* Because of delays in the detection of the overrun of a
* thread's runtime, it might be the case that a thread
* goes to sleep in a rt mutex with negative runtime. As
* a consequence, the thread will be throttled.
*
* While waiting for the mutex, this thread can also be
* boosted via PI, resulting in a thread that is throttled
* and boosted at the same time.
*
* In this case, the boost overrides the throttle.
*/
if (p->dl.dl_throttled) {
/*
* The replenish timer needs to be canceled. No
* problem if it fires concurrently: boosted threads
* are ignored in dl_task_timer().
*/
hrtimer_try_to_cancel(&p->dl.dl_timer);
p->dl.dl_throttled = 0;
}
} else if (!dl_prio(p->normal_prio)) {
/*
* Special case in which we have a !SCHED_DEADLINE task that is going
* to be deboosted, but exceeds its runtime while doing so. No point in
* replenishing it, as it's going to return to its original
* scheduling class after this. If it has been throttled, we need to
* clear the flag, otherwise the task may wake up as throttled after
* being boosted again with no means to replenish the runtime and clear
* the throttle.
*/
p->dl.dl_throttled = 0;
if (!(flags & ENQUEUE_REPLENISH))
printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
task_pid_nr(p));
return;
}
/*
* Check if a constrained deadline task was activated
* after the deadline but before the next period.
* If that is the case, the task will be throttled and
* the replenishment timer will be set to the next period.
*/
if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
dl_check_constrained_dl(&p->dl);
if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
add_rq_bw(&p->dl, &rq->dl);
add_running_bw(&p->dl, &rq->dl);
}
/*
* If p is throttled, we do not enqueue it. In fact, if it exhausted
* its budget it needs a replenishment and, since it now is on
* its rq, the bandwidth timer callback (which clearly has not
* run yet) will take care of this.
* However, the active utilization does not depend on the fact
* that the task is on the runqueue or not (but depends on the
* task's state - in GRUB parlance, "inactive" vs "active contending").
* In other words, even if a task is throttled its utilization must
* be counted in the active utilization; hence, we need to call
* add_running_bw().
*/
if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
if (flags & ENQUEUE_WAKEUP)
task_contending(&p->dl, flags);
return;
}
check_schedstat_required();
update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
enqueue_dl_entity(&p->dl, flags);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
dequeue_dl_entity(&p->dl);
dequeue_pushable_dl_task(rq, p);
}
static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
update_curr_dl(rq);
__dequeue_task_dl(rq, p, flags);
if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
sub_running_bw(&p->dl, &rq->dl);
sub_rq_bw(&p->dl, &rq->dl);
}
/*
* This check allows us to start the inactive timer (or to immediately
* decrease the active utilization, if needed) in two cases:
* when the task blocks and when it is terminating
* (p->state == TASK_DEAD). We can handle the two cases in the same
* way, because from GRUB's point of view the same thing is happening
* (the task moves from "active contending" to "active non contending"
* or "inactive")
*/
if (flags & DEQUEUE_SLEEP)
task_non_contending(p);
}
/*
* Yield task semantic for -deadline tasks is:
*
* get off from the CPU until our next instance, with
* a new runtime. This is of little use now, since we
* don't have a bandwidth reclaiming mechanism. Anyway,
* bandwidth reclaiming is planned for the future, and
* yield_task_dl will indicate that some spare budget
* is available for other task instances to use it.
*/
static void yield_task_dl(struct rq *rq)
{
/*
* We make the task go to sleep until its current deadline by
* forcing its runtime to zero. This way, update_curr_dl() stops
* it and the bandwidth timer will wake it up and will give it
* new scheduling parameters (thanks to dl_yielded=1).
*/
rq->curr->dl.dl_yielded = 1;
update_rq_clock(rq);
update_curr_dl(rq);
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
* and double the fastpath cost.
*/
rq_clock_skip_update(rq);
}
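/*
 * From userspace this semantic is reached through sched_yield(). A minimal
 * sketch of a periodic SCHED_DEADLINE job donating its leftover budget each
 * period (hypothetical parameters; do_work() is a placeholder, and
 * sched_setattr() has no glibc wrapper, hence syscall(2)):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	// 10ms
 *		.sched_deadline	= 30 * 1000 * 1000,	// 30ms
 *		.sched_period	= 30 * 1000 * 1000,	// 30ms
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);
 *	for (;;) {
 *		do_work();	// should take less than the 10ms budget
 *		sched_yield();	// throttled until the next period begins
 *	}
 */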
#ifdef CONFIG_SMP
static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
struct rq *rq)
{
return (!rq->dl.dl_nr_running ||
dl_time_before(p->dl.deadline,
rq->dl.earliest_dl.curr));
}
static int find_later_rq(struct task_struct *task);
static int
select_task_rq_dl(struct task_struct *p, int cpu, int flags)
{
struct task_struct *curr;
bool select_rq;
struct rq *rq;
if (!(flags & WF_TTWU))
goto out;
rq = cpu_rq(cpu);
rcu_read_lock();
curr = READ_ONCE(rq->curr); /* unlocked access */
/*
* If we are dealing with a -deadline task, we must
* decide where to wake it up.
* If it has a later deadline and the current task
* on this rq can't move (provided the waking task
* can!) we prefer to send it somewhere else. On the
* other hand, if it has a shorter deadline, we
* try to make it stay here, it might be important.
*/
select_rq = unlikely(dl_task(curr)) &&
(curr->nr_cpus_allowed < 2 ||
!dl_entity_preempt(&p->dl, &curr->dl)) &&
p->nr_cpus_allowed > 1;
/*
* Take the capacity of the CPU into account to
* ensure it fits the requirement of the task.
*/
if (sched_asym_cpucap_active())
select_rq |= !dl_task_fits_capacity(p, cpu);
if (select_rq) {
int target = find_later_rq(p);
if (target != -1 &&
dl_task_is_earliest_deadline(p, cpu_rq(target)))
cpu = target;
}
rcu_read_unlock();
out:
return cpu;
}
static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
{
struct rq_flags rf;
struct rq *rq;
if (READ_ONCE(p->__state) != TASK_WAKING)
return;
rq = task_rq(p);
/*
* Since p->state == TASK_WAKING, set_task_cpu() has been called
* from try_to_wake_up(). Hence, p->pi_lock is locked, but
* rq->lock is not... So, lock it
*/
rq_lock(rq, &rf);
if (p->dl.dl_non_contending) {
update_rq_clock(rq);
sub_running_bw(&p->dl, &rq->dl);
p->dl.dl_non_contending = 0;
/*
* If the timer handler is currently running and the
* timer cannot be canceled, inactive_task_timer()
* will see that dl_not_contending is not set, and
* will not touch the rq's active utilization,
* so we are still safe.
*/
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
}
sub_rq_bw(&p->dl, &rq->dl);
rq_unlock(rq, &rf);
}
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
/*
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
!cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
return;
/*
* p is migratable, so let's not schedule it and
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1 &&
cpudl_find(&rq->rd->cpudl, p, NULL))
return;
resched_curr(rq);
}
static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
/*
* This is OK, because current is on_cpu, which avoids it being
* picked for load-balance and preemption/IRQs are still
* disabled avoiding further scheduler activity on it and we've
* not yet started the picking loop.
*/
rq_unpin_lock(rq, rf);
pull_dl_task(rq);
rq_repin_lock(rq, rf);
}
return sched_stop_runnable(rq) || sched_dl_runnable(rq);
}
#endif /* CONFIG_SMP */
/*
* Only called when both the current and waking task are -deadline
* tasks.
*/
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
int flags)
{
if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
resched_curr(rq);
return;
}
#ifdef CONFIG_SMP
/*
* In the unlikely case current and p have the same deadline
* let us try to decide what's the best thing to do...
*/
if ((p->dl.deadline == rq->curr->dl.deadline) &&
!test_tsk_need_resched(rq->curr))
check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif
static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
{
struct sched_dl_entity *dl_se = &p->dl;
struct dl_rq *dl_rq = &rq->dl;
p->se.exec_start = rq_clock_task(rq);
if (on_dl_rq(&p->dl))
update_stats_wait_end_dl(dl_rq, dl_se);
/* You can't push away the running task */
dequeue_pushable_dl_task(rq, p);
if (!first)
return;
if (hrtick_enabled_dl(rq))
start_hrtick_dl(rq, p);
if (rq->curr->sched_class != &dl_sched_class)
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
deadline_queue_push_tasks(rq);
}
static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
{
struct rb_node *left = rb_first_cached(&dl_rq->root);
if (!left)
return NULL;
return __node_2_dle(left);
}
static struct task_struct *pick_task_dl(struct rq *rq)
{
struct sched_dl_entity *dl_se;
struct dl_rq *dl_rq = &rq->dl;
struct task_struct *p;
if (!sched_dl_runnable(rq))
return NULL;
dl_se = pick_next_dl_entity(dl_rq);
WARN_ON_ONCE(!dl_se);
p = dl_task_of(dl_se);
return p;
}
static struct task_struct *pick_next_task_dl(struct rq *rq)
{
struct task_struct *p;
p = pick_task_dl(rq);
if (p)
set_next_task_dl(rq, p, true);
return p;
}
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
struct sched_dl_entity *dl_se = &p->dl;
struct dl_rq *dl_rq = &rq->dl;
if (on_dl_rq(&p->dl))
update_stats_wait_start_dl(dl_rq, dl_se);
update_curr_dl(rq);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
update_curr_dl(rq);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
/*
* Even when we have runtime, update_curr_dl() might have resulted in us
* not being the leftmost task anymore. In that case NEED_RESCHED will
* be set and schedule() will start a new hrtick for the next task.
*/
if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
is_leftmost(p, &rq->dl))
start_hrtick_dl(rq, p);
}
static void task_fork_dl(struct task_struct *p)
{
/*
* SCHED_DEADLINE tasks cannot fork and this is achieved through
* sched_fork()
*/
}
#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define DL_MAX_TRIES 3
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_on_cpu(rq, p) &&
cpumask_test_cpu(cpu, &p->cpus_mask))
return 1;
return 0;
}
/*
* Return the earliest pushable rq's task, which is suitable to be executed
* on the CPU, NULL otherwise:
*/
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
struct task_struct *p = NULL;
struct rb_node *next_node;
if (!has_pushable_dl_tasks(rq))
return NULL;
next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
next_node:
if (next_node) {
p = __node_2_pdl(next_node);
if (pick_dl_task(rq, p, cpu))
return p;
next_node = rb_next(next_node);
goto next_node;
}
return NULL;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
static int find_later_rq(struct task_struct *task)
{
struct sched_domain *sd;
struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
/* Make sure the mask is initialized first */
if (unlikely(!later_mask))
return -1;
if (task->nr_cpus_allowed == 1)
return -1;
/*
* We have to consider system topology and task affinity
* first, then we can look for a suitable CPU.
*/
if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
return -1;
/*
* If we are here, some targets have been found, including
* the most suitable one, which is, among the runqueues whose
* current tasks have later deadlines than this task's, the
* rq with the latest possible one.
*
* Now we check how well this matches with task's
* affinity and system topology.
*
* The last CPU where the task ran is our first
* guess, since it is most likely cache-hot there.
*/
if (cpumask_test_cpu(cpu, later_mask))
return cpu;
/*
* Check if this_cpu is to be skipped (i.e., it is
* not in the mask) or not.
*/
if (!cpumask_test_cpu(this_cpu, later_mask))
this_cpu = -1;
rcu_read_lock();
for_each_domain(cpu, sd) {
if (sd->flags & SD_WAKE_AFFINE) {
int best_cpu;
/*
* If possible, preempting this_cpu is
* cheaper than migrating.
*/
if (this_cpu != -1 &&
cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
rcu_read_unlock();
return this_cpu;
}
best_cpu = cpumask_any_and_distribute(later_mask,
sched_domain_span(sd));
/*
* Last chance: if a CPU in both later_mask and the
* current sd span is valid, that becomes our
* choice. Of course, the latest possible CPU is
* already under consideration through later_mask.
*/
if (best_cpu < nr_cpu_ids) {
rcu_read_unlock();
return best_cpu;
}
}
}
rcu_read_unlock();
/*
* At this point, all our guesses failed, we just return
* 'something', and let the caller sort the things out.
*/
if (this_cpu != -1)
return this_cpu;
cpu = cpumask_any_distribute(later_mask);
if (cpu < nr_cpu_ids)
return cpu;
return -1;
}
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
struct rq *later_rq = NULL;
int tries;
int cpu;
for (tries = 0; tries < DL_MAX_TRIES; tries++) {
cpu = find_later_rq(task);
if ((cpu == -1) || (cpu == rq->cpu))
break;
later_rq = cpu_rq(cpu);
if (!dl_task_is_earliest_deadline(task, later_rq)) {
/*
* Target rq has tasks of equal or earlier deadline,
* retrying does not release any lock and is unlikely
* to yield a different result.
*/
later_rq = NULL;
break;
}
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
task_on_cpu(rq, task) ||
!dl_task(task) ||
is_migration_disabled(task) ||
!task_on_rq_queued(task))) {
double_unlock_balance(rq, later_rq);
later_rq = NULL;
break;
}
}
/*
* If the rq we found has no -deadline task, or
* its earliest one has a later deadline than our
* task, the rq is a good one.
*/
if (dl_task_is_earliest_deadline(task, later_rq))
break;
/* Otherwise we try again. */
double_unlock_balance(rq, later_rq);
later_rq = NULL;
}
return later_rq;
}
static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
struct task_struct *p;
if (!has_pushable_dl_tasks(rq))
return NULL;
p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
WARN_ON_ONCE(rq->cpu != task_cpu(p));
WARN_ON_ONCE(task_current(rq, p));
WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
WARN_ON_ONCE(!task_on_rq_queued(p));
WARN_ON_ONCE(!dl_task(p));
return p;
}
/*
* See if the non running -deadline tasks on this rq
* can be sent to some other CPU where they can preempt
* and start executing.
*/
static int push_dl_task(struct rq *rq)
{
struct task_struct *next_task;
struct rq *later_rq;
int ret = 0;
if (!rq->dl.overloaded)
return 0;
next_task = pick_next_pushable_dl_task(rq);
if (!next_task)
return 0;
retry:
/*
* If next_task preempts rq->curr, and rq->curr
* can move away, it makes sense to just reschedule
* without going further in pushing next_task.
*/
if (dl_task(rq->curr) &&
dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
rq->curr->nr_cpus_allowed > 1) {
resched_curr(rq);
return 0;
}
if (is_migration_disabled(next_task))
return 0;
if (WARN_ON(next_task == rq->curr))
return 0;
/* We might release rq lock */
get_task_struct(next_task);
/* Will lock the rq it'll find */
later_rq = find_lock_later_rq(next_task, rq);
if (!later_rq) {
struct task_struct *task;
/*
* We must check all this again, since
* find_lock_later_rq releases rq->lock and it is
* then possible that next_task has migrated.
*/
task = pick_next_pushable_dl_task(rq);
if (task == next_task) {
/*
* The task is still there. We don't try
* again, some other CPU will pull it when ready.
*/
goto out;
}
if (!task)
/* No more tasks */
goto out;
put_task_struct(next_task);
next_task = task;
goto retry;
}
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, later_rq->cpu);
activate_task(later_rq, next_task, 0);
ret = 1;
resched_curr(later_rq);
double_unlock_balance(rq, later_rq);
out:
put_task_struct(next_task);
return ret;
}
static void push_dl_tasks(struct rq *rq)
{
/* push_dl_task() will return true if it moved a -deadline task */
while (push_dl_task(rq))
;
}
static void pull_dl_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
struct task_struct *p, *push_task;
bool resched = false;
struct rq *src_rq;
u64 dmin = LONG_MAX;
if (likely(!dl_overloaded(this_rq)))
return;
/*
* Match the barrier from dl_set_overloaded; this guarantees that if we
* see overloaded we must also see the dlo_mask bit.
*/
smp_rmb();
for_each_cpu(cpu, this_rq->rd->dlo_mask) {
if (this_cpu == cpu)
continue;
src_rq = cpu_rq(cpu);
/*
* It looks racy, and it is! However, as in sched_rt.c,
* we are fine with this.
*/
if (this_rq->dl.dl_nr_running &&
dl_time_before(this_rq->dl.earliest_dl.curr,
src_rq->dl.earliest_dl.next))
continue;
/* Might drop this_rq->lock */
push_task = NULL;
double_lock_balance(this_rq, src_rq);
/*
* If there are no more pullable tasks on the
* rq, we're done with it.
*/
if (src_rq->dl.dl_nr_running <= 1)
goto skip;
p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
/*
* We found a task to be pulled if:
* - it preempts our current (if there's one),
* - it will preempt the last one we pulled (if any).
*/
if (p && dl_time_before(p->dl.deadline, dmin) &&
dl_task_is_earliest_deadline(p, this_rq)) {
WARN_ON(p == src_rq->curr);
WARN_ON(!task_on_rq_queued(p));
/*
* Then we pull iff p has actually an earlier
* deadline than the current task of its runqueue.
*/
if (dl_time_before(p->dl.deadline,
src_rq->curr->dl.deadline))
goto skip;
if (is_migration_disabled(p)) {
push_task = get_push_task(src_rq);
} else {
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
dmin = p->dl.deadline;
resched = true;
}
/* Is there any other task even earlier? */
}
skip:
double_unlock_balance(this_rq, src_rq);
if (push_task) {
raw_spin_rq_unlock(this_rq);
stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
push_task, &src_rq->push_work);
raw_spin_rq_lock(this_rq);
}
}
if (resched)
resched_curr(this_rq);
}
/*
* Since the task is not running and a reschedule is not going to happen
* anytime soon on its runqueue, we try pushing it away now.
*/
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
if (!task_on_cpu(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
p->nr_cpus_allowed > 1 &&
dl_task(rq->curr) &&
(rq->curr->nr_cpus_allowed < 2 ||
!dl_entity_preempt(&p->dl, &rq->curr->dl))) {
push_dl_tasks(rq);
}
}
static void set_cpus_allowed_dl(struct task_struct *p,
struct affinity_context *ctx)
{
struct root_domain *src_rd;
struct rq *rq;
WARN_ON_ONCE(!dl_task(p));
rq = task_rq(p);
src_rd = rq->rd;
/*
* Migrating a SCHED_DEADLINE task between exclusive
* cpusets (different root_domains) entails a bandwidth
* update. We already made space for us in the destination
* domain (see cpuset_can_attach()).
*/
if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
struct dl_bw *src_dl_b;
src_dl_b = dl_bw_of(cpu_of(rq));
/*
* We now free resources of the root_domain we are migrating
* off. In the worst case, sched_setattr() may temporarily fail
* until we complete the update.
*/
raw_spin_lock(&src_dl_b->lock);
__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
raw_spin_unlock(&src_dl_b->lock);
}
set_cpus_allowed_common(p, ctx);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_set_overload(rq);
cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
if (rq->dl.dl_nr_running > 0)
cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
}
/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_clear_overload(rq);
cpudl_clear(&rq->rd->cpudl, rq->cpu);
cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}
void __init init_sched_dl_class(void)
{
unsigned int i;
for_each_possible_cpu(i)
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
GFP_KERNEL, cpu_to_node(i));
}
void dl_add_task_root_domain(struct task_struct *p)
{
struct rq_flags rf;
struct rq *rq;
struct dl_bw *dl_b;
raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
if (!dl_task(p)) {
raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
return;
}
rq = __task_rq_lock(p, &rf);
dl_b = &rq->rd->dl_bw;
raw_spin_lock(&dl_b->lock);
__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
raw_spin_unlock(&dl_b->lock);
task_rq_unlock(rq, p, &rf);
}
void dl_clear_root_domain(struct root_domain *rd)
{
unsigned long flags;
raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
rd->dl_bw.total_bw = 0;
raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
}
#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
/*
* task_non_contending() can start the "inactive timer" (if the 0-lag
* time is in the future). If the task switches back to dl before
* the "inactive timer" fires, it can continue to consume its current
* runtime using its current deadline. If it stays outside of
* SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
* will reset the task parameters.
*/
if (task_on_rq_queued(p) && p->dl.dl_runtime)
task_non_contending(p);
/*
* In case a task is setscheduled out from SCHED_DEADLINE we need to
* keep track of that on its cpuset (for correct bandwidth tracking).
*/
dec_dl_tasks_cs(p);
if (!task_on_rq_queued(p)) {
/*
* Inactive timer is armed. However, p is leaving DEADLINE and
* might migrate away from this rq while continuing to run on
* some other class. We need to remove its contribution from
* this rq running_bw now, or sub_rq_bw (below) will complain.
*/
if (p->dl.dl_non_contending)
sub_running_bw(&p->dl, &rq->dl);
sub_rq_bw(&p->dl, &rq->dl);
}
/*
* We cannot use inactive_task_timer() to invoke sub_running_bw()
* at the 0-lag time, because the task could have been migrated
* while it was SCHED_OTHER in the meantime.
*/
if (p->dl.dl_non_contending)
p->dl.dl_non_contending = 0;
/*
* Since this might be the only -deadline task on the rq,
* this is the right place to try to pull some other one
* from an overloaded CPU, if any.
*/
if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
return;
deadline_queue_pull_task(rq);
}
/*
* When switching to -deadline, we may overload the rq, then
* we try to push someone off, if possible.
*/
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
/*
* In case a task is setscheduled to SCHED_DEADLINE we need to keep
* track of that on its cpuset (for correct bandwidth tracking).
*/
inc_dl_tasks_cs(p);
/* If p is not queued we will update its parameters at next wakeup. */
if (!task_on_rq_queued(p)) {
add_rq_bw(&p->dl, &rq->dl);
return;
}
if (rq->curr != p) {
#ifdef CONFIG_SMP
if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
deadline_queue_push_tasks(rq);
#endif
if (dl_task(rq->curr))
check_preempt_curr_dl(rq, p, 0);
else
resched_curr(rq);
} else {
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
}
}
/*
* If the scheduling parameters of a -deadline task changed,
* a push or pull operation might be needed.
*/
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
int oldprio)
{
if (!task_on_rq_queued(p))
return;
#ifdef CONFIG_SMP
/*
* This might be too much, but unfortunately
* we don't have the old deadline value, and
* we can't argue if the task is increasing
* or lowering its prio, so...
*/
if (!rq->dl.overloaded)
deadline_queue_pull_task(rq);
if (task_current(rq, p)) {
/*
* If we now have an earlier deadline task than p,
* then reschedule, provided p is still on this
* runqueue.
*/
if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
resched_curr(rq);
} else {
/*
* Current may not be deadline in case p was throttled but we
* have just replenished it (e.g. rt_mutex_setprio()).
*
* Otherwise, if p was given an earlier deadline, reschedule.
*/
if (!dl_task(rq->curr) ||
dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
resched_curr(rq);
}
#else
/*
* We don't know if p has an earlier or later deadline, so let's blindly
* set a (maybe not needed) rescheduling point.
*/
resched_curr(rq);
#endif
}
#ifdef CONFIG_SCHED_CORE
static int task_is_throttled_dl(struct task_struct *p, int cpu)
{
return p->dl.dl_throttled;
}
#endif
DEFINE_SCHED_CLASS(dl) = {
.enqueue_task = enqueue_task_dl,
.dequeue_task = dequeue_task_dl,
.yield_task = yield_task_dl,
.check_preempt_curr = check_preempt_curr_dl,
.pick_next_task = pick_next_task_dl,
.put_prev_task = put_prev_task_dl,
.set_next_task = set_next_task_dl,
#ifdef CONFIG_SMP
.balance = balance_dl,
.pick_task = pick_task_dl,
.select_task_rq = select_task_rq_dl,
.migrate_task_rq = migrate_task_rq_dl,
.set_cpus_allowed = set_cpus_allowed_dl,
.rq_online = rq_online_dl,
.rq_offline = rq_offline_dl,
.task_woken = task_woken_dl,
.find_lock_rq = find_lock_later_rq,
#endif
.task_tick = task_tick_dl,
.task_fork = task_fork_dl,
.prio_changed = prio_changed_dl,
.switched_from = switched_from_dl,
.switched_to = switched_to_dl,
.update_curr = update_curr_dl,
#ifdef CONFIG_SCHED_CORE
.task_is_throttled = task_is_throttled_dl,
#endif
};
/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
static u64 dl_generation;
int sched_dl_global_validate(void)
{
u64 runtime = global_rt_runtime();
u64 period = global_rt_period();
u64 new_bw = to_ratio(period, runtime);
u64 gen = ++dl_generation;
struct dl_bw *dl_b;
int cpu, cpus, ret = 0;
unsigned long flags;
/*
* Here we want to check that the bandwidth is not being set to
* a value smaller than the currently allocated bandwidth in
* any of the root_domains.
*/
for_each_possible_cpu(cpu) {
rcu_read_lock_sched();
if (dl_bw_visited(cpu, gen))
goto next;
dl_b = dl_bw_of(cpu);
cpus = dl_bw_cpus(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
if (new_bw * cpus < dl_b->total_bw)
ret = -EBUSY;
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
next:
rcu_read_unlock_sched();
if (ret)
break;
}
return ret;
}
static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
{
if (global_rt_runtime() == RUNTIME_INF) {
dl_rq->bw_ratio = 1 << RATIO_SHIFT;
dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
} else {
dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
dl_rq->max_bw = dl_rq->extra_bw =
to_ratio(global_rt_period(), global_rt_runtime());
}
}
void sched_dl_do_global(void)
{
u64 new_bw = -1;
u64 gen = ++dl_generation;
struct dl_bw *dl_b;
int cpu;
unsigned long flags;
if (global_rt_runtime() != RUNTIME_INF)
new_bw = to_ratio(global_rt_period(), global_rt_runtime());
for_each_possible_cpu(cpu) {
rcu_read_lock_sched();
if (dl_bw_visited(cpu, gen)) {
rcu_read_unlock_sched();
continue;
}
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
dl_b->bw = new_bw;
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
}
}
/*
* We must be sure that accepting a new task (or allowing changing the
* parameters of an existing one) is consistent with the bandwidth
* constraints. If yes, this function also accordingly updates the currently
* allocated bandwidth to reflect the new situation.
*
* This function is called while holding p's rq->lock.
*/
int sched_dl_overflow(struct task_struct *p, int policy,
const struct sched_attr *attr)
{
u64 period = attr->sched_period ?: attr->sched_deadline;
u64 runtime = attr->sched_runtime;
u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
int cpus, err = -1, cpu = task_cpu(p);
struct dl_bw *dl_b = dl_bw_of(cpu);
unsigned long cap;
if (attr->sched_flags & SCHED_FLAG_SUGOV)
return 0;
/* !deadline task may carry old deadline bandwidth */
if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
return 0;
/*
* Whether a task enters, leaves, or stays -deadline but changes
* its parameters, we may need to update the total allocated
* bandwidth of the container accordingly.
*/
raw_spin_lock(&dl_b->lock);
cpus = dl_bw_cpus(cpu);
cap = dl_bw_capacity(cpu);
if (dl_policy(policy) && !task_has_dl_policy(p) &&
!__dl_overflow(dl_b, cap, 0, new_bw)) {
if (hrtimer_active(&p->dl.inactive_timer))
__dl_sub(dl_b, p->dl.dl_bw, cpus);
__dl_add(dl_b, new_bw, cpus);
err = 0;
} else if (dl_policy(policy) && task_has_dl_policy(p) &&
!__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
/*
* XXX this is slightly incorrect: when the task
* utilization decreases, we should delay the total
* utilization change until the task's 0-lag point.
* But this would require to set the task's "inactive
* timer" when the task is not inactive.
*/
__dl_sub(dl_b, p->dl.dl_bw, cpus);
__dl_add(dl_b, new_bw, cpus);
dl_change_utilization(p, new_bw);
err = 0;
} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
/*
* Do not decrease the total deadline utilization here,
* switched_from_dl() will take care to do it at the correct
* (0-lag) time.
*/
err = 0;
}
raw_spin_unlock(&dl_b->lock);
return err;
}
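/*
 * A worked admission example for the checks above, with made-up numbers.
 * With BW_SHIFT = 20, a task asking for runtime = 10ms every period = 100ms
 * requests:
 *
 *	new_bw = to_ratio(100ms, 10ms) ~= 104858	(0.1 << BW_SHIFT)
 *
 * On a root_domain spanning two full-capacity CPUs, cap is
 * 2 << SCHED_CAPACITY_SHIFT and the __dl_overflow() test boils down
 * (roughly) to checking whether
 *
 *	total_bw - old_bw + new_bw  >  dl_b->bw scaled by cap
 *
 * so with the default 95% RT limit the request is admitted as long as the
 * reservations already in place leave at least 0.1 of the ~1.9 CPUs' worth
 * of deadline bandwidth available.
 */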
/*
* This function initializes the sched_dl_entity of a task that is
* becoming SCHED_DEADLINE.
*
* Only the static values are considered here, the actual runtime and the
* absolute deadline will be properly calculated when the task is enqueued
* for the first time with its new policy.
*/
void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
struct sched_dl_entity *dl_se = &p->dl;
dl_se->dl_runtime = attr->sched_runtime;
dl_se->dl_deadline = attr->sched_deadline;
dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
}
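/*
 * For example (illustrative numbers): a constrained task with
 * runtime = 5ms, deadline = 20ms, period = 100ms gets
 *
 *	dl_bw      = to_ratio(100ms, 5ms) = 0.05 << BW_SHIFT
 *	dl_density = to_ratio(20ms, 5ms)  = 0.25 << BW_SHIFT
 *
 * dl_bw drives bandwidth admission over the period, while dl_density is
 * the per-deadline utilization used, e.g., when deciding whether the
 * remaining runtime still fits before the current deadline.
 */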
void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
{
struct sched_dl_entity *dl_se = &p->dl;
attr->sched_priority = p->rt_priority;
attr->sched_runtime = dl_se->dl_runtime;
attr->sched_deadline = dl_se->dl_deadline;
attr->sched_period = dl_se->dl_period;
attr->sched_flags &= ~SCHED_DL_FLAGS;
attr->sched_flags |= dl_se->flags;
}
/*
* This function validates the new parameters of a -deadline task.
* We ask that the deadline be non-zero and greater than or equal
* to the runtime, and that the period be either zero or greater
* than or equal to the deadline. Furthermore, we have to be sure that
* user parameters are above the internal resolution of 1us (we
* check sched_runtime only since it is always the smaller one) and
* below 2^63 ns (we have to check both sched_deadline and
* sched_period, as the latter can be zero).
*/
bool __checkparam_dl(const struct sched_attr *attr)
{
u64 period, max, min;
/* special dl tasks don't actually use any parameter */
if (attr->sched_flags & SCHED_FLAG_SUGOV)
return true;
/* deadline != 0 */
if (attr->sched_deadline == 0)
return false;
/*
* Since we truncate DL_SCALE bits, make sure we're at least
* that big.
*/
if (attr->sched_runtime < (1ULL << DL_SCALE))
return false;
/*
* Since we use the MSB for wrap-around and sign issues, make
* sure it's not set (mind that period can be equal to zero).
*/
if (attr->sched_deadline & (1ULL << 63) ||
attr->sched_period & (1ULL << 63))
return false;
period = attr->sched_period;
if (!period)
period = attr->sched_deadline;
/* runtime <= deadline <= period (if period != 0) */
if (period < attr->sched_deadline ||
attr->sched_deadline < attr->sched_runtime)
return false;
max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
if (period < min || period > max)
return false;
return true;
}
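/*
 * Examples of how the checks above play out (illustrative values, assuming
 * the periods fall within the sched_dl_period_min/max sysctl bounds):
 *
 *	runtime = 10ms, deadline = 30ms, period = 100ms  -> OK
 *	runtime = 10ms, deadline = 30ms, period = 0      -> OK (period := 30ms)
 *	runtime = 40ms, deadline = 30ms, period = 100ms  -> rejected (runtime > deadline)
 *	runtime = 10ms, deadline = 0,    period = 100ms  -> rejected (zero deadline)
 *	runtime = 500ns, deadline = 30ms, period = 100ms -> rejected (below 1 << DL_SCALE)
 */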
/*
* This function clears the sched_dl_entity static params.
*/
void __dl_clear_params(struct task_struct *p)
{
struct sched_dl_entity *dl_se = &p->dl;
dl_se->dl_runtime = 0;
dl_se->dl_deadline = 0;
dl_se->dl_period = 0;
dl_se->flags = 0;
dl_se->dl_bw = 0;
dl_se->dl_density = 0;
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
dl_se->dl_overrun = 0;
#ifdef CONFIG_RT_MUTEXES
dl_se->pi_se = dl_se;
#endif
}
bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
{
struct sched_dl_entity *dl_se = &p->dl;
if (dl_se->dl_runtime != attr->sched_runtime ||
dl_se->dl_deadline != attr->sched_deadline ||
dl_se->dl_period != attr->sched_period ||
dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
return true;
return false;
}
#ifdef CONFIG_SMP
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
const struct cpumask *trial)
{
unsigned long flags, cap;
struct dl_bw *cur_dl_b;
int ret = 1;
rcu_read_lock_sched();
cur_dl_b = dl_bw_of(cpumask_any(cur));
cap = __dl_bw_capacity(trial);
raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
if (__dl_overflow(cur_dl_b, cap, 0, 0))
ret = 0;
raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
rcu_read_unlock_sched();
return ret;
}
enum dl_bw_request {
dl_bw_req_check_overflow = 0,
dl_bw_req_alloc,
dl_bw_req_free
};
static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
{
unsigned long flags;
struct dl_bw *dl_b;
bool overflow = false;
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
if (req == dl_bw_req_free) {
__dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
} else {
unsigned long cap = dl_bw_capacity(cpu);
overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
if (req == dl_bw_req_alloc && !overflow) {
/*
* We reserve space in the destination
* root_domain, as we can't fail after this point.
* We will free resources in the source root_domain
* later on (see set_cpus_allowed_dl()).
*/
__dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
}
}
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
return overflow ? -EBUSY : 0;
}
int dl_bw_check_overflow(int cpu)
{
return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
}
int dl_bw_alloc(int cpu, u64 dl_bw)
{
return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
}
void dl_bw_free(int cpu, u64 dl_bw)
{
dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
}
#endif
#ifdef CONFIG_SCHED_DEBUG
void print_dl_stats(struct seq_file *m, int cpu)
{
print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */
| linux-master | kernel/sched/deadline.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* kernel/sched/cpudeadline.c
*
* Global CPU deadline management
*
* Author: Juri Lelli <[email protected]>
*/
static inline int parent(int i)
{
return (i - 1) >> 1;
}
static inline int left_child(int i)
{
return (i << 1) + 1;
}
static inline int right_child(int i)
{
return (i << 1) + 2;
}
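/*
 * The heap lives implicitly in the elements[] array; with cp->size == 5
 * the tree is
 *
 *	         [0]
 *	        /   \
 *	     [1]     [2]
 *	    /   \
 *	  [3]   [4]
 *
 * so parent(4) == 1, left_child(1) == 3 and right_child(1) == 4, and
 * elements[0] always holds the CPU whose earliest deadline is the latest.
 */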
static void cpudl_heapify_down(struct cpudl *cp, int idx)
{
int l, r, largest;
int orig_cpu = cp->elements[idx].cpu;
u64 orig_dl = cp->elements[idx].dl;
if (left_child(idx) >= cp->size)
return;
/* adapted from lib/prio_heap.c */
while (1) {
u64 largest_dl;
l = left_child(idx);
r = right_child(idx);
largest = idx;
largest_dl = orig_dl;
if ((l < cp->size) && dl_time_before(orig_dl,
cp->elements[l].dl)) {
largest = l;
largest_dl = cp->elements[l].dl;
}
if ((r < cp->size) && dl_time_before(largest_dl,
cp->elements[r].dl))
largest = r;
if (largest == idx)
break;
/* pull largest child onto idx */
cp->elements[idx].cpu = cp->elements[largest].cpu;
cp->elements[idx].dl = cp->elements[largest].dl;
cp->elements[cp->elements[idx].cpu].idx = idx;
idx = largest;
}
/* actual push down of saved original values orig_* */
cp->elements[idx].cpu = orig_cpu;
cp->elements[idx].dl = orig_dl;
cp->elements[cp->elements[idx].cpu].idx = idx;
}
static void cpudl_heapify_up(struct cpudl *cp, int idx)
{
int p;
int orig_cpu = cp->elements[idx].cpu;
u64 orig_dl = cp->elements[idx].dl;
if (idx == 0)
return;
do {
p = parent(idx);
if (dl_time_before(orig_dl, cp->elements[p].dl))
break;
/* pull parent onto idx */
cp->elements[idx].cpu = cp->elements[p].cpu;
cp->elements[idx].dl = cp->elements[p].dl;
cp->elements[cp->elements[idx].cpu].idx = idx;
idx = p;
} while (idx != 0);
/* actual push up of saved original values orig_* */
cp->elements[idx].cpu = orig_cpu;
cp->elements[idx].dl = orig_dl;
cp->elements[cp->elements[idx].cpu].idx = idx;
}
static void cpudl_heapify(struct cpudl *cp, int idx)
{
if (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
cp->elements[idx].dl))
cpudl_heapify_up(cp, idx);
else
cpudl_heapify_down(cp, idx);
}
static inline int cpudl_maximum(struct cpudl *cp)
{
return cp->elements[0].cpu;
}
/*
* cpudl_find - find the best (later-dl) CPU in the system
* @cp: the cpudl max-heap context
* @p: the task
* @later_mask: a mask to fill in with the selected CPUs (or NULL)
*
* Returns: int - 1 if suitable CPUs were found, 0 otherwise
*/
int cpudl_find(struct cpudl *cp, struct task_struct *p,
struct cpumask *later_mask)
{
const struct sched_dl_entity *dl_se = &p->dl;
if (later_mask &&
cpumask_and(later_mask, cp->free_cpus, &p->cpus_mask)) {
unsigned long cap, max_cap = 0;
int cpu, max_cpu = -1;
if (!sched_asym_cpucap_active())
return 1;
/* Ensure the capacity of the CPUs fits the task. */
for_each_cpu(cpu, later_mask) {
if (!dl_task_fits_capacity(p, cpu)) {
cpumask_clear_cpu(cpu, later_mask);
cap = capacity_orig_of(cpu);
if (cap > max_cap ||
(cpu == task_cpu(p) && cap == max_cap)) {
max_cap = cap;
max_cpu = cpu;
}
}
}
if (cpumask_empty(later_mask))
cpumask_set_cpu(max_cpu, later_mask);
return 1;
} else {
int best_cpu = cpudl_maximum(cp);
WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
if (cpumask_test_cpu(best_cpu, &p->cpus_mask) &&
dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
if (later_mask)
cpumask_set_cpu(best_cpu, later_mask);
return 1;
}
}
return 0;
}
/*
* cpudl_clear - remove a CPU from the cpudl max-heap
* @cp: the cpudl max-heap context
* @cpu: the target CPU
*
* Notes: assumes cpu_rq(cpu)->lock is locked
*
* Returns: (void)
*/
void cpudl_clear(struct cpudl *cp, int cpu)
{
int old_idx, new_cpu;
unsigned long flags;
WARN_ON(!cpu_present(cpu));
raw_spin_lock_irqsave(&cp->lock, flags);
old_idx = cp->elements[cpu].idx;
if (old_idx == IDX_INVALID) {
/*
* Nothing to remove if old_idx was invalid.
* This could happen if rq_offline_dl() is
* called for a CPU with no -dl tasks running.
*/
} else {
new_cpu = cp->elements[cp->size - 1].cpu;
cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
cp->elements[old_idx].cpu = new_cpu;
cp->size--;
cp->elements[new_cpu].idx = old_idx;
cp->elements[cpu].idx = IDX_INVALID;
cpudl_heapify(cp, old_idx);
cpumask_set_cpu(cpu, cp->free_cpus);
}
raw_spin_unlock_irqrestore(&cp->lock, flags);
}
/*
* cpudl_set - update the cpudl max-heap
* @cp: the cpudl max-heap context
* @cpu: the target CPU
* @dl: the new earliest deadline for this CPU
*
* Notes: assumes cpu_rq(cpu)->lock is locked
*
* Returns: (void)
*/
void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
{
int old_idx;
unsigned long flags;
WARN_ON(!cpu_present(cpu));
raw_spin_lock_irqsave(&cp->lock, flags);
old_idx = cp->elements[cpu].idx;
if (old_idx == IDX_INVALID) {
int new_idx = cp->size++;
cp->elements[new_idx].dl = dl;
cp->elements[new_idx].cpu = cpu;
cp->elements[cpu].idx = new_idx;
cpudl_heapify_up(cp, new_idx);
cpumask_clear_cpu(cpu, cp->free_cpus);
} else {
cp->elements[old_idx].dl = dl;
cpudl_heapify(cp, old_idx);
}
raw_spin_unlock_irqrestore(&cp->lock, flags);
}
/*
* cpudl_set_freecpu - Set the cpudl.free_cpus
* @cp: the cpudl max-heap context
* @cpu: rd attached CPU
*/
void cpudl_set_freecpu(struct cpudl *cp, int cpu)
{
cpumask_set_cpu(cpu, cp->free_cpus);
}
/*
* cpudl_clear_freecpu - Clear the cpudl.free_cpus
* @cp: the cpudl max-heap context
* @cpu: rd attached CPU
*/
void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
{
cpumask_clear_cpu(cpu, cp->free_cpus);
}
/*
* cpudl_init - initialize the cpudl structure
* @cp: the cpudl max-heap context
*/
int cpudl_init(struct cpudl *cp)
{
int i;
raw_spin_lock_init(&cp->lock);
cp->size = 0;
cp->elements = kcalloc(nr_cpu_ids,
sizeof(struct cpudl_item),
GFP_KERNEL);
if (!cp->elements)
return -ENOMEM;
if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
kfree(cp->elements);
return -ENOMEM;
}
for_each_possible_cpu(i)
cp->elements[i].idx = IDX_INVALID;
return 0;
}
/*
* cpudl_cleanup - clean up the cpudl structure
* @cp: the cpudl max-heap context
*/
void cpudl_cleanup(struct cpudl *cp)
{
free_cpumask_var(cp->free_cpus);
kfree(cp->elements);
}
| linux-master | kernel/sched/cpudeadline.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Auto-group scheduling implementation:
*/
unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_autogroup_sysctls[] = {
{
.procname = "sched_autogroup_enabled",
.data = &sysctl_sched_autogroup_enabled,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{}
};
static void __init sched_autogroup_sysctl_init(void)
{
register_sysctl_init("kernel", sched_autogroup_sysctls);
}
#else
#define sched_autogroup_sysctl_init() do { } while (0)
#endif
void __init autogroup_init(struct task_struct *init_task)
{
autogroup_default.tg = &root_task_group;
kref_init(&autogroup_default.kref);
init_rwsem(&autogroup_default.lock);
init_task->signal->autogroup = &autogroup_default;
sched_autogroup_sysctl_init();
}
void autogroup_free(struct task_group *tg)
{
kfree(tg->autogroup);
}
static inline void autogroup_destroy(struct kref *kref)
{
struct autogroup *ag = container_of(kref, struct autogroup, kref);
#ifdef CONFIG_RT_GROUP_SCHED
/* We've redirected RT tasks to the root task group... */
ag->tg->rt_se = NULL;
ag->tg->rt_rq = NULL;
#endif
sched_release_group(ag->tg);
sched_destroy_group(ag->tg);
}
static inline void autogroup_kref_put(struct autogroup *ag)
{
kref_put(&ag->kref, autogroup_destroy);
}
static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
{
kref_get(&ag->kref);
return ag;
}
static inline struct autogroup *autogroup_task_get(struct task_struct *p)
{
struct autogroup *ag;
unsigned long flags;
if (!lock_task_sighand(p, &flags))
return autogroup_kref_get(&autogroup_default);
ag = autogroup_kref_get(p->signal->autogroup);
unlock_task_sighand(p, &flags);
return ag;
}
static inline struct autogroup *autogroup_create(void)
{
struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
struct task_group *tg;
if (!ag)
goto out_fail;
tg = sched_create_group(&root_task_group);
if (IS_ERR(tg))
goto out_free;
kref_init(&ag->kref);
init_rwsem(&ag->lock);
ag->id = atomic_inc_return(&autogroup_seq_nr);
ag->tg = tg;
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Autogroup RT tasks are redirected to the root task group
* so we don't have to move tasks around upon policy change,
* or flail around trying to allocate bandwidth on the fly.
* A bandwidth exception in __sched_setscheduler() allows
* the policy change to proceed.
*/
free_rt_sched_group(tg);
tg->rt_se = root_task_group.rt_se;
tg->rt_rq = root_task_group.rt_rq;
#endif
tg->autogroup = ag;
sched_online_group(tg, &root_task_group);
return ag;
out_free:
kfree(ag);
out_fail:
if (printk_ratelimit()) {
printk(KERN_WARNING "autogroup_create: %s failure.\n",
ag ? "sched_create_group()" : "kzalloc()");
}
return autogroup_kref_get(&autogroup_default);
}
bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
if (tg != &root_task_group)
return false;
/*
* If we race with autogroup_move_group() the caller can use the old
* value of signal->autogroup but in this case sched_move_task() will
* be called again before autogroup_kref_put().
*
* However, there is no way sched_autogroup_exit_task() could tell us
* to avoid autogroup->tg, so we abuse the PF_EXITING flag for this case.
*/
if (p->flags & PF_EXITING)
return false;
return true;
}
void sched_autogroup_exit_task(struct task_struct *p)
{
/*
* We are going to call exit_notify() and autogroup_move_group() can't
* see this thread after that: we can no longer use signal->autogroup.
* See the PF_EXITING check in task_wants_autogroup().
*/
sched_move_task(p);
}
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
struct autogroup *prev;
struct task_struct *t;
unsigned long flags;
if (WARN_ON_ONCE(!lock_task_sighand(p, &flags)))
return;
prev = p->signal->autogroup;
if (prev == ag) {
unlock_task_sighand(p, &flags);
return;
}
p->signal->autogroup = autogroup_kref_get(ag);
/*
* We can't avoid sched_move_task() after we changed signal->autogroup,
* this process can already run with task_group() == prev->tg or we can
* race with cgroup code which can read autogroup = prev under rq->lock.
* In the latter case for_each_thread() can not miss a migrating thread,
* cpu_cgroup_attach() must not be possible after cgroup_exit() and it
* can't be removed from thread list, we hold ->siglock.
*
* If an exiting thread was already removed from thread list we rely on
* sched_autogroup_exit_task().
*/
for_each_thread(p, t)
sched_move_task(t);
unlock_task_sighand(p, &flags);
autogroup_kref_put(prev);
}
/* Allocates GFP_KERNEL, cannot be called under any spinlock: */
void sched_autogroup_create_attach(struct task_struct *p)
{
struct autogroup *ag = autogroup_create();
autogroup_move_group(p, ag);
/* Drop extra reference added by autogroup_create(): */
autogroup_kref_put(ag);
}
EXPORT_SYMBOL(sched_autogroup_create_attach);
/* Cannot be called under siglock. Currently has no users: */
void sched_autogroup_detach(struct task_struct *p)
{
autogroup_move_group(p, &autogroup_default);
}
EXPORT_SYMBOL(sched_autogroup_detach);
void sched_autogroup_fork(struct signal_struct *sig)
{
sig->autogroup = autogroup_task_get(current);
}
void sched_autogroup_exit(struct signal_struct *sig)
{
autogroup_kref_put(sig->autogroup);
}
static int __init setup_autogroup(char *str)
{
sysctl_sched_autogroup_enabled = 0;
return 1;
}
__setup("noautogroup", setup_autogroup);
#ifdef CONFIG_PROC_FS
int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
{
static unsigned long next = INITIAL_JIFFIES;
struct autogroup *ag;
unsigned long shares;
int err, idx;
if (nice < MIN_NICE || nice > MAX_NICE)
return -EINVAL;
err = security_task_setnice(current, nice);
if (err)
return err;
if (nice < 0 && !can_nice(current, nice))
return -EPERM;
/* This is a heavy operation, taking global locks... */
if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
return -EAGAIN;
next = HZ / 10 + jiffies;
ag = autogroup_task_get(p);
idx = array_index_nospec(nice + 20, 40);
shares = scale_load(sched_prio_to_weight[idx]);
down_write(&ag->lock);
err = sched_group_set_shares(ag->tg, shares);
if (!err)
ag->nice = nice;
up_write(&ag->lock);
autogroup_kref_put(ag);
return err;
}
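/*
 * For example, nice 0 maps to index 20 and a weight of 1024, so an autogroup
 * at the default nice level competes like a single normal task, while nice 19
 * maps to weight 15, shrinking the group's CPU share accordingly (values from
 * sched_prio_to_weight[]).
 */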
void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
struct autogroup *ag = autogroup_task_get(p);
if (!task_group_is_autogroup(ag->tg))
goto out;
down_read(&ag->lock);
seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
up_read(&ag->lock);
out:
autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */
int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
if (!task_group_is_autogroup(tg))
return 0;
return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
}
| linux-master | kernel/sched/autogroup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Scheduler topology setup/handling methods
*/
#include <linux/bsearch.h>
DEFINE_MUTEX(sched_domains_mutex);
/* Protected by sched_domains_mutex: */
static cpumask_var_t sched_domains_tmpmask;
static cpumask_var_t sched_domains_tmpmask2;
#ifdef CONFIG_SCHED_DEBUG
static int __init sched_debug_setup(char *str)
{
sched_debug_verbose = true;
return 0;
}
early_param("sched_verbose", sched_debug_setup);
static inline bool sched_debug(void)
{
return sched_debug_verbose;
}
#define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name },
const struct sd_flag_debug sd_flag_debug[] = {
#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
struct cpumask *groupmask)
{
struct sched_group *group = sd->groups;
unsigned long flags = sd->flags;
unsigned int idx;
cpumask_clear(groupmask);
printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
printk(KERN_CONT "span=%*pbl level=%s\n",
cpumask_pr_args(sched_domain_span(sd)), sd->name);
if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
}
if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
}
for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
unsigned int flag = BIT(idx);
unsigned int meta_flags = sd_flag_debug[idx].meta_flags;
if ((meta_flags & SDF_SHARED_CHILD) && sd->child &&
!(sd->child->flags & flag))
printk(KERN_ERR "ERROR: flag %s set here but not in child\n",
sd_flag_debug[idx].name);
if ((meta_flags & SDF_SHARED_PARENT) && sd->parent &&
!(sd->parent->flags & flag))
printk(KERN_ERR "ERROR: flag %s set here but not in parent\n",
sd_flag_debug[idx].name);
}
printk(KERN_DEBUG "%*s groups:", level + 1, "");
do {
if (!group) {
printk("\n");
printk(KERN_ERR "ERROR: group is NULL\n");
break;
}
if (cpumask_empty(sched_group_span(group))) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: empty group\n");
break;
}
if (!(sd->flags & SD_OVERLAP) &&
cpumask_intersects(groupmask, sched_group_span(group))) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: repeated CPUs\n");
break;
}
cpumask_or(groupmask, groupmask, sched_group_span(group));
printk(KERN_CONT " %d:{ span=%*pbl",
group->sgc->id,
cpumask_pr_args(sched_group_span(group)));
if ((sd->flags & SD_OVERLAP) &&
!cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
printk(KERN_CONT " mask=%*pbl",
cpumask_pr_args(group_balance_mask(group)));
}
if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
printk(KERN_CONT " cap=%lu", group->sgc->capacity);
if (group == sd->groups && sd->child &&
!cpumask_equal(sched_domain_span(sd->child),
sched_group_span(group))) {
printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
}
printk(KERN_CONT " }");
group = group->next;
if (group != sd->groups)
printk(KERN_CONT ",");
} while (group != sd->groups);
printk(KERN_CONT "\n");
if (!cpumask_equal(sched_domain_span(sd), groupmask))
printk(KERN_ERR "ERROR: groups don't span domain->span\n");
if (sd->parent &&
!cpumask_subset(groupmask, sched_domain_span(sd->parent)))
printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
return 0;
}
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
int level = 0;
if (!sched_debug_verbose)
return;
if (!sd) {
printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
return;
}
printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
for (;;) {
if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
break;
level++;
sd = sd->parent;
if (!sd)
break;
}
}
#else /* !CONFIG_SCHED_DEBUG */
# define sched_debug_verbose 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
return false;
}
#endif /* CONFIG_SCHED_DEBUG */
/* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */
#define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) |
static const unsigned int SD_DEGENERATE_GROUPS_MASK =
#include <linux/sched/sd_flags.h>
0;
#undef SD_FLAG
static int sd_degenerate(struct sched_domain *sd)
{
if (cpumask_weight(sched_domain_span(sd)) == 1)
return 1;
/* Following flags need at least 2 groups */
if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) &&
(sd->groups != sd->groups->next))
return 0;
/* Following flags don't use groups */
if (sd->flags & (SD_WAKE_AFFINE))
return 0;
return 1;
}
static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
unsigned long cflags = sd->flags, pflags = parent->flags;
if (sd_degenerate(parent))
return 1;
if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
return 0;
/* Flags needing groups don't count if only 1 group in parent */
if (parent->groups == parent->groups->next)
pflags &= ~SD_DEGENERATE_GROUPS_MASK;
if (~cflags & pflags)
return 0;
return 1;
}
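/*
* Example (sketch): on a single-socket part where the DIE span equals the
* MC span, the DIE domain ends up with a single group; the
* SD_DEGENERATE_GROUPS_MASK bits are then masked out of pflags, the
* remaining parent flags are a subset of the child's, and DIE is folded
* away by cpu_attach_domain().
*/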
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
DEFINE_STATIC_KEY_FALSE(sched_energy_present);
static unsigned int sysctl_sched_energy_aware = 1;
static DEFINE_MUTEX(sched_energy_mutex);
static bool sched_energy_update;
void rebuild_sched_domains_energy(void)
{
mutex_lock(&sched_energy_mutex);
sched_energy_update = true;
rebuild_sched_domains();
sched_energy_update = false;
mutex_unlock(&sched_energy_mutex);
}
#ifdef CONFIG_PROC_SYSCTL
static int sched_energy_aware_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret, state;
if (write && !capable(CAP_SYS_ADMIN))
return -EPERM;
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!ret && write) {
state = static_branch_unlikely(&sched_energy_present);
if (state != sysctl_sched_energy_aware)
rebuild_sched_domains_energy();
}
return ret;
}
static struct ctl_table sched_energy_aware_sysctls[] = {
{
.procname = "sched_energy_aware",
.data = &sysctl_sched_energy_aware,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_energy_aware_handler,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{}
};
static int __init sched_energy_aware_sysctl_init(void)
{
register_sysctl_init("kernel", sched_energy_aware_sysctls);
return 0;
}
late_initcall(sched_energy_aware_sysctl_init);
#endif
static void free_pd(struct perf_domain *pd)
{
struct perf_domain *tmp;
while (pd) {
tmp = pd->next;
kfree(pd);
pd = tmp;
}
}
static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
{
while (pd) {
if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
return pd;
pd = pd->next;
}
return NULL;
}
static struct perf_domain *pd_init(int cpu)
{
struct em_perf_domain *obj = em_cpu_get(cpu);
struct perf_domain *pd;
if (!obj) {
if (sched_debug())
pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
return NULL;
}
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return NULL;
pd->em_pd = obj;
return pd;
}
static void perf_domain_debug(const struct cpumask *cpu_map,
struct perf_domain *pd)
{
if (!sched_debug() || !pd)
return;
printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
while (pd) {
printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
cpumask_first(perf_domain_span(pd)),
cpumask_pr_args(perf_domain_span(pd)),
em_pd_nr_perf_states(pd->em_pd));
pd = pd->next;
}
printk(KERN_CONT "\n");
}
static void destroy_perf_domain_rcu(struct rcu_head *rp)
{
struct perf_domain *pd;
pd = container_of(rp, struct perf_domain, rcu);
free_pd(pd);
}
static void sched_energy_set(bool has_eas)
{
if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
if (sched_debug())
pr_info("%s: stopping EAS\n", __func__);
static_branch_disable_cpuslocked(&sched_energy_present);
} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
if (sched_debug())
pr_info("%s: starting EAS\n", __func__);
static_branch_enable_cpuslocked(&sched_energy_present);
}
}
/*
* EAS can be used on a root domain if it meets all the following conditions:
* 1. an Energy Model (EM) is available;
* 2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
* 3. no SMT is detected;
* 4. the EM complexity is low enough to keep scheduling overheads low;
* 5. schedutil is driving the frequency of all CPUs of the rd;
* 6. frequency invariance support is present.
*
* The complexity of the Energy Model is defined as:
*
* C = nr_pd * (nr_cpus + nr_ps)
*
* with parameters defined as:
* - nr_pd: the number of performance domains
* - nr_cpus: the number of CPUs
* - nr_ps: the sum of the number of performance states of all performance
* domains (for example, on a system with 2 performance domains,
* with 10 performance states each, nr_ps = 2 * 10 = 20).
*
* It is generally not a good idea to use such a model in the wake-up path on
* very complex platforms because of the associated scheduling overheads. The
* arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
* with per-CPU DVFS and fewer than 8 performance states each, for example.
*/
#define EM_MAX_COMPLEXITY 2048
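/*
* Worked example of the bound above: with per-CPU DVFS on 16 CPUs and 7
* performance states each, nr_pd = 16, nr_cpus = 16 and nr_ps = 16 * 7 = 112,
* so C = 16 * (16 + 112) = 2048, which still passes the '>' check in
* build_perf_domains() below; 8 states each gives C = 16 * (16 + 128) = 2304
* and is rejected.
*/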
extern struct cpufreq_governor schedutil_gov;
static bool build_perf_domains(const struct cpumask *cpu_map)
{
int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
struct perf_domain *pd = NULL, *tmp;
int cpu = cpumask_first(cpu_map);
struct root_domain *rd = cpu_rq(cpu)->rd;
struct cpufreq_policy *policy;
struct cpufreq_governor *gov;
if (!sysctl_sched_energy_aware)
goto free;
/* EAS is enabled for asymmetric CPU capacity topologies. */
if (!per_cpu(sd_asym_cpucapacity, cpu)) {
if (sched_debug()) {
pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
cpumask_pr_args(cpu_map));
}
goto free;
}
/* EAS definitely does *not* handle SMT */
if (sched_smt_active()) {
pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n",
cpumask_pr_args(cpu_map));
goto free;
}
if (!arch_scale_freq_invariant()) {
if (sched_debug()) {
pr_warn("rd %*pbl: Disabling EAS: frequency-invariant load tracking not yet supported",
cpumask_pr_args(cpu_map));
}
goto free;
}
for_each_cpu(i, cpu_map) {
/* Skip already covered CPUs. */
if (find_pd(pd, i))
continue;
/* Do not attempt EAS if schedutil is not being used. */
policy = cpufreq_cpu_get(i);
if (!policy)
goto free;
gov = policy->governor;
cpufreq_cpu_put(policy);
if (gov != &schedutil_gov) {
if (rd->pd)
pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
cpumask_pr_args(cpu_map));
goto free;
}
/* Create the new pd and add it to the local list. */
tmp = pd_init(i);
if (!tmp)
goto free;
tmp->next = pd;
pd = tmp;
/*
* Count performance domains and performance states for the
* complexity check.
*/
nr_pd++;
nr_ps += em_pd_nr_perf_states(pd->em_pd);
}
/* Bail out if the Energy Model complexity is too high. */
if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) {
WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
cpumask_pr_args(cpu_map));
goto free;
}
perf_domain_debug(cpu_map, pd);
/* Attach the new list of performance domains to the root domain. */
tmp = rd->pd;
rcu_assign_pointer(rd->pd, pd);
if (tmp)
call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
return !!pd;
free:
free_pd(pd);
tmp = rd->pd;
rcu_assign_pointer(rd->pd, NULL);
if (tmp)
call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
return false;
}
#else
static void free_pd(struct perf_domain *pd) { }
#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
static void free_rootdomain(struct rcu_head *rcu)
{
struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
cpupri_cleanup(&rd->cpupri);
cpudl_cleanup(&rd->cpudl);
free_cpumask_var(rd->dlo_mask);
free_cpumask_var(rd->rto_mask);
free_cpumask_var(rd->online);
free_cpumask_var(rd->span);
free_pd(rd->pd);
kfree(rd);
}
void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
struct root_domain *old_rd = NULL;
struct rq_flags rf;
rq_lock_irqsave(rq, &rf);
if (rq->rd) {
old_rd = rq->rd;
if (cpumask_test_cpu(rq->cpu, old_rd->online))
set_rq_offline(rq);
cpumask_clear_cpu(rq->cpu, old_rd->span);
/*
* If we don't want to free the old_rd yet, then
* set old_rd to NULL to skip the freeing later
* in this function:
*/
if (!atomic_dec_and_test(&old_rd->refcount))
old_rd = NULL;
}
atomic_inc(&rd->refcount);
rq->rd = rd;
cpumask_set_cpu(rq->cpu, rd->span);
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
rq_unlock_irqrestore(rq, &rf);
if (old_rd)
call_rcu(&old_rd->rcu, free_rootdomain);
}
void sched_get_rd(struct root_domain *rd)
{
atomic_inc(&rd->refcount);
}
void sched_put_rd(struct root_domain *rd)
{
if (!atomic_dec_and_test(&rd->refcount))
return;
call_rcu(&rd->rcu, free_rootdomain);
}
static int init_rootdomain(struct root_domain *rd)
{
if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
goto out;
if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
goto free_span;
if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
goto free_online;
if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
goto free_dlo_mask;
#ifdef HAVE_RT_PUSH_IPI
rd->rto_cpu = -1;
raw_spin_lock_init(&rd->rto_lock);
rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
#endif
rd->visit_gen = 0;
init_dl_bw(&rd->dl_bw);
if (cpudl_init(&rd->cpudl) != 0)
goto free_rto_mask;
if (cpupri_init(&rd->cpupri) != 0)
goto free_cpudl;
return 0;
free_cpudl:
cpudl_cleanup(&rd->cpudl);
free_rto_mask:
free_cpumask_var(rd->rto_mask);
free_dlo_mask:
free_cpumask_var(rd->dlo_mask);
free_online:
free_cpumask_var(rd->online);
free_span:
free_cpumask_var(rd->span);
out:
return -ENOMEM;
}
/*
* By default the system creates a single root-domain with all CPUs as
* members (mimicking the global state we have today).
*/
struct root_domain def_root_domain;
void __init init_defrootdomain(void)
{
init_rootdomain(&def_root_domain);
atomic_set(&def_root_domain.refcount, 1);
}
static struct root_domain *alloc_rootdomain(void)
{
struct root_domain *rd;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return NULL;
if (init_rootdomain(rd) != 0) {
kfree(rd);
return NULL;
}
return rd;
}
static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
struct sched_group *tmp, *first;
if (!sg)
return;
first = sg;
do {
tmp = sg->next;
if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
kfree(sg->sgc);
if (atomic_dec_and_test(&sg->ref))
kfree(sg);
sg = tmp;
} while (sg != first);
}
static void destroy_sched_domain(struct sched_domain *sd)
{
/*
* A normal sched domain may have multiple group references; an
* overlapping domain, having private groups, has only one. Iterate,
* dropping group/capacity references and freeing where none remain.
*/
free_sched_groups(sd->groups, 1);
if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
kfree(sd->shared);
kfree(sd);
}
static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
while (sd) {
struct sched_domain *parent = sd->parent;
destroy_sched_domain(sd);
sd = parent;
}
}
static void destroy_sched_domains(struct sched_domain *sd)
{
if (sd)
call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}
/*
* Keep a special pointer to the highest sched_domain that has
* SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
* allows us to avoid some pointer chasing in select_idle_sibling().
*
* Also keep a unique ID per domain (we use the first CPU number in
* the cpumask of the domain), this allows us to quickly tell if
* two CPUs are in the same cache domain, see cpus_share_cache().
*/
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
static void update_top_cache_domain(int cpu)
{
struct sched_domain_shared *sds = NULL;
struct sched_domain *sd;
int id = cpu;
int size = 1;
sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
if (sd) {
id = cpumask_first(sched_domain_span(sd));
size = cpumask_weight(sched_domain_span(sd));
sds = sd->shared;
}
rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
per_cpu(sd_llc_size, cpu) = size;
per_cpu(sd_llc_id, cpu) = id;
rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
sd = lowest_flag_domain(cpu, SD_NUMA);
rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}
/*
* Attach the domain 'sd' to 'cpu' as its base domain. Callers must
* hold the hotplug lock.
*/
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
/* Remove the sched domains which do not contribute to scheduling. */
for (tmp = sd; tmp; ) {
struct sched_domain *parent = tmp->parent;
if (!parent)
break;
if (sd_parent_degenerate(tmp, parent)) {
tmp->parent = parent->parent;
if (parent->parent) {
parent->parent->child = tmp;
parent->parent->groups->flags = tmp->flags;
}
/*
* Transfer SD_PREFER_SIBLING down in case of a
* degenerate parent; the spans match for this
* so the property transfers.
*/
if (parent->flags & SD_PREFER_SIBLING)
tmp->flags |= SD_PREFER_SIBLING;
destroy_sched_domain(parent);
} else
tmp = tmp->parent;
}
if (sd && sd_degenerate(sd)) {
tmp = sd;
sd = sd->parent;
destroy_sched_domain(tmp);
if (sd) {
struct sched_group *sg = sd->groups;
/*
* sched groups hold the flags of the child sched
* domain for convenience. Clear such flags since
* the child is being destroyed.
*/
do {
sg->flags = 0;
} while (sg != sd->groups);
sd->child = NULL;
}
}
sched_domain_debug(sd, cpu);
rq_attach_root(rq, rd);
tmp = rq->sd;
rcu_assign_pointer(rq->sd, sd);
dirty_sched_domain_sysctl(cpu);
destroy_sched_domains(tmp);
update_top_cache_domain(cpu);
}
struct s_data {
struct sched_domain * __percpu *sd;
struct root_domain *rd;
};
enum s_alloc {
sa_rootdomain,
sa_sd,
sa_sd_storage,
sa_none,
};
/*
* Return the canonical balance CPU for this group; this is the first CPU
* of this group that's also in the balance mask.
*
* The balance mask contains all those CPUs that could actually end up at
* this group. See build_balance_mask().
*
* Also see should_we_balance().
*/
int group_balance_cpu(struct sched_group *sg)
{
return cpumask_first(group_balance_mask(sg));
}
/*
* NUMA topology (first read the regular topology blurb below)
*
* Given a node-distance table, for example:
*
* node 0 1 2 3
* 0: 10 20 30 20
* 1: 20 10 20 30
* 2: 30 20 10 20
* 3: 20 30 20 10
*
* which represents a 4 node ring topology like:
*
* 0 ----- 1
* | |
* | |
* | |
* 3 ----- 2
*
* We want to construct domains and groups to represent this. The way we go
* about doing this is to build the domains on 'hops'. For each NUMA level we
* construct the mask of all nodes reachable in @level hops.
*
* For the above NUMA topology that gives 3 levels:
*
* NUMA-2 0-3 0-3 0-3 0-3
* groups: {0-1,3},{1-3} {0-2},{0,2-3} {1-3},{0-1,3} {0,2-3},{0-2}
*
* NUMA-1 0-1,3 0-2 1-3 0,2-3
* groups: {0},{1},{3} {0},{1},{2} {1},{2},{3} {0},{2},{3}
*
* NUMA-0 0 1 2 3
*
*
* As can be seen; things don't nicely line up as with the regular topology.
* When we iterate a domain in child domain chunks, some nodes can be
* represented multiple times -- hence the "overlap" naming for this part of
* the topology.
*
* In order to minimize this overlap, we only build enough groups to cover the
* domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
*
* Because:
*
* - the first group of each domain is its child domain; this
* gets us the first 0-1,3
* - the only uncovered node is 2, whose child domain is 1-3.
*
* However, because of the overlap, computing a unique CPU for each group is
* more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
* groups include the CPUs of Node-0, while those CPUs would not in fact ever
* end up at those groups (they would end up in group: 0-1,3).
*
* To correct this we have to introduce the group balance mask. This mask
* will contain those CPUs in the group that can reach this group given the
* (child) domain tree.
*
* With this we can once again compute balance_cpu and sched_group_capacity
* relations.
*
* XXX include words on how balance_cpu is unique and therefore can be
* used for sched_group_capacity links.
*
*
* Another 'interesting' topology is:
*
* node 0 1 2 3
* 0: 10 20 20 30
* 1: 20 10 20 20
* 2: 20 20 10 20
* 3: 30 20 20 10
*
* Which looks a little like:
*
* 0 ----- 1
* | / |
* | / |
* | / |
* 2 ----- 3
*
* This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
* are not.
*
* This leads to a few particularly weird cases where the sched_domain's are
* not of the same number for each CPU. Consider:
*
* NUMA-2 0-3 0-3
* groups: {0-2},{1-3} {1-3},{0-2}
*
* NUMA-1 0-2 0-3 0-3 1-3
*
* NUMA-0 0 1 2 3
*
*/
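/*
* Worked example of the balance mask on the ring topology above: node 1's
* NUMA-2 group {0-2} contains node 0's CPUs, but those CPUs balance via
* node 0's own first group {0-1,3}. Only CPUs whose child (NUMA-1) domain
* spans exactly 0-2 -- i.e. node 1's CPUs -- end up in that group's
* balance mask; see build_balance_mask() below.
*/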
/*
* Build the balance mask; it contains only those CPUs that can arrive at this
* group and should be considered to continue balancing.
*
* We do this during the group creation pass; therefore the group information
* isn't complete yet. However, since each group represents a (child) domain,
* we can fully construct this using the sched_domain bits (which are already
* complete).
*/
static void
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
const struct cpumask *sg_span = sched_group_span(sg);
struct sd_data *sdd = sd->private;
struct sched_domain *sibling;
int i;
cpumask_clear(mask);
for_each_cpu(i, sg_span) {
sibling = *per_cpu_ptr(sdd->sd, i);
/*
* Can happen in the asymmetric case, where these siblings are
* unused. The mask will not be empty because those CPUs that
* do have the top domain _should_ span the domain.
*/
if (!sibling->child)
continue;
/* If we would not end up here, we can't continue from here */
if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
continue;
cpumask_set_cpu(i, mask);
}
/* We must not have empty masks here */
WARN_ON_ONCE(cpumask_empty(mask));
}
/*
* XXX: This creates per-node group entries; since the load-balancer will
* immediately access remote memory to construct this group's load-balance
* statistics, having the groups node-local is of dubious benefit.
*/
static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
struct sched_group *sg;
struct cpumask *sg_span;
sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, cpu_to_node(cpu));
if (!sg)
return NULL;
sg_span = sched_group_span(sg);
if (sd->child) {
cpumask_copy(sg_span, sched_domain_span(sd->child));
sg->flags = sd->child->flags;
} else {
cpumask_copy(sg_span, sched_domain_span(sd));
}
atomic_inc(&sg->ref);
return sg;
}
static void init_overlap_sched_group(struct sched_domain *sd,
struct sched_group *sg)
{
struct cpumask *mask = sched_domains_tmpmask2;
struct sd_data *sdd = sd->private;
struct cpumask *sg_span;
int cpu;
build_balance_mask(sd, sg, mask);
cpu = cpumask_first(mask);
sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
if (atomic_inc_return(&sg->sgc->ref) == 1)
cpumask_copy(group_balance_mask(sg), mask);
else
WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));
/*
* Initialize sgc->capacity such that even if we mess up the
* domains and no possible iteration will get us here, we won't
* die on a /0 trap.
*/
sg_span = sched_group_span(sg);
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
}
static struct sched_domain *
find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
{
/*
* The proper descendant would be the one whose child won't span out
* of sd.
*/
while (sibling->child &&
!cpumask_subset(sched_domain_span(sibling->child),
sched_domain_span(sd)))
sibling = sibling->child;
/*
* As we are referencing sgc across different topology levels, we need
* to go down to skip those sched_domains which don't contribute to
* scheduling because they will be degenerated in cpu_attach_domain().
*/
while (sibling->child &&
cpumask_equal(sched_domain_span(sibling->child),
sched_domain_span(sibling)))
sibling = sibling->child;
return sibling;
}
static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
struct sched_group *first = NULL, *last = NULL, *sg;
const struct cpumask *span = sched_domain_span(sd);
struct cpumask *covered = sched_domains_tmpmask;
struct sd_data *sdd = sd->private;
struct sched_domain *sibling;
int i;
cpumask_clear(covered);
for_each_cpu_wrap(i, span, cpu) {
struct cpumask *sg_span;
if (cpumask_test_cpu(i, covered))
continue;
sibling = *per_cpu_ptr(sdd->sd, i);
/*
* Asymmetric node setups can result in situations where the
* domain tree is of unequal depth, make sure to skip domains
* that already cover the entire range.
*
* In that case build_sched_domains() will have terminated the
* iteration early and our sibling sd spans will be empty.
* Domains should always include the CPU they're built on, so
* check that.
*/
if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
continue;
/*
* Usually we build the sched_group from the sibling's child sched_domain.
* But for machines whose NUMA diameter is 3 or above, we move to building
* the sched_group from the sibling's proper descendant's child domain,
* because the sibling's child sched_domain will span out of the
* sched_domain being built, as below.
*
* Smallest diameter=3 topology is:
*
* node 0 1 2 3
* 0: 10 20 30 40
* 1: 20 10 20 30
* 2: 30 20 10 20
* 3: 40 30 20 10
*
* 0 --- 1 --- 2 --- 3
*
* NUMA-3 0-3 N/A N/A 0-3
* groups: {0-2},{1-3} {1-3},{0-2}
*
* NUMA-2 0-2 0-3 0-3 1-3
* groups: {0-1},{1-3} {0-2},{2-3} {1-3},{0-1} {2-3},{0-2}
*
* NUMA-1 0-1 0-2 1-3 2-3
* groups: {0},{1} {1},{2},{0} {2},{3},{1} {3},{2}
*
* NUMA-0 0 1 2 3
*
* The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
* group span isn't a subset of the domain span.
*/
if (sibling->child &&
!cpumask_subset(sched_domain_span(sibling->child), span))
sibling = find_descended_sibling(sd, sibling);
sg = build_group_from_child_sched_domain(sibling, cpu);
if (!sg)
goto fail;
sg_span = sched_group_span(sg);
cpumask_or(covered, covered, sg_span);
init_overlap_sched_group(sibling, sg);
if (!first)
first = sg;
if (last)
last->next = sg;
last = sg;
last->next = first;
}
sd->groups = first;
return 0;
fail:
free_sched_groups(first, 0);
return -ENOMEM;
}
/*
* Package topology (also see the load-balance blurb in fair.c)
*
* The scheduler builds a tree structure to represent a number of important
* topology features. By default (default_topology[]) these include:
*
* - Simultaneous multithreading (SMT)
* - Multi-Core Cache (MC)
* - Package (DIE)
*
* Where the last one more or less denotes everything up to a NUMA node.
*
* The tree consists of 3 primary data structures:
*
* sched_domain -> sched_group -> sched_group_capacity
* ^ ^ ^ ^
* `-' `-'
*
* The sched_domains are per-CPU and have a two way link (parent & child) and
* denote the ever growing mask of CPUs belonging to that level of topology.
*
* Each sched_domain has a circular (double) linked list of sched_group's, each
* denoting the domains of the level below (or individual CPUs in case of the
* first domain level). The sched_group linked by a sched_domain includes the
* CPU of that sched_domain [*].
*
* Take for instance a 2 threaded, 2 core, 2 cache cluster part:
*
* CPU 0 1 2 3 4 5 6 7
*
* DIE [ ]
* MC [ ] [ ]
* SMT [ ] [ ] [ ] [ ]
*
* - or -
*
* DIE 0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
* MC 0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
* SMT 0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
*
* CPU 0 1 2 3 4 5 6 7
*
* One way to think about it is: sched_domain moves you up and down among these
* topology levels, while sched_group moves you sideways through it, at child
* domain granularity.
*
* sched_group_capacity ensures each unique sched_group has shared storage.
*
* There are two related construction problems, both of which require a CPU
* that uniquely identifies each group (for a given domain):
*
* - The first is the balance_cpu (see should_we_balance() and the
* load-balance blurb in fair.c); for each group we only want 1 CPU to
* continue balancing at a higher domain.
*
* - The second is the sched_group_capacity; we want all identical groups
* to share a single sched_group_capacity.
*
* These topologies are exclusive by construction; that is, it's impossible
* for an SMT thread to belong to multiple cores, and for cores to be part
* of multiple caches. There is a very clear and unique location for each
* CPU in the hierarchy.
*
* Therefore computing a unique CPU for each group is trivial (the iteration
* mask is redundant and set to all 1s; all CPUs in a group will end up at
* _that_ group), so we can simply pick the first CPU in each group.
*
*
* [*] in other words, the first group of each domain is its child domain.
*/
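/*
* Worked example on the 8-CPU part above: building the MC groups for CPU 2
* wraps the span {0-3} starting at 2, giving the list {2-3} -> {0-1} ->
* back to {2-3}; the first group {2-3} is CPU 2's own SMT child domain,
* as per [*].
*/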
static struct sched_group *get_group(int cpu, struct sd_data *sdd)
{
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
struct sched_domain *child = sd->child;
struct sched_group *sg;
bool already_visited;
if (child)
cpu = cpumask_first(sched_domain_span(child));
sg = *per_cpu_ptr(sdd->sg, cpu);
sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
/* Increase refcounts for claim_allocations: */
already_visited = atomic_inc_return(&sg->ref) > 1;
/* sgc visits should follow a similar trend as sg */
WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
/* If we have already visited that group, it's already initialized. */
if (already_visited)
return sg;
if (child) {
cpumask_copy(sched_group_span(sg), sched_domain_span(child));
cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
sg->flags = child->flags;
} else {
cpumask_set_cpu(cpu, sched_group_span(sg));
cpumask_set_cpu(cpu, group_balance_mask(sg));
}
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
return sg;
}
/*
* build_sched_groups will build a circular linked list of the groups
* covered by the given span, will set each group's ->cpumask correctly,
* and will initialize their ->sgc.
*
* Assumes the sched_domain tree is fully constructed
*/
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
struct sched_group *first = NULL, *last = NULL;
struct sd_data *sdd = sd->private;
const struct cpumask *span = sched_domain_span(sd);
struct cpumask *covered;
int i;
lockdep_assert_held(&sched_domains_mutex);
covered = sched_domains_tmpmask;
cpumask_clear(covered);
for_each_cpu_wrap(i, span, cpu) {
struct sched_group *sg;
if (cpumask_test_cpu(i, covered))
continue;
sg = get_group(i, sdd);
cpumask_or(covered, covered, sched_group_span(sg));
if (!first)
first = sg;
if (last)
last->next = sg;
last = sg;
}
last->next = first;
sd->groups = first;
return 0;
}
/*
* Initialize sched groups cpu_capacity.
*
* cpu_capacity indicates the capacity of a sched group, which is used while
* distributing the load between different sched groups in a sched domain.
* Typically the cpu_capacity of all the groups in a sched domain will be the
* same unless there are asymmetries in the topology. If there are
* asymmetries, the group having more cpu_capacity will pick up more load
* compared to the group having less cpu_capacity.
*/
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
struct sched_group *sg = sd->groups;
struct cpumask *mask = sched_domains_tmpmask2;
WARN_ON(!sg);
do {
int cpu, cores = 0, max_cpu = -1;
sg->group_weight = cpumask_weight(sched_group_span(sg));
cpumask_copy(mask, sched_group_span(sg));
for_each_cpu(cpu, mask) {
cores++;
#ifdef CONFIG_SCHED_SMT
cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
#endif
}
sg->cores = cores;
if (!(sd->flags & SD_ASYM_PACKING))
goto next;
for_each_cpu(cpu, sched_group_span(sg)) {
if (max_cpu < 0)
max_cpu = cpu;
else if (sched_asym_prefer(cpu, max_cpu))
max_cpu = cpu;
}
sg->asym_prefer_cpu = max_cpu;
next:
sg = sg->next;
} while (sg != sd->groups);
if (cpu != group_balance_cpu(sg))
return;
update_group_capacity(sd, cpu);
}
/*
* Asymmetric CPU capacity bits
*/
struct asym_cap_data {
struct list_head link;
unsigned long capacity;
unsigned long cpus[];
};
/*
* Set of available CPUs grouped by their corresponding capacities.
* Each list entry contains a CPU mask reflecting CPUs that share the same
* capacity.
* The lifespan of the data is unlimited.
*/
static LIST_HEAD(asym_cap_list);
#define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
/*
* Verify whether there is any CPU capacity asymmetry in a given sched domain.
* Provides sd_flags reflecting the asymmetry scope.
*/
static inline int
asym_cpu_capacity_classify(const struct cpumask *sd_span,
const struct cpumask *cpu_map)
{
struct asym_cap_data *entry;
int count = 0, miss = 0;
/*
* Count how many unique CPU capacities this domain spans across
* (compare the sched_domain CPU mask with the masks representing the
* available CPU capacities). Take into account CPUs that might be
* offline: skip those.
*/
list_for_each_entry(entry, &asym_cap_list, link) {
if (cpumask_intersects(sd_span, cpu_capacity_span(entry)))
++count;
else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry)))
++miss;
}
WARN_ON_ONCE(!count && !list_empty(&asym_cap_list));
/* No asymmetry detected */
if (count < 2)
return 0;
/* Some of the available CPU capacity values have not been detected */
if (miss)
return SD_ASYM_CPUCAPACITY;
/* Full asymmetry */
return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
}
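/*
* Example: with three capacity values {1024, 768, 512} in asym_cap_list,
* a domain spanning only the 1024- and 768-capacity CPUs while 512-capacity
* CPUs remain elsewhere in cpu_map yields count == 2, miss == 1, i.e. plain
* SD_ASYM_CPUCAPACITY; once the span covers all three capacities, the
* result is SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL.
*/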
static inline void asym_cpu_capacity_update_data(int cpu)
{
unsigned long capacity = arch_scale_cpu_capacity(cpu);
struct asym_cap_data *entry = NULL;
list_for_each_entry(entry, &asym_cap_list, link) {
if (capacity == entry->capacity)
goto done;
}
entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
return;
entry->capacity = capacity;
list_add(&entry->link, &asym_cap_list);
done:
__cpumask_set_cpu(cpu, cpu_capacity_span(entry));
}
/*
* Build up/update the list of CPUs grouped by their capacities.
* An update requires an explicit request to rebuild sched domains,
* with state indicating CPU topology changes.
*/
static void asym_cpu_capacity_scan(void)
{
struct asym_cap_data *entry, *next;
int cpu;
list_for_each_entry(entry, &asym_cap_list, link)
cpumask_clear(cpu_capacity_span(entry));
for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN))
asym_cpu_capacity_update_data(cpu);
list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
if (cpumask_empty(cpu_capacity_span(entry))) {
list_del(&entry->link);
kfree(entry);
}
}
/*
* Only one capacity value has been detected, i.e. this system is symmetric.
* No need to keep this data around.
*/
if (list_is_singular(&asym_cap_list)) {
entry = list_first_entry(&asym_cap_list, typeof(*entry), link);
list_del(&entry->link);
kfree(entry);
}
}
/*
* Initializers for sched domains.
* Non-inlined to reduce accumulated stack pressure in build_sched_domains()
*/
static int default_relax_domain_level = -1;
int sched_domain_level_max;
static int __init setup_relax_domain_level(char *str)
{
if (kstrtoint(str, 0, &default_relax_domain_level))
pr_warn("Unable to set relax_domain_level\n");
return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);
static void set_domain_attribute(struct sched_domain *sd,
struct sched_domain_attr *attr)
{
int request;
if (!attr || attr->relax_domain_level < 0) {
if (default_relax_domain_level < 0)
return;
request = default_relax_domain_level;
} else
request = attr->relax_domain_level;
if (sd->level > request) {
/* Turn off idle balance on this domain: */
sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
}
}
static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);
static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
const struct cpumask *cpu_map)
{
switch (what) {
case sa_rootdomain:
if (!atomic_read(&d->rd->refcount))
free_rootdomain(&d->rd->rcu);
fallthrough;
case sa_sd:
free_percpu(d->sd);
fallthrough;
case sa_sd_storage:
__sdt_free(cpu_map);
fallthrough;
case sa_none:
break;
}
}
static enum s_alloc
__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
{
memset(d, 0, sizeof(*d));
if (__sdt_alloc(cpu_map))
return sa_sd_storage;
d->sd = alloc_percpu(struct sched_domain *);
if (!d->sd)
return sa_sd_storage;
d->rd = alloc_rootdomain();
if (!d->rd)
return sa_sd;
return sa_rootdomain;
}
/*
* NULL the sd_data elements we've used to build the sched_domain and
* sched_group structure so that the subsequent __free_domain_allocs()
* will not free the data we're using.
*/
static void claim_allocations(int cpu, struct sched_domain *sd)
{
struct sd_data *sdd = sd->private;
WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
*per_cpu_ptr(sdd->sd, cpu) = NULL;
if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
*per_cpu_ptr(sdd->sds, cpu) = NULL;
if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
*per_cpu_ptr(sdd->sg, cpu) = NULL;
if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
*per_cpu_ptr(sdd->sgc, cpu) = NULL;
}
#ifdef CONFIG_NUMA
enum numa_topology_type sched_numa_topology_type;
static int sched_domains_numa_levels;
static int sched_domains_curr_level;
int sched_max_numa_distance;
static int *sched_domains_numa_distance;
static struct cpumask ***sched_domains_numa_masks;
#endif
/*
* SD_flags allowed in topology descriptions.
*
* These flags are purely descriptive of the topology and do not prescribe
* behaviour. Behaviour is artificial and mapped in the below sd_init()
* function:
*
* SD_SHARE_CPUCAPACITY - describes SMT topologies
* SD_SHARE_PKG_RESOURCES - describes shared caches
* SD_NUMA - describes NUMA topologies
*
* The odd one out, which besides describing the topology also prescribes
* the desired behaviour that goes along with it:
*
* SD_ASYM_PACKING - describes SMT quirks
*/
#define TOPOLOGY_SD_FLAGS \
(SD_SHARE_CPUCAPACITY | \
SD_SHARE_PKG_RESOURCES | \
SD_NUMA | \
SD_ASYM_PACKING)
static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl,
const struct cpumask *cpu_map,
struct sched_domain *child, int cpu)
{
struct sd_data *sdd = &tl->data;
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
int sd_id, sd_weight, sd_flags = 0;
struct cpumask *sd_span;
#ifdef CONFIG_NUMA
/*
* Ugly hack to pass state to sd_numa_mask()...
*/
sched_domains_curr_level = tl->numa_level;
#endif
sd_weight = cpumask_weight(tl->mask(cpu));
if (tl->sd_flags)
sd_flags = (*tl->sd_flags)();
if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
"wrong sd_flags in topology description\n"))
sd_flags &= TOPOLOGY_SD_FLAGS;
*sd = (struct sched_domain){
.min_interval = sd_weight,
.max_interval = 2*sd_weight,
.busy_factor = 16,
.imbalance_pct = 117,
.cache_nice_tries = 0,
.flags = 1*SD_BALANCE_NEWIDLE
| 1*SD_BALANCE_EXEC
| 1*SD_BALANCE_FORK
| 0*SD_BALANCE_WAKE
| 1*SD_WAKE_AFFINE
| 0*SD_SHARE_CPUCAPACITY
| 0*SD_SHARE_PKG_RESOURCES
| 0*SD_SERIALIZE
| 1*SD_PREFER_SIBLING
| 0*SD_NUMA
| sd_flags
,
.last_balance = jiffies,
.balance_interval = sd_weight,
.max_newidle_lb_cost = 0,
.last_decay_max_lb_cost = jiffies,
.child = child,
#ifdef CONFIG_SCHED_DEBUG
.name = tl->name,
#endif
};
sd_span = sched_domain_span(sd);
cpumask_and(sd_span, cpu_map, tl->mask(cpu));
sd_id = cpumask_first(sd_span);
sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) ==
(SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY),
"CPU capacity asymmetry not supported on SMT\n");
/*
* Convert topological properties into behaviour.
*/
/* Don't attempt to spread across CPUs of different capacities. */
if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
sd->child->flags &= ~SD_PREFER_SIBLING;
if (sd->flags & SD_SHARE_CPUCAPACITY) {
sd->imbalance_pct = 110;
} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
sd->imbalance_pct = 117;
sd->cache_nice_tries = 1;
#ifdef CONFIG_NUMA
} else if (sd->flags & SD_NUMA) {
sd->cache_nice_tries = 2;
sd->flags &= ~SD_PREFER_SIBLING;
sd->flags |= SD_SERIALIZE;
if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
sd->flags &= ~(SD_BALANCE_EXEC |
SD_BALANCE_FORK |
SD_WAKE_AFFINE);
}
#endif
} else {
sd->cache_nice_tries = 1;
}
/*
* For all levels sharing cache; connect a sched_domain_shared
* instance.
*/
if (sd->flags & SD_SHARE_PKG_RESOURCES) {
sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
atomic_inc(&sd->shared->ref);
atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
}
sd->private = sdd;
return sd;
}
/*
* Topology list, bottom-up.
*/
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_CLUSTER
{ cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
#endif
#ifdef CONFIG_SCHED_MC
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
{ NULL, },
};
static struct sched_domain_topology_level *sched_domain_topology =
default_topology;
static struct sched_domain_topology_level *sched_domain_topology_saved;
#define for_each_sd_topology(tl) \
for (tl = sched_domain_topology; tl->mask; tl++)
void __init set_sched_topology(struct sched_domain_topology_level *tl)
{
if (WARN_ON_ONCE(sched_smp_initialized))
return;
sched_domain_topology = tl;
sched_domain_topology_saved = NULL;
}
#ifdef CONFIG_NUMA
static const struct cpumask *sd_numa_mask(int cpu)
{
return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}
static void sched_numa_warn(const char *str)
{
static int done = false;
int i,j;
if (done)
return;
done = true;
printk(KERN_WARNING "ERROR: %s\n\n", str);
for (i = 0; i < nr_node_ids; i++) {
printk(KERN_WARNING " ");
for (j = 0; j < nr_node_ids; j++) {
if (!node_state(i, N_CPU) || !node_state(j, N_CPU))
printk(KERN_CONT "(%02d) ", node_distance(i,j));
else
printk(KERN_CONT " %02d ", node_distance(i,j));
}
printk(KERN_CONT "\n");
}
printk(KERN_WARNING "\n");
}
bool find_numa_distance(int distance)
{
bool found = false;
int i, *distances;
if (distance == node_distance(0, 0))
return true;
rcu_read_lock();
distances = rcu_dereference(sched_domains_numa_distance);
if (!distances)
goto unlock;
for (i = 0; i < sched_domains_numa_levels; i++) {
if (distances[i] == distance) {
found = true;
break;
}
}
unlock:
rcu_read_unlock();
return found;
}
#define for_each_cpu_node_but(n, nbut) \
for_each_node_state(n, N_CPU) \
if (n == nbut) \
continue; \
else
/*
* A system can have three types of NUMA topology:
* NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
* NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
* NUMA_BACKPLANE: nodes can reach other nodes through a backplane
*
* The difference between a glueless mesh topology and a backplane
* topology lies in whether communication between not directly
* connected nodes goes through intermediary nodes (where programs
* could run), or through backplane controllers. This affects
* placement of programs.
*
* The type of topology can be discerned with the following tests:
* - If the maximum distance between any nodes is 1 hop, the system
* is directly connected.
* - If for two nodes A and B, located N > 1 hops away from each other,
* there is an intermediary node C, which is < N hops away from both
* nodes A and B, the system is a glueless mesh.
*/
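/*
* Example: in the 4-node ring shown further up (distances 10/20/30), the
* maximum distance is 30 with three levels; nodes 0 and 2 are 30 apart
* while node 1 is 20 from both, so an intermediary exists and the type
* classifies as NUMA_GLUELESS_MESH.
*/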
static void init_numa_topology_type(int offline_node)
{
int a, b, c, n;
n = sched_max_numa_distance;
if (sched_domains_numa_levels <= 2) {
sched_numa_topology_type = NUMA_DIRECT;
return;
}
for_each_cpu_node_but(a, offline_node) {
for_each_cpu_node_but(b, offline_node) {
/* Find two nodes furthest removed from each other. */
if (node_distance(a, b) < n)
continue;
/* Is there an intermediary node between a and b? */
for_each_cpu_node_but(c, offline_node) {
if (node_distance(a, c) < n &&
node_distance(b, c) < n) {
sched_numa_topology_type =
NUMA_GLUELESS_MESH;
return;
}
}
sched_numa_topology_type = NUMA_BACKPLANE;
return;
}
}
pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n");
sched_numa_topology_type = NUMA_DIRECT;
}
#define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
void sched_init_numa(int offline_node)
{
struct sched_domain_topology_level *tl;
unsigned long *distance_map;
int nr_levels = 0;
int i, j;
int *distances;
struct cpumask ***masks;
/*
* O(nr_nodes^2) deduplicating selection sort -- in order to find the
* unique distances in the node_distance() table.
*/
distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
if (!distance_map)
return;
bitmap_zero(distance_map, NR_DISTANCE_VALUES);
for_each_cpu_node_but(i, offline_node) {
for_each_cpu_node_but(j, offline_node) {
int distance = node_distance(i, j);
if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
sched_numa_warn("Invalid distance value range");
bitmap_free(distance_map);
return;
}
bitmap_set(distance_map, distance, 1);
}
}
/*
* We can now figure out how many unique distance values there are and
* allocate memory accordingly.
*/
nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
if (!distances) {
bitmap_free(distance_map);
return;
}
for (i = 0, j = 0; i < nr_levels; i++, j++) {
j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
distances[i] = j;
}
rcu_assign_pointer(sched_domains_numa_distance, distances);
bitmap_free(distance_map);
/*
* 'nr_levels' contains the number of unique distances
*
* The sched_domains_numa_distance[] array includes the actual distance
* numbers.
*/
/*
* Here, we should temporarily reset sched_domains_numa_levels to 0.
* If it fails to allocate memory for the array sched_domains_numa_masks[][],
* the array will contain fewer than 'nr_levels' members. This could be
* dangerous when we use it to iterate the array sched_domains_numa_masks[][]
* in other functions.
*
* We reset it to 'nr_levels' at the end of this function.
*/
sched_domains_numa_levels = 0;
masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
if (!masks)
return;
/*
* Now for each level, construct a mask per node which contains all
* CPUs of nodes that are that many hops away from us.
*/
for (i = 0; i < nr_levels; i++) {
masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
if (!masks[i])
return;
for_each_cpu_node_but(j, offline_node) {
struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
int k;
if (!mask)
return;
masks[i][j] = mask;
for_each_cpu_node_but(k, offline_node) {
if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
sched_numa_warn("Node-distance not symmetric");
if (node_distance(j, k) > sched_domains_numa_distance[i])
continue;
cpumask_or(mask, mask, cpumask_of_node(k));
}
}
}
rcu_assign_pointer(sched_domains_numa_masks, masks);
/* Compute default topology size */
for (i = 0; sched_domain_topology[i].mask; i++);
tl = kzalloc((i + nr_levels + 1) *
sizeof(struct sched_domain_topology_level), GFP_KERNEL);
if (!tl)
return;
/*
* Copy the default topology bits..
*/
for (i = 0; sched_domain_topology[i].mask; i++)
tl[i] = sched_domain_topology[i];
/*
* Add the NUMA identity distance, aka single NODE.
*/
tl[i++] = (struct sched_domain_topology_level){
.mask = sd_numa_mask,
.numa_level = 0,
SD_INIT_NAME(NODE)
};
/*
* .. and append 'j' levels of NUMA goodness.
*/
for (j = 1; j < nr_levels; i++, j++) {
tl[i] = (struct sched_domain_topology_level){
.mask = sd_numa_mask,
.sd_flags = cpu_numa_flags,
.flags = SDTL_OVERLAP,
.numa_level = j,
SD_INIT_NAME(NUMA)
};
}
sched_domain_topology_saved = sched_domain_topology;
sched_domain_topology = tl;
sched_domains_numa_levels = nr_levels;
WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]);
init_numa_topology_type(offline_node);
}
static void sched_reset_numa(void)
{
int nr_levels, *distances;
struct cpumask ***masks;
nr_levels = sched_domains_numa_levels;
sched_domains_numa_levels = 0;
sched_max_numa_distance = 0;
sched_numa_topology_type = NUMA_DIRECT;
distances = sched_domains_numa_distance;
rcu_assign_pointer(sched_domains_numa_distance, NULL);
masks = sched_domains_numa_masks;
rcu_assign_pointer(sched_domains_numa_masks, NULL);
if (distances || masks) {
int i, j;
synchronize_rcu();
kfree(distances);
for (i = 0; i < nr_levels && masks; i++) {
if (!masks[i])
continue;
for_each_node(j)
kfree(masks[i][j]);
kfree(masks[i]);
}
kfree(masks);
}
if (sched_domain_topology_saved) {
kfree(sched_domain_topology);
sched_domain_topology = sched_domain_topology_saved;
sched_domain_topology_saved = NULL;
}
}
/*
* Call with hotplug lock held
*/
void sched_update_numa(int cpu, bool online)
{
int node;
node = cpu_to_node(cpu);
/*
* Scheduler NUMA topology is updated when the first CPU of a
* node is onlined or the last CPU of a node is offlined.
*/
if (cpumask_weight(cpumask_of_node(node)) != 1)
return;
sched_reset_numa();
sched_init_numa(online ? NUMA_NO_NODE : node);
}
void sched_domains_numa_masks_set(unsigned int cpu)
{
int node = cpu_to_node(cpu);
int i, j;
for (i = 0; i < sched_domains_numa_levels; i++) {
for (j = 0; j < nr_node_ids; j++) {
if (!node_state(j, N_CPU))
continue;
/* Set ourselves in the remote node's masks */
if (node_distance(j, node) <= sched_domains_numa_distance[i])
cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
}
}
}
void sched_domains_numa_masks_clear(unsigned int cpu)
{
int i, j;
for (i = 0; i < sched_domains_numa_levels; i++) {
for (j = 0; j < nr_node_ids; j++) {
if (sched_domains_numa_masks[i][j])
cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
}
}
}
/*
* sched_numa_find_closest() - given the NUMA topology, find the cpu
* closest to @cpu from @cpus.
* cpus: cpumask to find a cpu from
* cpu: cpu to be close to
*
* returns: cpu, or nr_cpu_ids when nothing found.
*/
int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
int i, j = cpu_to_node(cpu), found = nr_cpu_ids;
struct cpumask ***masks;
rcu_read_lock();
masks = rcu_dereference(sched_domains_numa_masks);
if (!masks)
goto unlock;
for (i = 0; i < sched_domains_numa_levels; i++) {
if (!masks[i][j])
break;
cpu = cpumask_any_and(cpus, masks[i][j]);
if (cpu < nr_cpu_ids) {
found = cpu;
break;
}
}
unlock:
rcu_read_unlock();
return found;
}
struct __cmp_key {
const struct cpumask *cpus;
struct cpumask ***masks;
int node;
int cpu;
int w;
};
static int hop_cmp(const void *a, const void *b)
{
struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b;
struct __cmp_key *k = (struct __cmp_key *)a;
if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu)
return 1;
if (b == k->masks) {
k->w = 0;
return 0;
}
prev_hop = *((struct cpumask ***)b - 1);
k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]);
if (k->w <= k->cpu)
return 0;
return -1;
}
/*
* sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth next cpu
* closest to @node from @cpus.
* cpus: cpumask to find a cpu from
* cpu: Nth cpu to find
* node: the node to measure distances from
*
* returns: cpu, or nr_cpu_ids when nothing found.
*/
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
{
struct __cmp_key k = { .cpus = cpus, .node = node, .cpu = cpu };
struct cpumask ***hop_masks;
int hop, ret = nr_cpu_ids;
rcu_read_lock();
k.masks = rcu_dereference(sched_domains_numa_masks);
if (!k.masks)
goto unlock;
hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp);
hop = hop_masks - k.masks;
ret = hop ?
cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :
cpumask_nth_and(cpu, cpus, k.masks[0][node]);
unlock:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
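/*
* Worked example for the bsearch above: two nodes of 4 CPUs each, @cpus
* covering all 8, node == 0, cpu == 5. Hop 0 (node 0's own CPUs) weighs
* 4 <= 5, so the search moves right; at hop 1 the cumulative weight is
* 8 > 5 with k.w == 4, so the result is the (5 - 4)th CPU of hop 1 that is
* not in hop 0, i.e. the second CPU of node 1.
*/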
/**
* sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
* @node
* @node: The node to count hops from.
* @hops: Include CPUs up to that many hops away. 0 means local node.
*
* Return: On success, a pointer to a cpumask of CPUs at most @hops away from
* @node, an error value otherwise.
*
* Requires rcu_read_lock() to be held. The returned cpumask is only valid
* within that read-side section; copy it if required beyond that.
*
* Note that not all hops are equal in distance; see sched_init_numa() for how
* distances and masks are handled.
* Also note that this is a reflection of sched_domains_numa_masks, which may change
* during the lifetime of the system (offline nodes are taken out of the masks).
*/
const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
{
struct cpumask ***masks;
if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
return ERR_PTR(-EINVAL);
masks = rcu_dereference(sched_domains_numa_masks);
if (!masks)
return ERR_PTR(-EBUSY);
return masks[hops][node];
}
EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
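/*
* Usage sketch, per the locking rule documented above:
*
*	rcu_read_lock();
*	mask = sched_numa_hop_mask(node, 1);
*	if (!IS_ERR(mask))
*		cpumask_copy(dst, mask);	/* copy out if needed later */
*	rcu_read_unlock();
*/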
#endif /* CONFIG_NUMA */
static int __sdt_alloc(const struct cpumask *cpu_map)
{
struct sched_domain_topology_level *tl;
int j;
for_each_sd_topology(tl) {
struct sd_data *sdd = &tl->data;
sdd->sd = alloc_percpu(struct sched_domain *);
if (!sdd->sd)
return -ENOMEM;
sdd->sds = alloc_percpu(struct sched_domain_shared *);
if (!sdd->sds)
return -ENOMEM;
sdd->sg = alloc_percpu(struct sched_group *);
if (!sdd->sg)
return -ENOMEM;
sdd->sgc = alloc_percpu(struct sched_group_capacity *);
if (!sdd->sgc)
return -ENOMEM;
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
struct sched_domain_shared *sds;
struct sched_group *sg;
struct sched_group_capacity *sgc;
sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sd)
return -ENOMEM;
*per_cpu_ptr(sdd->sd, j) = sd;
sds = kzalloc_node(sizeof(struct sched_domain_shared),
GFP_KERNEL, cpu_to_node(j));
if (!sds)
return -ENOMEM;
*per_cpu_ptr(sdd->sds, j) = sds;
sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sg)
return -ENOMEM;
sg->next = sg;
*per_cpu_ptr(sdd->sg, j) = sg;
sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sgc)
return -ENOMEM;
#ifdef CONFIG_SCHED_DEBUG
sgc->id = j;
#endif
*per_cpu_ptr(sdd->sgc, j) = sgc;
}
}
return 0;
}
static void __sdt_free(const struct cpumask *cpu_map)
{
struct sched_domain_topology_level *tl;
int j;
for_each_sd_topology(tl) {
struct sd_data *sdd = &tl->data;
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
if (sdd->sd) {
sd = *per_cpu_ptr(sdd->sd, j);
if (sd && (sd->flags & SD_OVERLAP))
free_sched_groups(sd->groups, 0);
kfree(*per_cpu_ptr(sdd->sd, j));
}
if (sdd->sds)
kfree(*per_cpu_ptr(sdd->sds, j));
if (sdd->sg)
kfree(*per_cpu_ptr(sdd->sg, j));
if (sdd->sgc)
kfree(*per_cpu_ptr(sdd->sgc, j));
}
free_percpu(sdd->sd);
sdd->sd = NULL;
free_percpu(sdd->sds);
sdd->sds = NULL;
free_percpu(sdd->sg);
sdd->sg = NULL;
free_percpu(sdd->sgc);
sdd->sgc = NULL;
}
}
static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
const struct cpumask *cpu_map, struct sched_domain_attr *attr,
struct sched_domain *child, int cpu)
{
struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
if (child) {
sd->level = child->level + 1;
sched_domain_level_max = max(sched_domain_level_max, sd->level);
child->parent = sd;
if (!cpumask_subset(sched_domain_span(child),
sched_domain_span(sd))) {
pr_err("BUG: arch topology borken\n");
#ifdef CONFIG_SCHED_DEBUG
pr_err(" the %s domain not a subset of the %s domain\n",
child->name, sd->name);
#endif
/* Fixup, ensure @sd has at least @child CPUs. */
cpumask_or(sched_domain_span(sd),
sched_domain_span(sd),
sched_domain_span(child));
}
}
set_domain_attribute(sd, attr);
return sd;
}
/*
* Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
* any two given CPUs at this (non-NUMA) topology level.
*/
static bool topology_span_sane(struct sched_domain_topology_level *tl,
const struct cpumask *cpu_map, int cpu)
{
int i;
/* NUMA levels are allowed to overlap */
if (tl->flags & SDTL_OVERLAP)
return true;
/*
* Non-NUMA levels cannot partially overlap - they must be either
* completely equal or completely disjoint. Otherwise we can end up
* breaking the sched_group lists - i.e. a later get_group() pass
* breaks the linking done for an earlier span.
*/
for_each_cpu(i, cpu_map) {
if (i == cpu)
continue;
/*
* We should 'and' all those masks with 'cpu_map' to exactly
* match the topology we're about to build, but that can only
* remove CPUs, which only lessens our ability to detect
* overlaps
*/
if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
cpumask_intersects(tl->mask(cpu), tl->mask(i)))
return false;
}
return true;
}
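/*
* Example of the failure mode guarded against above: if tl->mask(0) were
* {0-3} while tl->mask(2) were {2-5}, the two masks would intersect without
* being equal, a later get_group() pass would relink groups built for the
* earlier span, and the level is therefore rejected.
*/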
/*
* Build sched domains for a given set of CPUs and attach the sched domains
* to the individual CPUs
*/
static int
build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
{
enum s_alloc alloc_state = sa_none;
struct sched_domain *sd;
struct s_data d;
struct rq *rq = NULL;
int i, ret = -ENOMEM;
bool has_asym = false;
if (WARN_ON(cpumask_empty(cpu_map)))
goto error;
alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
if (alloc_state != sa_rootdomain)
goto error;
/* Set up domains for CPUs specified by the cpu_map: */
for_each_cpu(i, cpu_map) {
struct sched_domain_topology_level *tl;
sd = NULL;
for_each_sd_topology(tl) {
if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
goto error;
sd = build_sched_domain(tl, cpu_map, attr, sd, i);
has_asym |= sd->flags & SD_ASYM_CPUCAPACITY;
if (tl == sched_domain_topology)
*per_cpu_ptr(d.sd, i) = sd;
if (tl->flags & SDTL_OVERLAP)
sd->flags |= SD_OVERLAP;
if (cpumask_equal(cpu_map, sched_domain_span(sd)))
break;
}
}
/* Build the groups for the domains */
for_each_cpu(i, cpu_map) {
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
sd->span_weight = cpumask_weight(sched_domain_span(sd));
if (sd->flags & SD_OVERLAP) {
if (build_overlap_sched_groups(sd, i))
goto error;
} else {
if (build_sched_groups(sd, i))
goto error;
}
}
}
/*
* Calculate an allowed NUMA imbalance such that LLCs do not get
* imbalanced.
*/
for_each_cpu(i, cpu_map) {
unsigned int imb = 0;
unsigned int imb_span = 1;
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
struct sched_domain *child = sd->child;
if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child &&
(child->flags & SD_SHARE_PKG_RESOURCES)) {
struct sched_domain __rcu *top_p;
unsigned int nr_llcs;
/*
* For a single LLC per node, allow an
* imbalance up to 12.5% of the node. This is an
* arbitrary cutoff based on two factors -- SMT and
* memory channels. For SMT-2, the intent is to
* avoid premature sharing of HT resources but
* SMT-4 or SMT-8 *may* benefit from a different
* cutoff. For memory channels, this is a very
* rough estimate of how many channels may be
* active and is based on recent CPUs with
* many cores.
*
* For multiple LLCs, allow an imbalance
* until multiple tasks would share an LLC
* on one node while LLCs on another node
* remain idle. This assumes that there are
* enough logical CPUs per LLC to avoid SMT
* factors and that there is a correlation
* between LLCs and memory channels.
*/
nr_llcs = sd->span_weight / child->span_weight;
if (nr_llcs == 1)
imb = sd->span_weight >> 3;
else
imb = nr_llcs;
imb = max(1U, imb);
sd->imb_numa_nr = imb;
/* Set span based on the first NUMA domain. */
top_p = sd->parent;
while (top_p && !(top_p->flags & SD_NUMA)) {
top_p = top_p->parent;
}
imb_span = top_p ? top_p->span_weight : sd->span_weight;
} else {
int factor = max(1U, (sd->span_weight / imb_span));
sd->imb_numa_nr = imb * factor;
}
}
}
/* Calculate CPU capacity for physical packages and nodes */
for (i = nr_cpumask_bits-1; i >= 0; i--) {
if (!cpumask_test_cpu(i, cpu_map))
continue;
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
claim_allocations(i, sd);
init_sched_groups_capacity(i, sd);
}
}
/* Attach the domains */
rcu_read_lock();
for_each_cpu(i, cpu_map) {
rq = cpu_rq(i);
sd = *per_cpu_ptr(d.sd, i);
/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
cpu_attach_domain(sd, d.rd, i);
}
rcu_read_unlock();
if (has_asym)
static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
if (rq && sched_debug_verbose) {
pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
}
ret = 0;
error:
__free_domain_allocs(&d, alloc_state, cpu_map);
return ret;
}
/* Current sched domains: */
static cpumask_var_t *doms_cur;
/* Number of sched domains in 'doms_cur': */
static int ndoms_cur;
/* Attributes of custom domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
/*
* Special case: If a kmalloc() of a doms_cur partition (array of
* cpumask) fails, then fallback to a single sched domain,
* as determined by the single cpumask fallback_doms.
*/
static cpumask_var_t fallback_doms;
/*
* arch_update_cpu_topology lets virtualized architectures update the
* CPU core maps. It is supposed to return 1 if the topology changed
* or 0 if it stayed the same.
*/
int __weak arch_update_cpu_topology(void)
{
return 0;
}
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
int i;
cpumask_var_t *doms;
doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
if (!doms)
return NULL;
for (i = 0; i < ndoms; i++) {
if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
free_sched_domains(doms, i);
return NULL;
}
}
return doms;
}
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
unsigned int i;
for (i = 0; i < ndoms; i++)
free_cpumask_var(doms[i]);
kfree(doms);
}
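/*
 * Usage sketch (illustrative; locking and error handling elided, and
 * mask_a/mask_b are placeholder cpumasks): callers pair the two helpers
 * above and then hand ownership to partition_sched_domains():
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], mask_a);
 *		cpumask_copy(doms[1], mask_b);
 *		partition_sched_domains(2, doms, NULL);
 *	}
 *
 * partition_sched_domains() will free_sched_domains() the array on a
 * later repartition, so the caller must not free it itself.
 */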
/*
* Set up scheduler domains and groups. For now this just excludes isolated
* CPUs, but could be used to exclude other special cases in the future.
*/
int __init sched_init_domains(const struct cpumask *cpu_map)
{
int err;
zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
arch_update_cpu_topology();
asym_cpu_capacity_scan();
ndoms_cur = 1;
doms_cur = alloc_sched_domains(ndoms_cur);
if (!doms_cur)
doms_cur = &fallback_doms;
cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN));
err = build_sched_domains(doms_cur[0], NULL);
return err;
}
/*
* Detach sched domains from a group of CPUs specified in cpu_map
* These CPUs will now be attached to the NULL domain
*/
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
unsigned int cpu = cpumask_any(cpu_map);
int i;
if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
rcu_read_lock();
for_each_cpu(i, cpu_map)
cpu_attach_domain(NULL, &def_root_domain, i);
rcu_read_unlock();
}
/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
struct sched_domain_attr *new, int idx_new)
{
struct sched_domain_attr tmp;
/* Fast path: */
if (!new && !cur)
return 1;
tmp = SD_ATTR_INIT;
return !memcmp(cur ? (cur + idx_cur) : &tmp,
new ? (new + idx_new) : &tmp,
sizeof(struct sched_domain_attr));
}
/*
* Partition sched domains as specified by the 'ndoms_new'
* cpumasks in the array doms_new[] of cpumasks. This compares
* doms_new[] to the current sched domain partitioning, doms_cur[].
* It destroys each deleted domain and builds each new domain.
*
* 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
* The masks don't intersect (don't overlap.) We should set up one
* sched domain for each mask. CPUs not in any of the cpumasks will
* not be load balanced. If the same cpumask appears both in the
* current 'doms_cur' domains and in the new 'doms_new', we can leave
* it as it is.
*
* The passed in 'doms_new' should be allocated using
* alloc_sched_domains. This routine takes ownership of it and will
* free_sched_domains it when done with it. If the caller failed the
* alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
* and partition_sched_domains() will fall back to the single partition
* 'fallback_doms'; it also forces the domains to be rebuilt.
*
* If doms_new == NULL it will be replaced with cpu_online_mask.
* ndoms_new == 0 is a special case for destroying existing domains,
* and it will not create the default domain.
*
* Call with hotplug lock and sched_domains_mutex held
*/
void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
bool __maybe_unused has_eas = false;
int i, j, n;
int new_topology;
lockdep_assert_held(&sched_domains_mutex);
/* Let the architecture update CPU core mappings: */
new_topology = arch_update_cpu_topology();
/* Trigger rebuilding CPU capacity asymmetry data */
if (new_topology)
asym_cpu_capacity_scan();
if (!doms_new) {
WARN_ON_ONCE(dattr_new);
n = 0;
doms_new = alloc_sched_domains(1);
if (doms_new) {
n = 1;
cpumask_and(doms_new[0], cpu_active_mask,
housekeeping_cpumask(HK_TYPE_DOMAIN));
}
} else {
n = ndoms_new;
}
/* Destroy deleted domains: */
for (i = 0; i < ndoms_cur; i++) {
for (j = 0; j < n && !new_topology; j++) {
if (cpumask_equal(doms_cur[i], doms_new[j]) &&
dattrs_equal(dattr_cur, i, dattr_new, j)) {
struct root_domain *rd;
/*
* This domain won't be destroyed and as such
* its dl_bw->total_bw needs to be cleared. It
* will be recomputed in update_tasks_root_domain().
*/
rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
dl_clear_root_domain(rd);
goto match1;
}
}
/* No match - a current sched domain not in new doms_new[] */
detach_destroy_domains(doms_cur[i]);
match1:
;
}
n = ndoms_cur;
if (!doms_new) {
n = 0;
doms_new = &fallback_doms;
cpumask_and(doms_new[0], cpu_active_mask,
housekeeping_cpumask(HK_TYPE_DOMAIN));
}
/* Build new domains: */
for (i = 0; i < ndoms_new; i++) {
for (j = 0; j < n && !new_topology; j++) {
if (cpumask_equal(doms_new[i], doms_cur[j]) &&
dattrs_equal(dattr_new, i, dattr_cur, j))
goto match2;
}
/* No match - add a new doms_new */
build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
;
}
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
/* Build perf. domains: */
for (i = 0; i < ndoms_new; i++) {
for (j = 0; j < n && !sched_energy_update; j++) {
if (cpumask_equal(doms_new[i], doms_cur[j]) &&
cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
has_eas = true;
goto match3;
}
}
/* No match - add perf. domains for a new rd */
has_eas |= build_perf_domains(doms_new[i]);
match3:
;
}
sched_energy_set(has_eas);
#endif
/* Remember the new sched domains: */
if (doms_cur != &fallback_doms)
free_sched_domains(doms_cur, ndoms_cur);
kfree(dattr_cur);
doms_cur = doms_new;
dattr_cur = dattr_new;
ndoms_cur = ndoms_new;
update_sched_domain_debugfs();
}
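/*
 * Worked example (illustrative): suppose doms_cur = {A, B} and the
 * caller passes doms_new = {B, C} with unchanged attributes and no
 * topology change. The first loop destroys only A (B matches and is
 * kept, with its root domain's dl_bw cleared); the second loop builds
 * only C (B matches again). Passing ndoms_new == 0 destroys everything
 * and builds nothing; doms_new == NULL rebuilds a single default
 * partition from cpu_active_mask.
 */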
/*
* Call with hotplug lock held
*/
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
mutex_lock(&sched_domains_mutex);
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
mutex_unlock(&sched_domains_mutex);
}
| linux-master | kernel/sched/topology.c |
// SPDX-License-Identifier: GPL-2.0
/*
* /proc/schedstat implementation
*/
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
struct sched_statistics *stats)
{
u64 wait_start, prev_wait_start;
wait_start = rq_clock(rq);
prev_wait_start = schedstat_val(stats->wait_start);
if (p && likely(wait_start > prev_wait_start))
wait_start -= prev_wait_start;
__schedstat_set(stats->wait_start, wait_start);
}
void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
struct sched_statistics *stats)
{
u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);
if (p) {
if (task_on_rq_migrating(p)) {
/*
* Preserve migrating task's wait time so wait_start
* time stamp can be adjusted to accumulate wait time
* prior to migration.
*/
__schedstat_set(stats->wait_start, delta);
return;
}
trace_sched_stat_wait(p, delta);
}
__schedstat_set(stats->wait_max,
max(schedstat_val(stats->wait_max), delta));
__schedstat_inc(stats->wait_count);
__schedstat_add(stats->wait_sum, delta);
__schedstat_set(stats->wait_start, 0);
}
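/*
 * Worked example (illustrative, simplified timestamps): task p starts
 * waiting at rq_clock 100 on CPU0, so wait_start = 100. At rq_clock 130
 * it is picked for migration: delta = 30 and, because the task is
 * TASK_ON_RQ_MIGRATING, wait_start is set to 30 (the accrued wait).
 * On CPU1, __update_stats_wait_start() then sees prev_wait_start = 30
 * and stores wait_start = rq_clock - 30, so a later wait_end also
 * accumulates the pre-migration wait time.
 */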
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
struct sched_statistics *stats)
{
u64 sleep_start, block_start;
sleep_start = schedstat_val(stats->sleep_start);
block_start = schedstat_val(stats->block_start);
if (sleep_start) {
u64 delta = rq_clock(rq) - sleep_start;
if ((s64)delta < 0)
delta = 0;
if (unlikely(delta > schedstat_val(stats->sleep_max)))
__schedstat_set(stats->sleep_max, delta);
__schedstat_set(stats->sleep_start, 0);
__schedstat_add(stats->sum_sleep_runtime, delta);
if (p) {
account_scheduler_latency(p, delta >> 10, 1);
trace_sched_stat_sleep(p, delta);
}
}
if (block_start) {
u64 delta = rq_clock(rq) - block_start;
if ((s64)delta < 0)
delta = 0;
if (unlikely(delta > schedstat_val(stats->block_max)))
__schedstat_set(stats->block_max, delta);
__schedstat_set(stats->block_start, 0);
__schedstat_add(stats->sum_sleep_runtime, delta);
__schedstat_add(stats->sum_block_runtime, delta);
if (p) {
if (p->in_iowait) {
__schedstat_add(stats->iowait_sum, delta);
__schedstat_inc(stats->iowait_count);
trace_sched_stat_iowait(p, delta);
}
trace_sched_stat_blocked(p, delta);
/*
* Blocking time is in units of nanosecs, so shift by
* 20 to get a milliseconds-range estimation of the
* amount of time that the task spent sleeping:
*/
if (unlikely(prof_on == SLEEP_PROFILING)) {
profile_hits(SLEEP_PROFILING,
(void *)get_wchan(p),
delta >> 20);
}
account_scheduler_latency(p, delta >> 10, 0);
}
}
}
/*
* Current schedstat API version.
*
* Bump this up when changing the output format or the meaning of an existing
* format, so that tools can adapt (or abort)
*/
#define SCHEDSTAT_VERSION 15
static int show_schedstat(struct seq_file *seq, void *v)
{
int cpu;
if (v == (void *)1) {
seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
seq_printf(seq, "timestamp %lu\n", jiffies);
} else {
struct rq *rq;
#ifdef CONFIG_SMP
struct sched_domain *sd;
int dcount = 0;
#endif
cpu = (unsigned long)(v - 2);
rq = cpu_rq(cpu);
/* runqueue-specific stats */
seq_printf(seq,
"cpu%d %u 0 %u %u %u %u %llu %llu %lu",
cpu, rq->yld_count,
rq->sched_count, rq->sched_goidle,
rq->ttwu_count, rq->ttwu_local,
rq->rq_cpu_time,
rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
seq_printf(seq, "\n");
#ifdef CONFIG_SMP
/* domain-specific stats */
rcu_read_lock();
for_each_domain(cpu, sd) {
enum cpu_idle_type itype;
seq_printf(seq, "domain%d %*pb", dcount++,
cpumask_pr_args(sched_domain_span(sd)));
for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
itype++) {
seq_printf(seq, " %u %u %u %u %u %u %u %u",
sd->lb_count[itype],
sd->lb_balanced[itype],
sd->lb_failed[itype],
sd->lb_imbalance[itype],
sd->lb_gained[itype],
sd->lb_hot_gained[itype],
sd->lb_nobusyq[itype],
sd->lb_nobusyg[itype]);
}
seq_printf(seq,
" %u %u %u %u %u %u %u %u %u %u %u %u\n",
sd->alb_count, sd->alb_failed, sd->alb_pushed,
sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
sd->ttwu_wake_remote, sd->ttwu_move_affine,
sd->ttwu_move_balance);
}
rcu_read_unlock();
#endif
}
return 0;
}
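/*
 * Example output (illustrative, made-up numbers), matching the format
 * strings above:
 *
 *	version 15
 *	timestamp 4294963432
 *	cpu0 36 0 4721 1827 2904 1331 338566112 115929190 3179
 *	domain0 3f 12 11 1 0 ... (eight counters per idle type, then the
 *	twelve alb/sbe/sbf/ttwu counters)
 *
 * The cpuN fields are: yld_count, a hard-coded 0 placeholder,
 * sched_count, sched_goidle, ttwu_count, ttwu_local, rq_cpu_time,
 * rq_sched_info.run_delay and rq_sched_info.pcount.
 */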
/*
* This iterator needs some explanation.
* It returns 1 for the header position.
* This means 2 is cpu 0.
* In a hotplugged system some CPUs, including cpu 0, may be missing so we have
* to use cpumask_* to iterate over the CPUs.
*/
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
unsigned long n = *offset;
if (n == 0)
return (void *) 1;
n--;
if (n > 0)
n = cpumask_next(n - 1, cpu_online_mask);
else
n = cpumask_first(cpu_online_mask);
*offset = n + 1;
if (n < nr_cpu_ids)
return (void *)(unsigned long)(n + 2);
return NULL;
}
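/*
 * Worked example (illustrative): with CPUs 0, 2 and 3 online, the
 * sequence of calls resolves as:
 *
 *	*offset == 0 -> (void *)1	header
 *	*offset == 1 -> (void *)2	cpu 0	(*offset left at 1)
 *	*offset == 2 -> (void *)4	cpu 2	(*offset set to 3)
 *	*offset == 4 -> (void *)5	cpu 3	(*offset set to 4)
 *
 * (schedstat_next() bumps *offset from 3 to 4 before re-entering, which
 * is why 3 never appears.) The returned cookie is always cpu + 2, and
 * offline CPUs are skipped via cpumask_next().
 */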
static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
(*offset)++;
return schedstat_start(file, offset);
}
static void schedstat_stop(struct seq_file *file, void *data)
{
}
static const struct seq_operations schedstat_sops = {
.start = schedstat_start,
.next = schedstat_next,
.stop = schedstat_stop,
.show = show_schedstat,
};
static int __init proc_schedstat_init(void)
{
proc_create_seq("schedstat", 0, NULL, &schedstat_sops);
return 0;
}
subsys_initcall(proc_schedstat_init);
| linux-master | kernel/sched/stats.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sched_clock() for unstable CPU clocks
*
* Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
*
* Updates and enhancements:
* Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <[email protected]>
*
* Based on code by:
* Ingo Molnar <[email protected]>
* Guillaume Chazarain <[email protected]>
*
*
* What this file implements:
*
* cpu_clock(i) provides a fast (execution time) high resolution
* clock with bounded drift between CPUs. The value of cpu_clock(i)
* is monotonic for constant i. The timestamp returned is in nanoseconds.
*
* ######################### BIG FAT WARNING ##########################
* # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
* # go backwards !! #
* ####################################################################
*
* There is no strict promise about the base, although it tends to start
* at 0 on boot (but people really shouldn't rely on that).
*
* cpu_clock(i) -- can be used from any context, including NMI.
* local_clock() -- is cpu_clock() on the current CPU.
*
* sched_clock_cpu(i)
*
* How it is implemented:
*
* When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, the implementation uses
* sched_clock() directly; in that case sched_clock() is assumed to provide
* these properties (mostly it means the architecture provides a globally
* synchronized highres time source).
*
* Otherwise it tries to create a semi stable clock from a mixture of other
* clocks, including:
*
* - GTOD (clock monotonic)
* - sched_clock()
* - explicit idle events
*
* We use GTOD as base and use sched_clock() deltas to improve resolution. The
* deltas are filtered to provide monotonicity and keeping it within an
* expected window.
*
* Furthermore, explicit sleep and wakeup hooks allow us to account for time
* that is otherwise invisible (TSC gets stopped).
*
*/
/*
* Scheduler clock - returns current time in nanosec units.
* This is default implementation.
* Architectures and sub-architectures can override this.
*/
notrace unsigned long long __weak sched_clock(void)
{
return (unsigned long long)(jiffies - INITIAL_JIFFIES)
* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
static DEFINE_STATIC_KEY_FALSE(sched_clock_running);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
* We must start with !__sched_clock_stable because the unstable -> stable
* transition is accurate, while the stable -> unstable transition is not.
*
* Similarly we start with __sched_clock_stable_early, thereby assuming we
* will become stable, such that there's only a single 1 -> 0 transition.
*/
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early = 1;
/*
* We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
*/
__read_mostly u64 __sched_clock_offset;
static __read_mostly u64 __gtod_offset;
struct sched_clock_data {
u64 tick_raw;
u64 tick_gtod;
u64 clock;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
static __always_inline struct sched_clock_data *this_scd(void)
{
return this_cpu_ptr(&sched_clock_data);
}
notrace static inline struct sched_clock_data *cpu_sdc(int cpu)
{
return &per_cpu(sched_clock_data, cpu);
}
notrace int sched_clock_stable(void)
{
return static_branch_likely(&__sched_clock_stable);
}
notrace static void __scd_stamp(struct sched_clock_data *scd)
{
scd->tick_gtod = ktime_get_ns();
scd->tick_raw = sched_clock();
}
notrace static void __set_sched_clock_stable(void)
{
struct sched_clock_data *scd;
/*
* Since we're still unstable and the tick is already running, we have
* to disable IRQs in order to get a consistent scd->tick* reading.
*/
local_irq_disable();
scd = this_scd();
/*
* Attempt to make the (initial) unstable->stable transition continuous.
*/
__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
local_irq_enable();
printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
scd->tick_gtod, __gtod_offset,
scd->tick_raw, __sched_clock_offset);
static_branch_enable(&__sched_clock_stable);
tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}
/*
* If we ever get here, we're screwed, because we found out -- typically after
* the fact -- that TSC wasn't good. This means all our clocksources (including
* ktime) could have reported wrong values.
*
* What we do here is an attempt to fix up and continue sort of where we left
* off in a coherent manner.
*
* The only way to fully avoid random clock jumps is to boot with:
* "tsc=unstable".
*/
notrace static void __sched_clock_work(struct work_struct *work)
{
struct sched_clock_data *scd;
int cpu;
/* take a current timestamp and set 'now' */
preempt_disable();
scd = this_scd();
__scd_stamp(scd);
scd->clock = scd->tick_gtod + __gtod_offset;
preempt_enable();
/* clone to all CPUs */
for_each_possible_cpu(cpu)
per_cpu(sched_clock_data, cpu) = *scd;
printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'.\n");
printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
scd->tick_gtod, __gtod_offset,
scd->tick_raw, __sched_clock_offset);
static_branch_disable(&__sched_clock_stable);
}
static DECLARE_WORK(sched_clock_work, __sched_clock_work);
notrace static void __clear_sched_clock_stable(void)
{
if (!sched_clock_stable())
return;
tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
schedule_work(&sched_clock_work);
}
notrace void clear_sched_clock_stable(void)
{
__sched_clock_stable_early = 0;
smp_mb(); /* matches sched_clock_init_late() */
if (static_key_count(&sched_clock_running.key) == 2)
__clear_sched_clock_stable();
}
notrace static void __sched_clock_gtod_offset(void)
{
struct sched_clock_data *scd = this_scd();
__scd_stamp(scd);
__gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
}
void __init sched_clock_init(void)
{
/*
* Set __gtod_offset such that once we mark sched_clock_running,
* sched_clock_tick() continues where sched_clock() left off.
*
* Even if TSC is buggered, we're still UP at this point so it
* can't really be out of sync.
*/
local_irq_disable();
__sched_clock_gtod_offset();
local_irq_enable();
static_branch_inc(&sched_clock_running);
}
/*
* We run this as late_initcall() such that it runs after all built-in drivers,
* notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
*/
static int __init sched_clock_init_late(void)
{
static_branch_inc(&sched_clock_running);
/*
* Ensure that it is impossible to not do a static_key update.
*
* Either {set,clear}_sched_clock_stable() must see sched_clock_running
* and do the update, or we must see their __sched_clock_stable_early
* and do the update, or both.
*/
smp_mb(); /* matches {set,clear}_sched_clock_stable() */
if (__sched_clock_stable_early)
__set_sched_clock_stable();
return 0;
}
late_initcall(sched_clock_init_late);
/*
* min, max except they take wrapping into account
*/
static __always_inline u64 wrap_min(u64 x, u64 y)
{
return (s64)(x - y) < 0 ? x : y;
}
static __always_inline u64 wrap_max(u64 x, u64 y)
{
return (s64)(x - y) > 0 ? x : y;
}
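/*
 * Worked example (illustrative, using 8-bit arithmetic for brevity):
 * take x = 2 and y = 250, i.e. x has just wrapped past y. A plain max()
 * would pick 250, but (s8)(2 - 250) == 8 > 0, so wrap_max() picks 2:
 * the value that is "later" modulo the counter width. This is what the
 * clock clamping in sched_clock_local() below relies on near a wrap.
 */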
/*
* update the percpu scd from the raw @now value
*
* - filter out backward motion
* - use the GTOD tick value to create a window to filter crazy TSC values
*/
static __always_inline u64 sched_clock_local(struct sched_clock_data *scd)
{
u64 now, clock, old_clock, min_clock, max_clock, gtod;
s64 delta;
again:
now = sched_clock_noinstr();
delta = now - scd->tick_raw;
if (unlikely(delta < 0))
delta = 0;
old_clock = scd->clock;
/*
* scd->clock = clamp(scd->tick_gtod + delta,
* max(scd->tick_gtod, scd->clock),
* scd->tick_gtod + TICK_NSEC);
*/
gtod = scd->tick_gtod + __gtod_offset;
clock = gtod + delta;
min_clock = wrap_max(gtod, old_clock);
max_clock = wrap_max(old_clock, gtod + TICK_NSEC);
clock = wrap_max(clock, min_clock);
clock = wrap_min(clock, max_clock);
if (!raw_try_cmpxchg64(&scd->clock, &old_clock, clock))
goto again;
return clock;
}
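/*
 * Worked example (illustrative numbers; HZ == 1000, so TICK_NSEC ==
 * 1000000): say scd->tick_raw == 1000, gtod == scd->tick_gtod +
 * __gtod_offset == 5000 and old_clock == 5100. A raw reading of
 * now == 1300 gives delta == 300 and a candidate clock of 5300, inside
 * [max(5000, 5100), 5000 + TICK_NSEC], so 5300 is stored. A crazy TSC
 * reading would be clamped down to 5000 + TICK_NSEC, and a backward one
 * (delta forced to 0) clamped up to old_clock == 5100.
 */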
noinstr u64 local_clock_noinstr(void)
{
u64 clock;
if (static_branch_likely(&__sched_clock_stable))
return sched_clock_noinstr() + __sched_clock_offset;
if (!static_branch_likely(&sched_clock_running))
return sched_clock_noinstr();
clock = sched_clock_local(this_scd());
return clock;
}
u64 local_clock(void)
{
u64 now;
preempt_disable_notrace();
now = local_clock_noinstr();
preempt_enable_notrace();
return now;
}
EXPORT_SYMBOL_GPL(local_clock);
static notrace u64 sched_clock_remote(struct sched_clock_data *scd)
{
struct sched_clock_data *my_scd = this_scd();
u64 this_clock, remote_clock;
u64 *ptr, old_val, val;
#if BITS_PER_LONG != 64
again:
/*
* Careful here: The local and the remote clock values need to
* be read out atomic as we need to compare the values and
* then update either the local or the remote side. So the
* cmpxchg64 below only protects one readout.
*
* We must reread via sched_clock_local() in the retry case on
* 32-bit kernels as an NMI could use sched_clock_local() via the
* tracer and hit between the readout of
* the low 32-bit and the high 32-bit portion.
*/
this_clock = sched_clock_local(my_scd);
/*
* We must enforce atomic readout on 32-bit, otherwise the
* update on the remote CPU can hit in between the readout of
* the low 32-bit and the high 32-bit portion.
*/
remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
/*
* On 64-bit kernels the read of [my]scd->clock is atomic versus the
* update, so we can avoid the above 32-bit dance.
*/
sched_clock_local(my_scd);
again:
this_clock = my_scd->clock;
remote_clock = scd->clock;
#endif
/*
* Use the opportunity that we have both locks
* taken to couple the two clocks: we take the
* larger time as the latest time for both
* runqueues. (this creates monotonic movement)
*/
if (likely((s64)(remote_clock - this_clock) < 0)) {
ptr = &scd->clock;
old_val = remote_clock;
val = this_clock;
} else {
/*
* Should be rare, but possible:
*/
ptr = &my_scd->clock;
old_val = this_clock;
val = remote_clock;
}
if (!try_cmpxchg64(ptr, &old_val, val))
goto again;
return val;
}
/*
* Similar to cpu_clock(), but requires local IRQs to be disabled.
*
* See cpu_clock().
*/
notrace u64 sched_clock_cpu(int cpu)
{
struct sched_clock_data *scd;
u64 clock;
if (sched_clock_stable())
return sched_clock() + __sched_clock_offset;
if (!static_branch_likely(&sched_clock_running))
return sched_clock();
preempt_disable_notrace();
scd = cpu_sdc(cpu);
if (cpu != smp_processor_id())
clock = sched_clock_remote(scd);
else
clock = sched_clock_local(scd);
preempt_enable_notrace();
return clock;
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);
notrace void sched_clock_tick(void)
{
struct sched_clock_data *scd;
if (sched_clock_stable())
return;
if (!static_branch_likely(&sched_clock_running))
return;
lockdep_assert_irqs_disabled();
scd = this_scd();
__scd_stamp(scd);
sched_clock_local(scd);
}
notrace void sched_clock_tick_stable(void)
{
if (!sched_clock_stable())
return;
/*
* Called under watchdog_lock.
*
* The watchdog just found this TSC to (still) be stable, so now is a
* good moment to update our __gtod_offset. Because once we find the
* TSC to be unstable, any computation will be computing crap.
*/
local_irq_disable();
__sched_clock_gtod_offset();
local_irq_enable();
}
/*
* We are going deep-idle (irqs are disabled):
*/
notrace void sched_clock_idle_sleep_event(void)
{
sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
/*
* We just idled; resync with ktime.
*/
notrace void sched_clock_idle_wakeup_event(void)
{
unsigned long flags;
if (sched_clock_stable())
return;
if (unlikely(timekeeping_suspended))
return;
local_irq_save(flags);
sched_clock_tick();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
void __init sched_clock_init(void)
{
static_branch_inc(&sched_clock_running);
local_irq_disable();
generic_sched_clock_init();
local_irq_enable();
}
notrace u64 sched_clock_cpu(int cpu)
{
if (!static_branch_likely(&sched_clock_running))
return 0;
return sched_clock();
}
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
/*
* Running clock - returns the time that has elapsed while a guest has been
* running.
* On a guest this value should be local_clock minus the time the guest was
* suspended by the hypervisor (for any reason).
* On bare metal this function should return the same as local_clock.
* Architectures and sub-architectures can override this.
*/
notrace u64 __weak running_clock(void)
{
return local_clock();
}
| linux-master | kernel/sched/clock.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* kernel/sched/core.c
*
* Core kernel scheduler code and related syscalls
*
* Copyright (C) 1991-2002 Linus Torvalds
*/
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>
#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>
#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
# include <linux/entry-common.h>
# endif
#endif
#include <uapi/linux/sched/types.h>
#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS
#include "sched.h"
#include "stats.h"
#include "autogroup.h"
#include "autogroup.h"
#include "pelt.h"
#include "smp.h"
#include "stats.h"
#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
* associated with them) to allow external modules to probe them.
*/
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#ifdef CONFIG_SCHED_DEBUG
/*
* Debugging: various feature bits
*
* If SCHED_DEBUG is disabled, each compilation unit has its own copy of
* sysctl_sched_features, defined in sched.h, to allow constants propagation
* at compile time and compiler optimization based on features default.
*/
#define SCHED_FEAT(name, enabled) \
(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
0;
#undef SCHED_FEAT
/*
* Print a warning if need_resched is set for the given duration (if
* LATENCY_WARN is enabled).
*
* If sysctl_resched_latency_warn_once is set, only one warning will be shown
* per boot.
*/
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */
/*
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
__read_mostly int scheduler_running;
#ifdef CONFIG_SCHED_CORE
DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
if (p->sched_class == &stop_sched_class) /* trumps deadline */
return -2;
if (rt_prio(p->prio)) /* includes deadline */
return p->prio; /* [-1, 99] */
if (p->sched_class == &idle_sched_class)
return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
}
/*
* l(a,b)
* le(a,b) := !l(b,a)
* g(a,b) := l(b,a)
* ge(a,b) := !l(a,b)
*/
/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
const struct task_struct *b, bool in_fi)
{
int pa = __task_prio(a), pb = __task_prio(b);
if (-pa < -pb)
return true;
if (-pb < -pa)
return false;
if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
return !dl_time_before(a->dl.deadline, b->dl.deadline);
if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
return cfs_prio_less(a, b, in_fi);
return false;
}
static inline bool __sched_core_less(const struct task_struct *a,
const struct task_struct *b)
{
if (a->core_cookie < b->core_cookie)
return true;
if (a->core_cookie > b->core_cookie)
return false;
/* flip prio, so high prio is leftmost */
if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
return true;
return false;
}
#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}
static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
const struct task_struct *p = __node_2_sc(node);
unsigned long cookie = (unsigned long)key;
if (cookie < p->core_cookie)
return -1;
if (cookie > p->core_cookie)
return 1;
return 0;
}
void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
rq->core->core_task_seq++;
if (!p->core_cookie)
return;
rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}
void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
rq->core->core_task_seq++;
if (sched_core_enqueued(p)) {
rb_erase(&p->core_node, &rq->core_tree);
RB_CLEAR_NODE(&p->core_node);
}
/*
* Migrating the last task off the cpu, with the cpu in forced idle
* state. Reschedule to create an accounting edge for forced idle,
* and re-examine whether the core is still in forced idle state.
*/
if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
rq->core->core_forceidle_count && rq->curr == rq->idle)
resched_curr(rq);
}
static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
if (p->sched_class->task_is_throttled)
return p->sched_class->task_is_throttled(p, cpu);
return 0;
}
static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
struct rb_node *node = &p->core_node;
int cpu = task_cpu(p);
do {
node = rb_next(node);
if (!node)
return NULL;
p = __node_2_sc(node);
if (p->core_cookie != cookie)
return NULL;
} while (sched_task_is_throttled(p, cpu));
return p;
}
/*
* Find left-most (aka, highest priority) and unthrottled task matching @cookie.
* If no suitable task is found, NULL will be returned.
*/
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
struct task_struct *p;
struct rb_node *node;
node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
if (!node)
return NULL;
p = __node_2_sc(node);
if (!sched_task_is_throttled(p, rq->cpu))
return p;
return sched_core_next(p, cookie);
}
/*
* Magic required such that:
*
* raw_spin_rq_lock(rq);
* ...
* raw_spin_rq_unlock(rq);
*
* ends up locking and unlocking the _same_ lock, and all CPUs
* always agree on what rq has what lock.
*
* XXX entirely possible to selectively enable cores, don't bother for now.
*/
static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;
static void sched_core_lock(int cpu, unsigned long *flags)
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
int t, i = 0;
local_irq_save(*flags);
for_each_cpu(t, smt_mask)
raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}
static void sched_core_unlock(int cpu, unsigned long *flags)
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
int t;
for_each_cpu(t, smt_mask)
raw_spin_unlock(&cpu_rq(t)->__lock);
local_irq_restore(*flags);
}
static void __sched_core_flip(bool enabled)
{
unsigned long flags;
int cpu, t;
cpus_read_lock();
/*
* Toggle the online cores, one by one.
*/
cpumask_copy(&sched_core_mask, cpu_online_mask);
for_each_cpu(cpu, &sched_core_mask) {
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
sched_core_lock(cpu, &flags);
for_each_cpu(t, smt_mask)
cpu_rq(t)->core_enabled = enabled;
cpu_rq(cpu)->core->core_forceidle_start = 0;
sched_core_unlock(cpu, &flags);
cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
}
/*
* Toggle the offline CPUs.
*/
for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
cpu_rq(cpu)->core_enabled = enabled;
cpus_read_unlock();
}
static void sched_core_assert_empty(void)
{
int cpu;
for_each_possible_cpu(cpu)
WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}
static void __sched_core_enable(void)
{
static_branch_enable(&__sched_core_enabled);
/*
* Ensure all previous instances of raw_spin_rq_*lock() have finished
* and future ones will observe !sched_core_disabled().
*/
synchronize_rcu();
__sched_core_flip(true);
sched_core_assert_empty();
}
static void __sched_core_disable(void)
{
sched_core_assert_empty();
__sched_core_flip(false);
static_branch_disable(&__sched_core_enabled);
}
void sched_core_get(void)
{
if (atomic_inc_not_zero(&sched_core_count))
return;
mutex_lock(&sched_core_mutex);
if (!atomic_read(&sched_core_count))
__sched_core_enable();
smp_mb__before_atomic();
atomic_inc(&sched_core_count);
mutex_unlock(&sched_core_mutex);
}
static void __sched_core_put(struct work_struct *work)
{
if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
__sched_core_disable();
mutex_unlock(&sched_core_mutex);
}
}
void sched_core_put(void)
{
static DECLARE_WORK(_work, __sched_core_put);
/*
* "There can be only one"
*
* Either this is the last one, or we don't actually need to do any
* 'work'. If it is the last *again*, we rely on
* WORK_STRUCT_PENDING_BIT.
*/
if (!atomic_add_unless(&sched_core_count, -1, 1))
schedule_work(&_work);
}
#else /* !CONFIG_SCHED_CORE */
static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
#endif /* CONFIG_SCHED_CORE */
/*
* Serialization rules:
*
* Lock order:
*
* p->pi_lock
* rq->lock
* hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
*
* rq1->lock
* rq2->lock where: rq1 < rq2
*
* Regular state:
*
* Normal scheduling state is serialized by rq->lock. __schedule() takes the
* local CPU's rq->lock, it optionally removes the task from the runqueue and
* always looks at the local rq data structures to find the most eligible task
* to run next.
*
* Task enqueue is also under rq->lock, possibly taken from another CPU.
* Wakeups from another LLC domain might use an IPI to transfer the enqueue to
* the local CPU to avoid bouncing the runqueue state around [ see
* ttwu_queue_wakelist() ]
*
* Task wakeup, specifically wakeups that involve migration, are horribly
* complicated to avoid having to take two rq->locks.
*
* Special state:
*
* System-calls and anything external will use task_rq_lock() which acquires
* both p->pi_lock and rq->lock. As a consequence the state they change is
* stable while holding either lock:
*
* - sched_setaffinity()/
* set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
* - set_user_nice(): p->se.load, p->*prio
* - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
* p->se.load, p->rt_priority,
* p->dl.dl_{runtime, deadline, period, flags, bw, density}
* - sched_setnuma(): p->numa_preferred_nid
* - sched_move_task(): p->sched_task_group
* - uclamp_update_active() p->uclamp*
*
* p->state <- TASK_*:
*
* is changed locklessly using set_current_state(), __set_current_state() or
* set_special_state(), see their respective comments, or by
* try_to_wake_up(). This latter uses p->pi_lock to serialize against
* concurrent self.
*
* p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
*
* is set by activate_task() and cleared by deactivate_task(), under
* rq->lock. Non-zero indicates the task is runnable, the special
* ON_RQ_MIGRATING state is used for migration without holding both
* rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
*
* p->on_cpu <- { 0, 1 }:
*
* is set by prepare_task() and cleared by finish_task() such that it will be
* set before p is scheduled-in and cleared after p is scheduled-out, both
* under rq->lock. Non-zero indicates the task is running on its CPU.
*
* [ The astute reader will observe that it is possible for two tasks on one
* CPU to have ->on_cpu = 1 at the same time. ]
*
* task_cpu(p): is changed by set_task_cpu(), the rules are:
*
* - Don't call set_task_cpu() on a blocked task:
*
* We don't care what CPU we're not running on, this simplifies hotplug,
* the CPU assignment of blocked tasks isn't required to be valid.
*
* - for try_to_wake_up(), called under p->pi_lock:
*
* This allows try_to_wake_up() to only take one rq->lock, see its comment.
*
* - for migration called under rq->lock:
* [ see task_on_rq_migrating() in task_rq_lock() ]
*
* o move_queued_task()
* o detach_task()
*
* - for migration called under double_rq_lock():
*
* o __migrate_swap_task()
* o push_rt_task() / pull_rt_task()
* o push_dl_task() / pull_dl_task()
* o dl_task_offline_migration()
*
*/
void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
raw_spinlock_t *lock;
/* Matches synchronize_rcu() in __sched_core_enable() */
preempt_disable();
if (sched_core_disabled()) {
raw_spin_lock_nested(&rq->__lock, subclass);
/* preempt_count *MUST* be > 1 */
preempt_enable_no_resched();
return;
}
for (;;) {
lock = __rq_lockp(rq);
raw_spin_lock_nested(lock, subclass);
if (likely(lock == __rq_lockp(rq))) {
/* preempt_count *MUST* be > 1 */
preempt_enable_no_resched();
return;
}
raw_spin_unlock(lock);
}
}
bool raw_spin_rq_trylock(struct rq *rq)
{
raw_spinlock_t *lock;
bool ret;
/* Matches synchronize_rcu() in __sched_core_enable() */
preempt_disable();
if (sched_core_disabled()) {
ret = raw_spin_trylock(&rq->__lock);
preempt_enable();
return ret;
}
for (;;) {
lock = __rq_lockp(rq);
ret = raw_spin_trylock(lock);
if (!ret || (likely(lock == __rq_lockp(rq)))) {
preempt_enable();
return ret;
}
raw_spin_unlock(lock);
}
}
void raw_spin_rq_unlock(struct rq *rq)
{
raw_spin_unlock(rq_lockp(rq));
}
#ifdef CONFIG_SMP
/*
* double_rq_lock - safely lock two runqueues
*/
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
lockdep_assert_irqs_disabled();
if (rq_order_less(rq2, rq1))
swap(rq1, rq2);
raw_spin_rq_lock(rq1);
if (__rq_lockp(rq1) != __rq_lockp(rq2))
raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
double_rq_clock_clear_update(rq1, rq2);
}
#endif
/*
* __task_rq_lock - lock the rq @p resides on.
*/
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
__acquires(rq->lock)
{
struct rq *rq;
lockdep_assert_held(&p->pi_lock);
for (;;) {
rq = task_rq(p);
raw_spin_rq_lock(rq);
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
rq_pin_lock(rq, rf);
return rq;
}
raw_spin_rq_unlock(rq);
while (unlikely(task_on_rq_migrating(p)))
cpu_relax();
}
}
/*
* task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
*/
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
__acquires(p->pi_lock)
__acquires(rq->lock)
{
struct rq *rq;
for (;;) {
raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
rq = task_rq(p);
raw_spin_rq_lock(rq);
/*
* move_queued_task() task_rq_lock()
*
* ACQUIRE (rq->lock)
* [S] ->on_rq = MIGRATING [L] rq = task_rq()
* WMB (__set_task_cpu()) ACQUIRE (rq->lock);
* [S] ->cpu = new_cpu [L] task_rq()
* [L] ->on_rq
* RELEASE (rq->lock)
*
* If we observe the old CPU in task_rq_lock(), the acquire of
* the old rq->lock will fully serialize against the stores.
*
* If we observe the new CPU in task_rq_lock(), the address
* dependency headed by '[L] rq = task_rq()' and the acquire
* will pair with the WMB to ensure we then also see migrating.
*/
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
rq_pin_lock(rq, rf);
return rq;
}
raw_spin_rq_unlock(rq);
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
while (unlikely(task_on_rq_migrating(p)))
cpu_relax();
}
}
/*
* RQ-clock updating methods:
*/
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
* In theory, the compiler should just see 0 here, and optimize out the call
* to sched_rt_avg_update. But I don't trust it...
*/
s64 __maybe_unused steal = 0, irq_delta = 0;
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
/*
* Since irq_time is only updated on {soft,}irq_exit, we might run into
* this case when a previous update_rq_clock() happened inside a
* {soft,}irq region.
*
* When this happens, we stop ->clock_task and only update the
* prev_irq_time stamp to account for the part that fit, so that a next
* update will consume the rest. This ensures ->clock_task is
* monotonic.
*
* It does however cause some slight miss-attribution of {soft,}irq
* time, a more accurate solution would be to update the irq_time using
* the current rq->clock timestamp, except that would require using
* atomic ops.
*/
if (irq_delta > delta)
irq_delta = delta;
rq->prev_irq_time += irq_delta;
delta -= irq_delta;
psi_account_irqtime(rq->curr, irq_delta);
delayacct_irq(rq->curr, irq_delta);
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
if (static_key_false((¶virt_steal_rq_enabled))) {
steal = paravirt_steal_clock(cpu_of(rq));
steal -= rq->prev_steal_time_rq;
if (unlikely(steal > delta))
steal = delta;
rq->prev_steal_time_rq += steal;
delta -= steal;
}
#endif
rq->clock_task += delta;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
update_irq_load_avg(rq, irq_delta + steal);
#endif
update_rq_clock_pelt(rq, delta);
}
void update_rq_clock(struct rq *rq)
{
s64 delta;
lockdep_assert_rq_held(rq);
if (rq->clock_update_flags & RQCF_ACT_SKIP)
return;
#ifdef CONFIG_SCHED_DEBUG
if (sched_feat(WARN_DOUBLE_CLOCK))
SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
rq->clock_update_flags |= RQCF_UPDATED;
#endif
delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
if (delta < 0)
return;
rq->clock += delta;
update_rq_clock_task(rq, delta);
}
#ifdef CONFIG_SCHED_HRTICK
/*
* Use HR-timers to deliver accurate preemption points.
*/
static void hrtick_clear(struct rq *rq)
{
if (hrtimer_active(&rq->hrtick_timer))
hrtimer_cancel(&rq->hrtick_timer);
}
/*
* High-resolution timer tick.
* Runs from hardirq context with interrupts disabled.
*/
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
struct rq *rq = container_of(timer, struct rq, hrtick_timer);
struct rq_flags rf;
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
rq_lock(rq, &rf);
update_rq_clock(rq);
rq->curr->sched_class->task_tick(rq, rq->curr, 1);
rq_unlock(rq, &rf);
return HRTIMER_NORESTART;
}
#ifdef CONFIG_SMP
static void __hrtick_restart(struct rq *rq)
{
struct hrtimer *timer = &rq->hrtick_timer;
ktime_t time = rq->hrtick_time;
hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}
/*
* called from hardirq (IPI) context
*/
static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
struct rq_flags rf;
rq_lock(rq, &rf);
__hrtick_restart(rq);
rq_unlock(rq, &rf);
}
/*
* Called to set the hrtick timer state.
*
* called with rq->lock held and irqs disabled
*/
void hrtick_start(struct rq *rq, u64 delay)
{
struct hrtimer *timer = &rq->hrtick_timer;
s64 delta;
/*
* Don't schedule slices shorter than 10000ns, that just
* doesn't make sense and can cause timer DoS.
*/
delta = max_t(s64, delay, 10000LL);
rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
if (rq == this_rq())
__hrtick_restart(rq);
else
smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}
#else
/*
* Called to set the hrtick timer state.
*
* called with rq->lock held and irqs disabled
*/
void hrtick_start(struct rq *rq, u64 delay)
{
/*
* Don't schedule slices shorter than 10000ns, that just
* doesn't make sense. Rely on vruntime for fairness.
*/
delay = max_t(u64, delay, 10000LL);
hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
HRTIMER_MODE_REL_PINNED_HARD);
}
#endif /* CONFIG_SMP */
static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}
static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_HRTICK */
/*
* cmpxchg based fetch_or, macro so it works for different integer types
*/
#define fetch_or(ptr, mask) \
({ \
typeof(ptr) _ptr = (ptr); \
typeof(mask) _mask = (mask); \
typeof(*_ptr) _val = *_ptr; \
\
do { \
} while (!try_cmpxchg(_ptr, &_val, _val | _mask)); \
_val; \
})
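/*
 * Usage sketch (illustrative): fetch_or() returns the value *before* the
 * bits were set, which is what set_nr_and_not_polling() below relies on:
 *
 *	old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
 *	if (!(old & _TIF_POLLING_NRFLAG))
 *		smp_send_reschedule(cpu);
 *
 * i.e. only send the IPI when the remote task was not already polling
 * the flag word.
 */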
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
* Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
* this avoids any races wrt polling state changes and thereby avoids
* spurious IPIs.
*/
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
struct thread_info *ti = task_thread_info(p);
return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}
/*
* Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
*
* If this returns true, then the idle task promises to call
* sched_ttwu_pending() and reschedule soon.
*/
static bool set_nr_if_polling(struct task_struct *p)
{
struct thread_info *ti = task_thread_info(p);
typeof(ti->flags) val = READ_ONCE(ti->flags);
for (;;) {
if (!(val & _TIF_POLLING_NRFLAG))
return false;
if (val & _TIF_NEED_RESCHED)
return true;
if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
break;
}
return true;
}
#else
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
set_tsk_need_resched(p);
return true;
}
#ifdef CONFIG_SMP
static inline bool set_nr_if_polling(struct task_struct *p)
{
return false;
}
#endif
#endif
static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
struct wake_q_node *node = &task->wake_q;
/*
* Atomically grab the task, if ->wake_q is !nil already it means
* it's already queued (either by us or someone else) and will get the
* wakeup due to that.
*
* In order to ensure that a pending wakeup will observe our pending
* state, even in the failed case, an explicit smp_mb() must be used.
*/
smp_mb__before_atomic();
if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
return false;
/*
* The head is context local, there can be no concurrency.
*/
*head->lastp = node;
head->lastp = &node->next;
return true;
}
/**
* wake_q_add() - queue a wakeup for 'later' waking.
* @head: the wake_q_head to add @task to
* @task: the task to queue for 'later' wakeup
*
* Queue a task for later wakeup, most likely by the wake_up_q() call in the
* same context, _HOWEVER_ this is not guaranteed; the wakeup can come
* instantly.
*
* This function must be used as-if it were wake_up_process(); IOW the task
* must be ready to be woken at this location.
*/
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
if (__wake_q_add(head, task))
get_task_struct(task);
}
/**
* wake_q_add_safe() - safely queue a wakeup for 'later' waking.
* @head: the wake_q_head to add @task to
* @task: the task to queue for 'later' wakeup
*
* Queue a task for later wakeup, most likely by the wake_up_q() call in the
* same context, _HOWEVER_ this is not guaranteed; the wakeup can come
* instantly.
*
* This function must be used as-if it were wake_up_process(); IOW the task
* must be ready to be woken at this location.
*
* This function is essentially a task-safe equivalent to wake_q_add(). Callers
* that already hold reference to @task can call the 'safe' version and trust
* wake_q to do the right thing depending on whether or not the @task is already
* queued for wakeup.
*/
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
if (!__wake_q_add(head, task))
put_task_struct(task);
}
void wake_up_q(struct wake_q_head *head)
{
struct wake_q_node *node = head->first;
while (node != WAKE_Q_TAIL) {
struct task_struct *task;
task = container_of(node, struct task_struct, wake_q);
/* Task can safely be re-inserted now: */
node = node->next;
task->wake_q.next = NULL;
/*
* wake_up_process() executes a full barrier, which pairs with
* the queueing in wake_q_add() so as not to miss wakeups.
*/
wake_up_process(task);
put_task_struct(task);
}
}
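/*
 * Usage sketch (illustrative; 'some_lock' and 'task' are placeholders):
 * the common pattern is to queue wakeups under a lock and issue them
 * after dropping it, so wake_up_process() never runs with the lock held:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);
 *	raw_spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);
 */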
/*
* resched_curr - mark rq's current task 'to be rescheduled now'.
*
* On UP this means the setting of the need_resched flag, on SMP it
* might also involve a cross-CPU call to trigger the scheduler on
* the target CPU.
*/
void resched_curr(struct rq *rq)
{
struct task_struct *curr = rq->curr;
int cpu;
lockdep_assert_rq_held(rq);
if (test_tsk_need_resched(curr))
return;
cpu = cpu_of(rq);
if (cpu == smp_processor_id()) {
set_tsk_need_resched(curr);
set_preempt_need_resched();
return;
}
if (set_nr_and_not_polling(curr))
smp_send_reschedule(cpu);
else
trace_sched_wake_idle_without_ipi(cpu);
}
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
raw_spin_rq_lock_irqsave(rq, flags);
if (cpu_online(cpu) || cpu == smp_processor_id())
resched_curr(rq);
raw_spin_rq_unlock_irqrestore(rq, flags);
}
#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
* In the semi idle case, use the nearest busy CPU for migrating timers
* from an idle CPU. This is good for power-savings.
*
* We don't do a similar optimization for a completely idle system, as
* selecting an idle CPU will add more delays to the timers than intended
* (as that CPU's timer base may not be up to date wrt jiffies etc).
*/
int get_nohz_timer_target(void)
{
int i, cpu = smp_processor_id(), default_cpu = -1;
struct sched_domain *sd;
const struct cpumask *hk_mask;
if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
if (!idle_cpu(cpu))
return cpu;
default_cpu = cpu;
}
hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
guard(rcu)();
for_each_domain(cpu, sd) {
for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
if (cpu == i)
continue;
if (!idle_cpu(i))
return i;
}
}
if (default_cpu == -1)
default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
return default_cpu;
}
/*
* When add_timer_on() enqueues a timer into the timer wheel of an
* idle CPU then this timer might expire before the next timer event
* which is scheduled to wake up that CPU. In case of a completely
* idle system the next event might even be infinite time into the
* future. wake_up_idle_cpu() ensures that the CPU is woken up and
* leaves the inner idle loop so the newly added timer is taken into
* account when the CPU goes back to idle and evaluates the timer
* wheel for the next timer event.
*/
static void wake_up_idle_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
if (cpu == smp_processor_id())
return;
if (set_nr_and_not_polling(rq->idle))
smp_send_reschedule(cpu);
else
trace_sched_wake_idle_without_ipi(cpu);
}
static bool wake_up_full_nohz_cpu(int cpu)
{
/*
* We just need the target to call irq_exit() and re-evaluate
* the next tick. The nohz full kick at least implies that.
* If needed we can still optimize that later with an
* empty IRQ.
*/
if (cpu_is_offline(cpu))
return true; /* Don't try to wake offline CPUs. */
if (tick_nohz_full_cpu(cpu)) {
if (cpu != smp_processor_id() ||
tick_nohz_tick_stopped())
tick_nohz_full_kick_cpu(cpu);
return true;
}
return false;
}
/*
* Wake up the specified CPU. If the CPU is going offline, it is the
* caller's responsibility to deal with the lost wakeup, for example,
* by hooking into the CPU_DEAD notifier like timers and hrtimers do.
*/
void wake_up_nohz_cpu(int cpu)
{
if (!wake_up_full_nohz_cpu(cpu))
wake_up_idle_cpu(cpu);
}
static void nohz_csd_func(void *info)
{
struct rq *rq = info;
int cpu = cpu_of(rq);
unsigned int flags;
/*
* Release the rq::nohz_csd.
*/
flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
WARN_ON(!(flags & NOHZ_KICK_MASK));
rq->idle_balance = idle_cpu(cpu);
if (rq->idle_balance && !need_resched()) {
rq->nohz_idle_balance = flags;
raise_softirq_irqoff(SCHED_SOFTIRQ);
}
}
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
{
if (rq->nr_running != 1)
return false;
if (p->sched_class != &fair_sched_class)
return false;
if (!task_on_rq_queued(p))
return false;
return true;
}
bool sched_can_stop_tick(struct rq *rq)
{
int fifo_nr_running;
/* Deadline tasks, even if single, need the tick */
if (rq->dl.dl_nr_running)
return false;
/*
* If there is more than one RR task, we need the tick to affect the
* actual RR behaviour.
*/
if (rq->rt.rr_nr_running) {
if (rq->rt.rr_nr_running == 1)
return true;
else
return false;
}
/*
* If there are no RR tasks but there are FIFO tasks, we can skip the
* tick: there is no forced preemption between FIFO tasks.
*/
fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
if (fifo_nr_running)
return true;
/*
* If there are no DL, RR or FIFO tasks, there must only be CFS tasks left;
* if there's more than one we need the tick for involuntary
* preemption.
*/
if (rq->nr_running > 1)
return false;
/*
* If there is one task and it has CFS runtime bandwidth constraints
* and it's on the CPU now, we don't want to stop the tick.
* This check prevents clearing the bit if a newly enqueued task here is
* dequeued by migrating while the constrained task continues to run.
* E.g. going from 2->1 without going through pick_next_task().
*/
if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
if (cfs_task_bw_constrained(rq->curr))
return false;
}
return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
* Iterate task_group tree rooted at *from, calling @down when first entering a
* node and @up when leaving it for the final time.
*
* Caller must hold rcu_lock or sufficient equivalent.
*/
int walk_tg_tree_from(struct task_group *from,
tg_visitor down, tg_visitor up, void *data)
{
struct task_group *parent, *child;
int ret;
parent = from;
down:
ret = (*down)(parent, data);
if (ret)
goto out;
list_for_each_entry_rcu(child, &parent->children, siblings) {
parent = child;
goto down;
up:
continue;
}
ret = (*up)(parent, data);
if (ret || parent == from)
goto out;
child = parent;
parent = parent->parent;
if (parent)
goto up;
out:
return ret;
}
int tg_nop(struct task_group *tg, void *data)
{
return 0;
}
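/*
 * Usage sketch (illustrative; my_down_visitor and data are placeholders):
 * walk the whole task_group tree from the root, calling a visitor on the
 * way down and nothing on the way up:
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, data);
 *	rcu_read_unlock();
 *
 * Returning non-zero from a visitor aborts the walk and propagates the
 * value to the caller.
 */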
#endif
static void set_load_weight(struct task_struct *p, bool update_load)
{
int prio = p->static_prio - MAX_RT_PRIO;
struct load_weight *load = &p->se.load;
/*
* SCHED_IDLE tasks get minimal weight:
*/
if (task_has_idle_policy(p)) {
load->weight = scale_load(WEIGHT_IDLEPRIO);
load->inv_weight = WMULT_IDLEPRIO;
return;
}
/*
* SCHED_OTHER tasks have to update their load when changing their
* weight.
*/
if (update_load && p->sched_class == &fair_sched_class) {
reweight_task(p, prio);
} else {
load->weight = scale_load(sched_prio_to_weight[prio]);
load->inv_weight = sched_prio_to_wmult[prio];
}
}
#ifdef CONFIG_UCLAMP_TASK
/*
* Serializes updates of utilization clamp values
*
* User-space (the slow path) triggers utilization clamp value updates which
* can require updates to the scheduler's (fast-path) data structures used to
* support enqueue/dequeue operations.
* While the per-CPU rq lock protects fast-path update operations, user-space
* requests are serialized using a mutex to reduce the risk of conflicting
* updates or API abuses.
*/
static DEFINE_MUTEX(uclamp_mutex);
/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
/*
* By default RT tasks run at the maximum performance point/capacity of the
* system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
* SCHED_CAPACITY_SCALE.
*
* This knob allows admins to change the default behavior when uclamp is being
* used. In battery-powered devices particularly, running at the maximum
* capacity and frequency will increase energy consumption and shorten the
* battery life.
*
* This knob only affects RT tasks whose uclamp_se->user_defined == false.
*
* This knob will not override the system default sched_util_clamp_min defined
* above.
*/
static unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
/* All clamps are required to be less than or equal to these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];
/*
* This static key is used to reduce the uclamp overhead in the fast path. It
* primarily disables the call to uclamp_rq_{inc, dec}() in
* enqueue/dequeue_task().
*
* This allows users to continue to enable uclamp in their kernel config with
* minimum uclamp overhead in the fast path.
*
* As soon as userspace modifies any of the uclamp knobs, the static key is
* enabled, since we then have actual users that make use of uclamp
* functionality.
*
* The knobs that would enable this static key are:
*
* * A task modifying its uclamp value with sched_setattr().
* * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
* * An admin modifying the cgroup cpu.uclamp.{min, max}
*/
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
#define for_each_clamp_id(clamp_id) \
for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
}
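/*
 * Worked example (assuming the default CONFIG_UCLAMP_BUCKETS_COUNT of 5;
 * SCHED_CAPACITY_SCALE is 1024):
 *
 *	UCLAMP_BUCKET_DELTA = DIV_ROUND_CLOSEST(1024, 5) = 205
 *
 *	uclamp_bucket_id(0)    = min(0 / 205, 4)    = 0
 *	uclamp_bucket_id(300)  = min(300 / 205, 4)  = 1
 *	uclamp_bucket_id(1024) = min(1024 / 205, 4) = 4
 *
 * The min() against UCLAMP_BUCKETS - 1 keeps clamp values near
 * SCHED_CAPACITY_SCALE from indexing past the last bucket.
 */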
static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
if (clamp_id == UCLAMP_MIN)
return 0;
return SCHED_CAPACITY_SCALE;
}
static inline void uclamp_se_set(struct uclamp_se *uc_se,
unsigned int value, bool user_defined)
{
uc_se->value = value;
uc_se->bucket_id = uclamp_bucket_id(value);
uc_se->user_defined = user_defined;
}
static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
unsigned int clamp_value)
{
/*
* Avoid blocked utilization pushing up the frequency when we go
* idle (which drops the max-clamp) by retaining the last known
* max-clamp.
*/
if (clamp_id == UCLAMP_MAX) {
rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
return clamp_value;
}
return uclamp_none(UCLAMP_MIN);
}
static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
unsigned int clamp_value)
{
/* Reset max-clamp retention only on idle exit */
if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
return;
uclamp_rq_set(rq, clamp_id, clamp_value);
}
static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
unsigned int clamp_value)
{
struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
int bucket_id = UCLAMP_BUCKETS - 1;
/*
* Since both min and max clamps are max aggregated, find the
* topmost bucket with tasks in it.
*/
for ( ; bucket_id >= 0; bucket_id--) {
if (!bucket[bucket_id].tasks)
continue;
return bucket[bucket_id].value;
}
/* No tasks -- default clamp values */
return uclamp_idle_value(rq, clamp_id, clamp_value);
}
static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
unsigned int default_util_min;
struct uclamp_se *uc_se;
lockdep_assert_held(&p->pi_lock);
uc_se = &p->uclamp_req[UCLAMP_MIN];
/* Only sync if user didn't override the default */
if (uc_se->user_defined)
return;
default_util_min = sysctl_sched_uclamp_util_min_rt_default;
uclamp_se_set(uc_se, default_util_min, false);
}
static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
struct rq_flags rf;
struct rq *rq;
if (!rt_task(p))
return;
/* Protect updates to p->uclamp_* */
rq = task_rq_lock(p, &rf);
__uclamp_update_util_min_rt_default(p);
task_rq_unlock(rq, p, &rf);
}
static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
/* Copy by value as we could modify it */
struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
unsigned int tg_min, tg_max, value;
/*
* Tasks in autogroups or in the root task group will be
* restricted by system defaults.
*/
if (task_group_is_autogroup(task_group(p)))
return uc_req;
if (task_group(p) == &root_task_group)
return uc_req;
tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
value = uc_req.value;
value = clamp(value, tg_min, tg_max);
uclamp_se_set(&uc_req, value, false);
#endif
return uc_req;
}
/*
* The effective clamp bucket index of a task depends on, by increasing
* priority:
* - the task specific clamp value, when explicitly requested from userspace
* - the task group effective clamp value, for tasks neither in the root
* group nor in an autogroup
* - the system default clamp value, defined by the sysadmin
*/
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
struct uclamp_se uc_max = uclamp_default[clamp_id];
/* System default restrictions always apply */
if (unlikely(uc_req.value > uc_max.value))
return uc_max;
return uc_req;
}
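/*
 * Worked example of the precedence above (hypothetical values): a task
 * requests util_min = 800 via sched_setattr(); its task group's effective
 * cpu.uclamp.max is 512; the system default (sched_util_clamp_min) is at
 * its initial 1024. uclamp_tg_restrict() clamps the request to 512, and
 * since 512 <= 1024 the system default does not kick in: the effective
 * value is 512. Had the sysadmin lowered the system default below 512,
 * uclamp_eff_get() would return the system default instead.
 */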
unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
struct uclamp_se uc_eff;
/* Task currently refcounted: use back-annotated (effective) value */
if (p->uclamp[clamp_id].active)
return (unsigned long)p->uclamp[clamp_id].value;
uc_eff = uclamp_eff_get(p, clamp_id);
return (unsigned long)uc_eff.value;
}
/*
* When a task is enqueued on a rq, the clamp bucket currently defined by the
* task's uclamp::bucket_id is refcounted on that rq. This also immediately
* updates the rq's clamp value if required.
*
* Tasks can have a task-specific value requested from user-space; track
* within each bucket the maximum value for tasks refcounted in it.
* This "local max aggregation" allows us to track the exact "requested" value
* for each bucket when all its RUNNABLE tasks require the same clamp.
*/
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
enum uclamp_id clamp_id)
{
struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
struct uclamp_se *uc_se = &p->uclamp[clamp_id];
struct uclamp_bucket *bucket;
lockdep_assert_rq_held(rq);
/* Update task effective clamp */
p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
bucket = &uc_rq->bucket[uc_se->bucket_id];
bucket->tasks++;
uc_se->active = true;
uclamp_idle_reset(rq, clamp_id, uc_se->value);
/*
* Local max aggregation: rq buckets always track the max
* "requested" clamp value of its RUNNABLE tasks.
*/
if (bucket->tasks == 1 || uc_se->value > bucket->value)
bucket->value = uc_se->value;
if (uc_se->value > uclamp_rq_get(rq, clamp_id))
uclamp_rq_set(rq, clamp_id, uc_se->value);
}
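/*
 * Illustrative scenario (hypothetical values): two RUNNABLE tasks land in
 * the same bucket with requested util_min values of 200 and 300. After the
 * second enqueue, bucket->value is 300 (the local max). If the 300 task is
 * dequeued while the 200 task stays RUNNABLE, bucket->value keeps the now
 * slightly pessimistic 300 until the bucket empties; see the "local max
 * aggregation" note in uclamp_rq_dec_id() below.
 */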
/*
* When a task is dequeued from a rq, the clamp bucket refcounted by the task
* is released. If this is the last task reference counting the rq's max
* active clamp value, then the rq's clamp value is updated.
*
* Both the refcounted tasks and the rq's cached clamp values are expected
* to always be valid. If it's detected they are not, as defensive programming,
* enforce the expected state and warn.
*/
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
enum uclamp_id clamp_id)
{
struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
struct uclamp_se *uc_se = &p->uclamp[clamp_id];
struct uclamp_bucket *bucket;
unsigned int bkt_clamp;
unsigned int rq_clamp;
lockdep_assert_rq_held(rq);
/*
* If sched_uclamp_used was enabled after task @p was enqueued,
* we could end up with an unbalanced call to uclamp_rq_dec_id().
*
* In this case the uc_se->active flag should be false since no uclamp
* accounting was performed at enqueue time and we can just return
* here.
*
* Need to be careful of the following enqueue/dequeue ordering
* problem too:
*
* enqueue(taskA)
* // sched_uclamp_used gets enabled
* enqueue(taskB)
* dequeue(taskA)
* // Must not decrement bucket->tasks here
* dequeue(taskB)
*
* where we could end up with stale data in uc_se and
* bucket[uc_se->bucket_id].
*
* The check here eliminates the possibility of such a race.
*/
if (unlikely(!uc_se->active))
return;
bucket = &uc_rq->bucket[uc_se->bucket_id];
SCHED_WARN_ON(!bucket->tasks);
if (likely(bucket->tasks))
bucket->tasks--;
uc_se->active = false;
/*
* Keep "local max aggregation" simple and accept to (possibly)
* overboost some RUNNABLE tasks in the same bucket.
* The rq clamp bucket value is reset to its base value whenever
* there are no more RUNNABLE tasks refcounting it.
*/
if (likely(bucket->tasks))
return;
rq_clamp = uclamp_rq_get(rq, clamp_id);
/*
* Defensive programming: this should never happen. If it happens,
* e.g. due to future modification, warn and fixup the expected value.
*/
SCHED_WARN_ON(bucket->value > rq_clamp);
if (bucket->value >= rq_clamp) {
bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
uclamp_rq_set(rq, clamp_id, bkt_clamp);
}
}
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
enum uclamp_id clamp_id;
/*
* Avoid any overhead until uclamp is actually used by userspace.
*
* The condition is constructed such that a NOP is generated when
* sched_uclamp_used is disabled.
*/
if (!static_branch_unlikely(&sched_uclamp_used))
return;
if (unlikely(!p->sched_class->uclamp_enabled))
return;
for_each_clamp_id(clamp_id)
uclamp_rq_inc_id(rq, p, clamp_id);
/* Reset clamp idle holding when there is one RUNNABLE task */
if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
enum uclamp_id clamp_id;
/*
* Avoid any overhead until uclamp is actually used by userspace.
*
* The condition is constructed such that a NOP is generated when
* sched_uclamp_used is disabled.
*/
if (!static_branch_unlikely(&sched_uclamp_used))
return;
if (unlikely(!p->sched_class->uclamp_enabled))
return;
for_each_clamp_id(clamp_id)
uclamp_rq_dec_id(rq, p, clamp_id);
}
static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
enum uclamp_id clamp_id)
{
if (!p->uclamp[clamp_id].active)
return;
uclamp_rq_dec_id(rq, p, clamp_id);
uclamp_rq_inc_id(rq, p, clamp_id);
/*
* Make sure to clear the idle flag if we've transiently reached 0
* active tasks on rq.
*/
if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}
static inline void
uclamp_update_active(struct task_struct *p)
{
enum uclamp_id clamp_id;
struct rq_flags rf;
struct rq *rq;
/*
* Lock the task and the rq where the task is (or was) queued.
*
* We might lock the (previous) rq of a !RUNNABLE task, but that's the
* price to pay to safely serialize util_{min,max} updates with
* enqueues, dequeues and migration operations.
* This is the same locking schema used by __set_cpus_allowed_ptr().
*/
rq = task_rq_lock(p, &rf);
/*
* Setting the clamp bucket is serialized by task_rq_lock().
* If the task is not yet RUNNABLE and its task_struct is not
* affecting a valid clamp bucket, the next time it's enqueued,
* it will already see the updated clamp bucket value.
*/
for_each_clamp_id(clamp_id)
uclamp_rq_reinc_id(rq, p, clamp_id);
task_rq_unlock(rq, p, &rf);
}
#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css)
{
struct css_task_iter it;
struct task_struct *p;
css_task_iter_start(css, 0, &it);
while ((p = css_task_iter_next(&it)))
uclamp_update_active(p);
css_task_iter_end(&it);
}
static void cpu_util_update_eff(struct cgroup_subsys_state *css);
#endif
#ifdef CONFIG_SYSCTL
#ifdef CONFIG_UCLAMP_TASK
#ifdef CONFIG_UCLAMP_TASK_GROUP
static void uclamp_update_root_tg(void)
{
struct task_group *tg = &root_task_group;
uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
sysctl_sched_uclamp_util_min, false);
uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
sysctl_sched_uclamp_util_max, false);
rcu_read_lock();
cpu_util_update_eff(&root_task_group.css);
rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
#endif
static void uclamp_sync_util_min_rt_default(void)
{
struct task_struct *g, *p;
/*
* copy_process() sysctl_uclamp
* uclamp_min_rt = X;
* write_lock(&tasklist_lock) read_lock(&tasklist_lock)
* // link thread smp_mb__after_spinlock()
* write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
* sched_post_fork() for_each_process_thread()
* __uclamp_sync_rt() __uclamp_sync_rt()
*
* Ensures that either sched_post_fork() will observe the new
* uclamp_min_rt or for_each_process_thread() will observe the new
* task.
*/
read_lock(&tasklist_lock);
smp_mb__after_spinlock();
read_unlock(&tasklist_lock);
rcu_read_lock();
for_each_process_thread(g, p)
uclamp_update_util_min_rt_default(p);
rcu_read_unlock();
}
static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
bool update_root_tg = false;
int old_min, old_max, old_min_rt;
int result;
guard(mutex)(&uclamp_mutex);
old_min = sysctl_sched_uclamp_util_min;
old_max = sysctl_sched_uclamp_util_max;
old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
result = proc_dointvec(table, write, buffer, lenp, ppos);
if (result)
goto undo;
if (!write)
return 0;
if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
result = -EINVAL;
goto undo;
}
if (old_min != sysctl_sched_uclamp_util_min) {
uclamp_se_set(&uclamp_default[UCLAMP_MIN],
sysctl_sched_uclamp_util_min, false);
update_root_tg = true;
}
if (old_max != sysctl_sched_uclamp_util_max) {
uclamp_se_set(&uclamp_default[UCLAMP_MAX],
sysctl_sched_uclamp_util_max, false);
update_root_tg = true;
}
if (update_root_tg) {
static_branch_enable(&sched_uclamp_used);
uclamp_update_root_tg();
}
if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
static_branch_enable(&sched_uclamp_used);
uclamp_sync_util_min_rt_default();
}
/*
* We update all RUNNABLE tasks only when task groups are in use.
* Otherwise, keep it simple and defer the update lazily to the next
* task enqueue.
*/
return 0;
undo:
sysctl_sched_uclamp_util_min = old_min;
sysctl_sched_uclamp_util_max = old_max;
sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
return result;
}
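/*
 * The three values handled above are typically exposed via procfs as
 * /proc/sys/kernel/sched_util_clamp_{min,max,min_rt_default} (assuming the
 * usual sysctl table registration elsewhere in the scheduler); writes to
 * those files arrive here with @write != 0.
 */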
#endif
#endif
static int uclamp_validate(struct task_struct *p,
const struct sched_attr *attr)
{
int util_min = p->uclamp_req[UCLAMP_MIN].value;
int util_max = p->uclamp_req[UCLAMP_MAX].value;
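/*
 * Note on the range checks below: sched_util_{min,max} use -1 as a
 * "reset to default" sentinel (see uclamp_reset()), and util_min/util_max
 * are deliberately signed ints so -1 is representable. Adding 1 to both
 * sides lets a single comparison accept -1 (which becomes 0) together
 * with the valid range [0, SCHED_CAPACITY_SCALE].
 */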
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
util_min = attr->sched_util_min;
if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
return -EINVAL;
}
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
util_max = attr->sched_util_max;
if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
return -EINVAL;
}
if (util_min != -1 && util_max != -1 && util_min > util_max)
return -EINVAL;
/*
* We have valid uclamp attributes; make sure uclamp is enabled.
*
* We need to do that here, because enabling static branches is a
* blocking operation which obviously cannot be done while holding
* scheduler locks.
*/
static_branch_enable(&sched_uclamp_used);
return 0;
}
static bool uclamp_reset(const struct sched_attr *attr,
enum uclamp_id clamp_id,
struct uclamp_se *uc_se)
{
/* Reset on sched class change for a non user-defined clamp value. */
if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
!uc_se->user_defined)
return true;
/* Reset on sched_util_{min,max} == -1. */
if (clamp_id == UCLAMP_MIN &&
attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
attr->sched_util_min == -1) {
return true;
}
if (clamp_id == UCLAMP_MAX &&
attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
attr->sched_util_max == -1) {
return true;
}
return false;
}
static void __setscheduler_uclamp(struct task_struct *p,
const struct sched_attr *attr)
{
enum uclamp_id clamp_id;
for_each_clamp_id(clamp_id) {
struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
unsigned int value;
if (!uclamp_reset(attr, clamp_id, uc_se))
continue;
/*
* RT tasks by default have a 100% boost value that can be modified
* at runtime.
*/
if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
value = sysctl_sched_uclamp_util_min_rt_default;
else
value = uclamp_none(clamp_id);
uclamp_se_set(uc_se, value, false);
}
if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
return;
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
attr->sched_util_min != -1) {
uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
attr->sched_util_min, true);
}
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
attr->sched_util_max != -1) {
uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
attr->sched_util_max, true);
}
}
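/*
 * Minimal user-space sketch of driving the path above (illustrative only;
 * glibc provides no sched_setattr() wrapper, so the raw syscall is used,
 * and header locations/definitions may vary by distribution):
 *
 *	#include <linux/sched.h>
 *	#include <linux/sched/types.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_flags	= SCHED_FLAG_UTIL_CLAMP_MIN |
 *				  SCHED_FLAG_UTIL_CLAMP_MAX,
 *		.sched_util_min	= 256,	// request at least ~25% capacity
 *		.sched_util_max	= 512,	// cap at ~50% capacity
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))	// 0 == current task
 *		perror("sched_setattr");
 *
 * Passing -1 as sched_util_{min,max} with the corresponding flag set
 * resets that clamp to its non-user-defined default, as handled by
 * uclamp_reset() above.
 */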
static void uclamp_fork(struct task_struct *p)
{
enum uclamp_id clamp_id;
/*
* We don't need to hold task_rq_lock() when updating p->uclamp_* here
* as the task is still in its early fork stages.
*/
for_each_clamp_id(clamp_id)
p->uclamp[clamp_id].active = false;
if (likely(!p->sched_reset_on_fork))
return;
for_each_clamp_id(clamp_id) {
uclamp_se_set(&p->uclamp_req[clamp_id],
uclamp_none(clamp_id), false);
}
}
static void uclamp_post_fork(struct task_struct *p)
{
uclamp_update_util_min_rt_default(p);
}
static void __init init_uclamp_rq(struct rq *rq)
{
enum uclamp_id clamp_id;
struct uclamp_rq *uc_rq = rq->uclamp;
for_each_clamp_id(clamp_id) {
uc_rq[clamp_id] = (struct uclamp_rq) {
.value = uclamp_none(clamp_id)
};
}
rq->uclamp_flags = UCLAMP_FLAG_IDLE;
}
static void __init init_uclamp(void)
{
struct uclamp_se uc_max = {};
enum uclamp_id clamp_id;
int cpu;
for_each_possible_cpu(cpu)
init_uclamp_rq(cpu_rq(cpu));
for_each_clamp_id(clamp_id) {
uclamp_se_set(&init_task.uclamp_req[clamp_id],
uclamp_none(clamp_id), false);
}
/* System defaults allow max clamp values for both indexes */
uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
for_each_clamp_id(clamp_id) {
uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
root_task_group.uclamp_req[clamp_id] = uc_max;
root_task_group.uclamp[clamp_id] = uc_max;
#endif
}
}
#else /* CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline int uclamp_validate(struct task_struct *p,
const struct sched_attr *attr)
{
return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
const struct sched_attr *attr) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void uclamp_post_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */
bool sched_task_on_rq(struct task_struct *p)
{
return task_on_rq_queued(p);
}
unsigned long get_wchan(struct task_struct *p)
{
unsigned long ip = 0;
unsigned int state;
if (!p || p == current)
return 0;
/* Only get wchan if task is blocked and we can keep it that way. */
raw_spin_lock_irq(&p->pi_lock);
state = READ_ONCE(p->__state);
smp_rmb(); /* see try_to_wake_up() */
if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
ip = __get_wchan(p);
raw_spin_unlock_irq(&p->pi_lock);
return ip;
}
static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
if (!(flags & ENQUEUE_NOCLOCK))
update_rq_clock(rq);
if (!(flags & ENQUEUE_RESTORE)) {
sched_info_enqueue(rq, p);
psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
}
uclamp_rq_inc(rq, p);
p->sched_class->enqueue_task(rq, p, flags);
if (sched_core_enabled(rq))
sched_core_enqueue(rq, p);
}
static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
if (sched_core_enabled(rq))
sched_core_dequeue(rq, p, flags);
if (!(flags & DEQUEUE_NOCLOCK))
update_rq_clock(rq);
if (!(flags & DEQUEUE_SAVE)) {
sched_info_dequeue(rq, p);
psi_dequeue(p, flags & DEQUEUE_SLEEP);
}
uclamp_rq_dec(rq, p);
p->sched_class->dequeue_task(rq, p, flags);
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (task_on_rq_migrating(p))
flags |= ENQUEUE_MIGRATED;
if (flags & ENQUEUE_MIGRATED)
sched_mm_cid_migrate_to(rq, p);
enqueue_task(rq, p, flags);
p->on_rq = TASK_ON_RQ_QUEUED;
}
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
dequeue_task(rq, p, flags);
}
static inline int __normal_prio(int policy, int rt_prio, int nice)
{
int prio;
if (dl_policy(policy))
prio = MAX_DL_PRIO - 1;
else if (rt_policy(policy))
prio = MAX_RT_PRIO - 1 - rt_prio;
else
prio = NICE_TO_PRIO(nice);
return prio;
}
/*
* Calculate the expected normal priority: i.e. priority
* without taking RT-inheritance into account. Might be
* boosted by interactivity modifiers. Changes upon fork,
* setprio syscalls, and whenever the interactivity
* estimator recalculates.
*/
static inline int normal_prio(struct task_struct *p)
{
return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}
/*
* Calculate the current priority, i.e. the priority
* taken into account by the scheduler. This value might
* be boosted by RT tasks, or might be boosted by
* interactivity modifiers. Will be RT if the task got
* RT-boosted. If not then it returns p->normal_prio.
*/
static int effective_prio(struct task_struct *p)
{
p->normal_prio = normal_prio(p);
/*
* If we are an RT task or we were boosted to RT priority,
* keep the priority unchanged. Otherwise, update priority
* to the normal priority:
*/
if (!rt_prio(p->prio))
return p->normal_prio;
return p->prio;
}
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
*
* Return: 1 if the task is currently executing. 0 otherwise.
*/
inline int task_curr(const struct task_struct *p)
{
return cpu_curr(task_cpu(p)) == p;
}
/*
* switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
* use the balance_callback list if you want balancing.
*
* This means any call to check_class_changed() must be followed by a call to
* balance_callback().
*/
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
const struct sched_class *prev_class,
int oldprio)
{
if (prev_class != p->sched_class) {
if (prev_class->switched_from)
prev_class->switched_from(rq, p);
p->sched_class->switched_to(rq, p);
} else if (oldprio != p->prio || dl_task(p))
p->sched_class->prio_changed(rq, p, oldprio);
}
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
if (p->sched_class == rq->curr->sched_class)
rq->curr->sched_class->check_preempt_curr(rq, p, flags);
else if (sched_class_above(p->sched_class, rq->curr->sched_class))
resched_curr(rq);
/*
* A queue event has occurred, and we're going to schedule. In
* this case, we can save a useless back-to-back clock update.
*/
if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
rq_clock_skip_update(rq);
}
static __always_inline
int __task_state_match(struct task_struct *p, unsigned int state)
{
if (READ_ONCE(p->__state) & state)
return 1;
#ifdef CONFIG_PREEMPT_RT
if (READ_ONCE(p->saved_state) & state)
return -1;
#endif
return 0;
}
static __always_inline
int task_state_match(struct task_struct *p, unsigned int state)
{
#ifdef CONFIG_PREEMPT_RT
int match;
/*
* Serialize against current_save_and_set_rtlock_wait_state() and
* current_restore_rtlock_saved_state().
*/
raw_spin_lock_irq(&p->pi_lock);
match = __task_state_match(p, state);
raw_spin_unlock_irq(&p->pi_lock);
return match;
#else
return __task_state_match(p, state);
#endif
}
/*
* wait_task_inactive - wait for a thread to unschedule.
*
* Wait for the thread to block in any of the states set in @match_state.
* If it changes, i.e. @p might have woken up, then return zero. When we
* succeed in waiting for @p to be off its CPU, we return a positive number
* (its total switch count). If a second call a short while later returns the
* same number, the caller can be sure that @p has remained unscheduled the
* whole time.
*
* The caller must ensure that the task *will* unschedule sometime soon,
* else this function might spin for a *long* time. This function can't
* be called with interrupts off, or it may introduce deadlock with
* smp_call_function() if an IPI is sent by the same process we are
* waiting to become inactive.
*/
unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
{
int running, queued, match;
struct rq_flags rf;
unsigned long ncsw;
struct rq *rq;
for (;;) {
/*
* We do the initial early heuristics without holding
* any task-queue locks at all. We'll only try to get
* the runqueue lock when things look like they will
* work out!
*/
rq = task_rq(p);
/*
* If the task is actively running on another CPU
* still, just relax and busy-wait without holding
* any locks.
*
* NOTE! Since we don't hold any locks, it's not
* even sure that "rq" stays as the right runqueue!
* But we don't care, since "task_on_cpu()" will
* return false if the runqueue has changed and p
* is actually now running somewhere else!
*/
while (task_on_cpu(rq, p)) {
if (!task_state_match(p, match_state))
return 0;
cpu_relax();
}
/*
* Ok, time to look more closely! We need the rq
* lock now, to be *sure*. If we're wrong, we'll
* just go back and repeat.
*/
rq = task_rq_lock(p, &rf);
trace_sched_wait_task(p);
running = task_on_cpu(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
if ((match = __task_state_match(p, match_state))) {
/*
* When matching on p->saved_state, consider this task
* still queued so it will wait.
*/
if (match < 0)
queued = 1;
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
}
task_rq_unlock(rq, p, &rf);
/*
* If it changed from the expected state, bail out now.
*/
if (unlikely(!ncsw))
break;
/*
* Was it really running after all now that we
* checked with the proper locks actually held?
*
* Oops. Go back and try again..
*/
if (unlikely(running)) {
cpu_relax();
continue;
}
/*
* It's not enough that it's not actively running,
* it must be off the runqueue _entirely_, and not
* preempted!
*
* So if it was still runnable (but just not actively
* running right now), it's preempted, and we should
* yield - it could be a while.
*/
if (unlikely(queued)) {
ktime_t to = NSEC_PER_SEC / HZ;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
continue;
}
/*
* Ahh, all good. It wasn't running, and it wasn't
* runnable, which means that it will never become
* running in the future either. We're all done!
*/
break;
}
return ncsw;
}
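/*
 * Usage sketch (illustrative, hypothetical caller): sample the switch
 * count twice to prove that @p stayed off its CPU in between:
 *
 *	unsigned long ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
 *
 *	// ... inspect the blocked task ...
 *
 *	if (!ncsw || wait_task_inactive(p, TASK_UNINTERRUPTIBLE) != ncsw)
 *		goto retry;	// it ran (or changed state) in between
 *
 * This is the pattern the comment above describes; ptrace attach paths
 * use a similar check with __TASK_TRACED.
 */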
#ifdef CONFIG_SMP
static void
__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
static int __set_cpus_allowed_ptr(struct task_struct *p,
struct affinity_context *ctx);
static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
{
struct affinity_context ac = {
.new_mask = cpumask_of(rq->cpu),
.flags = SCA_MIGRATE_DISABLE,
};
if (likely(!p->migration_disabled))
return;
if (p->cpus_ptr != &p->cpus_mask)
return;
/*
* Violates locking rules! See comment in __do_set_cpus_allowed().
*/
__do_set_cpus_allowed(p, &ac);
}
void migrate_disable(void)
{
struct task_struct *p = current;
if (p->migration_disabled) {
p->migration_disabled++;
return;
}
preempt_disable();
this_rq()->nr_pinned++;
p->migration_disabled = 1;
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);
void migrate_enable(void)
{
struct task_struct *p = current;
struct affinity_context ac = {
.new_mask = &p->cpus_mask,
.flags = SCA_MIGRATE_ENABLE,
};
if (p->migration_disabled > 1) {
p->migration_disabled--;
return;
}
if (WARN_ON_ONCE(!p->migration_disabled))
return;
/*
* Ensure stop_task runs either before or after this, and that
* __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
*/
preempt_disable();
if (p->cpus_ptr != &p->cpus_mask)
__set_cpus_allowed_ptr(p, &ac);
/*
* Mustn't clear migration_disabled() until cpus_ptr points back at the
* regular cpus_mask, otherwise things that race (e.g.
* select_fallback_rq) get confused.
*/
barrier();
p->migration_disabled = 0;
this_rq()->nr_pinned--;
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
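/*
 * Usage sketch (illustrative): migrate_disable() pins the current task to
 * its CPU without disabling preemption, which is what PREEMPT_RT needs for
 * per-CPU data protected by sleeping locks:
 *
 *	migrate_disable();
 *	// this_cpu_ptr() results remain stable here, although the task
 *	// may still be preempted by a higher-priority task
 *	migrate_enable();
 *
 * Calls nest via p->migration_disabled; only the outermost
 * migrate_enable() (count 1 -> 0) may need to restore the affinity mask
 * through __set_cpus_allowed_ptr().
 */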
static inline bool rq_has_pinned_tasks(struct rq *rq)
{
return rq->nr_pinned;
}
/*
* Per-CPU kthreads are allowed to run on !active && online CPUs, see
* __set_cpus_allowed_ptr() and select_fallback_rq().
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
/* When not in the task's cpumask, no point in looking further. */
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;
/* migrate_disabled() must be allowed to finish. */
if (is_migration_disabled(p))
return cpu_online(cpu);
/* Non-kernel threads must be on active CPUs; online-but-inactive (coming up or going down) is not allowed. */
if (!(p->flags & PF_KTHREAD))
return cpu_active(cpu) && task_cpu_possible(cpu, p);
/* KTHREAD_IS_PER_CPU is always allowed. */
if (kthread_is_per_cpu(p))
return cpu_online(cpu);
/* Regular kernel threads don't get to stay during offline. */
if (cpu_dying(cpu))
return false;
/* But are allowed during online. */
return cpu_online(cpu);
}
/*
* This is how migration works:
*
* 1) we invoke migration_cpu_stop() on the target CPU using
* stop_one_cpu().
* 2) stopper starts to run (implicitly forcing the migrated thread
* off the CPU)
* 3) it checks whether the migrated task is still in the wrong runqueue.
* 4) if it's in the wrong runqueue then the migration thread removes
* it and puts it into the right queue.
* 5) stopper completes and stop_one_cpu() returns and the migration
* is done.
*/
/*
* move_queued_task - move a queued task to new rq.
*
* Returns (locked) new rq. Old rq's lock is released.
*/
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
struct task_struct *p, int new_cpu)
{
lockdep_assert_rq_held(rq);
deactivate_task(rq, p, DEQUEUE_NOCLOCK);
set_task_cpu(p, new_cpu);
rq_unlock(rq, rf);
rq = cpu_rq(new_cpu);
rq_lock(rq, rf);
WARN_ON_ONCE(task_cpu(p) != new_cpu);
activate_task(rq, p, 0);
check_preempt_curr(rq, p, 0);
return rq;
}
struct migration_arg {
struct task_struct *task;
int dest_cpu;
struct set_affinity_pending *pending;
};
/*
* @refs: number of wait_for_completion()
* @stop_pending: is @stop_work in use
*/
struct set_affinity_pending {
refcount_t refs;
unsigned int stop_pending;
struct completion done;
struct cpu_stop_work stop_work;
struct migration_arg arg;
};
/*
* Move (not current) task off this CPU, onto the destination CPU. We're doing
* this because either it can't run here any more (set_cpus_allowed()
* away from this CPU, or CPU going down), or because we're
* attempting to rebalance this task on exec (sched_exec).
*
* So we race with normal scheduler movements, but that's OK, as long
* as the task is no longer on this CPU.
*/
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
struct task_struct *p, int dest_cpu)
{
/* Affinity changed (again). */
if (!is_cpu_allowed(p, dest_cpu))
return rq;
rq = move_queued_task(rq, rf, p, dest_cpu);
return rq;
}
/*
* migration_cpu_stop - this will be executed by a high-priority stopper thread
* and performs thread migration by bumping the thread off its CPU then
* 'pushing' it onto another runqueue.
*/
static int migration_cpu_stop(void *data)
{
struct migration_arg *arg = data;
struct set_affinity_pending *pending = arg->pending;
struct task_struct *p = arg->task;
struct rq *rq = this_rq();
bool complete = false;
struct rq_flags rf;
/*
* The original target CPU might have gone down and we might
* be on another CPU but it doesn't matter.
*/
local_irq_save(rf.flags);
/*
* We need to explicitly wake pending tasks before running
* __migrate_task() such that we will not miss enforcing cpus_ptr
* during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
*/
flush_smp_call_function_queue();
raw_spin_lock(&p->pi_lock);
rq_lock(rq, &rf);
/*
* If we were passed a pending, then ->stop_pending was set, thus
* p->migration_pending must have remained stable.
*/
WARN_ON_ONCE(pending && pending != p->migration_pending);
/*
* If task_rq(p) != rq, it cannot be migrated here, because we're
* holding rq->lock; if p->on_rq == 0 it cannot get enqueued because
* we're holding p->pi_lock.
*/
if (task_rq(p) == rq) {
if (is_migration_disabled(p))
goto out;
if (pending) {
p->migration_pending = NULL;
complete = true;
if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
goto out;
}
if (task_on_rq_queued(p)) {
update_rq_clock(rq);
rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
} else {
p->wake_cpu = arg->dest_cpu;
}
/*
* XXX __migrate_task() can fail, at which point we might end
* up running on a dodgy CPU, AFAICT this can only happen
* during CPU hotplug, at which point we'll get pushed out
* anyway, so it's probably not a big deal.
*/
} else if (pending) {
/*
* This happens when we get migrated between migrate_enable()'s
* preempt_enable() and scheduling the stopper task. At that
* point we're a regular task again and not current anymore.
*
* A !PREEMPT kernel has a giant hole here, which makes it far
* more likely.
*/
/*
* The task moved before the stopper got to run. We're holding
* ->pi_lock, so the allowed mask is stable - if it got
* somewhere allowed, we're done.
*/
if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
p->migration_pending = NULL;
complete = true;
goto out;
}
/*
* When migrate_enable() hits a rq mis-match we can't reliably
* determine is_migration_disabled() and so have to chase after
* it.
*/
WARN_ON_ONCE(!pending->stop_pending);
task_rq_unlock(rq, p, &rf);
stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
&pending->arg, &pending->stop_work);
return 0;
}
out:
if (pending)
pending->stop_pending = false;
task_rq_unlock(rq, p, &rf);
if (complete)
complete_all(&pending->done);
return 0;
}
int push_cpu_stop(void *arg)
{
struct rq *lowest_rq = NULL, *rq = this_rq();
struct task_struct *p = arg;
raw_spin_lock_irq(&p->pi_lock);
raw_spin_rq_lock(rq);
if (task_rq(p) != rq)
goto out_unlock;
if (is_migration_disabled(p)) {
p->migration_flags |= MDF_PUSH;
goto out_unlock;
}
p->migration_flags &= ~MDF_PUSH;
if (p->sched_class->find_lock_rq)
lowest_rq = p->sched_class->find_lock_rq(p, rq);
if (!lowest_rq)
goto out_unlock;
// XXX validate p is still the highest prio task
if (task_rq(p) == rq) {
deactivate_task(rq, p, 0);
set_task_cpu(p, lowest_rq->cpu);
activate_task(lowest_rq, p, 0);
resched_curr(lowest_rq);
}
double_unlock_balance(rq, lowest_rq);
out_unlock:
rq->push_busy = false;
raw_spin_rq_unlock(rq);
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
return 0;
}
/*
* sched_class::set_cpus_allowed must do the below, but is not required to
* actually call this function.
*/
void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
{
if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
p->cpus_ptr = ctx->new_mask;
return;
}
cpumask_copy(&p->cpus_mask, ctx->new_mask);
p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
/*
* Swap in a new user_cpus_ptr if the SCA_USER flag is set
*/
if (ctx->flags & SCA_USER)
swap(p->user_cpus_ptr, ctx->user_mask);
}
static void
__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
{
struct rq *rq = task_rq(p);
bool queued, running;
/*
* This here violates the locking rules for affinity, since we're only
* supposed to change these variables while holding both rq->lock and
* p->pi_lock.
*
* HOWEVER, it magically works, because ttwu() is the only code that
* accesses these variables under p->pi_lock and only does so after
* smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
* before finish_task().
*
* XXX do further audits, this smells like something putrid.
*/
if (ctx->flags & SCA_MIGRATE_DISABLE)
SCHED_WARN_ON(!p->on_cpu);
else
lockdep_assert_held(&p->pi_lock);
queued = task_on_rq_queued(p);
running = task_current(rq, p);
if (queued) {
/*
* Because __kthread_bind() calls this on blocked tasks without
* holding rq->lock.
*/
lockdep_assert_rq_held(rq);
dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
}
if (running)
put_prev_task(rq, p);
p->sched_class->set_cpus_allowed(p, ctx);
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
if (running)
set_next_task(rq, p);
}
/*
* Used for kthread_bind() and select_fallback_rq(); in both cases the
* user-requested affinity (if any) should be destroyed too.
*/
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
struct affinity_context ac = {
.new_mask = new_mask,
.user_mask = NULL,
.flags = SCA_USER, /* clear the user requested mask */
};
union cpumask_rcuhead {
cpumask_t cpumask;
struct rcu_head rcu;
};
__do_set_cpus_allowed(p, &ac);
/*
* Because this is called with p->pi_lock held, it is not possible
* to use kfree() here (when PREEMPT_RT=y), therefore punt to using
* kfree_rcu().
*/
kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
}
static cpumask_t *alloc_user_cpus_ptr(int node)
{
/*
* See do_set_cpus_allowed() above for the rcu_head usage.
*/
int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
return kmalloc_node(size, GFP_KERNEL, node);
}
int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
int node)
{
cpumask_t *user_mask;
unsigned long flags;
/*
* Always clear dst->user_cpus_ptr first, as the two tasks' user_cpus_ptr
* values may differ by now due to racing.
*/
dst->user_cpus_ptr = NULL;
/*
* This check is racy and losing the race is a valid situation.
* It is not worth the extra overhead of taking the pi_lock on
* every fork/clone.
*/
if (data_race(!src->user_cpus_ptr))
return 0;
user_mask = alloc_user_cpus_ptr(node);
if (!user_mask)
return -ENOMEM;
/*
* Use pi_lock to protect content of user_cpus_ptr
*
* Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
* do_set_cpus_allowed().
*/
raw_spin_lock_irqsave(&src->pi_lock, flags);
if (src->user_cpus_ptr) {
swap(dst->user_cpus_ptr, user_mask);
cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
}
raw_spin_unlock_irqrestore(&src->pi_lock, flags);
if (unlikely(user_mask))
kfree(user_mask);
return 0;
}
static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
{
struct cpumask *user_mask = NULL;
swap(p->user_cpus_ptr, user_mask);
return user_mask;
}
void release_user_cpus_ptr(struct task_struct *p)
{
kfree(clear_user_cpus_ptr(p));
}
/*
* This function is wildly self-concurrent; here be dragons.
*
*
* When given a valid mask, __set_cpus_allowed_ptr() must block until the
* designated task is enqueued on an allowed CPU. If that task is currently
* running, we have to kick it out using the CPU stopper.
*
* Migrate-Disable comes along and tramples all over our nice sandcastle.
* Consider:
*
* Initial conditions: P0->cpus_mask = [0, 1]
*
* P0@CPU0 P1
*
* migrate_disable();
* <preempted>
* set_cpus_allowed_ptr(P0, [1]);
*
* P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
* its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
* This means we need the following scheme:
*
* P0@CPU0 P1
*
* migrate_disable();
* <preempted>
* set_cpus_allowed_ptr(P0, [1]);
* <blocks>
* <resumes>
* migrate_enable();
* __set_cpus_allowed_ptr();
* <wakes local stopper>
* `--> <woken on migration completion>
*
* Now the fun stuff: there may be several P1-like tasks, i.e. multiple
* concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
* task p are serialized by p->pi_lock, which we can leverage: the one that
* should come into effect at the end of the Migrate-Disable region is the last
* one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
* but we still need to properly signal those waiting tasks at the appropriate
* moment.
*
* This is implemented using struct set_affinity_pending. The first
* __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
* setup an instance of that struct and install it on the targeted task_struct.
* Any and all further callers will reuse that instance. Those then wait for
* a completion signaled at the tail of the CPU stopper callback (1), triggered
* on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
*
*
* (1) In the cases covered above. There is one more where the completion is
* signaled within affine_move_task() itself: when a subsequent affinity request
* occurs after the stopper bailed out due to the targeted task still being
* Migrate-Disable. Consider:
*
* Initial conditions: P0->cpus_mask = [0, 1]
*
* CPU0 P1 P2
* <P0>
* migrate_disable();
* <preempted>
* set_cpus_allowed_ptr(P0, [1]);
* <blocks>
* <migration/0>
* migration_cpu_stop()
* is_migration_disabled()
* <bails>
* set_cpus_allowed_ptr(P0, [0, 1]);
* <signal completion>
* <awakes>
*
* Note that the above is safe vs a concurrent migrate_enable(), as any
* pending affinity completion is preceded by an uninstallation of
* p->migration_pending done with p->pi_lock held.
*/
static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
int dest_cpu, unsigned int flags)
__releases(rq->lock)
__releases(p->pi_lock)
{
struct set_affinity_pending my_pending = { }, *pending = NULL;
bool stop_pending, complete = false;
/* Can the task run on the task's current CPU? If so, we're done */
if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
struct task_struct *push_task = NULL;
if ((flags & SCA_MIGRATE_ENABLE) &&
(p->migration_flags & MDF_PUSH) && !rq->push_busy) {
rq->push_busy = true;
push_task = get_task_struct(p);
}
/*
* If there are pending waiters, but no pending stop_work,
* then complete now.
*/
pending = p->migration_pending;
if (pending && !pending->stop_pending) {
p->migration_pending = NULL;
complete = true;
}
task_rq_unlock(rq, p, rf);
if (push_task) {
stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
p, &rq->push_work);
}
if (complete)
complete_all(&pending->done);
return 0;
}
if (!(flags & SCA_MIGRATE_ENABLE)) {
/* serialized by p->pi_lock */
if (!p->migration_pending) {
/* Install the request */
refcount_set(&my_pending.refs, 1);
init_completion(&my_pending.done);
my_pending.arg = (struct migration_arg) {
.task = p,
.dest_cpu = dest_cpu,
.pending = &my_pending,
};
p->migration_pending = &my_pending;
} else {
pending = p->migration_pending;
refcount_inc(&pending->refs);
/*
* Affinity has changed, but we've already installed a
* pending. migration_cpu_stop() *must* see this, else
* we risk a completion of the pending despite having a
* task on a disallowed CPU.
*
* Serialized by p->pi_lock, so this is safe.
*/
pending->arg.dest_cpu = dest_cpu;
}
}
pending = p->migration_pending;
/*
* - !MIGRATE_ENABLE:
* we'll have installed a pending if there wasn't one already.
*
* - MIGRATE_ENABLE:
* we're here because the current CPU isn't matching anymore,
* the only way that can happen is because of a concurrent
* set_cpus_allowed_ptr() call, which should then still be
* pending completion.
*
* Either way, we really should have a @pending here.
*/
if (WARN_ON_ONCE(!pending)) {
task_rq_unlock(rq, p, rf);
return -EINVAL;
}
if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
/*
* MIGRATE_ENABLE gets here because 'p == current', but for
* anything else we cannot do is_migration_disabled(), punt
* and have the stopper function handle it all race-free.
*/
stop_pending = pending->stop_pending;
if (!stop_pending)
pending->stop_pending = true;
if (flags & SCA_MIGRATE_ENABLE)
p->migration_flags &= ~MDF_PUSH;
task_rq_unlock(rq, p, rf);
if (!stop_pending) {
stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
&pending->arg, &pending->stop_work);
}
if (flags & SCA_MIGRATE_ENABLE)
return 0;
} else {
if (!is_migration_disabled(p)) {
if (task_on_rq_queued(p))
rq = move_queued_task(rq, rf, p, dest_cpu);
if (!pending->stop_pending) {
p->migration_pending = NULL;
complete = true;
}
}
task_rq_unlock(rq, p, rf);
if (complete)
complete_all(&pending->done);
}
wait_for_completion(&pending->done);
if (refcount_dec_and_test(&pending->refs))
wake_up_var(&pending->refs); /* No UaF, just an address */
/*
* Block the original owner of &pending until all subsequent callers
* have seen the completion and decremented the refcount.
*/
wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
/* ARGH */
WARN_ON_ONCE(my_pending.stop_pending);
return 0;
}
/*
* Called with both p->pi_lock and rq->lock held; drops both before returning.
*/
static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
struct affinity_context *ctx,
struct rq *rq,
struct rq_flags *rf)
__releases(rq->lock)
__releases(p->pi_lock)
{
const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
const struct cpumask *cpu_valid_mask = cpu_active_mask;
bool kthread = p->flags & PF_KTHREAD;
unsigned int dest_cpu;
int ret = 0;
update_rq_clock(rq);
if (kthread || is_migration_disabled(p)) {
/*
* Kernel threads are allowed on online && !active CPUs,
* however, during cpu-hot-unplug, even these might get pushed
* away if not KTHREAD_IS_PER_CPU.
*
* Specifically, migration_disabled() tasks must not fail the
* cpumask_any_and_distribute() pick below, esp. so on
* SCA_MIGRATE_ENABLE, otherwise we'll not call
* set_cpus_allowed_common() and actually reset p->cpus_ptr.
*/
cpu_valid_mask = cpu_online_mask;
}
if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
ret = -EINVAL;
goto out;
}
/*
* Must re-check here, to close a race against __kthread_bind();
* sched_setaffinity() is not guaranteed to observe the flag.
*/
if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
ret = -EINVAL;
goto out;
}
if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
if (ctx->flags & SCA_USER)
swap(p->user_cpus_ptr, ctx->user_mask);
goto out;
}
if (WARN_ON_ONCE(p == current &&
is_migration_disabled(p) &&
!cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
ret = -EBUSY;
goto out;
}
}
/*
* Picking a ~random cpu helps in cases where we are changing affinity
* for groups of tasks (i.e. cpuset), so that load balancing is not
* immediately required to distribute the tasks within their new mask.
*/
dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
if (dest_cpu >= nr_cpu_ids) {
ret = -EINVAL;
goto out;
}
__do_set_cpus_allowed(p, ctx);
return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
out:
task_rq_unlock(rq, p, rf);
return ret;
}
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
* is removed from the allowed bitmask.
*
* NOTE: the caller must have a valid reference to the task, the
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
static int __set_cpus_allowed_ptr(struct task_struct *p,
struct affinity_context *ctx)
{
struct rq_flags rf;
struct rq *rq;
rq = task_rq_lock(p, &rf);
/*
* Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
* flags are set.
*/
if (p->user_cpus_ptr &&
!(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
ctx->new_mask = rq->scratch_mask;
return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
}
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
struct affinity_context ac = {
.new_mask = new_mask,
.flags = 0,
};
return __set_cpus_allowed_ptr(p, &ac);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
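/*
 * Usage sketch for users of the export above (illustrative; error handling
 * elided and "worker_fn" is hypothetical):
 *
 *	struct task_struct *tsk = kthread_create(worker_fn, NULL, "worker");
 *
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(2));
 *		wake_up_process(tsk);
 *	}
 *
 * Unlike kthread_bind(), this goes through the full affinity machinery
 * above, including migrating the task if it is already running on a CPU
 * that the new mask excludes.
 */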
/*
* Change a given task's CPU affinity to the intersection of its current
* affinity mask and @subset_mask, writing the resulting mask to @new_mask.
* If user_cpus_ptr is defined, use it as the basis for restricting CPU
* affinity; otherwise fall back to the possible mask via task_user_cpus().
*
* If the resulting mask is empty, leave the affinity unchanged and return
* -EINVAL.
*/
static int restrict_cpus_allowed_ptr(struct task_struct *p,
struct cpumask *new_mask,
const struct cpumask *subset_mask)
{
struct affinity_context ac = {
.new_mask = new_mask,
.flags = 0,
};
struct rq_flags rf;
struct rq *rq;
int err;
rq = task_rq_lock(p, &rf);
/*
* Forcefully restricting the affinity of a deadline task is
* likely to cause problems, so fail here and let the caller
* noisily override the mask entirely.
*/
if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
err = -EPERM;
goto err_unlock;
}
if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
err = -EINVAL;
goto err_unlock;
}
return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
err_unlock:
task_rq_unlock(rq, p, &rf);
return err;
}
/*
* Restrict the CPU affinity of task @p so that it is a subset of
* task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
* old affinity mask. If the resulting mask is empty, we warn and walk
* up the cpuset hierarchy until we find a suitable mask.
*/
void force_compatible_cpus_allowed_ptr(struct task_struct *p)
{
cpumask_var_t new_mask;
const struct cpumask *override_mask = task_cpu_possible_mask(p);
alloc_cpumask_var(&new_mask, GFP_KERNEL);
/*
* __migrate_task() can fail silently in the face of concurrent
* offlining of the chosen destination CPU, so take the hotplug
* lock to ensure that the migration succeeds.
*/
cpus_read_lock();
if (!cpumask_available(new_mask))
goto out_set_mask;
if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
goto out_free_mask;
/*
* We failed to find a valid subset of the affinity mask for the
* task, so override it based on its cpuset hierarchy.
*/
cpuset_cpus_allowed(p, new_mask);
override_mask = new_mask;
out_set_mask:
if (printk_ratelimit()) {
printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
task_pid_nr(p), p->comm,
cpumask_pr_args(override_mask));
}
WARN_ON(set_cpus_allowed_ptr(p, override_mask));
out_free_mask:
cpus_read_unlock();
free_cpumask_var(new_mask);
}
static int
__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
/*
* Restore the affinity of a task @p which was previously restricted by a
* call to force_compatible_cpus_allowed_ptr().
*
* It is the caller's responsibility to serialise this with any calls to
* force_compatible_cpus_allowed_ptr(@p).
*/
void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
{
struct affinity_context ac = {
.new_mask = task_user_cpus(p),
.flags = 0,
};
int ret;
/*
* Try to restore the old affinity mask with __sched_setaffinity().
* Cpuset masking will be done there too.
*/
ret = __sched_setaffinity(p, &ac);
WARN_ON_ONCE(ret);
}
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
unsigned int state = READ_ONCE(p->__state);
/*
* We should never call set_task_cpu() on a blocked task,
* ttwu() will sort out the placement.
*/
WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
/*
* A migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
* because schedstat_wait_{start,end} rebase the migrating task's wait_start
* time relying on p->on_rq.
*/
WARN_ON_ONCE(state == TASK_RUNNING &&
p->sched_class == &fair_sched_class &&
(p->on_rq && !task_on_rq_migrating(p)));
#ifdef CONFIG_LOCKDEP
/*
* The caller should hold either p->pi_lock or rq->lock, when changing
* a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
*
* sched_move_task() holds both and thus holding either pins the cgroup,
* see task_group().
*
* Furthermore, all task_rq users should acquire both locks, see
* task_rq_lock().
*/
WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
lockdep_is_held(__rq_lockp(task_rq(p)))));
#endif
/*
* Clearly, migrating tasks to offline CPUs is a fairly daft thing.
*/
WARN_ON_ONCE(!cpu_online(new_cpu));
WARN_ON_ONCE(is_migration_disabled(p));
#endif
trace_sched_migrate_task(p, new_cpu);
if (task_cpu(p) != new_cpu) {
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
rseq_migrate(p);
sched_mm_cid_migrate_from(p);
perf_event_task_migrate(p);
}
__set_task_cpu(p, new_cpu);
}
#ifdef CONFIG_NUMA_BALANCING
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
if (task_on_rq_queued(p)) {
struct rq *src_rq, *dst_rq;
struct rq_flags srf, drf;
src_rq = task_rq(p);
dst_rq = cpu_rq(cpu);
rq_pin_lock(src_rq, &srf);
rq_pin_lock(dst_rq, &drf);
deactivate_task(src_rq, p, 0);
set_task_cpu(p, cpu);
activate_task(dst_rq, p, 0);
check_preempt_curr(dst_rq, p, 0);
rq_unpin_lock(dst_rq, &drf);
rq_unpin_lock(src_rq, &srf);
} else {
/*
* Task isn't running anymore; make it appear like we migrated
* it before it went to sleep. This means on wakeup we make the
* previous CPU our target instead of where it really is.
*/
p->wake_cpu = cpu;
}
}
struct migration_swap_arg {
struct task_struct *src_task, *dst_task;
int src_cpu, dst_cpu;
};
static int migrate_swap_stop(void *data)
{
struct migration_swap_arg *arg = data;
struct rq *src_rq, *dst_rq;
if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
return -EAGAIN;
src_rq = cpu_rq(arg->src_cpu);
dst_rq = cpu_rq(arg->dst_cpu);
guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
guard(double_rq_lock)(src_rq, dst_rq);
if (task_cpu(arg->dst_task) != arg->dst_cpu)
return -EAGAIN;
if (task_cpu(arg->src_task) != arg->src_cpu)
return -EAGAIN;
if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
return -EAGAIN;
if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
return -EAGAIN;
__migrate_swap_task(arg->src_task, arg->dst_cpu);
__migrate_swap_task(arg->dst_task, arg->src_cpu);
return 0;
}
/*
* Cross migrate two tasks
*/
int migrate_swap(struct task_struct *cur, struct task_struct *p,
int target_cpu, int curr_cpu)
{
struct migration_swap_arg arg;
int ret = -EINVAL;
arg = (struct migration_swap_arg){
.src_task = cur,
.src_cpu = curr_cpu,
.dst_task = p,
.dst_cpu = target_cpu,
};
if (arg.src_cpu == arg.dst_cpu)
goto out;
/*
* These three tests are all lockless; this is OK since all of them
* will be re-checked with proper locks held further down the line.
*/
if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
goto out;
if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
goto out;
if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
goto out;
trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
out:
return ret;
}
#endif /* CONFIG_NUMA_BALANCING */
/**
* kick_process - kick a running thread to enter/exit the kernel
* @p: the to-be-kicked thread
*
* Cause a process which is running on another CPU to enter
* kernel mode without any delay (e.g. to get signals handled).
*
* NOTE: this function doesn't have to take the runqueue lock,
* because all it wants to ensure is that the remote task enters
* the kernel. If the IPI races and the task has been migrated
* to another CPU then no harm is done and the purpose has been
* achieved as well.
*/
void kick_process(struct task_struct *p)
{
int cpu;
preempt_disable();
cpu = task_cpu(p);
if ((cpu != smp_processor_id()) && task_curr(p))
smp_send_reschedule(cpu);
preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
/*
* ->cpus_ptr is protected by both rq->lock and p->pi_lock
*
* A few notes on cpu_active vs cpu_online:
*
* - cpu_active must be a subset of cpu_online
*
* - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
* see __set_cpus_allowed_ptr(). At this point the newly online
* CPU isn't yet part of the sched domains, and balancing will not
* see it.
*
* - on CPU-down we clear cpu_active() to mask the sched domains and
* prevent the load balancer from placing new tasks on the to-be-removed
* CPU. Existing tasks will remain running there and will be taken
* off.
*
* This means that fallback selection must not select !active CPUs.
* It can also assume that any active CPU must be online. Conversely,
* select_task_rq() below may allow selection of !active CPUs in order
* to satisfy the above rules.
*/
static int select_fallback_rq(int cpu, struct task_struct *p)
{
int nid = cpu_to_node(cpu);
const struct cpumask *nodemask = NULL;
enum { cpuset, possible, fail } state = cpuset;
int dest_cpu;
/*
* If the node that the CPU is on has been offlined, cpu_to_node()
* will return -1. There is no CPU on the node, so we should
* select a CPU on another node.
*/
if (nid != -1) {
nodemask = cpumask_of_node(nid);
/* Look for allowed, online CPU in same node. */
for_each_cpu(dest_cpu, nodemask) {
if (is_cpu_allowed(p, dest_cpu))
return dest_cpu;
}
}
for (;;) {
/* Any allowed, online CPU? */
for_each_cpu(dest_cpu, p->cpus_ptr) {
if (!is_cpu_allowed(p, dest_cpu))
continue;
goto out;
}
/* No more Mr. Nice Guy. */
switch (state) {
case cpuset:
if (cpuset_cpus_allowed_fallback(p)) {
state = possible;
break;
}
fallthrough;
case possible:
/*
* XXX When called from select_task_rq() we only
* hold p->pi_lock and again violate locking order.
*
* More yuck to audit.
*/
do_set_cpus_allowed(p, task_cpu_possible_mask(p));
state = fail;
break;
case fail:
BUG();
break;
}
}
out:
if (state != cpuset) {
/*
* Don't tell them about moving exiting tasks or
* kernel threads (both mm NULL), since they never
* leave kernel.
*/
if (p->mm && printk_ratelimit()) {
printk_deferred("process %d (%s) no longer affine to cpu%d\n",
task_pid_nr(p), p->comm, cpu);
}
}
return dest_cpu;
}
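/*
 * In short, the fallback escalates through three states: 'cpuset' tries the
 * currently allowed, online CPUs (same node first); 'possible' widens the
 * affinity via cpuset_cpus_allowed_fallback() and then, if need be,
 * task_cpu_possible_mask(); 'fail' means nothing was left to try and is a
 * kernel bug (BUG()).
 */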
/*
* The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
*/
static inline
int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
{
lockdep_assert_held(&p->pi_lock);
if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
else
cpu = cpumask_any(p->cpus_ptr);
/*
* In order not to call set_task_cpu() on a blocking task we need
* to rely on ttwu() to place the task on a valid ->cpus_ptr
* CPU.
*
* Since this is common to all placement strategies, this lives here.
*
* [ this allows ->select_task_rq() to simply return task_cpu(p) and
* not worry about this generic constraint ]
*/
if (unlikely(!is_cpu_allowed(p, cpu)))
cpu = select_fallback_rq(task_cpu(p), p);
return cpu;
}
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
static struct lock_class_key stop_pi_lock;
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
struct task_struct *old_stop = cpu_rq(cpu)->stop;
if (stop) {
/*
* Make it appear like a SCHED_FIFO task, it's something
* userspace knows about and won't get confused about.
*
* Also, it will make PI more or less work without too
* much confusion -- but then, stop work should not
* rely on PI working anyway.
*/
sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
stop->sched_class = &stop_sched_class;
/*
* The PI code calls rt_mutex_setprio() with ->pi_lock held to
* adjust the effective priority of a task. As a result,
* rt_mutex_setprio() can trigger (RT) balancing operations,
* which can then trigger wakeups of the stop thread to push
* around the current task.
*
* The stop task itself will never be part of the PI-chain, it
* never blocks, therefore that ->pi_lock recursion is safe.
* Tell lockdep about this by placing the stop->pi_lock in its
* own class.
*/
lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
}
cpu_rq(cpu)->stop = stop;
if (old_stop) {
/*
* Reset it back to a normal scheduling class so that
* it can die in pieces.
*/
old_stop->sched_class = &rt_sched_class;
}
}
#else /* CONFIG_SMP */
static inline int __set_cpus_allowed_ptr(struct task_struct *p,
struct affinity_context *ctx)
{
return set_cpus_allowed_ptr(p, ctx->new_mask);
}
static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
static inline bool rq_has_pinned_tasks(struct rq *rq)
{
return false;
}
static inline cpumask_t *alloc_user_cpus_ptr(int node)
{
return NULL;
}
#endif /* !CONFIG_SMP */
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
struct rq *rq;
if (!schedstat_enabled())
return;
rq = this_rq();
#ifdef CONFIG_SMP
if (cpu == rq->cpu) {
__schedstat_inc(rq->ttwu_local);
__schedstat_inc(p->stats.nr_wakeups_local);
} else {
struct sched_domain *sd;
__schedstat_inc(p->stats.nr_wakeups_remote);
guard(rcu)();
for_each_domain(rq->cpu, sd) {
if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
__schedstat_inc(sd->ttwu_wake_remote);
break;
}
}
}
if (wake_flags & WF_MIGRATED)
__schedstat_inc(p->stats.nr_wakeups_migrate);
#endif /* CONFIG_SMP */
__schedstat_inc(rq->ttwu_count);
__schedstat_inc(p->stats.nr_wakeups);
if (wake_flags & WF_SYNC)
__schedstat_inc(p->stats.nr_wakeups_sync);
}
/*
* Mark the task runnable.
*/
static inline void ttwu_do_wakeup(struct task_struct *p)
{
WRITE_ONCE(p->__state, TASK_RUNNING);
trace_sched_wakeup(p);
}
static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
struct rq_flags *rf)
{
int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
lockdep_assert_rq_held(rq);
if (p->sched_contributes_to_load)
rq->nr_uninterruptible--;
#ifdef CONFIG_SMP
if (wake_flags & WF_MIGRATED)
en_flags |= ENQUEUE_MIGRATED;
else
#endif
if (p->in_iowait) {
delayacct_blkio_end(p);
atomic_dec(&task_rq(p)->nr_iowait);
}
activate_task(rq, p, en_flags);
check_preempt_curr(rq, p, wake_flags);
ttwu_do_wakeup(p);
#ifdef CONFIG_SMP
if (p->sched_class->task_woken) {
/*
* Our task @p is fully woken up and running; so it's safe to
* drop the rq->lock, hereafter rq is only used for statistics.
*/
rq_unpin_lock(rq, rf);
p->sched_class->task_woken(rq, p);
rq_repin_lock(rq, rf);
}
if (rq->idle_stamp) {
u64 delta = rq_clock(rq) - rq->idle_stamp;
u64 max = 2*rq->max_idle_balance_cost;
update_avg(&rq->avg_idle, delta);
if (rq->avg_idle > max)
rq->avg_idle = max;
rq->wake_stamp = jiffies;
rq->wake_avg_idle = rq->avg_idle / 2;
rq->idle_stamp = 0;
}
#endif
}
/*
* Consider @p being inside a wait loop:
*
* for (;;) {
* set_current_state(TASK_UNINTERRUPTIBLE);
*
* if (CONDITION)
* break;
*
* schedule();
* }
* __set_current_state(TASK_RUNNING);
*
* and a wakeup arriving between set_current_state() and schedule(). In this
* case @p is still runnable, so all that needs doing is to change p->state
* back to TASK_RUNNING in an atomic manner.
*
* By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
* then schedule() must still happen and p->state can be changed to
* TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
* need to do a full wakeup with enqueue.
*
* Returns: %true when the wakeup is done,
* %false otherwise.
*/
static int ttwu_runnable(struct task_struct *p, int wake_flags)
{
struct rq_flags rf;
struct rq *rq;
int ret = 0;
rq = __task_rq_lock(p, &rf);
if (task_on_rq_queued(p)) {
if (!task_on_cpu(rq, p)) {
/*
* When on_rq && !on_cpu the task is preempted, see if
* it should preempt the task that is current now.
*/
update_rq_clock(rq);
check_preempt_curr(rq, p, wake_flags);
}
ttwu_do_wakeup(p);
ret = 1;
}
__task_rq_unlock(rq, &rf);
return ret;
}
#ifdef CONFIG_SMP
void sched_ttwu_pending(void *arg)
{
struct llist_node *llist = arg;
struct rq *rq = this_rq();
struct task_struct *p, *t;
struct rq_flags rf;
if (!llist)
return;
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
if (WARN_ON_ONCE(p->on_cpu))
smp_cond_load_acquire(&p->on_cpu, !VAL);
if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
set_task_cpu(p, cpu_of(rq));
ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
}
/*
* Must be after enqueueing at least one task such that
* idle_cpu() does not observe a false-negative -- if it does,
* it is possible for select_idle_siblings() to stack a number
* of tasks on this CPU during that window.
*
* It is OK to clear ttwu_pending when another task is pending;
* we will receive the IPI once local IRQs are enabled again and
* then enqueue it. Since nr_running > 0 by now, idle_cpu() will
* always get the correct result.
*/
WRITE_ONCE(rq->ttwu_pending, 0);
rq_unlock_irqrestore(rq, &rf);
}
/*
* Prepare the scene for sending an IPI for a remote smp_call
*
* Returns true if the caller can proceed with sending the IPI.
* Returns false otherwise.
*/
bool call_function_single_prep_ipi(int cpu)
{
if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
trace_sched_wake_idle_without_ipi(cpu);
return false;
}
return true;
}
/*
* Queue a task on the target CPUs wake_list and wake the CPU via IPI if
* necessary. The wakee CPU on receipt of the IPI will queue the task
* via sched_ttwu_pending() for activation so that the wakee incurs the cost
* of the wakeup instead of the waker.
*/
static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
struct rq *rq = cpu_rq(cpu);
p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
WRITE_ONCE(rq->ttwu_pending, 1);
__smp_call_single_queue(cpu, &p->wake_entry.llist);
}
void wake_up_if_idle(int cpu)
{
struct rq *rq = cpu_rq(cpu);
guard(rcu)();
if (is_idle_task(rcu_dereference(rq->curr))) {
guard(rq_lock_irqsave)(rq);
if (is_idle_task(rq->curr))
resched_curr(rq);
}
}
bool cpus_share_cache(int this_cpu, int that_cpu)
{
if (this_cpu == that_cpu)
return true;
return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
{
/*
* Do not complicate things with the async wake_list while the CPU is
* in hotplug state.
*/
if (!cpu_active(cpu))
return false;
/* Ensure the task will still be allowed to run on the CPU. */
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;
/*
* If the CPU does not share cache, then queue the task on the
* remote rqs wakelist to avoid accessing remote data.
*/
if (!cpus_share_cache(smp_processor_id(), cpu))
return true;
if (cpu == smp_processor_id())
return false;
/*
* If the wakee CPU is idle, or the task is descheduling and the
* only running task on the CPU, then use the wakelist to offload
* the task activation to the idle (or soon-to-be-idle) CPU as
* the current CPU is likely busy. nr_running is checked to
* avoid unnecessary task stacking.
*
* Note that we can only get here with (wakee) p->on_rq=0,
* p->on_cpu can be whatever, we've done the dequeue, so
* the wakee has been accounted out of ->nr_running.
*/
if (!cpu_rq(cpu)->nr_running)
return true;
return false;
}
static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
sched_clock_cpu(cpu); /* Sync clocks across CPUs */
__ttwu_queue_wakelist(p, cpu, wake_flags);
return true;
}
return false;
}
#else /* !CONFIG_SMP */
static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
return false;
}
#endif /* CONFIG_SMP */
static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
{
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
if (ttwu_queue_wakelist(p, cpu, wake_flags))
return;
rq_lock(rq, &rf);
update_rq_clock(rq);
ttwu_do_activate(rq, p, wake_flags, &rf);
rq_unlock(rq, &rf);
}
/*
* Invoked from try_to_wake_up() to check whether the task can be woken up.
*
* The caller holds p::pi_lock if p != current or has preemption
* disabled when p == current.
*
* The rules of PREEMPT_RT saved_state:
*
* The related locking code always holds p::pi_lock when updating
* p::saved_state, which means the code is fully serialized in both cases.
*
* The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT, with no other
* bits set. This allows all wakeup scenarios to be distinguished.
*/
static __always_inline
bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
{
int match;
if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
state != TASK_RTLOCK_WAIT);
}
*success = !!(match = __task_state_match(p, state));
#ifdef CONFIG_PREEMPT_RT
/*
* Saved state preserves the task state across blocking on
* an RT lock. If the state matches, set p::saved_state to
* TASK_RUNNING, but do not wake the task because it waits
* for a lock wakeup. Also indicate success because from
* the regular waker's point of view this has succeeded.
*
* After acquiring the lock the task will restore p::__state
* from p::saved_state which ensures that the regular
* wakeup is not lost. The restore will also set
* p::saved_state to TASK_RUNNING so any further tests will
* not result in false positives vs. @success
*/
if (match < 0)
p->saved_state = TASK_RUNNING;
#endif
return match > 0;
}
/*
* Notes on Program-Order guarantees on SMP systems.
*
* MIGRATION
*
* The basic program-order guarantee on SMP systems is that when a task [t]
* migrates, all its activity on its old CPU [c0] happens-before any subsequent
* execution on its new CPU [c1].
*
* For migration (of runnable tasks) this is provided by the following means:
*
* A) UNLOCK of the rq(c0)->lock scheduling out task t
* B) migration for t is required to synchronize *both* rq(c0)->lock and
* rq(c1)->lock (if not at the same time, then in that order).
* C) LOCK of the rq(c1)->lock scheduling in task
*
* Release/acquire chaining guarantees that B happens after A and C after B.
* Note: the CPU doing B need not be c0 or c1
*
* Example:
*
* CPU0 CPU1 CPU2
*
* LOCK rq(0)->lock
* sched-out X
* sched-in Y
* UNLOCK rq(0)->lock
*
* LOCK rq(0)->lock // orders against CPU0
* dequeue X
* UNLOCK rq(0)->lock
*
* LOCK rq(1)->lock
* enqueue X
* UNLOCK rq(1)->lock
*
* LOCK rq(1)->lock // orders against CPU2
* sched-out Z
* sched-in X
* UNLOCK rq(1)->lock
*
*
* BLOCKING -- aka. SLEEP + WAKEUP
*
* For blocking we (obviously) need to provide the same guarantee as for
* migration. However the means are completely different as there is no lock
* chain to provide order. Instead we do:
*
* 1) smp_store_release(X->on_cpu, 0) -- finish_task()
* 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
*
* Example:
*
* CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
*
* LOCK rq(0)->lock LOCK X->pi_lock
* dequeue X
* sched-out X
* smp_store_release(X->on_cpu, 0);
*
* smp_cond_load_acquire(&X->on_cpu, !VAL);
* X->state = WAKING
* set_task_cpu(X,2)
*
* LOCK rq(2)->lock
* enqueue X
* X->state = RUNNING
* UNLOCK rq(2)->lock
*
* LOCK rq(2)->lock // orders against CPU1
* sched-out Z
* sched-in X
* UNLOCK rq(2)->lock
*
* UNLOCK X->pi_lock
* UNLOCK rq(0)->lock
*
*
* However, for wakeups there is a second guarantee we must provide, namely we
* must ensure that CONDITION=1 done by the caller can not be reordered with
* accesses to the task state; see try_to_wake_up() and set_current_state().
*/
/**
* try_to_wake_up - wake up a thread
* @p: the thread to be awakened
* @state: the mask of task states that can be woken
* @wake_flags: wake modifier flags (WF_*)
*
* Conceptually does:
*
* If (@state & @p->state) @p->state = TASK_RUNNING.
*
* If the task was not queued/runnable, also place it back on a runqueue.
*
* This function is atomic against schedule() which would dequeue the task.
*
* It issues a full memory barrier before accessing @p->state, see the comment
* with set_current_state().
*
* Uses p->pi_lock to serialize against concurrent wake-ups.
*
* Relies on p->pi_lock stabilizing:
* - p->sched_class
* - p->cpus_ptr
* - p->sched_task_group
* in order to do migration, see its use of select_task_rq()/set_task_cpu().
*
* Tries really hard to only take one task_rq(p)->lock for performance.
* Takes rq->lock in:
* - ttwu_runnable() -- old rq, unavoidable, see comment there;
* - ttwu_queue() -- new rq, for enqueue of the task;
* - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
*
* As a consequence we race really badly with just about everything. See the
* many memory barriers and their comments for details.
*
* Return: %true if @p->state changes (an actual wakeup was done),
* %false otherwise.
*/
int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
guard(preempt)();
int cpu, success = 0;
if (p == current) {
/*
* We're waking current, this means 'p->on_rq' and 'task_cpu(p)
* == smp_processor_id()'. Together this means we can special
* case the whole 'p->on_rq && ttwu_runnable()' case below
* without taking any locks.
*
* In particular:
* - we rely on Program-Order guarantees for all the ordering,
* - we're serialized against set_special_state() by virtue of
* it disabling IRQs (this allows not taking ->pi_lock).
*/
if (!ttwu_state_match(p, state, &success))
goto out;
trace_sched_waking(p);
ttwu_do_wakeup(p);
goto out;
}
/*
* If we are going to wake up a thread waiting for CONDITION we
* need to ensure that CONDITION=1 done by the caller can not be
* reordered with p->state check below. This pairs with smp_store_mb()
* in set_current_state() that the waiting thread does.
*/
scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
smp_mb__after_spinlock();
if (!ttwu_state_match(p, state, &success))
break;
trace_sched_waking(p);
/*
* Ensure we load p->on_rq _after_ p->state, otherwise it would
* be possible to, falsely, observe p->on_rq == 0 and get stuck
* in smp_cond_load_acquire() below.
*
* sched_ttwu_pending() try_to_wake_up()
* STORE p->on_rq = 1 LOAD p->state
* UNLOCK rq->lock
*
* __schedule() (switch to task 'p')
* LOCK rq->lock smp_rmb();
* smp_mb__after_spinlock();
* UNLOCK rq->lock
*
* [task p]
* STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
*
* Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
* __schedule(). See the comment for smp_mb__after_spinlock().
*
* A similar smp_rmb() lives in __task_needs_rq_lock().
*/
smp_rmb();
if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
break;
#ifdef CONFIG_SMP
/*
* Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
* possible to, falsely, observe p->on_cpu == 0.
*
* One must be running (->on_cpu == 1) in order to remove oneself
* from the runqueue.
*
* __schedule() (switch to task 'p') try_to_wake_up()
* STORE p->on_cpu = 1 LOAD p->on_rq
* UNLOCK rq->lock
*
* __schedule() (put 'p' to sleep)
* LOCK rq->lock smp_rmb();
* smp_mb__after_spinlock();
* STORE p->on_rq = 0 LOAD p->on_cpu
*
* Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
* __schedule(). See the comment for smp_mb__after_spinlock().
*
* Form a control-dep-acquire with p->on_rq == 0 above, to ensure
* schedule()'s deactivate_task() has 'happened' and p will no longer
* care about it's own p->state. See the comment in __schedule().
*/
smp_acquire__after_ctrl_dep();
/*
* We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
* == 0), which means we need to do an enqueue, change p->state to
* TASK_WAKING such that we can unlock p->pi_lock before doing the
* enqueue, such as ttwu_queue_wakelist().
*/
WRITE_ONCE(p->__state, TASK_WAKING);
/*
* If the owning (remote) CPU is still in the middle of schedule() with
* this task as prev, consider queueing p on the remote CPU's wake_list,
* which potentially sends an IPI instead of spinning on p->on_cpu to
* let the waker make forward progress. This is safe because IRQs are
* disabled and the IPI will deliver after on_cpu is cleared.
*
* Ensure we load task_cpu(p) after p->on_cpu:
*
* set_task_cpu(p, cpu);
* STORE p->cpu = @cpu
* __schedule() (switch to task 'p')
* LOCK rq->lock
* smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
* STORE p->on_cpu = 1 LOAD p->cpu
*
* to ensure we observe the correct CPU on which the task is currently
* scheduling.
*/
if (smp_load_acquire(&p->on_cpu) &&
ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
break;
/*
* If the owning (remote) CPU is still in the middle of schedule() with
* this task as prev, wait until it's done referencing the task.
*
* Pairs with the smp_store_release() in finish_task().
*
* This ensures that tasks getting woken will be fully ordered against
* their previous state and preserve Program Order.
*/
smp_cond_load_acquire(&p->on_cpu, !VAL);
cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
if (task_cpu(p) != cpu) {
if (p->in_iowait) {
delayacct_blkio_end(p);
atomic_dec(&task_rq(p)->nr_iowait);
}
wake_flags |= WF_MIGRATED;
psi_ttwu_dequeue(p);
set_task_cpu(p, cpu);
}
#else
cpu = task_cpu(p);
#endif /* CONFIG_SMP */
ttwu_queue(p, cpu, wake_flags);
}
out:
if (success)
ttwu_stat(p, task_cpu(p), wake_flags);
return success;
}
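/*
 * Canonical waker/wakee pairing, as a sketch (CONDITION stands in for any
 * shared flag; compare the wait-loop comment above ttwu_runnable()):
 *
 *	waker				wakee
 *
 *	CONDITION = 1;			for (;;) {
 *	wake_up_process(p);		    set_current_state(TASK_UNINTERRUPTIBLE);
 *					    if (CONDITION)
 *						break;
 *					    schedule();
 *					}
 *					__set_current_state(TASK_RUNNING);
 *
 * The smp_mb__after_spinlock() above pairs with the barrier implied by
 * set_current_state(), so the CONDITION store and the p->state check cannot
 * be reordered past each other and the wakeup cannot be lost.
 */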
static bool __task_needs_rq_lock(struct task_struct *p)
{
unsigned int state = READ_ONCE(p->__state);
/*
* Since p->pi_lock blocks try_to_wake_up(), we don't need rq->lock when
* the task is blocked. Make sure to check @state since ttwu() can drop
* locks at the end, see ttwu_queue_wakelist().
*/
if (state == TASK_RUNNING || state == TASK_WAKING)
return true;
/*
* Ensure we load p->on_rq after p->__state, otherwise it would be
* possible to, falsely, observe p->on_rq == 0.
*
* See try_to_wake_up() for a longer comment.
*/
smp_rmb();
if (p->on_rq)
return true;
#ifdef CONFIG_SMP
/*
* Ensure the task has finished __schedule() and will not be referenced
* anymore. Again, see try_to_wake_up() for a longer comment.
*/
smp_rmb();
smp_cond_load_acquire(&p->on_cpu, !VAL);
#endif
return false;
}
/**
* task_call_func - Invoke a function on task in fixed state
* @p: Process for which the function is to be invoked, can be @current.
* @func: Function to invoke.
* @arg: Argument to function.
*
* Fix the task in its current state by avoiding wakeups and/or rq operations
* and call @func(@arg) on it. This function can use ->on_rq and task_curr()
* to work out what the state is, if required. Given that @func can be invoked
* with a runqueue lock held, it had better be quite lightweight.
*
* Returns:
* Whatever @func returns
*/
int task_call_func(struct task_struct *p, task_call_f func, void *arg)
{
struct rq *rq = NULL;
struct rq_flags rf;
int ret;
raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
if (__task_needs_rq_lock(p))
rq = __task_rq_lock(p, &rf);
/*
* At this point the task is pinned; either:
* - blocked and we're holding off wakeups (pi->lock)
* - woken, and we're holding off enqueue (rq->lock)
* - queued, and we're holding off schedule (rq->lock)
* - running, and we're holding off de-schedule (rq->lock)
*
* The called function (@func) can use: task_curr(), p->on_rq and
* p->__state to differentiate between these states.
*/
ret = func(p, arg);
if (rq)
rq_unlock(rq, &rf);
raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
return ret;
}
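/*
 * Minimal usage sketch; get_state_cb() is a hypothetical callback written
 * for illustration, not a helper that exists elsewhere:
 *
 *	static int get_state_cb(struct task_struct *p, void *arg)
 *	{
 *		*(unsigned int *)arg = READ_ONCE(p->__state);
 *		return task_curr(p);
 *	}
 *
 *	unsigned int state;
 *	int was_running = task_call_func(p, get_state_cb, &state);
 *
 * Because @p is pinned for the duration, both values describe the same
 * instant, which two independent reads could not guarantee.
 */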
/**
* cpu_curr_snapshot - Return a snapshot of the currently running task
* @cpu: The CPU on which to snapshot the task.
*
* Returns the task_struct pointer of the task "currently" running on
* the specified CPU. If the same task is running on that CPU throughout,
* the return value will be a pointer to that task's task_struct structure.
* If the CPU did any context switches even vaguely concurrently with the
* execution of this function, the return value will be a pointer to the
* task_struct structure of a randomly chosen task that was running on
* that CPU somewhere around the time that this function was executing.
*
* If the specified CPU was offline, the return value is whatever it
* is, perhaps a pointer to the task_struct structure of that CPU's idle
* task, but there is no guarantee. Callers wishing a useful return
* value must take some action to ensure that the specified CPU remains
* online throughout.
*
* This function executes full memory barriers before and after fetching
* the pointer, which permits the caller to confine this function's fetch
* with respect to the caller's accesses to other shared variables.
*/
struct task_struct *cpu_curr_snapshot(int cpu)
{
struct task_struct *t;
smp_mb(); /* Pairing determined by caller's synchronization design. */
t = rcu_dereference(cpu_curr(cpu));
smp_mb(); /* Pairing determined by caller's synchronization design. */
return t;
}
/**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
* Attempt to wake up the nominated process and move it to the set of runnable
* processes.
*
* Return: 1 if the process was woken up, 0 if it was already running.
*
* This function executes a full memory barrier before accessing the task state.
*/
int wake_up_process(struct task_struct *p)
{
return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
}
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
*
* __sched_fork() is basic setup used by init_idle() too:
*/
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
p->on_rq = 0;
p->se.on_rq = 0;
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
p->se.vlag = 0;
p->se.slice = sysctl_sched_base_slice;
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_FAIR_GROUP_SCHED
p->se.cfs_rq = NULL;
#endif
#ifdef CONFIG_SCHEDSTATS
/* Even if schedstat is disabled, there should not be garbage */
memset(&p->stats, 0, sizeof(p->stats));
#endif
RB_CLEAR_NODE(&p->dl.rb_node);
init_dl_task_timer(&p->dl);
init_dl_inactive_task_timer(&p->dl);
__dl_clear_params(p);
INIT_LIST_HEAD(&p->rt.run_list);
p->rt.timeout = 0;
p->rt.time_slice = sched_rr_timeslice;
p->rt.on_rq = 0;
p->rt.on_list = 0;
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
#ifdef CONFIG_COMPACTION
p->capture_control = NULL;
#endif
init_numa_balancing(clone_flags, p);
#ifdef CONFIG_SMP
p->wake_entry.u_flags = CSD_TYPE_TTWU;
p->migration_pending = NULL;
#endif
init_sched_mm_cid(p);
}
DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
#ifdef CONFIG_NUMA_BALANCING
int sysctl_numa_balancing_mode;
static void __set_numabalancing_state(bool enabled)
{
if (enabled)
static_branch_enable(&sched_numa_balancing);
else
static_branch_disable(&sched_numa_balancing);
}
void set_numabalancing_state(bool enabled)
{
if (enabled)
sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
else
sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
__set_numabalancing_state(enabled);
}
#ifdef CONFIG_PROC_SYSCTL
static void reset_memory_tiering(void)
{
struct pglist_data *pgdat;
for_each_online_pgdat(pgdat) {
pgdat->nbp_threshold = 0;
pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
}
}
static int sysctl_numa_balancing(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
int err;
int state = sysctl_numa_balancing_mode;
if (write && !capable(CAP_SYS_ADMIN))
return -EPERM;
t = *table;
t.data = &state;
err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
if (err < 0)
return err;
if (write) {
if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
(state & NUMA_BALANCING_MEMORY_TIERING))
reset_memory_tiering();
sysctl_numa_balancing_mode = state;
__set_numabalancing_state(state);
}
return err;
}
#endif
#endif
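/*
 * Administration sketch (mode values assumed from the NUMA_BALANCING_*
 * flags): the handler above backs /proc/sys/kernel/numa_balancing, e.g.
 *
 *	sysctl kernel.numa_balancing=0	# NUMA_BALANCING_DISABLED
 *	sysctl kernel.numa_balancing=1	# NUMA_BALANCING_NORMAL
 *	sysctl kernel.numa_balancing=2	# NUMA_BALANCING_MEMORY_TIERING
 *
 * Newly enabling the memory-tiering bit resets the promotion thresholds
 * via reset_memory_tiering() above.
 */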
#ifdef CONFIG_SCHEDSTATS
DEFINE_STATIC_KEY_FALSE(sched_schedstats);
static void set_schedstats(bool enabled)
{
if (enabled)
static_branch_enable(&sched_schedstats);
else
static_branch_disable(&sched_schedstats);
}
void force_schedstat_enabled(void)
{
if (!schedstat_enabled()) {
pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
static_branch_enable(&sched_schedstats);
}
}
static int __init setup_schedstats(char *str)
{
int ret = 0;
if (!str)
goto out;
if (!strcmp(str, "enable")) {
set_schedstats(true);
ret = 1;
} else if (!strcmp(str, "disable")) {
set_schedstats(false);
ret = 1;
}
out:
if (!ret)
pr_warn("Unable to parse schedstats=\n");
return ret;
}
__setup("schedstats=", setup_schedstats);
#ifdef CONFIG_PROC_SYSCTL
static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
int err;
int state = static_branch_likely(&sched_schedstats);
if (write && !capable(CAP_SYS_ADMIN))
return -EPERM;
t = *table;
t.data = &state;
err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
if (err < 0)
return err;
if (write)
set_schedstats(state);
return err;
}
#endif /* CONFIG_PROC_SYSCTL */
#endif /* CONFIG_SCHEDSTATS */
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_core_sysctls[] = {
#ifdef CONFIG_SCHEDSTATS
{
.procname = "sched_schedstats",
.data = NULL,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sysctl_schedstats,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#endif /* CONFIG_SCHEDSTATS */
#ifdef CONFIG_UCLAMP_TASK
{
.procname = "sched_util_clamp_min",
.data = &sysctl_sched_uclamp_util_min,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sysctl_sched_uclamp_handler,
},
{
.procname = "sched_util_clamp_max",
.data = &sysctl_sched_uclamp_util_max,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sysctl_sched_uclamp_handler,
},
{
.procname = "sched_util_clamp_min_rt_default",
.data = &sysctl_sched_uclamp_util_min_rt_default,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sysctl_sched_uclamp_handler,
},
#endif /* CONFIG_UCLAMP_TASK */
#ifdef CONFIG_NUMA_BALANCING
{
.procname = "numa_balancing",
.data = NULL, /* filled in by handler */
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sysctl_numa_balancing,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_FOUR,
},
#endif /* CONFIG_NUMA_BALANCING */
{}
};
static int __init sched_core_sysctl_init(void)
{
register_sysctl_init("kernel", sched_core_sysctls);
return 0;
}
late_initcall(sched_core_sysctl_init);
#endif /* CONFIG_SYSCTL */
/*
* fork()/clone()-time setup:
*/
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
__sched_fork(clone_flags, p);
/*
* We mark the process as NEW here. This guarantees that
* nobody will actually run it, and a signal or other external
* event cannot wake it up and insert it on the runqueue either.
*/
p->__state = TASK_NEW;
/*
* Make sure we do not leak PI boosting priority to the child.
*/
p->prio = current->normal_prio;
uclamp_fork(p);
/*
* Revert to default priority/policy on fork if requested.
*/
if (unlikely(p->sched_reset_on_fork)) {
if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
p->policy = SCHED_NORMAL;
p->static_prio = NICE_TO_PRIO(0);
p->rt_priority = 0;
} else if (PRIO_TO_NICE(p->static_prio) < 0)
p->static_prio = NICE_TO_PRIO(0);
p->prio = p->normal_prio = p->static_prio;
set_load_weight(p, false);
/*
* We don't need the reset flag anymore after the fork. It has
* fulfilled its duty:
*/
p->sched_reset_on_fork = 0;
}
if (dl_prio(p->prio))
return -EAGAIN;
else if (rt_prio(p->prio))
p->sched_class = &rt_sched_class;
else
p->sched_class = &fair_sched_class;
init_entity_runnable_average(&p->se);
#ifdef CONFIG_SCHED_INFO
if (likely(sched_info_on()))
memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
#endif
return 0;
}
void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
{
unsigned long flags;
/*
* Because we're not yet on the pid-hash, p->pi_lock isn't strictly
* required yet, but lockdep gets upset if rules are violated.
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_CGROUP_SCHED
if (1) {
struct task_group *tg;
tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
struct task_group, css);
tg = autogroup_task_group(p, tg);
p->sched_task_group = tg;
}
#endif
rseq_migrate(p);
/*
* We're setting the CPU for the first time, we don't migrate,
* so use __set_task_cpu().
*/
__set_task_cpu(p, smp_processor_id());
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
void sched_post_fork(struct task_struct *p)
{
uclamp_post_fork(p);
}
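/*
 * Fork-time call order, as a sketch of how the pieces above fit together
 * (the exact placement within copy_process() is an assumption based on
 * kernel/fork.c):
 *
 *	copy_process()
 *	    sched_fork()		// TASK_NEW, priority and class setup
 *	    sched_cgroup_fork()		// task_group and initial CPU
 *	    sched_post_fork()		// uclamp post-fork fixups
 *	kernel_clone()
 *	    wake_up_new_task()		// first activation, see below
 */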
unsigned long to_ratio(u64 period, u64 runtime)
{
if (runtime == RUNTIME_INF)
return BW_UNIT;
/*
* Doing this here saves a lot of checks in all
* the calling paths, and returning zero seems
* safe for them anyway.
*/
if (period == 0)
return 0;
return div64_u64(runtime << BW_SHIFT, period);
}
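/*
 * Worked example, assuming BW_SHIFT = 20 (so BW_UNIT = 1 << 20): a runtime
 * of 25ms out of a 100ms period gives
 *
 *	to_ratio(100 * NSEC_PER_MSEC, 25 * NSEC_PER_MSEC)
 *		= (25000000 << 20) / 100000000
 *		= 262144 = BW_UNIT / 4
 *
 * i.e. a bandwidth of 0.25 in fixed-point form.
 */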
/*
* wake_up_new_task - wake up a newly created task for the first time.
*
* This function will do some initial scheduler statistics housekeeping
* that must be done for every newly created context, then puts the task
* on the runqueue and wakes it.
*/
void wake_up_new_task(struct task_struct *p)
{
struct rq_flags rf;
struct rq *rq;
raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
WRITE_ONCE(p->__state, TASK_RUNNING);
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
* - cpus_ptr can change in the fork path
* - any previously selected CPU might disappear through hotplug
*
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
* as we're not fully set-up yet.
*/
p->recent_used_cpu = task_cpu(p);
rseq_migrate(p);
__set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
#endif
rq = __task_rq_lock(p, &rf);
update_rq_clock(rq);
post_init_entity_util_avg(p);
activate_task(rq, p, ENQUEUE_NOCLOCK);
trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
if (p->sched_class->task_woken) {
/*
* Nothing relies on rq->lock after this, so it's fine to
* drop it.
*/
rq_unpin_lock(rq, &rf);
p->sched_class->task_woken(rq, p);
rq_repin_lock(rq, &rf);
}
#endif
task_rq_unlock(rq, p, &rf);
}
#ifdef CONFIG_PREEMPT_NOTIFIERS
static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
void preempt_notifier_inc(void)
{
static_branch_inc(&preempt_notifier_key);
}
EXPORT_SYMBOL_GPL(preempt_notifier_inc);
void preempt_notifier_dec(void)
{
static_branch_dec(&preempt_notifier_key);
}
EXPORT_SYMBOL_GPL(preempt_notifier_dec);
/**
* preempt_notifier_register - tell me when current is being preempted & rescheduled
* @notifier: notifier struct to register
*/
void preempt_notifier_register(struct preempt_notifier *notifier)
{
if (!static_branch_unlikely(&preempt_notifier_key))
WARN(1, "registering preempt_notifier while notifiers disabled\n");
hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);
/**
* preempt_notifier_unregister - no longer interested in preemption notifications
* @notifier: notifier struct to unregister
*
* This is *not* safe to call from within a preemption notifier.
*/
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
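/*
 * Registration sketch; my_ops and its handlers are illustrative names, with
 * field names assumed from struct preempt_ops (KVM's vCPU load/put is the
 * canonical in-tree user of this pattern):
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu) { ... }
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next) { ... }
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&pn, &my_ops);
 *	preempt_notifier_register(&pn);		// affects current only
 */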
static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
struct preempt_notifier *notifier;
hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
notifier->ops->sched_in(notifier, raw_smp_processor_id());
}
static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
if (static_branch_unlikely(&preempt_notifier_key))
__fire_sched_in_preempt_notifiers(curr);
}
static void
__fire_sched_out_preempt_notifiers(struct task_struct *curr,
struct task_struct *next)
{
struct preempt_notifier *notifier;
hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
notifier->ops->sched_out(notifier, next);
}
static __always_inline void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
struct task_struct *next)
{
if (static_branch_unlikely(&preempt_notifier_key))
__fire_sched_out_preempt_notifiers(curr, next);
}
#else /* !CONFIG_PREEMPT_NOTIFIERS */
static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}
static inline void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
struct task_struct *next)
{
}
#endif /* CONFIG_PREEMPT_NOTIFIERS */
static inline void prepare_task(struct task_struct *next)
{
#ifdef CONFIG_SMP
/*
* Claim the task as running, we do this before switching to it
* such that any running task will have this set.
*
* See the smp_load_acquire(&p->on_cpu) case in ttwu() and
* its ordering comment.
*/
WRITE_ONCE(next->on_cpu, 1);
#endif
}
static inline void finish_task(struct task_struct *prev)
{
#ifdef CONFIG_SMP
/*
* This must be the very last reference to @prev from this CPU. After
* p->on_cpu is cleared, the task can be moved to a different CPU. We
* must ensure this doesn't happen until the switch is completely
* finished.
*
* In particular, the load of prev->state in finish_task_switch() must
* happen before this.
*
* Pairs with the smp_cond_load_acquire() in try_to_wake_up().
*/
smp_store_release(&prev->on_cpu, 0);
#endif
}
#ifdef CONFIG_SMP
static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
{
void (*func)(struct rq *rq);
struct balance_callback *next;
lockdep_assert_rq_held(rq);
while (head) {
func = (void (*)(struct rq *))head->func;
next = head->next;
head->next = NULL;
head = next;
func(rq);
}
}
static void balance_push(struct rq *rq);
/*
* balance_push_callback is a right abuse of the callback interface and plays
* by significantly different rules.
*
* Where the normal balance_callback's purpose is to be run in the same context
* that queued it (only later, when it's safe to drop rq->lock again),
* balance_push_callback is specifically targeted at __schedule().
*
* This abuse is tolerated because it places all the unlikely/odd cases behind
* a single test, namely: rq->balance_callback == NULL.
*/
struct balance_callback balance_push_callback = {
.next = NULL,
.func = balance_push,
};
static inline struct balance_callback *
__splice_balance_callbacks(struct rq *rq, bool split)
{
struct balance_callback *head = rq->balance_callback;
if (likely(!head))
return NULL;
lockdep_assert_rq_held(rq);
/*
* Must not take balance_push_callback off the list when
* splice_balance_callbacks() and balance_callbacks() are not
* in the same rq->lock section.
*
* In that case it would be possible for __schedule() to interleave
* and observe the list empty.
*/
if (split && head == &balance_push_callback)
head = NULL;
else
rq->balance_callback = NULL;
return head;
}
static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
{
return __splice_balance_callbacks(rq, true);
}
static void __balance_callbacks(struct rq *rq)
{
do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
}
static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
{
unsigned long flags;
if (unlikely(head)) {
raw_spin_rq_lock_irqsave(rq, flags);
do_balance_callbacks(rq, head);
raw_spin_rq_unlock_irqrestore(rq, flags);
}
}
#else
static inline void __balance_callbacks(struct rq *rq)
{
}
static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
{
return NULL;
}
static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
{
}
#endif
static inline void
prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
{
/*
* Since the runqueue lock will be released by the next
* task (which is an invalid locking op but in the case
* of the scheduler it's an obvious special-case), we
* do an early lockdep release here:
*/
rq_unpin_lock(rq, rf);
spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
rq_lockp(rq)->owner = next;
#endif
}
static inline void finish_lock_switch(struct rq *rq)
{
/*
* If we are tracking spinlock dependencies then we have to
* fix up the runqueue lock - which gets 'carried over' from
* prev into current:
*/
spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
__balance_callbacks(rq);
raw_spin_rq_unlock_irq(rq);
}
/*
* NOP if the arch has not defined these:
*/
#ifndef prepare_arch_switch
# define prepare_arch_switch(next) do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch() do { } while (0)
#endif
static inline void kmap_local_sched_out(void)
{
#ifdef CONFIG_KMAP_LOCAL
if (unlikely(current->kmap_ctrl.idx))
__kmap_local_sched_out();
#endif
}
static inline void kmap_local_sched_in(void)
{
#ifdef CONFIG_KMAP_LOCAL
if (unlikely(current->kmap_ctrl.idx))
__kmap_local_sched_in();
#endif
}
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
* @prev: the current task that is being switched out
* @next: the task we are going to switch to.
*
* This is called with the rq lock held and interrupts off. It must
* be paired with a subsequent finish_task_switch after the context
* switch.
*
* prepare_task_switch sets up locking and calls architecture specific
* hooks.
*/
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
kcov_prepare_switch(prev);
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
rseq_preempt(prev);
fire_sched_out_preempt_notifiers(prev, next);
kmap_local_sched_out();
prepare_task(next);
prepare_arch_switch(next);
}
/**
* finish_task_switch - clean up after a task-switch
* @prev: the thread we just switched away from.
*
* finish_task_switch must be called after the context switch, paired
* with a prepare_task_switch call before the context switch.
* finish_task_switch will reconcile locking set up by prepare_task_switch,
* and do any other architecture-specific cleanup actions.
*
* Note that we may have delayed dropping an mm in context_switch(). If
* so, we finish that here outside of the runqueue lock. (Doing it
* with the lock held can cause deadlocks; see schedule() for
* details.)
*
* The context switch has flipped the stack from under us and restored the
* local variables which were saved when this task called schedule() in the
* past. prev == current is still correct but we need to recalculate this_rq
* because prev may have moved to another CPU.
*/
static struct rq *finish_task_switch(struct task_struct *prev)
__releases(rq->lock)
{
struct rq *rq = this_rq();
struct mm_struct *mm = rq->prev_mm;
unsigned int prev_state;
/*
* The previous task will have left us with a preempt_count of 2
* because it left us after:
*
* schedule()
* preempt_disable(); // 1
* __schedule()
* raw_spin_lock_irq(&rq->lock) // 2
*
* Also, see FORK_PREEMPT_COUNT.
*/
if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
"corrupted preempt_count: %s/%d/0x%x\n",
current->comm, current->pid, preempt_count()))
preempt_count_set(FORK_PREEMPT_COUNT);
rq->prev_mm = NULL;
/*
* A task struct has one reference for the use as "current".
* If a task dies, then it sets TASK_DEAD in tsk->state and calls
* schedule one last time. The schedule call will never return, and
* the scheduled task must drop that reference.
*
* We must observe prev->state before clearing prev->on_cpu (in
* finish_task), otherwise a concurrent wakeup can get prev
* running on another CPU and we could race with its RUNNING -> DEAD
* transition, resulting in a double drop.
*/
prev_state = READ_ONCE(prev->__state);
vtime_task_switch(prev);
perf_event_task_sched_in(prev, current);
finish_task(prev);
tick_nohz_task_switch();
finish_lock_switch(rq);
finish_arch_post_lock_switch();
kcov_finish_switch(current);
/*
* kmap_local_sched_out() is invoked with rq::lock held and
* interrupts disabled. There is no requirement for that, but the
* sched out code does not have an interrupt enabled section.
* Restoring the maps on sched in does not require interrupts being
* disabled either.
*/
kmap_local_sched_in();
fire_sched_in_preempt_notifiers(current);
/*
* When switching through a kernel thread, the loop in
* membarrier_{private,global}_expedited() may have observed that
* kernel thread and not issued an IPI. It is therefore possible to
* schedule between user->kernel->user threads without passing through
* switch_mm(). Membarrier requires a barrier after storing to
* rq->curr, before returning to userspace, so provide them here:
*
* - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
* provided by mmdrop_lazy_tlb(),
* - a sync_core for SYNC_CORE.
*/
if (mm) {
membarrier_mm_sync_core_before_usermode(mm);
mmdrop_lazy_tlb_sched(mm);
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
/* Task is done with its stack. */
put_task_stack(prev);
put_task_struct_rcu_user(prev);
}
return rq;
}
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
*/
asmlinkage __visible void schedule_tail(struct task_struct *prev)
__releases(rq->lock)
{
/*
* New tasks start with FORK_PREEMPT_COUNT, see there and
* finish_task_switch() for details.
*
* finish_task_switch() will drop rq->lock() and lower preempt_count
* and the preempt_enable() will end up enabling preemption (on
* PREEMPT_COUNT kernels).
*/
finish_task_switch(prev);
preempt_enable();
if (current->set_child_tid)
put_user(task_pid_vnr(current), current->set_child_tid);
calculate_sigpending();
}
/*
* context_switch - switch to the new MM and the new thread's register state.
*/
static __always_inline struct rq *
context_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next, struct rq_flags *rf)
{
prepare_task_switch(rq, prev, next);
/*
* For paravirt, this is coupled with an exit in switch_to to
* combine the page table reload and the switch backend into
* one hypercall.
*/
arch_start_context_switch(prev);
/*
* kernel -> kernel lazy + transfer active
* user -> kernel lazy + mmgrab_lazy_tlb() active
*
* kernel -> user switch + mmdrop_lazy_tlb() active
* user -> user switch
*
* switch_mm_cid() needs to be updated if the barriers provided
* by context_switch() are modified.
*/
if (!next->mm) { // to kernel
enter_lazy_tlb(prev->active_mm, next);
next->active_mm = prev->active_mm;
if (prev->mm) // from user
mmgrab_lazy_tlb(prev->active_mm);
else
prev->active_mm = NULL;
} else { // to user
membarrier_switch_mm(rq, prev->active_mm, next->mm);
/*
* sys_membarrier() requires an smp_mb() between setting
* rq->curr / membarrier_switch_mm() and returning to userspace.
*
* The below provides this either through switch_mm(), or in
* case 'prev->active_mm == next->mm' through
* finish_task_switch()'s mmdrop().
*/
switch_mm_irqs_off(prev->active_mm, next->mm, next);
lru_gen_use_mm(next->mm);
if (!prev->mm) { // from kernel
/* will mmdrop_lazy_tlb() in finish_task_switch(). */
rq->prev_mm = prev->active_mm;
prev->active_mm = NULL;
}
}
/* switch_mm_cid() requires the memory barriers above. */
switch_mm_cid(rq, prev, next);
rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
prepare_lock_switch(rq, next, rf);
/* Here we just switch the register state and the stack. */
switch_to(prev, next, prev);
barrier();
return finish_task_switch(prev);
}
/*
* nr_running and nr_context_switches:
*
* externally visible scheduler statistics: current number of runnable
* threads, total number of context switches performed since bootup.
*/
unsigned int nr_running(void)
{
unsigned int i, sum = 0;
for_each_online_cpu(i)
sum += cpu_rq(i)->nr_running;
return sum;
}
/*
* Check if only the current task is running on the CPU.
*
* Caution: this function does not check that the caller has disabled
* preemption, thus the result might have a time-of-check-to-time-of-use
* race. The caller is responsible to use it correctly, for example:
*
* - from a non-preemptible section (of course)
*
* - from a thread that is bound to a single CPU
*
* - in a loop with very short iterations (e.g. a polling loop)
*/
bool single_task_running(void)
{
return raw_rq()->nr_running == 1;
}
EXPORT_SYMBOL(single_task_running);
unsigned long long nr_context_switches_cpu(int cpu)
{
return cpu_rq(cpu)->nr_switches;
}
unsigned long long nr_context_switches(void)
{
int i;
unsigned long long sum = 0;
for_each_possible_cpu(i)
sum += cpu_rq(i)->nr_switches;
return sum;
}
/*
* Consumers of these two interfaces, like for example the cpuidle menu
* governor, are using nonsensical data: they prefer shallow idle state
* selection for a CPU that has IO-wait, even though that CPU might not even
* end up running the task when it does become runnable.
*/
unsigned int nr_iowait_cpu(int cpu)
{
return atomic_read(&cpu_rq(cpu)->nr_iowait);
}
/*
* IO-wait accounting, and how it's mostly bollocks (on SMP).
*
* The idea behind IO-wait accounting is to account the idle time that we could
* have spent running if it were not for IO. That is, if we were to improve the
* storage performance, we'd have a proportional reduction in IO-wait time.
*
* This all works nicely on UP, where, when a task blocks on IO, we account
* idle time as IO-wait, because if the storage were faster, it could've been
* running and we'd not be idle.
*
* This has been extended to SMP, by doing the same for each CPU. This however
* is broken.
*
* Imagine for instance the case where two tasks block on one CPU, only the one
* CPU will have IO-wait accounted, while the other has regular idle. Even
* though, if the storage were faster, both could've run at the same time,
* utilising both CPUs.
*
* This means that, when looking globally, the current IO-wait accounting on
* SMP is a lower bound, by reason of under-accounting.
*
* Worse, since the numbers are provided per CPU, they are sometimes
* interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
* associated with any one particular CPU, it can wake to another CPU than it
* blocked on. This means the per CPU IO-wait number is meaningless.
*
* Task CPU affinities can make all that even more 'interesting'.
*/
unsigned int nr_iowait(void)
{
unsigned int i, sum = 0;
for_each_possible_cpu(i)
sum += nr_iowait_cpu(i);
return sum;
}
#ifdef CONFIG_SMP
/*
* sched_exec - execve() is a valuable balancing opportunity, because at
* this point the task has the smallest effective memory and cache footprint.
*/
void sched_exec(void)
{
struct task_struct *p = current;
struct migration_arg arg;
int dest_cpu;
scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
if (dest_cpu == smp_processor_id())
return;
if (unlikely(!cpu_active(dest_cpu)))
return;
arg = (struct migration_arg){ p, dest_cpu };
}
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
}
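/*
 * Call-site note (an assumption based on fs/exec.c): bprm_execve() invokes
 * sched_exec() before the new binary is loaded, while the task's memory and
 * cache footprint is at its smallest and a cross-CPU move is nearly free.
 */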
#endif
DEFINE_PER_CPU(struct kernel_stat, kstat);
DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
/*
* The function fair_sched_class.update_curr accesses the struct curr
* and its field curr->exec_start; when called from task_sched_runtime(),
* we observe a high rate of cache misses in practice.
* Prefetching this data results in improved performance.
*/
static inline void prefetch_curr_exec_start(struct task_struct *p)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity *curr = (&p->se)->cfs_rq->curr;
#else
struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
#endif
prefetch(curr);
prefetch(&curr->exec_start);
}
/*
* Return accounted runtime for the task.
* In case the task is currently running, return the runtime plus current's
* pending runtime that has not been accounted yet.
*/
unsigned long long task_sched_runtime(struct task_struct *p)
{
struct rq_flags rf;
struct rq *rq;
u64 ns;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
/*
* 64-bit doesn't need locks to atomically read a 64-bit value.
* So we have an optimization chance when the task's delta_exec is 0.
* Reading ->on_cpu is racy, but this is ok.
*
* If we race with it leaving CPU, we'll take a lock. So we're correct.
* If we race with it entering CPU, unaccounted time is 0. This is
* indistinguishable from the read occurring a few cycles earlier.
* If we see ->on_cpu without ->on_rq, the task is leaving, and has
* been accounted, so we're correct here as well.
*/
if (!p->on_cpu || !task_on_rq_queued(p))
return p->se.sum_exec_runtime;
#endif
rq = task_rq_lock(p, &rf);
/*
* Must be ->curr _and_ ->on_rq. If dequeued, we would
* project cycles that may never be accounted to this
* thread, breaking clock_gettime().
*/
if (task_current(rq, p) && task_on_rq_queued(p)) {
prefetch_curr_exec_start(p);
update_rq_clock(rq);
p->sched_class->update_curr(rq);
}
ns = p->se.sum_exec_runtime;
task_rq_unlock(rq, p, &rf);
return ns;
}
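/*
 * Caller note (hedged): the POSIX CPU-timer code, e.g. cpu_clock_sample()
 * behind clock_gettime(CLOCK_THREAD_CPUTIME_ID), is the typical consumer of
 * task_sched_runtime(), which is why the lockless fast path above matters.
 */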
#ifdef CONFIG_SCHED_DEBUG
static u64 cpu_resched_latency(struct rq *rq)
{
int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
u64 resched_latency, now = rq_clock(rq);
static bool warned_once;
if (sysctl_resched_latency_warn_once && warned_once)
return 0;
if (!need_resched() || !latency_warn_ms)
return 0;
if (system_state == SYSTEM_BOOTING)
return 0;
if (!rq->last_seen_need_resched_ns) {
rq->last_seen_need_resched_ns = now;
rq->ticks_without_resched = 0;
return 0;
}
rq->ticks_without_resched++;
resched_latency = now - rq->last_seen_need_resched_ns;
if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
return 0;
warned_once = true;
return resched_latency;
}
static int __init setup_resched_latency_warn_ms(char *str)
{
long val;
if ((kstrtol(str, 0, &val))) {
pr_warn("Unable to set resched_latency_warn_ms\n");
return 1;
}
sysctl_resched_latency_warn_ms = val;
return 1;
}
__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
#else
static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
#endif /* CONFIG_SCHED_DEBUG */
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
*/
void scheduler_tick(void)
{
int cpu = smp_processor_id();
struct rq *rq = cpu_rq(cpu);
struct task_struct *curr = rq->curr;
struct rq_flags rf;
unsigned long thermal_pressure;
u64 resched_latency;
if (housekeeping_cpu(cpu, HK_TYPE_TICK))
arch_scale_freq_tick();
sched_clock_tick();
rq_lock(rq, &rf);
update_rq_clock(rq);
thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
curr->sched_class->task_tick(rq, curr, 0);
if (sched_feat(LATENCY_WARN))
resched_latency = cpu_resched_latency(rq);
calc_global_load_tick(rq);
sched_core_tick(rq);
task_tick_mm_cid(rq, curr);
rq_unlock(rq, &rf);
if (sched_feat(LATENCY_WARN) && resched_latency)
resched_latency_warn(cpu, resched_latency);
perf_event_task_tick();
if (curr->flags & PF_WQ_WORKER)
wq_worker_tick(curr);
#ifdef CONFIG_SMP
rq->idle_balance = idle_cpu(cpu);
trigger_load_balance(rq);
#endif
}
#ifdef CONFIG_NO_HZ_FULL
struct tick_work {
int cpu;
atomic_t state;
struct delayed_work work;
};
/* Values for ->state, see diagram below. */
#define TICK_SCHED_REMOTE_OFFLINE 0
#define TICK_SCHED_REMOTE_OFFLINING 1
#define TICK_SCHED_REMOTE_RUNNING 2
/*
* State diagram for ->state:
*
*
* TICK_SCHED_REMOTE_OFFLINE
* | ^
* | |
* | | sched_tick_remote()
* | |
* | |
* +--TICK_SCHED_REMOTE_OFFLINING
* | ^
* | |
* sched_tick_start() | | sched_tick_stop()
* | |
* V |
* TICK_SCHED_REMOTE_RUNNING
*
*
* Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
* and sched_tick_start() are happy to leave the state in RUNNING.
*/
static struct tick_work __percpu *tick_work_cpu;
static void sched_tick_remote(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct tick_work *twork = container_of(dwork, struct tick_work, work);
int cpu = twork->cpu;
struct rq *rq = cpu_rq(cpu);
int os;
/*
* Handle the tick only if it appears the remote CPU is running in full
* dynticks mode. The check is racy by nature, but missing a tick or
* having one too many is no big deal because the scheduler tick updates
* statistics and checks timeslices in a time-independent way, regardless
* of when exactly it is running.
*/
if (tick_nohz_tick_stopped_cpu(cpu)) {
guard(rq_lock_irq)(rq);
struct task_struct *curr = rq->curr;
if (cpu_online(cpu)) {
update_rq_clock(rq);
if (!is_idle_task(curr)) {
/*
* Make sure the next tick runs within a
* reasonable amount of time.
*/
u64 delta = rq_clock_task(rq) - curr->se.exec_start;
WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
}
curr->sched_class->task_tick(rq, curr, 0);
calc_load_nohz_remote(rq);
}
}
/*
* Run the remote tick once per second (1Hz). This arbitrary
* frequency is low enough to avoid overload and high enough
* to keep scheduler-internal stats reasonably up to date. But
* first update state to reflect hotplug activity if required.
*/
os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
if (os == TICK_SCHED_REMOTE_RUNNING)
queue_delayed_work(system_unbound_wq, dwork, HZ);
}
static void sched_tick_start(int cpu)
{
int os;
struct tick_work *twork;
if (housekeeping_cpu(cpu, HK_TYPE_TICK))
return;
WARN_ON_ONCE(!tick_work_cpu);
twork = per_cpu_ptr(tick_work_cpu, cpu);
os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
if (os == TICK_SCHED_REMOTE_OFFLINE) {
twork->cpu = cpu;
INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
queue_delayed_work(system_unbound_wq, &twork->work, HZ);
}
}
#ifdef CONFIG_HOTPLUG_CPU
static void sched_tick_stop(int cpu)
{
struct tick_work *twork;
int os;
if (housekeeping_cpu(cpu, HK_TYPE_TICK))
return;
WARN_ON_ONCE(!tick_work_cpu);
twork = per_cpu_ptr(tick_work_cpu, cpu);
/* There cannot be competing actions, but don't rely on stop-machine. */
os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
/* Don't cancel, as this would mess up the state machine. */
}
#endif /* CONFIG_HOTPLUG_CPU */
int __init sched_tick_offload_init(void)
{
tick_work_cpu = alloc_percpu(struct tick_work);
BUG_ON(!tick_work_cpu);
return 0;
}
#else /* !CONFIG_NO_HZ_FULL */
static inline void sched_tick_start(int cpu) { }
static inline void sched_tick_stop(int cpu) { }
#endif
#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_TRACE_PREEMPT_TOGGLE))
/*
* If the value passed in is equal to the current preempt count
* then we just disabled preemption. Start timing the latency.
*/
static inline void preempt_latency_start(int val)
{
if (preempt_count() == val) {
unsigned long ip = get_lock_parent_ip();
#ifdef CONFIG_DEBUG_PREEMPT
current->preempt_disable_ip = ip;
#endif
trace_preempt_off(CALLER_ADDR0, ip);
}
}
void preempt_count_add(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
*/
if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
return;
#endif
__preempt_count_add(val);
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Spinlock count overflowing soon?
*/
DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
PREEMPT_MASK - 10);
#endif
preempt_latency_start(val);
}
EXPORT_SYMBOL(preempt_count_add);
NOKPROBE_SYMBOL(preempt_count_add);
/*
* If the value passed in equals the current preempt count
* then we just enabled preemption. Stop timing the latency.
*/
static inline void preempt_latency_stop(int val)
{
if (preempt_count() == val)
trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
}
void preempt_count_sub(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
*/
if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
return;
/*
* Is the spinlock portion underflowing?
*/
if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
!(preempt_count() & PREEMPT_MASK)))
return;
#endif
preempt_latency_stop(val);
__preempt_count_sub(val);
}
EXPORT_SYMBOL(preempt_count_sub);
NOKPROBE_SYMBOL(preempt_count_sub);
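/*
 * A minimal sketch of how these hooks line up with the usual wrappers,
 * assuming CONFIG_DEBUG_PREEMPT (where preempt_count_inc()/dec() expand
 * to preempt_count_add(1)/preempt_count_sub(1)):
 *
 *	preempt_disable();	// preempt_count_add(1); latency timing starts
 *	... critical section ...
 *	preempt_enable();	// preempt_count_sub(1); latency timing stops,
 *				// followed by a reschedule check
 */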
#else
static inline void preempt_latency_start(int val) { }
static inline void preempt_latency_stop(int val) { }
#endif
static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
{
#ifdef CONFIG_DEBUG_PREEMPT
return p->preempt_disable_ip;
#else
return 0;
#endif
}
/*
* Print a 'scheduling while atomic' bug report:
*/
static noinline void __schedule_bug(struct task_struct *prev)
{
/* Save this before calling printk(), since that will clobber it */
unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
if (oops_in_progress)
return;
printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
prev->comm, prev->pid, preempt_count());
debug_show_held_locks(prev);
print_modules();
if (irqs_disabled())
print_irqtrace_events(prev);
if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
&& in_atomic_preempt_off()) {
pr_err("Preemption disabled at:");
print_ip_sym(KERN_ERR, preempt_disable_ip);
}
check_panic_on_warn("scheduling while atomic");
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
/*
* Various schedule()-time debugging checks and statistics:
*/
static inline void schedule_debug(struct task_struct *prev, bool preempt)
{
#ifdef CONFIG_SCHED_STACK_END_CHECK
if (task_stack_end_corrupted(prev))
panic("corrupted stack end detected inside scheduler\n");
if (task_scs_end_corrupted(prev))
panic("corrupted shadow stack detected inside scheduler\n");
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
prev->comm, prev->pid, prev->non_block_count);
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
#endif
if (unlikely(in_atomic_preempt_off())) {
__schedule_bug(prev);
preempt_count_set(PREEMPT_DISABLED);
}
rcu_sleep_check();
SCHED_WARN_ON(ct_state() == CONTEXT_USER);
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
schedstat_inc(this_rq()->sched_count);
}
static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
struct rq_flags *rf)
{
#ifdef CONFIG_SMP
const struct sched_class *class;
/*
* We must do the balancing pass before put_prev_task(), such
* that when we release the rq->lock the task is in the same
* state as before we took rq->lock.
*
* We can terminate the balance pass as soon as we know there is
* a runnable task of @class priority or higher.
*/
for_class_range(class, prev->sched_class, &idle_sched_class) {
if (class->balance(rq, prev, rf))
break;
}
#endif
put_prev_task(rq, prev);
}
/*
* Pick up the highest-prio task:
*/
static inline struct task_struct *
__pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
const struct sched_class *class;
struct task_struct *p;
/*
* Optimization: we know that if all tasks are in the fair class we can
* call that function directly, but only if the @prev task wasn't of a
* higher scheduling class, because otherwise those lose the
* opportunity to pull in more work from other CPUs.
*/
if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
rq->nr_running == rq->cfs.h_nr_running)) {
p = pick_next_task_fair(rq, prev, rf);
if (unlikely(p == RETRY_TASK))
goto restart;
/* Assume the next prioritized class is idle_sched_class */
if (!p) {
put_prev_task(rq, prev);
p = pick_next_task_idle(rq);
}
return p;
}
restart:
put_prev_task_balance(rq, prev, rf);
for_each_class(class) {
p = class->pick_next_task(rq);
if (p)
return p;
}
BUG(); /* The idle class should always have a runnable task. */
}
#ifdef CONFIG_SCHED_CORE
static inline bool is_task_rq_idle(struct task_struct *t)
{
return (task_rq(t)->idle == t);
}
static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
{
return is_task_rq_idle(a) || (a->core_cookie == cookie);
}
static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
{
if (is_task_rq_idle(a) || is_task_rq_idle(b))
return true;
return a->core_cookie == b->core_cookie;
}
static inline struct task_struct *pick_task(struct rq *rq)
{
const struct sched_class *class;
struct task_struct *p;
for_each_class(class) {
p = class->pick_task(rq);
if (p)
return p;
}
BUG(); /* The idle class should always have a runnable task. */
}
extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
static void queue_core_balance(struct rq *rq);
static struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
struct task_struct *next, *p, *max = NULL;
const struct cpumask *smt_mask;
bool fi_before = false;
bool core_clock_updated = (rq == rq->core);
unsigned long cookie;
int i, cpu, occ = 0;
struct rq *rq_i;
bool need_sync;
if (!sched_core_enabled(rq))
return __pick_next_task(rq, prev, rf);
cpu = cpu_of(rq);
/* The stopper task is switching into idle; no need for core-wide selection. */
if (cpu_is_offline(cpu)) {
/*
* Reset core_pick so that we don't enter the fastpath when
* coming online. Any previous core_pick will already have been
* migrated to another CPU during offline.
*/
rq->core_pick = NULL;
return __pick_next_task(rq, prev, rf);
}
/*
* If there were no {en,de}queues since we picked (IOW, the task
* pointers are all still valid), and we haven't scheduled the last
* pick yet, do so now.
*
* rq->core_pick can be NULL if no selection was made for a CPU because
* it was either offline or went offline during a sibling's core-wide
* selection. In this case, do a core-wide selection.
*/
if (rq->core->core_pick_seq == rq->core->core_task_seq &&
rq->core->core_pick_seq != rq->core_sched_seq &&
rq->core_pick) {
WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
next = rq->core_pick;
if (next != prev) {
put_prev_task(rq, prev);
set_next_task(rq, next);
}
rq->core_pick = NULL;
goto out;
}
put_prev_task_balance(rq, prev, rf);
smt_mask = cpu_smt_mask(cpu);
need_sync = !!rq->core->core_cookie;
/* reset state */
rq->core->core_cookie = 0UL;
if (rq->core->core_forceidle_count) {
if (!core_clock_updated) {
update_rq_clock(rq->core);
core_clock_updated = true;
}
sched_core_account_forceidle(rq);
/* reset after accounting force idle */
rq->core->core_forceidle_start = 0;
rq->core->core_forceidle_count = 0;
rq->core->core_forceidle_occupation = 0;
need_sync = true;
fi_before = true;
}
/*
* core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
*
* @task_seq guards the task state ({en,de}queues)
* @pick_seq is the @task_seq we did a selection on
* @sched_seq is the @pick_seq we scheduled
*
* However, preemptions can cause multiple picks on the same task set.
* 'Fix' this by also increasing @task_seq for every pick.
*/
rq->core->core_task_seq++;
/*
* Optimize for common case where this CPU has no cookies
* and there are no cookied tasks running on siblings.
*/
if (!need_sync) {
next = pick_task(rq);
if (!next->core_cookie) {
rq->core_pick = NULL;
/*
* For robustness, update the min_vruntime_fi for
* unconstrained picks as well.
*/
WARN_ON_ONCE(fi_before);
task_vruntime_update(rq, next, false);
goto out_set_next;
}
}
/*
* For each thread: do the regular task pick and find the max prio task
* amongst them.
*
* Tie-break prio towards the current CPU
*/
for_each_cpu_wrap(i, smt_mask, cpu) {
rq_i = cpu_rq(i);
/*
* Current cpu always has its clock updated on entrance to
* pick_next_task(). If the current cpu is not the core,
* the core may also have been updated above.
*/
if (i != cpu && (rq_i != rq->core || !core_clock_updated))
update_rq_clock(rq_i);
p = rq_i->core_pick = pick_task(rq_i);
if (!max || prio_less(max, p, fi_before))
max = p;
}
cookie = rq->core->core_cookie = max->core_cookie;
/*
* For each thread: try and find a runnable task that matches @max or
* force idle.
*/
for_each_cpu(i, smt_mask) {
rq_i = cpu_rq(i);
p = rq_i->core_pick;
if (!cookie_equals(p, cookie)) {
p = NULL;
if (cookie)
p = sched_core_find(rq_i, cookie);
if (!p)
p = idle_sched_class.pick_task(rq_i);
}
rq_i->core_pick = p;
if (p == rq_i->idle) {
if (rq_i->nr_running) {
rq->core->core_forceidle_count++;
if (!fi_before)
rq->core->core_forceidle_seq++;
}
} else {
occ++;
}
}
if (schedstat_enabled() && rq->core->core_forceidle_count) {
rq->core->core_forceidle_start = rq_clock(rq->core);
rq->core->core_forceidle_occupation = occ;
}
rq->core->core_pick_seq = rq->core->core_task_seq;
next = rq->core_pick;
rq->core_sched_seq = rq->core->core_pick_seq;
/* Something should have been selected for the current CPU */
WARN_ON_ONCE(!next);
/*
* Reschedule siblings
*
* NOTE: L1TF -- at this point we're no longer running the old task and
* sending an IPI (below) ensures the sibling will no longer be running
* their task. This ensures there is no inter-sibling overlap between
* non-matching user state.
*/
for_each_cpu(i, smt_mask) {
rq_i = cpu_rq(i);
/*
* An online sibling might have gone offline before a task
* could be picked for it, or it might be offline but later
* happen to come online, but it's too late and nothing was
* picked for it. That's OK - it will pick tasks for itself,
* so ignore it.
*/
if (!rq_i->core_pick)
continue;
/*
* Update for new !FI->FI transitions, or if continuing to be in !FI:
* fi_before fi update?
* 0 0 1
* 0 1 1
* 1 0 1
* 1 1 0
*/
if (!(fi_before && rq->core->core_forceidle_count))
task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
rq_i->core_pick->core_occupation = occ;
if (i == cpu) {
rq_i->core_pick = NULL;
continue;
}
/* Did we break L1TF mitigation requirements? */
WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
if (rq_i->curr == rq_i->core_pick) {
rq_i->core_pick = NULL;
continue;
}
resched_curr(rq_i);
}
out_set_next:
set_next_task(rq, next);
out:
if (rq->core->core_forceidle_count && next == rq->idle)
queue_core_balance(rq);
return next;
}
static bool try_steal_cookie(int this, int that)
{
struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
struct task_struct *p;
unsigned long cookie;
bool success = false;
guard(irq)();
guard(double_rq_lock)(dst, src);
cookie = dst->core->core_cookie;
if (!cookie)
return false;
if (dst->curr != dst->idle)
return false;
p = sched_core_find(src, cookie);
if (!p)
return false;
do {
if (p == src->core_pick || p == src->curr)
goto next;
if (!is_cpu_allowed(p, this))
goto next;
if (p->core_occupation > dst->idle->core_occupation)
goto next;
/*
* sched_core_find() and sched_core_next() will ensure
* that task @p is not throttled now, we also need to
* check whether the runqueue of the destination CPU is
* being throttled.
*/
if (sched_task_is_throttled(p, this))
goto next;
deactivate_task(src, p, 0);
set_task_cpu(p, this);
activate_task(dst, p, 0);
resched_curr(dst);
success = true;
break;
next:
p = sched_core_next(p, cookie);
} while (p);
return success;
}
static bool steal_cookie_task(int cpu, struct sched_domain *sd)
{
int i;
for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
if (i == cpu)
continue;
if (need_resched())
break;
if (try_steal_cookie(cpu, i))
return true;
}
return false;
}
static void sched_core_balance(struct rq *rq)
{
struct sched_domain *sd;
int cpu = cpu_of(rq);
preempt_disable();
rcu_read_lock();
raw_spin_rq_unlock_irq(rq);
for_each_domain(cpu, sd) {
if (need_resched())
break;
if (steal_cookie_task(cpu, sd))
break;
}
raw_spin_rq_lock_irq(rq);
rcu_read_unlock();
preempt_enable();
}
static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
static void queue_core_balance(struct rq *rq)
{
if (!sched_core_enabled(rq))
return;
if (!rq->core->core_cookie)
return;
if (!rq->nr_running) /* not forced idle */
return;
queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
}
DEFINE_LOCK_GUARD_1(core_lock, int,
sched_core_lock(*_T->lock, &_T->flags),
sched_core_unlock(*_T->lock, &_T->flags),
unsigned long flags)
static void sched_core_cpu_starting(unsigned int cpu)
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
int t;
guard(core_lock)(&cpu);
WARN_ON_ONCE(rq->core != rq);
/* if we're the first, we'll be our own leader */
if (cpumask_weight(smt_mask) == 1)
return;
/* find the leader */
for_each_cpu(t, smt_mask) {
if (t == cpu)
continue;
rq = cpu_rq(t);
if (rq->core == rq) {
core_rq = rq;
break;
}
}
if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
return;
/* install and validate core_rq */
for_each_cpu(t, smt_mask) {
rq = cpu_rq(t);
if (t == cpu)
rq->core = core_rq;
WARN_ON_ONCE(rq->core != core_rq);
}
}
static void sched_core_cpu_deactivate(unsigned int cpu)
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
int t;
guard(core_lock)(&cpu);
/* if we're the last man standing, nothing to do */
if (cpumask_weight(smt_mask) == 1) {
WARN_ON_ONCE(rq->core != rq);
return;
}
/* if we're not the leader, nothing to do */
if (rq->core != rq)
return;
/* find a new leader */
for_each_cpu(t, smt_mask) {
if (t == cpu)
continue;
core_rq = cpu_rq(t);
break;
}
if (WARN_ON_ONCE(!core_rq)) /* impossible */
return;
/* copy the shared state to the new leader */
core_rq->core_task_seq = rq->core_task_seq;
core_rq->core_pick_seq = rq->core_pick_seq;
core_rq->core_cookie = rq->core_cookie;
core_rq->core_forceidle_count = rq->core_forceidle_count;
core_rq->core_forceidle_seq = rq->core_forceidle_seq;
core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
/*
* Accounting edge for forced idle is handled in pick_next_task().
* Don't need another one here, since the hotplug thread shouldn't
* have a cookie.
*/
core_rq->core_forceidle_start = 0;
/* install new leader */
for_each_cpu(t, smt_mask) {
rq = cpu_rq(t);
rq->core = core_rq;
}
}
static inline void sched_core_cpu_dying(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
if (rq->core != rq)
rq->core = rq;
}
#else /* !CONFIG_SCHED_CORE */
static inline void sched_core_cpu_starting(unsigned int cpu) {}
static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
static inline void sched_core_cpu_dying(unsigned int cpu) {}
static struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
return __pick_next_task(rq, prev, rf);
}
#endif /* CONFIG_SCHED_CORE */
/*
* Constants for the sched_mode argument of __schedule().
*
* The mode argument allows RT enabled kernels to differentiate a
* preemption from blocking on an 'sleeping' spin/rwlock. Note that
* SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
* optimize the AND operation out and just check for zero.
*/
#define SM_NONE 0x0
#define SM_PREEMPT 0x1
#define SM_RTLOCK_WAIT 0x2
#ifndef CONFIG_PREEMPT_RT
# define SM_MASK_PREEMPT (~0U)
#else
# define SM_MASK_PREEMPT SM_PREEMPT
#endif
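/*
 * Example: on !PREEMPT_RT, SM_MASK_PREEMPT is ~0U, so
 * '!(sched_mode & SM_MASK_PREEMPT)' in __schedule() folds to
 * '!sched_mode', a plain zero test; on PREEMPT_RT only SM_PREEMPT is
 * kept, so SM_RTLOCK_WAIT is treated like a voluntary sleep.
 */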
/*
* __schedule() is the main scheduler function.
*
* The main means of driving the scheduler and thus entering this function are:
*
* 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
*
* 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
* paths. For example, see arch/x86/entry_64.S.
*
* To drive preemption between tasks, the scheduler sets the flag in timer
* interrupt handler scheduler_tick().
*
* 3. Wakeups don't really cause entry into schedule(). They add a
* task to the run-queue and that's it.
*
* Now, if the new task added to the run-queue preempts the current
* task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
* called on the nearest possible occasion:
*
* - If the kernel is preemptible (CONFIG_PREEMPTION=y):
*
* - in syscall or exception context, at the next outermost
* preempt_enable(). (this might be as soon as the wake_up()'s
* spin_unlock()!)
*
* - in IRQ context, return from interrupt-handler to
* preemptible context
*
* - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
* then at the next:
*
* - cond_resched() call
* - explicit schedule() call
* - return from syscall or exception to user-space
* - return from interrupt-handler to user-space
*
* WARNING: must be called with preemption disabled!
*/
static void __sched notrace __schedule(unsigned int sched_mode)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
unsigned long prev_state;
struct rq_flags rf;
struct rq *rq;
int cpu;
cpu = smp_processor_id();
rq = cpu_rq(cpu);
prev = rq->curr;
schedule_debug(prev, !!sched_mode);
if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
hrtick_clear(rq);
local_irq_disable();
rcu_note_context_switch(!!sched_mode);
/*
* Make sure that signal_pending_state()->signal_pending() below
* can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
* done by the caller to avoid the race with signal_wake_up():
*
* __set_current_state(@state) signal_wake_up()
* schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
* wake_up_state(p, state)
* LOCK rq->lock LOCK p->pi_state
* smp_mb__after_spinlock() smp_mb__after_spinlock()
* if (signal_pending_state()) if (p->state & @state)
*
* Also, the membarrier system call requires a full memory barrier
* after coming from user-space, before storing to rq->curr.
*/
rq_lock(rq, &rf);
smp_mb__after_spinlock();
/* Promote REQ to ACT */
rq->clock_update_flags <<= 1;
update_rq_clock(rq);
switch_count = &prev->nivcsw;
/*
* We must load prev->state once (task_struct::state is volatile), such
* that we form a control dependency vs deactivate_task() below.
*/
prev_state = READ_ONCE(prev->__state);
if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
if (signal_pending_state(prev_state, prev)) {
WRITE_ONCE(prev->__state, TASK_RUNNING);
} else {
prev->sched_contributes_to_load =
(prev_state & TASK_UNINTERRUPTIBLE) &&
!(prev_state & TASK_NOLOAD) &&
!(prev_state & TASK_FROZEN);
if (prev->sched_contributes_to_load)
rq->nr_uninterruptible++;
/*
* __schedule() ttwu()
* prev_state = prev->state; if (p->on_rq && ...)
* if (prev_state) goto out;
* p->on_rq = 0; smp_acquire__after_ctrl_dep();
* p->state = TASK_WAKING
*
* Where __schedule() and ttwu() have matching control dependencies.
*
* After this, schedule() must not care about p->state any more.
*/
deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
if (prev->in_iowait) {
atomic_inc(&rq->nr_iowait);
delayacct_blkio_start();
}
}
switch_count = &prev->nvcsw;
}
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
#endif
if (likely(prev != next)) {
rq->nr_switches++;
/*
* RCU users of rcu_dereference(rq->curr) may not see
* changes to task_struct made by pick_next_task().
*/
RCU_INIT_POINTER(rq->curr, next);
/*
* The membarrier system call requires each architecture
* to have a full memory barrier after updating
* rq->curr, before returning to user-space.
*
* Here are the schemes providing that barrier on the
* various architectures:
* - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
* switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
* - finish_lock_switch() for weakly-ordered
* architectures where spin_unlock is a full barrier,
* - switch_to() for arm64 (weakly-ordered, spin_unlock
* is a RELEASE barrier),
*/
++*switch_count;
migrate_disable_switch(rq, prev);
psi_sched_switch(prev, next, !task_on_rq_queued(prev));
trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
/* Also unlocks the rq: */
rq = context_switch(rq, prev, next, &rf);
} else {
rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
rq_unpin_lock(rq, &rf);
__balance_callbacks(rq);
raw_spin_rq_unlock_irq(rq);
}
}
void __noreturn do_task_dead(void)
{
/* Causes final put_task_struct in finish_task_switch(): */
set_special_state(TASK_DEAD);
/* Tell freezer to ignore us: */
current->flags |= PF_NOFREEZE;
__schedule(SM_NONE);
BUG();
/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
for (;;)
cpu_relax();
}
static inline void sched_submit_work(struct task_struct *tsk)
{
unsigned int task_flags;
if (task_is_running(tsk))
return;
task_flags = tsk->flags;
/*
* If a worker goes to sleep, notify and ask workqueue whether it
* wants to wake up a task to maintain concurrency.
*/
if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
if (task_flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk);
else
io_wq_worker_sleeping(tsk);
}
/*
* spinlock and rwlock must not flush block requests. This will
* deadlock if the callback attempts to acquire a lock which is
* already acquired.
*/
SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
*/
blk_flush_plug(tsk->plug, true);
}
static void sched_update_worker(struct task_struct *tsk)
{
if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
if (tsk->flags & PF_WQ_WORKER)
wq_worker_running(tsk);
else
io_wq_worker_running(tsk);
}
}
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
sched_submit_work(tsk);
do {
preempt_disable();
__schedule(SM_NONE);
sched_preempt_enable_no_resched();
} while (need_resched());
sched_update_worker(tsk);
}
EXPORT_SYMBOL(schedule);
/*
* synchronize_rcu_tasks() makes sure that no task is stuck in preempted
* state (have scheduled out non-voluntarily) by making sure that all
* tasks have either left the run queue or have gone into user space.
* As idle tasks do not do either, they must not ever be preempted
* (schedule out non-voluntarily).
*
* schedule_idle() is similar to schedule_preempt_disabled() except that it
* never enables preemption because it does not call sched_submit_work().
*/
void __sched schedule_idle(void)
{
/*
* As this skips calling sched_submit_work(), which the idle task does
* regardless because that function is a nop when the task is in a
* TASK_RUNNING state, make sure this isn't used someplace that the
* current task can be in any other state. Note, idle is always in the
* TASK_RUNNING state.
*/
WARN_ON_ONCE(current->__state);
do {
__schedule(SM_NONE);
} while (need_resched());
}
#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
asmlinkage __visible void __sched schedule_user(void)
{
/*
* If we come here after a random call to set_need_resched(),
* or we have been woken up remotely but the IPI has not yet arrived,
* we haven't yet exited the RCU idle mode. Do it here manually until
* we find a better solution.
*
* NB: There are buggy callers of this function. Ideally we
* should warn if prev_state != CONTEXT_USER, but that will trigger
* too frequently to make sense yet.
*/
enum ctx_state prev_state = exception_enter();
schedule();
exception_exit(prev_state);
}
#endif
/**
* schedule_preempt_disabled - called with preemption disabled
*
* Returns with preemption disabled. Note: preempt_count must be 1
*/
void __sched schedule_preempt_disabled(void)
{
sched_preempt_enable_no_resched();
schedule();
preempt_disable();
}
#ifdef CONFIG_PREEMPT_RT
void __sched notrace schedule_rtlock(void)
{
do {
preempt_disable();
__schedule(SM_RTLOCK_WAIT);
sched_preempt_enable_no_resched();
} while (need_resched());
}
NOKPROBE_SYMBOL(schedule_rtlock);
#endif
static void __sched notrace preempt_schedule_common(void)
{
do {
/*
* Because the function tracer can trace preempt_count_sub()
* and it also uses preempt_enable/disable_notrace(), if
* NEED_RESCHED is set, the preempt_enable_notrace() called
* by the function tracer will call this function again and
* cause infinite recursion.
*
* Preemption must be disabled here before the function
* tracer can trace. Break up preempt_disable() into two
* calls. One to disable preemption without fear of being
* traced. The other to still record the preemption latency,
* which can also be traced by the function tracer.
*/
preempt_disable_notrace();
preempt_latency_start(1);
__schedule(SM_PREEMPT);
preempt_latency_stop(1);
preempt_enable_no_resched_notrace();
/*
* Check again in case we missed a preemption opportunity
* between schedule and now.
*/
} while (need_resched());
}
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
* off of preempt_enable.
*/
asmlinkage __visible void __sched notrace preempt_schedule(void)
{
/*
* If there is a non-zero preempt_count or interrupts are disabled,
* we do not want to preempt the current task. Just return.
*/
if (likely(!preemptible()))
return;
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
EXPORT_SYMBOL(preempt_schedule);
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#ifndef preempt_schedule_dynamic_enabled
#define preempt_schedule_dynamic_enabled preempt_schedule
#define preempt_schedule_dynamic_disabled NULL
#endif
DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
void __sched notrace dynamic_preempt_schedule(void)
{
if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
return;
preempt_schedule();
}
NOKPROBE_SYMBOL(dynamic_preempt_schedule);
EXPORT_SYMBOL(dynamic_preempt_schedule);
#endif
#endif
/**
* preempt_schedule_notrace - preempt_schedule called by tracing
*
* The tracing infrastructure uses preempt_enable_notrace to prevent
* recursion and tracing preempt enabling caused by the tracing
* infrastructure itself. But as tracing can happen in areas coming
* from userspace or just about to enter userspace, a preempt enable
* can occur before user_exit() is called. This will cause the scheduler
* to be called when the system is still in usermode.
*
* To prevent this, the preempt_enable_notrace will use this function
* instead of preempt_schedule() to exit user context if needed before
* calling the scheduler.
*/
asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
{
enum ctx_state prev_ctx;
if (likely(!preemptible()))
return;
do {
/*
* Because the function tracer can trace preempt_count_sub()
* and it also uses preempt_enable/disable_notrace(), if
* NEED_RESCHED is set, the preempt_enable_notrace() called
* by the function tracer will call this function again and
* cause infinite recursion.
*
* Preemption must be disabled here before the function
* tracer can trace. Break up preempt_disable() into two
* calls. One to disable preemption without fear of being
* traced. The other to still record the preemption latency,
* which can also be traced by the function tracer.
*/
preempt_disable_notrace();
preempt_latency_start(1);
/*
* Needs preempt disabled in case user_exit() is traced
* and the tracer calls preempt_enable_notrace() causing
* an infinite recursion.
*/
prev_ctx = exception_enter();
__schedule(SM_PREEMPT);
exception_exit(prev_ctx);
preempt_latency_stop(1);
preempt_enable_no_resched_notrace();
} while (need_resched());
}
EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#ifndef preempt_schedule_notrace_dynamic_enabled
#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
#define preempt_schedule_notrace_dynamic_disabled NULL
#endif
DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
void __sched notrace dynamic_preempt_schedule_notrace(void)
{
if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
return;
preempt_schedule_notrace();
}
NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
#endif
#endif
#endif /* CONFIG_PREEMPTION */
/*
* This is the entry point to schedule() from kernel preemption
* off of irq context.
* Note that this is called and returns with IRQs disabled. This
* protects us against recursive calls from IRQ context.
*/
asmlinkage __visible void __sched preempt_schedule_irq(void)
{
enum ctx_state prev_state;
/* Catch callers which need to be fixed */
BUG_ON(preempt_count() || !irqs_disabled());
prev_state = exception_enter();
do {
preempt_disable();
local_irq_enable();
__schedule(SM_PREEMPT);
local_irq_disable();
sched_preempt_enable_no_resched();
} while (need_resched());
exception_exit(prev_state);
}
int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
void *key)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);
static void __setscheduler_prio(struct task_struct *p, int prio)
{
if (dl_prio(prio))
p->sched_class = &dl_sched_class;
else if (rt_prio(prio))
p->sched_class = &rt_sched_class;
else
p->sched_class = &fair_sched_class;
p->prio = prio;
}
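/*
 * Illustrative mapping as applied above, assuming the usual priority
 * layout (MAX_DL_PRIO == 0, MAX_RT_PRIO == 100):
 *
 *	prio < 0		-> dl_sched_class	(SCHED_DEADLINE)
 *	0 <= prio <= 99		-> rt_sched_class	(SCHED_FIFO/RR)
 *	prio >= 100		-> fair_sched_class	(SCHED_NORMAL etc.)
 */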
#ifdef CONFIG_RT_MUTEXES
static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
{
if (pi_task)
prio = min(prio, pi_task->prio);
return prio;
}
static inline int rt_effective_prio(struct task_struct *p, int prio)
{
struct task_struct *pi_task = rt_mutex_get_top_task(p);
return __rt_effective_prio(pi_task, prio);
}
/*
* rt_mutex_setprio - set the current priority of a task
* @p: task to boost
* @pi_task: donor task
*
* This function changes the 'effective' priority of a task. It does
* not touch ->normal_prio like __setscheduler().
*
* Used by the rt_mutex code to implement priority inheritance
* logic. Call site only calls if the priority of the task changed.
*/
void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
{
int prio, oldprio, queued, running, queue_flag =
DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
const struct sched_class *prev_class;
struct rq_flags rf;
struct rq *rq;
/* XXX used to be waiter->prio, not waiter->task->prio */
prio = __rt_effective_prio(pi_task, p->normal_prio);
/*
* If nothing changed, bail early.
*/
if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
return;
rq = __task_rq_lock(p, &rf);
update_rq_clock(rq);
/*
* Set under pi_lock && rq->lock, such that the value can be used under
* either lock.
*
* Note that there is a lot of trickiness in making this pointer cache
* work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
* ensure a task is de-boosted (pi_task is set to NULL) before the
* task is allowed to run again (and can exit). This ensures the pointer
* points to a blocked task -- which guarantees the task is present.
*/
p->pi_top_task = pi_task;
/*
* For FIFO/RR we only need to set prio, if that matches we're done.
*/
if (prio == p->prio && !dl_prio(prio))
goto out_unlock;
/*
* Idle task boosting is a no-no in general. There is one
* exception, when PREEMPT_RT and NOHZ is active:
*
* The idle task calls get_next_timer_interrupt() and holds
* the timer wheel base->lock on the CPU and another CPU wants
* to access the timer (probably to cancel it). We can safely
* ignore the boosting request, as the idle CPU runs this code
* with interrupts disabled and will complete the lock
* protected section without being interrupted. So there is no
* real need to boost.
*/
if (unlikely(p == rq->idle)) {
WARN_ON(p != rq->curr);
WARN_ON(p->pi_blocked_on);
goto out_unlock;
}
trace_sched_pi_setprio(p, pi_task);
oldprio = p->prio;
if (oldprio == prio)
queue_flag &= ~DEQUEUE_MOVE;
prev_class = p->sched_class;
queued = task_on_rq_queued(p);
running = task_current(rq, p);
if (queued)
dequeue_task(rq, p, queue_flag);
if (running)
put_prev_task(rq, p);
/*
* Boosting conditions are:
* 1. -rt task is running and holds mutex A
* --> -dl task blocks on mutex A
*
* 2. -dl task is running and holds mutex A
* --> -dl task blocks on mutex A and could preempt the
* running task
*/
if (dl_prio(prio)) {
if (!dl_prio(p->normal_prio) ||
(pi_task && dl_prio(pi_task->prio) &&
dl_entity_preempt(&pi_task->dl, &p->dl))) {
p->dl.pi_se = pi_task->dl.pi_se;
queue_flag |= ENQUEUE_REPLENISH;
} else {
p->dl.pi_se = &p->dl;
}
} else if (rt_prio(prio)) {
if (dl_prio(oldprio))
p->dl.pi_se = &p->dl;
if (oldprio < prio)
queue_flag |= ENQUEUE_HEAD;
} else {
if (dl_prio(oldprio))
p->dl.pi_se = &p->dl;
if (rt_prio(oldprio))
p->rt.timeout = 0;
}
__setscheduler_prio(p, prio);
if (queued)
enqueue_task(rq, p, queue_flag);
if (running)
set_next_task(rq, p);
check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
/* Avoid rq from going away on us: */
preempt_disable();
rq_unpin_lock(rq, &rf);
__balance_callbacks(rq);
raw_spin_rq_unlock(rq);
preempt_enable();
}
#else
static inline int rt_effective_prio(struct task_struct *p, int prio)
{
return prio;
}
#endif
void set_user_nice(struct task_struct *p, long nice)
{
bool queued, running;
int old_prio;
struct rq_flags rf;
struct rq *rq;
if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
return;
/*
* We have to be careful, if called from sys_setpriority(),
* the task might be in the middle of scheduling on another CPU.
*/
rq = task_rq_lock(p, &rf);
update_rq_clock(rq);
/*
* The RT priorities are set via sched_setscheduler(), but we still
* allow the 'normal' nice value to be set - but as expected
* it won't have any effect on scheduling as long as the task has
* SCHED_DEADLINE, SCHED_FIFO or SCHED_RR policy:
*/
if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
p->static_prio = NICE_TO_PRIO(nice);
goto out_unlock;
}
queued = task_on_rq_queued(p);
running = task_current(rq, p);
if (queued)
dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
if (running)
put_prev_task(rq, p);
p->static_prio = NICE_TO_PRIO(nice);
set_load_weight(p, true);
old_prio = p->prio;
p->prio = effective_prio(p);
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
if (running)
set_next_task(rq, p);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
*/
p->sched_class->prio_changed(rq, p, old_prio);
out_unlock:
task_rq_unlock(rq, p, &rf);
}
EXPORT_SYMBOL(set_user_nice);
/*
* is_nice_reduction - check if a nice value is an actual reduction
* @p: task
* @nice: nice value
*
* Similar to can_nice() but does not perform a capability check.
*/
static bool is_nice_reduction(const struct task_struct *p, const int nice)
{
/* Convert nice value [19,-20] to rlimit style value [1,40]: */
int nice_rlim = nice_to_rlimit(nice);
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
}
/*
* can_nice - check if a task can reduce its nice value
* @p: task
* @nice: nice value
*/
int can_nice(const struct task_struct *p, const int nice)
{
return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
}
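/*
 * Example: with RLIMIT_NICE set to 30, nice_to_rlimit() maps nice -10
 * to exactly 30, so an unprivileged task may lower its nice value down
 * to -10; going below that requires CAP_SYS_NICE.
 */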
#ifdef __ARCH_WANT_SYS_NICE
/*
* sys_nice - change the priority of the current process.
* @increment: priority increment
*
* sys_setpriority is a more generic, but much slower function that
* does similar things.
*/
SYSCALL_DEFINE1(nice, int, increment)
{
long nice, retval;
/*
* Setpriority might change our priority at the same moment.
* We don't have to worry. Conceptually one call occurs first
* and we have a single winner.
*/
increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
nice = task_nice(current) + increment;
nice = clamp_val(nice, MIN_NICE, MAX_NICE);
if (increment < 0 && !can_nice(current, nice))
return -EPERM;
retval = security_task_setnice(current, nice);
if (retval)
return retval;
set_user_nice(current, nice);
return 0;
}
#endif
/**
* task_prio - return the priority value of a given task.
* @p: the task in question.
*
* Return: The priority value as seen by users in /proc.
*
* sched policy return value kernel prio user prio/nice
*
* normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19]
* fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99]
* deadline -101 -1 0
*/
int task_prio(const struct task_struct *p)
{
return p->prio - MAX_RT_PRIO;
}
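/*
 * Examples, per the table above: a nice-0 SCHED_NORMAL task has
 * p->prio == 120, so task_prio() returns 20; a SCHED_FIFO task at user
 * priority 50 has p->prio == 49 and task_prio() returns -51.
 */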
/**
* idle_cpu - is a given CPU idle currently?
* @cpu: the processor in question.
*
* Return: 1 if the CPU is currently idle. 0 otherwise.
*/
int idle_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
if (rq->curr != rq->idle)
return 0;
if (rq->nr_running)
return 0;
#ifdef CONFIG_SMP
if (rq->ttwu_pending)
return 0;
#endif
return 1;
}
/**
* available_idle_cpu - is a given CPU idle for enqueuing work.
* @cpu: the CPU in question.
*
* Return: 1 if the CPU is currently idle. 0 otherwise.
*/
int available_idle_cpu(int cpu)
{
if (!idle_cpu(cpu))
return 0;
if (vcpu_is_preempted(cpu))
return 0;
return 1;
}
/**
* idle_task - return the idle task for a given CPU.
* @cpu: the processor in question.
*
* Return: The idle task for the CPU @cpu.
*/
struct task_struct *idle_task(int cpu)
{
return cpu_rq(cpu)->idle;
}
#ifdef CONFIG_SCHED_CORE
int sched_core_idle_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
if (sched_core_enabled(rq) && rq->curr == rq->idle)
return 1;
return idle_cpu(cpu);
}
#endif
#ifdef CONFIG_SMP
/*
* This function computes an effective utilization for the given CPU, to be
* used for frequency selection given the linear relation: f = u * f_max.
*
* The scheduler tracks the following metrics:
*
* cpu_util_{cfs,rt,dl,irq}()
* cpu_bw_dl()
*
* Where the cfs,rt and dl util numbers are tracked with the same metric and
* synchronized windows and are thus directly comparable.
*
* The cfs,rt,dl utilization are the running times measured with rq->clock_task
* which excludes things like IRQ and steal-time. These latter are then accrued
* in the irq utilization.
*
* The DL bandwidth number, on the other hand, is not a measured metric but a value computed
* based on the task model parameters and gives the minimal utilization
* required to meet deadlines.
*/
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
enum cpu_util_type type,
struct task_struct *p)
{
unsigned long dl_util, util, irq, max;
struct rq *rq = cpu_rq(cpu);
max = arch_scale_cpu_capacity(cpu);
if (!uclamp_is_used() &&
type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
return max;
}
/*
* Early check to see if IRQ/steal time saturates the CPU, can be
* because of inaccuracies in how we track these -- see
* update_irq_load_avg().
*/
irq = cpu_util_irq(rq);
if (unlikely(irq >= max))
return max;
/*
* Because the time spent on RT/DL tasks is visible as 'lost' time to
* CFS tasks and we use the same metric to track the effective
* utilization (PELT windows are synchronized) we can directly add them
* to obtain the CPU's actual utilization.
*
* CFS and RT utilization can be boosted or capped, depending on
* utilization clamp constraints requested by currently RUNNABLE
* tasks.
* When there are no CFS RUNNABLE tasks, clamps are released and
* frequency will be gracefully reduced with the utilization decay.
*/
util = util_cfs + cpu_util_rt(rq);
if (type == FREQUENCY_UTIL)
util = uclamp_rq_util_with(rq, util, p);
dl_util = cpu_util_dl(rq);
/*
* For frequency selection we do not make cpu_util_dl() a permanent part
* of this sum because we want to use cpu_bw_dl() later on, but we need
* to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such
* that we select f_max when there is no idle time.
*
* NOTE: numerical errors or stop class might cause us to not quite hit
* saturation when we should -- something for later.
*/
if (util + dl_util >= max)
return max;
/*
* OTOH, for energy computation we need the estimated running time, so
* include util_dl and ignore dl_bw.
*/
if (type == ENERGY_UTIL)
util += dl_util;
/*
* There is still idle time; further improve the number by using the
* irq metric. Because IRQ/steal time is hidden from the task clock we
* need to scale the task numbers:
*
* max - irq
* U' = irq + --------- * U
* max
*/
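/*
 * Worked example of the formula above, with illustrative numbers:
 * max = 1024, irq = 256 and U = 512 gives
 * U' = 256 + (768 / 1024) * 512 = 256 + 384 = 640.
 */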
util = scale_irq_capacity(util, irq, max);
util += irq;
/*
* Bandwidth required by DEADLINE must always be granted while, for
* FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
* to gracefully reduce the frequency when no tasks show up for longer
* periods of time.
*
* Ideally we would like to set bw_dl as min/guaranteed freq and util +
* bw_dl as requested freq. However, cpufreq is not yet ready for such
* an interface. So, we only do the latter for now.
*/
if (type == FREQUENCY_UTIL)
util += cpu_bw_dl(rq);
return min(max, util);
}
unsigned long sched_cpu_util(int cpu)
{
return effective_cpu_util(cpu, cpu_util_cfs(cpu), ENERGY_UTIL, NULL);
}
#endif /* CONFIG_SMP */
/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
*
* Return: the task of @pid, if found. %NULL otherwise.
*/
static struct task_struct *find_process_by_pid(pid_t pid)
{
return pid ? find_task_by_vpid(pid) : current;
}
/*
* sched_setparam() passes in -1 for its policy, to let the functions
* it calls know not to change it.
*/
#define SETPARAM_POLICY -1
static void __setscheduler_params(struct task_struct *p,
const struct sched_attr *attr)
{
int policy = attr->sched_policy;
if (policy == SETPARAM_POLICY)
policy = p->policy;
p->policy = policy;
if (dl_policy(policy))
__setparam_dl(p, attr);
else if (fair_policy(policy))
p->static_prio = NICE_TO_PRIO(attr->sched_nice);
/*
* __sched_setscheduler() ensures attr->sched_priority == 0 when
* !rt_policy. Always setting this ensures that things like
* getparam()/getattr() don't report silly values for !rt tasks.
*/
p->rt_priority = attr->sched_priority;
p->normal_prio = normal_prio(p);
set_load_weight(p, true);
}
/*
* Check the target process has a UID that matches the current process's:
*/
static bool check_same_owner(struct task_struct *p)
{
const struct cred *cred = current_cred(), *pcred;
bool match;
rcu_read_lock();
pcred = __task_cred(p);
match = (uid_eq(cred->euid, pcred->euid) ||
uid_eq(cred->euid, pcred->uid));
rcu_read_unlock();
return match;
}
/*
* Allow unprivileged RT tasks to decrease priority.
* Only issue a capable test if needed and only once to avoid an audit
* event on permitted non-privileged operations:
*/
static int user_check_sched_setscheduler(struct task_struct *p,
const struct sched_attr *attr,
int policy, int reset_on_fork)
{
if (fair_policy(policy)) {
if (attr->sched_nice < task_nice(p) &&
!is_nice_reduction(p, attr->sched_nice))
goto req_priv;
}
if (rt_policy(policy)) {
unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
/* Can't set/change the rt policy: */
if (policy != p->policy && !rlim_rtprio)
goto req_priv;
/* Can't increase priority: */
if (attr->sched_priority > p->rt_priority &&
attr->sched_priority > rlim_rtprio)
goto req_priv;
}
/*
* Can't set/change SCHED_DEADLINE policy at all for now
* (safest behavior); in the future we would like to allow
* unprivileged DL tasks to increase their relative deadline
* or reduce their runtime (both ways reducing utilization)
*/
if (dl_policy(policy))
goto req_priv;
/*
* Treat SCHED_IDLE as nice 20. Only allow a switch to
* SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
*/
if (task_has_idle_policy(p) && !idle_policy(policy)) {
if (!is_nice_reduction(p, task_nice(p)))
goto req_priv;
}
/* Can't change other user's priorities: */
if (!check_same_owner(p))
goto req_priv;
/* Normal users shall not reset the sched_reset_on_fork flag: */
if (p->sched_reset_on_fork && !reset_on_fork)
goto req_priv;
return 0;
req_priv:
if (!capable(CAP_SYS_NICE))
return -EPERM;
return 0;
}
static int __sched_setscheduler(struct task_struct *p,
const struct sched_attr *attr,
bool user, bool pi)
{
int oldpolicy = -1, policy = attr->sched_policy;
int retval, oldprio, newprio, queued, running;
const struct sched_class *prev_class;
struct balance_callback *head;
struct rq_flags rf;
int reset_on_fork;
int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
struct rq *rq;
bool cpuset_locked = false;
/* The pi code expects interrupts enabled */
BUG_ON(pi && in_interrupt());
recheck:
/* Double check policy once rq lock held: */
if (policy < 0) {
reset_on_fork = p->sched_reset_on_fork;
policy = oldpolicy = p->policy;
} else {
reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
if (!valid_policy(policy))
return -EINVAL;
}
if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
return -EINVAL;
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are
* 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
* SCHED_BATCH and SCHED_IDLE is 0.
*/
if (attr->sched_priority > MAX_RT_PRIO-1)
return -EINVAL;
if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
(rt_policy(policy) != (attr->sched_priority != 0)))
return -EINVAL;
if (user) {
retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
if (retval)
return retval;
if (attr->sched_flags & SCHED_FLAG_SUGOV)
return -EINVAL;
retval = security_task_setscheduler(p);
if (retval)
return retval;
}
/* Update task specific "requested" clamps */
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
retval = uclamp_validate(p, attr);
if (retval)
return retval;
}
/*
* SCHED_DEADLINE bandwidth accounting relies on stable cpusets
* information.
*/
if (dl_policy(policy) || dl_policy(p->policy)) {
cpuset_locked = true;
cpuset_lock();
}
/*
* Make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
*
* To be able to change p->policy safely, the appropriate
* runqueue lock must be held.
*/
rq = task_rq_lock(p, &rf);
update_rq_clock(rq);
/*
* Changing the policy of the stop thread is a very bad idea:
*/
if (p == rq->stop) {
retval = -EINVAL;
goto unlock;
}
/*
* If not changing anything there's no need to proceed further,
* but store a possible modification of reset_on_fork.
*/
if (unlikely(policy == p->policy)) {
if (fair_policy(policy) && attr->sched_nice != task_nice(p))
goto change;
if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
goto change;
if (dl_policy(policy) && dl_param_changed(p, attr))
goto change;
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
goto change;
p->sched_reset_on_fork = reset_on_fork;
retval = 0;
goto unlock;
}
change:
if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Do not allow realtime tasks into groups that have no runtime
* assigned.
*/
if (rt_bandwidth_enabled() && rt_policy(policy) &&
task_group(p)->rt_bandwidth.rt_runtime == 0 &&
!task_group_is_autogroup(task_group(p))) {
retval = -EPERM;
goto unlock;
}
#endif
#ifdef CONFIG_SMP
if (dl_bandwidth_enabled() && dl_policy(policy) &&
!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
cpumask_t *span = rq->rd->span;
/*
* Don't allow tasks with an affinity mask smaller than
* the entire root_domain to become SCHED_DEADLINE. We
* will also fail if there's no bandwidth available.
*/
if (!cpumask_subset(span, p->cpus_ptr) ||
rq->rd->dl_bw.bw == 0) {
retval = -EPERM;
goto unlock;
}
}
#endif
}
/* Re-check policy now with rq lock held: */
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
task_rq_unlock(rq, p, &rf);
if (cpuset_locked)
cpuset_unlock();
goto recheck;
}
/*
* If setscheduling to SCHED_DEADLINE (or changing the parameters
* of a SCHED_DEADLINE task) we need to check if enough bandwidth
* is available.
*/
if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
retval = -EBUSY;
goto unlock;
}
p->sched_reset_on_fork = reset_on_fork;
oldprio = p->prio;
newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
if (pi) {
/*
* Take priority boosted tasks into account. If the new
* effective priority is unchanged, we just store the new
* normal parameters and do not touch the scheduler class and
* the runqueue. This will be done when the task deboosts
* itself.
*/
newprio = rt_effective_prio(p, newprio);
if (newprio == oldprio)
queue_flags &= ~DEQUEUE_MOVE;
}
queued = task_on_rq_queued(p);
running = task_current(rq, p);
if (queued)
dequeue_task(rq, p, queue_flags);
if (running)
put_prev_task(rq, p);
prev_class = p->sched_class;
if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
__setscheduler_params(p, attr);
__setscheduler_prio(p, newprio);
}
__setscheduler_uclamp(p, attr);
if (queued) {
/*
* We enqueue to tail when the priority of a task is
* increased (user space view).
*/
if (oldprio < p->prio)
queue_flags |= ENQUEUE_HEAD;
enqueue_task(rq, p, queue_flags);
}
if (running)
set_next_task(rq, p);
check_class_changed(rq, p, prev_class, oldprio);
/* Avoid rq from going away on us: */
preempt_disable();
head = splice_balance_callbacks(rq);
task_rq_unlock(rq, p, &rf);
if (pi) {
if (cpuset_locked)
cpuset_unlock();
rt_mutex_adjust_pi(p);
}
/* Run balance callbacks after we've adjusted the PI chain: */
balance_callbacks(rq, head);
preempt_enable();
return 0;
unlock:
task_rq_unlock(rq, p, &rf);
if (cpuset_locked)
cpuset_unlock();
return retval;
}
static int _sched_setscheduler(struct task_struct *p, int policy,
const struct sched_param *param, bool check)
{
struct sched_attr attr = {
.sched_policy = policy,
.sched_priority = param->sched_priority,
.sched_nice = PRIO_TO_NICE(p->static_prio),
};
/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
policy &= ~SCHED_RESET_ON_FORK;
attr.sched_policy = policy;
}
return __sched_setscheduler(p, &attr, check, true);
}
/**
* sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
* @p: the task in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
*
* Use sched_set_fifo(), read its comment.
*
* Return: 0 on success. An error code otherwise.
*
* NOTE that the task may be already dead.
*/
int sched_setscheduler(struct task_struct *p, int policy,
const struct sched_param *param)
{
return _sched_setscheduler(p, policy, param, true);
}
int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
return __sched_setscheduler(p, attr, true, true);
}
int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
return __sched_setscheduler(p, attr, false, true);
}
EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
/**
* sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
* @p: the task in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
*
* Just like sched_setscheduler, only don't bother checking if the
* current context has permission. For example, this is needed in
* stop_machine(): we create temporary high priority worker threads,
* but our caller might not have that capability.
*
* Return: 0 on success. An error code otherwise.
*/
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
const struct sched_param *param)
{
return _sched_setscheduler(p, policy, param, false);
}
/*
* SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
* incapable of resource management, which is the one thing an OS really should
* be doing.
*
* This is of course the reason it is limited to privileged users only.
*
* Worse still, it is fundamentally impossible to compose static priority
* workloads. You cannot take two correctly working static prio workloads
* and smash them together and still expect them to work.
*
* For this reason 'all' FIFO tasks the kernel creates are basically at:
*
* MAX_RT_PRIO / 2
*
* The administrator _MUST_ configure the system, the kernel simply doesn't
* know enough information to make a sensible choice.
*/
void sched_set_fifo(struct task_struct *p)
{
struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);
/*
* For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
*/
void sched_set_fifo_low(struct task_struct *p)
{
struct sched_param sp = { .sched_priority = 1 };
WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);
void sched_set_normal(struct task_struct *p, int nice)
{
struct sched_attr attr = {
.sched_policy = SCHED_NORMAL,
.sched_nice = nice,
};
WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);
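/*
 * Illustrative (hypothetical) usage from code that owns a freshly
 * created kthread; 'my_fn' and "my_worker" are made-up names:
 *
 *	struct task_struct *t = kthread_create(my_fn, NULL, "my_worker");
 *	if (!IS_ERR(t)) {
 *		sched_set_fifo(t);	// FIFO at MAX_RT_PRIO / 2
 *		wake_up_process(t);
 *	}
 */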
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
struct sched_param lparam;
struct task_struct *p;
int retval;
if (!param || pid < 0)
return -EINVAL;
if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
return -EFAULT;
rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid);
if (likely(p))
get_task_struct(p);
rcu_read_unlock();
if (likely(p)) {
retval = sched_setscheduler(p, policy, &lparam);
put_task_struct(p);
}
return retval;
}
/*
* Mimics kernel/events/core.c perf_copy_attr().
*/
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
u32 size;
int ret;
/* Zero the full structure, so that a short copy will be nice: */
memset(attr, 0, sizeof(*attr));
ret = get_user(size, &uattr->size);
if (ret)
return ret;
/* ABI compatibility quirk: */
if (!size)
size = SCHED_ATTR_SIZE_VER0;
if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
goto err_size;
ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
if (ret) {
if (ret == -E2BIG)
goto err_size;
return ret;
}
if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
size < SCHED_ATTR_SIZE_VER1)
return -EINVAL;
/*
* XXX: Do we want to be lenient like existing syscalls; or do we want
* to be strict and return an error on out-of-bounds values?
*/
attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
return 0;
err_size:
put_user(sizeof(*attr), &uattr->size);
return -E2BIG;
}
static void get_params(struct task_struct *p, struct sched_attr *attr)
{
if (task_has_dl_policy(p))
__getparam_dl(p, attr);
else if (task_has_rt_policy(p))
attr->sched_priority = p->rt_priority;
else
attr->sched_nice = task_nice(p);
}
/**
* sys_sched_setscheduler - set/change the scheduler policy and RT priority
* @pid: the pid in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
*
* Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
if (policy < 0)
return -EINVAL;
return do_sched_setscheduler(pid, policy, param);
}
/**
* sys_sched_setparam - set/change the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the new RT priority.
*
* Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}
/**
* sys_sched_setattr - same as above, but with extended sched_attr
* @pid: the pid in question.
* @uattr: structure containing the extended parameters.
* @flags: for future extension.
*/
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
unsigned int, flags)
{
struct sched_attr attr;
struct task_struct *p;
int retval;
if (!uattr || pid < 0 || flags)
return -EINVAL;
retval = sched_copy_attr(uattr, &attr);
if (retval)
return retval;
if ((int)attr.sched_policy < 0)
return -EINVAL;
if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
attr.sched_policy = SETPARAM_POLICY;
rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid);
if (likely(p))
get_task_struct(p);
rcu_read_unlock();
if (likely(p)) {
if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
get_params(p, &attr);
retval = sched_setattr(p, &attr);
put_task_struct(p);
}
return retval;
}
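/*
 * Illustrative userspace invocation (glibc provides no wrapper, so the
 * raw syscall is used; the deadline parameters are made-up values):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	//  10ms
 *		.sched_deadline	=  30 * 1000 * 1000,	//  30ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100ms
 *	};
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */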
/**
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
* @pid: the pid in question.
*
* Return: On success, the policy of the thread. Otherwise, a negative error
* code.
*/
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
struct task_struct *p;
int retval;
if (pid < 0)
return -EINVAL;
retval = -ESRCH;
rcu_read_lock();
p = find_process_by_pid(pid);
if (p) {
retval = security_task_getscheduler(p);
if (!retval)
retval = p->policy
| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
}
rcu_read_unlock();
return retval;
}
/**
* sys_sched_getparam - get the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the RT priority.
*
* Return: On success, 0 and the RT priority is in @param. Otherwise, an error
* code.
*/
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
struct sched_param lp = { .sched_priority = 0 };
struct task_struct *p;
int retval;
if (!param || pid < 0)
return -EINVAL;
rcu_read_lock();
p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
if (task_has_rt_policy(p))
lp.sched_priority = p->rt_priority;
rcu_read_unlock();
/*
* This one might sleep, we cannot do it with a spinlock held ...
*/
retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
return retval;
out_unlock:
rcu_read_unlock();
return retval;
}
/*
* Copy the kernel size attribute structure (which might be larger
* than what user-space knows about) to user-space.
*
* Note that all cases are valid: user-space buffer can be larger or
* smaller than the kernel-space buffer. The usual case is that both
* have the same size.
*/
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
struct sched_attr *kattr,
unsigned int usize)
{
unsigned int ksize = sizeof(*kattr);
if (!access_ok(uattr, usize))
return -EFAULT;
/*
* sched_getattr() ABI forwards and backwards compatibility:
*
* If usize == ksize then we just copy everything to user-space and all is good.
*
* If usize < ksize then we only copy as much as user-space has space for,
* this keeps ABI compatibility as well. We skip the rest.
*
 * If usize > ksize then user-space is using a newer version of the ABI,
 * part of which the kernel doesn't know about. Just ignore it - tooling can
* detect the kernel's knowledge of attributes from the attr->size value
* which is set to ksize in this case.
*/
kattr->size = min(usize, ksize);
if (copy_to_user(uattr, kattr, kattr->size))
return -EFAULT;
return 0;
}
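/*
 * Example of the three cases above, assuming a VER1 kernel where
 * ksize == SCHED_ATTR_SIZE_VER1 (56 bytes): usize == 56 copies everything;
 * usize == 48 (a VER0 binary) copies only the first 48 bytes and reports
 * kattr->size == 48; usize == 64 (newer userspace) still copies 56 bytes
 * and reports kattr->size == 56, telling tooling where the kernel stops.
 */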
/**
* sys_sched_getattr - similar to sched_getparam, but with sched_attr
* @pid: the pid in question.
* @uattr: structure containing the extended parameters.
* @usize: sizeof(attr) for fwd/bwd comp.
* @flags: for future extension.
*/
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
unsigned int, usize, unsigned int, flags)
{
struct sched_attr kattr = { };
struct task_struct *p;
int retval;
if (!uattr || pid < 0 || usize > PAGE_SIZE ||
usize < SCHED_ATTR_SIZE_VER0 || flags)
return -EINVAL;
rcu_read_lock();
p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
kattr.sched_policy = p->policy;
if (p->sched_reset_on_fork)
kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
get_params(p, &kattr);
kattr.sched_flags &= SCHED_FLAG_ALL;
#ifdef CONFIG_UCLAMP_TASK
/*
* This could race with another potential updater, but this is fine
* because it'll correctly read the old or the new value. We don't need
* to guarantee who wins the race as long as it doesn't return garbage.
*/
kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif
rcu_read_unlock();
return sched_attr_copy_to_user(uattr, &kattr, usize);
out_unlock:
rcu_read_unlock();
return retval;
}
#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
int ret = 0;
/*
* If the task isn't a deadline task or admission control is
* disabled then we don't care about affinity changes.
*/
if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
return 0;
/*
 * Since bandwidth control happens on a root_domain basis,
* if admission test is enabled, we only admit -deadline
* tasks allowed to run on all the CPUs in the task's
* root_domain.
*/
rcu_read_lock();
if (!cpumask_subset(task_rq(p)->rd->span, mask))
ret = -EBUSY;
rcu_read_unlock();
return ret;
}
#endif
static int
__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
int retval;
cpumask_var_t cpus_allowed, new_mask;
if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
return -ENOMEM;
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_free_cpus_allowed;
}
cpuset_cpus_allowed(p, cpus_allowed);
cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
ctx->new_mask = new_mask;
ctx->flags |= SCA_CHECK;
retval = dl_task_check_affinity(p, new_mask);
if (retval)
goto out_free_new_mask;
retval = __set_cpus_allowed_ptr(p, ctx);
if (retval)
goto out_free_new_mask;
cpuset_cpus_allowed(p, cpus_allowed);
if (!cpumask_subset(new_mask, cpus_allowed)) {
/*
* We must have raced with a concurrent cpuset update.
* Just reset the cpumask to the cpuset's cpus_allowed.
*/
cpumask_copy(new_mask, cpus_allowed);
/*
* If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
* will restore the previous user_cpus_ptr value.
*
* In the unlikely event a previous user_cpus_ptr exists,
* we need to further restrict the mask to what is allowed
* by that old user_cpus_ptr.
*/
if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
bool empty = !cpumask_and(new_mask, new_mask,
ctx->user_mask);
if (WARN_ON_ONCE(empty))
cpumask_copy(new_mask, cpus_allowed);
}
__set_cpus_allowed_ptr(p, ctx);
retval = -EINVAL;
}
out_free_new_mask:
free_cpumask_var(new_mask);
out_free_cpus_allowed:
free_cpumask_var(cpus_allowed);
return retval;
}
long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
struct affinity_context ac;
struct cpumask *user_mask;
struct task_struct *p;
int retval;
rcu_read_lock();
p = find_process_by_pid(pid);
if (!p) {
rcu_read_unlock();
return -ESRCH;
}
	/* Prevent p from going away */
get_task_struct(p);
rcu_read_unlock();
if (p->flags & PF_NO_SETAFFINITY) {
retval = -EINVAL;
goto out_put_task;
}
if (!check_same_owner(p)) {
rcu_read_lock();
if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
rcu_read_unlock();
retval = -EPERM;
goto out_put_task;
}
rcu_read_unlock();
}
retval = security_task_setscheduler(p);
if (retval)
goto out_put_task;
/*
* With non-SMP configs, user_cpus_ptr/user_mask isn't used and
* alloc_user_cpus_ptr() returns NULL.
*/
user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
if (user_mask) {
cpumask_copy(user_mask, in_mask);
} else if (IS_ENABLED(CONFIG_SMP)) {
retval = -ENOMEM;
goto out_put_task;
}
ac = (struct affinity_context){
.new_mask = in_mask,
.user_mask = user_mask,
.flags = SCA_USER,
};
retval = __sched_setaffinity(p, &ac);
kfree(ac.user_mask);
out_put_task:
put_task_struct(p);
return retval;
}
static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
struct cpumask *new_mask)
{
if (len < cpumask_size())
cpumask_clear(new_mask);
else if (len > cpumask_size())
len = cpumask_size();
return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}
/**
* sys_sched_setaffinity - set the CPU affinity of a process
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to the new CPU mask
*
* Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
{
cpumask_var_t new_mask;
int retval;
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
return -ENOMEM;
retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
if (retval == 0)
retval = sched_setaffinity(pid, new_mask);
free_cpumask_var(new_mask);
return retval;
}
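/*
 * Illustrative userspace sketch (glibc wrapper; not part of this file):
 * pin the calling thread to CPU 2.
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set) == -1)
 *		perror("sched_setaffinity");
 */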
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
struct task_struct *p;
unsigned long flags;
int retval;
rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
rcu_read_unlock();
return retval;
}
/**
* sys_sched_getaffinity - get the CPU affinity of a process
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to hold the current CPU mask
*
* Return: size of CPU mask copied to user_mask_ptr on success. An
* error code otherwise.
*/
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
{
int ret;
cpumask_var_t mask;
if ((len * BITS_PER_BYTE) < nr_cpu_ids)
return -EINVAL;
if (len & (sizeof(unsigned long)-1))
return -EINVAL;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
if (ret == 0) {
unsigned int retlen = min(len, cpumask_size());
if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
ret = -EFAULT;
else
ret = retlen;
}
free_cpumask_var(mask);
return ret;
}
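/*
 * Worked example for the checks above: on a 64-bit kernel with
 * nr_cpu_ids == 8, len == 8 passes both tests and the raw syscall returns
 * 8 (the number of bytes copied), while len == 4 fails the alignment test
 * with -EINVAL. The glibc sched_getaffinity() wrapper hides the positive
 * return value and reports 0 on success.
 */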
static void do_sched_yield(void)
{
struct rq_flags rf;
struct rq *rq;
rq = this_rq_lock_irq(&rf);
schedstat_inc(rq->yld_count);
current->sched_class->yield_task(rq);
preempt_disable();
rq_unlock_irq(rq, &rf);
sched_preempt_enable_no_resched();
schedule();
}
/**
* sys_sched_yield - yield the current processor to other threads.
*
* This function yields the current CPU to other tasks. If there are no
* other threads running on this CPU then this function will return.
*
* Return: 0.
*/
SYSCALL_DEFINE0(sched_yield)
{
do_sched_yield();
return 0;
}
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
if (should_resched(0)) {
preempt_schedule_common();
return 1;
}
/*
* In preemptible kernels, ->rcu_read_lock_nesting tells the tick
* whether the current CPU is in an RCU read-side critical section,
* so the tick can report quiescent states even for CPUs looping
* in kernel context. In contrast, in non-preemptible kernels,
* RCU readers leave no in-memory hints, which means that CPU-bound
* processes executing in kernel context might never report an
* RCU quiescent state. Therefore, the following code causes
* cond_resched() to report a quiescent state, but only when RCU
* is in urgent need of one.
*/
#ifndef CONFIG_PREEMPT_RCU
rcu_all_qs();
#endif
return 0;
}
EXPORT_SYMBOL(__cond_resched);
#endif
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define cond_resched_dynamic_enabled __cond_resched
#define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(cond_resched);
#define might_resched_dynamic_enabled __cond_resched
#define might_resched_dynamic_disabled ((void *)&__static_call_return0)
DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(might_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
int __sched dynamic_cond_resched(void)
{
klp_sched_try_switch();
if (!static_branch_unlikely(&sk_dynamic_cond_resched))
return 0;
return __cond_resched();
}
EXPORT_SYMBOL(dynamic_cond_resched);
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
int __sched dynamic_might_resched(void)
{
if (!static_branch_unlikely(&sk_dynamic_might_resched))
return 0;
return __cond_resched();
}
EXPORT_SYMBOL(dynamic_might_resched);
#endif
#endif
/*
* __cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock.
*
* This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
* operations here to prevent schedule() from being called twice (once via
* spin_unlock(), once by hand).
*/
int __cond_resched_lock(spinlock_t *lock)
{
int resched = should_resched(PREEMPT_LOCK_OFFSET);
int ret = 0;
lockdep_assert_held(lock);
if (spin_needbreak(lock) || resched) {
spin_unlock(lock);
if (!_cond_resched())
cpu_relax();
ret = 1;
spin_lock(lock);
}
return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
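/*
 * Typical (illustrative) usage: breaking up a long lock-held walk. If
 * cond_resched_lock() returns 1 the lock was dropped and reacquired, so
 * state derived under the lock must be revalidated. The helpers named
 * below are made-up placeholders:
 *
 *	spin_lock(&lock);
 *	while (more_work_locked()) {
 *		do_one_step_locked();
 *		if (cond_resched_lock(&lock))
 *			continue;	(lock was dropped; revalidate)
 *	}
 *	spin_unlock(&lock);
 */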
int __cond_resched_rwlock_read(rwlock_t *lock)
{
int resched = should_resched(PREEMPT_LOCK_OFFSET);
int ret = 0;
lockdep_assert_held_read(lock);
if (rwlock_needbreak(lock) || resched) {
read_unlock(lock);
if (!_cond_resched())
cpu_relax();
ret = 1;
read_lock(lock);
}
return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_read);
int __cond_resched_rwlock_write(rwlock_t *lock)
{
int resched = should_resched(PREEMPT_LOCK_OFFSET);
int ret = 0;
lockdep_assert_held_write(lock);
if (rwlock_needbreak(lock) || resched) {
write_unlock(lock);
if (!_cond_resched())
cpu_relax();
ret = 1;
write_lock(lock);
}
return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_write);
#ifdef CONFIG_PREEMPT_DYNAMIC
#ifdef CONFIG_GENERIC_ENTRY
#include <linux/entry-common.h>
#endif
/*
* SC:cond_resched
* SC:might_resched
* SC:preempt_schedule
* SC:preempt_schedule_notrace
* SC:irqentry_exit_cond_resched
 *
* NONE:
* cond_resched <- __cond_resched
* might_resched <- RET0
* preempt_schedule <- NOP
* preempt_schedule_notrace <- NOP
* irqentry_exit_cond_resched <- NOP
*
* VOLUNTARY:
* cond_resched <- __cond_resched
* might_resched <- __cond_resched
* preempt_schedule <- NOP
* preempt_schedule_notrace <- NOP
* irqentry_exit_cond_resched <- NOP
*
* FULL:
* cond_resched <- RET0
* might_resched <- RET0
* preempt_schedule <- preempt_schedule
* preempt_schedule_notrace <- preempt_schedule_notrace
* irqentry_exit_cond_resched <- irqentry_exit_cond_resched
*/
enum {
preempt_dynamic_undefined = -1,
preempt_dynamic_none,
preempt_dynamic_voluntary,
preempt_dynamic_full,
};
int preempt_dynamic_mode = preempt_dynamic_undefined;
int sched_dynamic_mode(const char *str)
{
if (!strcmp(str, "none"))
return preempt_dynamic_none;
if (!strcmp(str, "voluntary"))
return preempt_dynamic_voluntary;
if (!strcmp(str, "full"))
return preempt_dynamic_full;
return -EINVAL;
}
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
#define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
#define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key)
#define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key)
#else
#error "Unsupported PREEMPT_DYNAMIC mechanism"
#endif
static DEFINE_MUTEX(sched_dynamic_mutex);
static bool klp_override;
static void __sched_dynamic_update(int mode)
{
/*
* Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
* the ZERO state, which is invalid.
*/
if (!klp_override)
preempt_dynamic_enable(cond_resched);
preempt_dynamic_enable(might_resched);
preempt_dynamic_enable(preempt_schedule);
preempt_dynamic_enable(preempt_schedule_notrace);
preempt_dynamic_enable(irqentry_exit_cond_resched);
switch (mode) {
case preempt_dynamic_none:
if (!klp_override)
preempt_dynamic_enable(cond_resched);
preempt_dynamic_disable(might_resched);
preempt_dynamic_disable(preempt_schedule);
preempt_dynamic_disable(preempt_schedule_notrace);
preempt_dynamic_disable(irqentry_exit_cond_resched);
if (mode != preempt_dynamic_mode)
pr_info("Dynamic Preempt: none\n");
break;
case preempt_dynamic_voluntary:
if (!klp_override)
preempt_dynamic_enable(cond_resched);
preempt_dynamic_enable(might_resched);
preempt_dynamic_disable(preempt_schedule);
preempt_dynamic_disable(preempt_schedule_notrace);
preempt_dynamic_disable(irqentry_exit_cond_resched);
if (mode != preempt_dynamic_mode)
pr_info("Dynamic Preempt: voluntary\n");
break;
case preempt_dynamic_full:
if (!klp_override)
preempt_dynamic_disable(cond_resched);
preempt_dynamic_disable(might_resched);
preempt_dynamic_enable(preempt_schedule);
preempt_dynamic_enable(preempt_schedule_notrace);
preempt_dynamic_enable(irqentry_exit_cond_resched);
if (mode != preempt_dynamic_mode)
pr_info("Dynamic Preempt: full\n");
break;
}
preempt_dynamic_mode = mode;
}
void sched_dynamic_update(int mode)
{
mutex_lock(&sched_dynamic_mutex);
__sched_dynamic_update(mode);
mutex_unlock(&sched_dynamic_mutex);
}
#ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
static int klp_cond_resched(void)
{
__klp_sched_try_switch();
return __cond_resched();
}
void sched_dynamic_klp_enable(void)
{
mutex_lock(&sched_dynamic_mutex);
klp_override = true;
static_call_update(cond_resched, klp_cond_resched);
mutex_unlock(&sched_dynamic_mutex);
}
void sched_dynamic_klp_disable(void)
{
mutex_lock(&sched_dynamic_mutex);
klp_override = false;
__sched_dynamic_update(preempt_dynamic_mode);
mutex_unlock(&sched_dynamic_mutex);
}
#endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
static int __init setup_preempt_mode(char *str)
{
int mode = sched_dynamic_mode(str);
if (mode < 0) {
pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
return 0;
}
sched_dynamic_update(mode);
return 1;
}
__setup("preempt=", setup_preempt_mode);
static void __init preempt_dynamic_init(void)
{
if (preempt_dynamic_mode == preempt_dynamic_undefined) {
if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
sched_dynamic_update(preempt_dynamic_none);
} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
sched_dynamic_update(preempt_dynamic_voluntary);
} else {
/* Default static call setting, nothing to do */
WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
preempt_dynamic_mode = preempt_dynamic_full;
pr_info("Dynamic Preempt: full\n");
}
}
}
#define PREEMPT_MODEL_ACCESSOR(mode) \
bool preempt_model_##mode(void) \
{ \
WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
return preempt_dynamic_mode == preempt_dynamic_##mode; \
} \
EXPORT_SYMBOL_GPL(preempt_model_##mode)
PREEMPT_MODEL_ACCESSOR(none);
PREEMPT_MODEL_ACCESSOR(voluntary);
PREEMPT_MODEL_ACCESSOR(full);
#else /* !CONFIG_PREEMPT_DYNAMIC */
static inline void preempt_dynamic_init(void) { }
#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
/**
* yield - yield the current processor to other threads.
*
* Do not ever use this function, there's a 99% chance you're doing it wrong.
*
* The scheduler is at all times free to pick the calling task as the most
* eligible task to run, if removing the yield() call from your code breaks
* it, it's already broken.
*
* Typical broken usage is:
*
* while (!event)
* yield();
*
* where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task, that will never
 * happen. Never use yield() as a progress guarantee!!
*
* If you want to use yield() to wait for something, use wait_event().
* If you want to use yield() to be 'nice' for others, use cond_resched().
* If you still want to use yield(), do not!
*/
void __sched yield(void)
{
set_current_state(TASK_RUNNING);
do_sched_yield();
}
EXPORT_SYMBOL(yield);
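/*
 * A minimal sketch of the wait_event() alternative recommended above
 * (names are illustrative):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(wq);
 *	static bool event;
 *
 *	waiter:				signaler:
 *	wait_event(wq, event);		event = true;
 *					wake_up(&wq);
 */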
/**
* yield_to - yield the current processor to another thread in
* your thread group, or accelerate that thread toward the
* processor it's on.
* @p: target task
* @preempt: whether task preemption is allowed or not
*
* It's the caller's job to ensure that the target task struct
* can't go away on us before we can do any checks.
*
* Return:
* true (>0) if we indeed boosted the target task.
* false (0) if we failed to boost the target.
* -ESRCH if there's no task to yield to.
*/
int __sched yield_to(struct task_struct *p, bool preempt)
{
struct task_struct *curr = current;
struct rq *rq, *p_rq;
unsigned long flags;
int yielded = 0;
local_irq_save(flags);
rq = this_rq();
again:
p_rq = task_rq(p);
/*
* If we're the only runnable task on the rq and target rq also
* has only one task, there's absolutely no point in yielding.
*/
if (rq->nr_running == 1 && p_rq->nr_running == 1) {
yielded = -ESRCH;
goto out_irq;
}
double_rq_lock(rq, p_rq);
if (task_rq(p) != p_rq) {
double_rq_unlock(rq, p_rq);
goto again;
}
if (!curr->sched_class->yield_to_task)
goto out_unlock;
if (curr->sched_class != p->sched_class)
goto out_unlock;
if (task_on_cpu(p_rq, p) || !task_is_running(p))
goto out_unlock;
yielded = curr->sched_class->yield_to_task(rq, p);
if (yielded) {
schedstat_inc(rq->yld_count);
/*
* Make p's CPU reschedule; pick_next_entity takes care of
* fairness.
*/
if (preempt && rq != p_rq)
resched_curr(p_rq);
}
out_unlock:
double_rq_unlock(rq, p_rq);
out_irq:
local_irq_restore(flags);
if (yielded > 0)
schedule();
return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);
int io_schedule_prepare(void)
{
int old_iowait = current->in_iowait;
current->in_iowait = 1;
blk_flush_plug(current->plug, true);
return old_iowait;
}
void io_schedule_finish(int token)
{
current->in_iowait = token;
}
/*
* This task is about to go to sleep on IO. Increment rq->nr_iowait so
* that process accounting knows that this is a task in IO wait state.
*/
long __sched io_schedule_timeout(long timeout)
{
int token;
long ret;
token = io_schedule_prepare();
ret = schedule_timeout(timeout);
io_schedule_finish(token);
return ret;
}
EXPORT_SYMBOL(io_schedule_timeout);
void __sched io_schedule(void)
{
int token;
token = io_schedule_prepare();
schedule();
io_schedule_finish(token);
}
EXPORT_SYMBOL(io_schedule);
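/*
 * io_schedule_prepare()/io_schedule_finish() can also bracket a custom
 * blocking primitive so the sleep is accounted as IO wait; a minimal
 * sketch mirroring io_schedule_timeout() above:
 *
 *	int token = io_schedule_prepare();
 *
 *	wait_for_completion(&done);	(any blocking call)
 *	io_schedule_finish(token);
 */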
/**
* sys_sched_get_priority_max - return maximum RT priority.
* @policy: scheduling class.
*
* Return: On success, this syscall returns the maximum
* rt_priority that can be used by a given scheduling class.
* On failure, a negative error code is returned.
*/
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
int ret = -EINVAL;
switch (policy) {
case SCHED_FIFO:
case SCHED_RR:
ret = MAX_RT_PRIO-1;
break;
case SCHED_DEADLINE:
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
ret = 0;
break;
}
return ret;
}
/**
* sys_sched_get_priority_min - return minimum RT priority.
* @policy: scheduling class.
*
* Return: On success, this syscall returns the minimum
* rt_priority that can be used by a given scheduling class.
* On failure, a negative error code is returned.
*/
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
int ret = -EINVAL;
switch (policy) {
case SCHED_FIFO:
case SCHED_RR:
ret = 1;
break;
case SCHED_DEADLINE:
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
ret = 0;
}
return ret;
}
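/*
 * Concretely (with the in-kernel MAX_RT_PRIO == 100): for SCHED_FIFO and
 * SCHED_RR the valid userspace priority range is 1..99, while
 * SCHED_DEADLINE, SCHED_NORMAL, SCHED_BATCH and SCHED_IDLE all report a
 * min and max of 0 because they do not use rt_priority.
 */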
static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
{
struct task_struct *p;
unsigned int time_slice;
struct rq_flags rf;
struct rq *rq;
int retval;
if (pid < 0)
return -EINVAL;
retval = -ESRCH;
rcu_read_lock();
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
rq = task_rq_lock(p, &rf);
time_slice = 0;
if (p->sched_class->get_rr_interval)
time_slice = p->sched_class->get_rr_interval(rq, p);
task_rq_unlock(rq, p, &rf);
rcu_read_unlock();
jiffies_to_timespec64(time_slice, t);
return 0;
out_unlock:
rcu_read_unlock();
return retval;
}
/**
* sys_sched_rr_get_interval - return the default timeslice of a process.
* @pid: pid of the process.
* @interval: userspace pointer to the timeslice value.
*
 * This syscall writes the default timeslice value of a given process
* into the user-space timespec buffer. A value of '0' means infinity.
*
* Return: On success, 0 and the timeslice is in @interval. Otherwise,
* an error code.
*/
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
struct __kernel_timespec __user *, interval)
{
struct timespec64 t;
int retval = sched_rr_get_interval(pid, &t);
if (retval == 0)
retval = put_timespec64(&t, interval);
return retval;
}
#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
struct old_timespec32 __user *, interval)
{
struct timespec64 t;
int retval = sched_rr_get_interval(pid, &t);
if (retval == 0)
retval = put_old_timespec32(&t, interval);
return retval;
}
#endif
void sched_show_task(struct task_struct *p)
{
unsigned long free = 0;
int ppid;
if (!try_get_task_stack(p))
return;
pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
if (task_is_running(p))
pr_cont(" running task ");
#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
#endif
ppid = 0;
rcu_read_lock();
if (pid_alive(p))
ppid = task_pid_nr(rcu_dereference(p->real_parent));
rcu_read_unlock();
pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
free, task_pid_nr(p), ppid,
read_task_thread_flags(p));
print_worker_info(KERN_INFO, p);
print_stop_info(KERN_INFO, p);
show_stack(p, NULL, KERN_INFO);
put_task_stack(p);
}
EXPORT_SYMBOL_GPL(sched_show_task);
static inline bool
state_filter_match(unsigned long state_filter, struct task_struct *p)
{
unsigned int state = READ_ONCE(p->__state);
/* no filter, everything matches */
if (!state_filter)
return true;
/* filter, but doesn't match */
if (!(state & state_filter))
return false;
/*
* When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
* TASK_KILLABLE).
*/
if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
return false;
return true;
}
void show_state_filter(unsigned int state_filter)
{
struct task_struct *g, *p;
rcu_read_lock();
for_each_process_thread(g, p) {
/*
 * reset the NMI-timeout, listing all tasks on a slow
 * console might take a lot of time:
* Also, reset softlockup watchdogs on all CPUs, because
* another CPU might be blocked waiting for us to process
* an IPI.
*/
touch_nmi_watchdog();
touch_all_softlockup_watchdogs();
if (state_filter_match(state_filter, p))
sched_show_task(p);
}
#ifdef CONFIG_SCHED_DEBUG
if (!state_filter)
sysrq_sched_debug_show();
#endif
rcu_read_unlock();
/*
* Only show locks if all tasks are dumped:
*/
if (!state_filter)
debug_show_all_locks();
}
/**
* init_idle - set up an idle thread for a given CPU
* @idle: task in question
* @cpu: CPU the idle task belongs to
*
* NOTE: this function does not set the idle thread's NEED_RESCHED
* flag, to make booting more robust.
*/
void __init init_idle(struct task_struct *idle, int cpu)
{
#ifdef CONFIG_SMP
struct affinity_context ac = (struct affinity_context) {
.new_mask = cpumask_of(cpu),
.flags = 0,
};
#endif
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
__sched_fork(0, idle);
raw_spin_lock_irqsave(&idle->pi_lock, flags);
raw_spin_rq_lock(rq);
idle->__state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
/*
* PF_KTHREAD should already be set at this point; regardless, make it
* look like a proper per-CPU kthread.
*/
idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
kthread_set_per_cpu(idle, cpu);
#ifdef CONFIG_SMP
/*
* It's possible that init_idle() gets called multiple times on a task,
* in that case do_set_cpus_allowed() will not do the right thing.
*
* And since this is boot we can forgo the serialization.
*/
set_cpus_allowed_common(idle, &ac);
#endif
/*
* We're having a chicken and egg problem, even though we are
* holding rq->lock, the CPU isn't yet set to this CPU so the
* lockdep check in task_group() will fail.
*
 * Similar to the sched_fork() case. Alternatively we could
 * use task_rq_lock() here and obtain the other rq->lock.
*
* Silence PROVE_RCU
*/
rcu_read_lock();
__set_task_cpu(idle, cpu);
rcu_read_unlock();
rq->idle = idle;
rcu_assign_pointer(rq->curr, idle);
idle->on_rq = TASK_ON_RQ_QUEUED;
#ifdef CONFIG_SMP
idle->on_cpu = 1;
#endif
raw_spin_rq_unlock(rq);
raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
/*
* The idle tasks have their own, simple scheduling class:
*/
idle->sched_class = &idle_sched_class;
ftrace_graph_init_idle_task(idle, cpu);
vtime_init_idle(idle, cpu);
#ifdef CONFIG_SMP
sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
}
#ifdef CONFIG_SMP
int cpuset_cpumask_can_shrink(const struct cpumask *cur,
const struct cpumask *trial)
{
int ret = 1;
if (cpumask_empty(cur))
return ret;
ret = dl_cpuset_cpumask_can_shrink(cur, trial);
return ret;
}
int task_can_attach(struct task_struct *p)
{
int ret = 0;
/*
* Kthreads which disallow setaffinity shouldn't be moved
* to a new cpuset; we don't want to change their CPU
* affinity and isolating such threads by their set of
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
* before cpus_mask may be changed.
*/
if (p->flags & PF_NO_SETAFFINITY)
ret = -EINVAL;
return ret;
}
bool sched_smp_initialized __read_mostly;
#ifdef CONFIG_NUMA_BALANCING
/* Migrate current task p to target_cpu */
int migrate_task_to(struct task_struct *p, int target_cpu)
{
struct migration_arg arg = { p, target_cpu };
int curr_cpu = task_cpu(p);
if (curr_cpu == target_cpu)
return 0;
if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
return -EINVAL;
/* TODO: This is not properly updating schedstats */
trace_sched_move_numa(p, curr_cpu, target_cpu);
return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
}
/*
* Requeue a task on a given node and accurately track the number of NUMA
* tasks on the runqueues
*/
void sched_setnuma(struct task_struct *p, int nid)
{
bool queued, running;
struct rq_flags rf;
struct rq *rq;
rq = task_rq_lock(p, &rf);
queued = task_on_rq_queued(p);
running = task_current(rq, p);
if (queued)
dequeue_task(rq, p, DEQUEUE_SAVE);
if (running)
put_prev_task(rq, p);
p->numa_preferred_nid = nid;
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
if (running)
set_next_task(rq, p);
task_rq_unlock(rq, p, &rf);
}
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
/*
* Ensure that the idle task is using init_mm right before its CPU goes
* offline.
*/
void idle_task_exit(void)
{
struct mm_struct *mm = current->active_mm;
BUG_ON(cpu_online(smp_processor_id()));
BUG_ON(current != this_rq()->idle);
if (mm != &init_mm) {
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
}
static int __balance_push_cpu_stop(void *arg)
{
struct task_struct *p = arg;
struct rq *rq = this_rq();
struct rq_flags rf;
int cpu;
raw_spin_lock_irq(&p->pi_lock);
rq_lock(rq, &rf);
update_rq_clock(rq);
if (task_rq(p) == rq && task_on_rq_queued(p)) {
cpu = select_fallback_rq(rq->cpu, p);
rq = __migrate_task(rq, &rf, p, cpu);
}
rq_unlock(rq, &rf);
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
return 0;
}
static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
/*
* Ensure we only run per-cpu kthreads once the CPU goes !active.
*
 * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it
 * only takes effect while the hotplug state machine is going down.
*/
static void balance_push(struct rq *rq)
{
struct task_struct *push_task = rq->curr;
lockdep_assert_rq_held(rq);
/*
* Ensure the thing is persistent until balance_push_set(.on = false);
*/
rq->balance_callback = &balance_push_callback;
/*
* Only active while going offline and when invoked on the outgoing
* CPU.
*/
if (!cpu_dying(rq->cpu) || rq != this_rq())
return;
/*
 * Both the CPU-hotplug thread and the stop task fall into this case; both
 * are required to complete the hotplug process.
*/
if (kthread_is_per_cpu(push_task) ||
is_migration_disabled(push_task)) {
/*
* If this is the idle task on the outgoing CPU try to wake
* up the hotplug control thread which might wait for the
* last task to vanish. The rcuwait_active() check is
* accurate here because the waiter is pinned on this CPU
 * and obviously can't be running in parallel.
*
* On RT kernels this also has to check whether there are
* pinned and scheduled out tasks on the runqueue. They
* need to leave the migrate disabled section first.
*/
if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
rcuwait_active(&rq->hotplug_wait)) {
raw_spin_rq_unlock(rq);
rcuwait_wake_up(&rq->hotplug_wait);
raw_spin_rq_lock(rq);
}
return;
}
get_task_struct(push_task);
/*
* Temporarily drop rq->lock such that we can wake-up the stop task.
* Both preemption and IRQs are still disabled.
*/
raw_spin_rq_unlock(rq);
stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
this_cpu_ptr(&push_work));
/*
* At this point need_resched() is true and we'll take the loop in
* schedule(). The next pick is obviously going to be the stop task
* which kthread_is_per_cpu() and will push this task away.
*/
raw_spin_rq_lock(rq);
}
static void balance_push_set(int cpu, bool on)
{
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
rq_lock_irqsave(rq, &rf);
if (on) {
WARN_ON_ONCE(rq->balance_callback);
rq->balance_callback = &balance_push_callback;
} else if (rq->balance_callback == &balance_push_callback) {
rq->balance_callback = NULL;
}
rq_unlock_irqrestore(rq, &rf);
}
/*
* Invoked from a CPUs hotplug control thread after the CPU has been marked
* inactive. All tasks which are not per CPU kernel threads are either
* pushed off this CPU now via balance_push() or placed on a different CPU
* during wakeup. Wait until the CPU is quiescent.
*/
static void balance_hotplug_wait(void)
{
struct rq *rq = this_rq();
rcuwait_wait_event(&rq->hotplug_wait,
rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
TASK_UNINTERRUPTIBLE);
}
#else
static inline void balance_push(struct rq *rq)
{
}
static inline void balance_push_set(int cpu, bool on)
{
}
static inline void balance_hotplug_wait(void)
{
}
#endif /* CONFIG_HOTPLUG_CPU */
void set_rq_online(struct rq *rq)
{
if (!rq->online) {
const struct sched_class *class;
cpumask_set_cpu(rq->cpu, rq->rd->online);
rq->online = 1;
for_each_class(class) {
if (class->rq_online)
class->rq_online(rq);
}
}
}
void set_rq_offline(struct rq *rq)
{
if (rq->online) {
const struct sched_class *class;
update_rq_clock(rq);
for_each_class(class) {
if (class->rq_offline)
class->rq_offline(rq);
}
cpumask_clear_cpu(rq->cpu, rq->rd->online);
rq->online = 0;
}
}
/*
* used to mark begin/end of suspend/resume:
*/
static int num_cpus_frozen;
/*
* Update cpusets according to cpu_active mask. If cpusets are
* disabled, cpuset_update_active_cpus() becomes a simple wrapper
* around partition_sched_domains().
*
* If we come here as part of a suspend/resume, don't touch cpusets because we
* want to restore it back to its original state upon resume anyway.
*/
static void cpuset_cpu_active(void)
{
if (cpuhp_tasks_frozen) {
/*
 * num_cpus_frozen tracks how many CPUs are involved in the
 * suspend/resume sequence. As long as this is not the last online
* operation in the resume sequence, just build a single sched
* domain, ignoring cpusets.
*/
partition_sched_domains(1, NULL, NULL);
if (--num_cpus_frozen)
return;
/*
* This is the last CPU online operation. So fall through and
* restore the original sched domains by considering the
* cpuset configurations.
*/
cpuset_force_rebuild();
}
cpuset_update_active_cpus();
}
static int cpuset_cpu_inactive(unsigned int cpu)
{
if (!cpuhp_tasks_frozen) {
int ret = dl_bw_check_overflow(cpu);
if (ret)
return ret;
cpuset_update_active_cpus();
} else {
num_cpus_frozen++;
partition_sched_domains(1, NULL, NULL);
}
return 0;
}
int sched_cpu_activate(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
/*
* Clear the balance_push callback and prepare to schedule
* regular tasks.
*/
balance_push_set(cpu, false);
#ifdef CONFIG_SCHED_SMT
/*
* When going up, increment the number of cores with SMT present.
*/
if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
static_branch_inc_cpuslocked(&sched_smt_present);
#endif
set_cpu_active(cpu, true);
if (sched_smp_initialized) {
sched_update_numa(cpu, true);
sched_domains_numa_masks_set(cpu);
cpuset_cpu_active();
}
/*
* Put the rq online, if not already. This happens:
*
* 1) In the early boot process, because we build the real domains
* after all CPUs have been brought up.
*
* 2) At runtime, if cpuset_cpu_active() fails to rebuild the
* domains.
*/
rq_lock_irqsave(rq, &rf);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq);
}
rq_unlock_irqrestore(rq, &rf);
return 0;
}
int sched_cpu_deactivate(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
int ret;
/*
* Remove CPU from nohz.idle_cpus_mask to prevent participating in
* load balancing when not active
*/
nohz_balance_exit_idle(rq);
set_cpu_active(cpu, false);
/*
* From this point forward, this CPU will refuse to run any task that
* is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
* push those tasks away until this gets cleared, see
* sched_cpu_dying().
*/
balance_push_set(cpu, true);
/*
* We've cleared cpu_active_mask / set balance_push, wait for all
* preempt-disabled and RCU users of this state to go away such that
* all new such users will observe it.
*
* Specifically, we rely on ttwu to no longer target this CPU, see
* ttwu_queue_cond() and is_cpu_allowed().
*
* Do sync before park smpboot threads to take care the rcu boost case.
*/
synchronize_rcu();
rq_lock_irqsave(rq, &rf);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
rq_unlock_irqrestore(rq, &rf);
#ifdef CONFIG_SCHED_SMT
/*
* When going down, decrement the number of cores with SMT present.
*/
if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
static_branch_dec_cpuslocked(&sched_smt_present);
sched_core_cpu_deactivate(cpu);
#endif
if (!sched_smp_initialized)
return 0;
sched_update_numa(cpu, false);
ret = cpuset_cpu_inactive(cpu);
if (ret) {
balance_push_set(cpu, false);
set_cpu_active(cpu, true);
sched_update_numa(cpu, true);
return ret;
}
sched_domains_numa_masks_clear(cpu);
return 0;
}
static void sched_rq_cpu_starting(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
rq->calc_load_update = calc_load_update;
update_max_interval();
}
int sched_cpu_starting(unsigned int cpu)
{
sched_core_cpu_starting(cpu);
sched_rq_cpu_starting(cpu);
sched_tick_start(cpu);
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* Invoked immediately before the stopper thread is invoked to bring the
* CPU down completely. At this point all per CPU kthreads except the
* hotplug thread (current) and the stopper thread (inactive) have been
* either parked or have been unbound from the outgoing CPU. Ensure that
* any of those which might be on the way out are gone.
*
* If after this point a bound task is being woken on this CPU then the
 * responsible hotplug callback has failed to do its job.
* sched_cpu_dying() will catch it with the appropriate fireworks.
*/
int sched_cpu_wait_empty(unsigned int cpu)
{
balance_hotplug_wait();
return 0;
}
/*
* Since this CPU is going 'away' for a while, fold any nr_active delta we
* might have. Called from the CPU stopper task after ensuring that the
* stopper is the last running task on the CPU, so nr_active count is
 * stable. We need to account for the teardown thread that is calling this,
 * so we hand in adjust = 1 to the load calculation.
*
* Also see the comment "Global load-average calculations".
*/
static void calc_load_migrate(struct rq *rq)
{
long delta = calc_load_fold_active(rq, 1);
if (delta)
atomic_long_add(delta, &calc_load_tasks);
}
static void dump_rq_tasks(struct rq *rq, const char *loglvl)
{
struct task_struct *g, *p;
int cpu = cpu_of(rq);
lockdep_assert_rq_held(rq);
printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
for_each_process_thread(g, p) {
if (task_cpu(p) != cpu)
continue;
if (!task_on_rq_queued(p))
continue;
printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
}
}
int sched_cpu_dying(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
/* Handle pending wakeups and then migrate everything off */
sched_tick_stop(cpu);
rq_lock_irqsave(rq, &rf);
if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
WARN(true, "Dying CPU not properly vacated!");
dump_rq_tasks(rq, KERN_WARNING);
}
rq_unlock_irqrestore(rq, &rf);
calc_load_migrate(rq);
update_max_interval();
hrtick_clear(rq);
sched_core_cpu_dying(cpu);
return 0;
}
#endif
void __init sched_init_smp(void)
{
sched_init_numa(NUMA_NO_NODE);
/*
* There's no userspace yet to cause hotplug operations; hence all the
* CPU masks are stable and all blatant races in the below code cannot
* happen.
*/
mutex_lock(&sched_domains_mutex);
sched_init_domains(cpu_active_mask);
mutex_unlock(&sched_domains_mutex);
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
BUG();
current->flags &= ~PF_NO_SETAFFINITY;
sched_init_granularity();
init_sched_rt_class();
init_sched_dl_class();
sched_smp_initialized = true;
}
static int __init migration_init(void)
{
sched_cpu_starting(smp_processor_id());
return 0;
}
early_initcall(migration_init);
#else
void __init sched_init_smp(void)
{
sched_init_granularity();
}
#endif /* CONFIG_SMP */
int in_sched_functions(unsigned long addr)
{
return in_lock_functions(addr) ||
(addr >= (unsigned long)__sched_text_start
&& addr < (unsigned long)__sched_text_end);
}
#ifdef CONFIG_CGROUP_SCHED
/*
* Default task group.
* Every task in system belongs to this group at bootup.
*/
struct task_group root_task_group;
LIST_HEAD(task_groups);
/* Cacheline aligned slab cache for task_group */
static struct kmem_cache *task_group_cache __read_mostly;
#endif
void __init sched_init(void)
{
unsigned long ptr = 0;
int i;
/* Make sure the linker didn't screw up */
BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
&fair_sched_class != &rt_sched_class + 1 ||
&rt_sched_class != &dl_sched_class + 1);
#ifdef CONFIG_SMP
BUG_ON(&dl_sched_class != &stop_sched_class + 1);
#endif
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
if (ptr) {
ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
#ifdef CONFIG_FAIR_GROUP_SCHED
root_task_group.se = (struct sched_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
root_task_group.cfs_rq = (struct cfs_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
root_task_group.shares = ROOT_TASK_GROUP_LOAD;
init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
root_task_group.rt_se = (struct sched_rt_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
root_task_group.rt_rq = (struct rt_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_RT_GROUP_SCHED */
}
init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
#ifdef CONFIG_SMP
init_defrootdomain();
#endif
#ifdef CONFIG_RT_GROUP_SCHED
init_rt_bandwidth(&root_task_group.rt_bandwidth,
global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CGROUP_SCHED
task_group_cache = KMEM_CACHE(task_group, 0);
list_add(&root_task_group.list, &task_groups);
INIT_LIST_HEAD(&root_task_group.children);
INIT_LIST_HEAD(&root_task_group.siblings);
autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */
for_each_possible_cpu(i) {
struct rq *rq;
rq = cpu_rq(i);
raw_spin_lock_init(&rq->__lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
init_cfs_rq(&rq->cfs);
init_rt_rq(&rq->rt);
init_dl_rq(&rq->dl);
#ifdef CONFIG_FAIR_GROUP_SCHED
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
/*
* How much CPU bandwidth does root_task_group get?
*
 * In case of task-groups formed through the cgroup filesystem, it
* gets 100% of the CPU resources in the system. This overall
* system CPU resource is divided among the tasks of
* root_task_group and its child task-groups in a fair manner,
* based on each entity's (task or task-group's) weight
* (se->load.weight).
*
* In other words, if root_task_group has 10 tasks of weight
 * 1024 and two child groups A0 and A1 (of weight 1024 each),
* then A0's share of the CPU resource is:
*
* A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
*
* We achieve this by letting root_task_group's tasks sit
* directly in rq->cfs (i.e root_task_group->se[] = NULL).
*/
init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
rq->balance_callback = &balance_push_callback;
rq->active_balance = 0;
rq->next_balance = jiffies;
rq->push_cpu = 0;
rq->cpu = i;
rq->online = 0;
rq->idle_stamp = 0;
rq->avg_idle = 2*sysctl_sched_migration_cost;
rq->wake_stamp = jiffies;
rq->wake_avg_idle = rq->avg_idle;
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
INIT_LIST_HEAD(&rq->cfs_tasks);
rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ_COMMON
rq->last_blocked_load_update_tick = jiffies;
atomic_set(&rq->nohz_flags, 0);
INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
#endif
#ifdef CONFIG_HOTPLUG_CPU
rcuwait_init(&rq->hotplug_wait);
#endif
#endif /* CONFIG_SMP */
hrtick_rq_init(rq);
atomic_set(&rq->nr_iowait, 0);
#ifdef CONFIG_SCHED_CORE
rq->core = rq;
rq->core_pick = NULL;
rq->core_enabled = 0;
rq->core_tree = RB_ROOT;
rq->core_forceidle_count = 0;
rq->core_forceidle_occupation = 0;
rq->core_forceidle_start = 0;
rq->core_cookie = 0UL;
#endif
zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
}
set_load_weight(&init_task, false);
/*
* The boot idle thread does lazy MMU switching as well:
*/
mmgrab_lazy_tlb(&init_mm);
enter_lazy_tlb(&init_mm, current);
/*
* The idle task doesn't need the kthread struct to function, but it
* is dressed up as a per-CPU kthread and thus needs to play the part
* if we want to avoid special-casing it in code that deals with per-CPU
* kthreads.
*/
WARN_ON(!set_kthread_struct(current));
/*
* Make us the idle thread. Technically, schedule() should not be
* called from this thread, however somewhere below it might be,
* but because we are the idle thread, we just pick up running again
* when this runqueue becomes "idle".
*/
init_idle(current, smp_processor_id());
calc_load_update = jiffies + LOAD_FREQ;
#ifdef CONFIG_SMP
idle_thread_set_boot_cpu();
balance_push_set(smp_processor_id(), false);
#endif
init_sched_fair_class();
psi_init();
init_uclamp();
preempt_dynamic_init();
scheduler_running = 1;
}
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
void __might_sleep(const char *file, int line)
{
unsigned int state = get_current_state();
/*
* Blocking primitives will set (and therefore destroy) current->state,
 * since we will exit with TASK_RUNNING, make sure we enter with it;
 * otherwise we will destroy state.
*/
WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
"do not call blocking ops when !TASK_RUNNING; "
"state=%x set at [<%p>] %pS\n", state,
(void *)current->task_state_change,
(void *)current->task_state_change);
__might_resched(file, line, 0);
}
EXPORT_SYMBOL(__might_sleep);
static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
{
if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
return;
if (preempt_count() == preempt_offset)
return;
pr_err("Preemption disabled at:");
print_ip_sym(KERN_ERR, ip);
}
static inline bool resched_offsets_ok(unsigned int offsets)
{
unsigned int nested = preempt_count();
nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
return nested == offsets;
}
void __might_resched(const char *file, int line, unsigned int offsets)
{
/* Ratelimiting timestamp: */
static unsigned long prev_jiffy;
unsigned long preempt_disable_ip;
/* WARN_ON_ONCE() by default, no rate limit required: */
rcu_sleep_check();
if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
!is_idle_task(current) && !current->non_block_count) ||
system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
oops_in_progress)
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
prev_jiffy = jiffies;
/* Save this before calling printk(), since that will clobber it: */
preempt_disable_ip = get_preempt_disable_ip(current);
pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
file, line);
pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
in_atomic(), irqs_disabled(), current->non_block_count,
current->pid, current->comm);
pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
offsets & MIGHT_RESCHED_PREEMPT_MASK);
if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
pr_err("RCU nest depth: %d, expected: %u\n",
rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
}
if (task_stack_end_corrupted(current))
pr_emerg("Thread overran stack, or stack corrupted\n");
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);
print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
preempt_disable_ip);
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL(__might_resched);
void __cant_sleep(const char *file, int line, int preempt_offset)
{
static unsigned long prev_jiffy;
if (irqs_disabled())
return;
if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
return;
if (preempt_count() > preempt_offset)
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
prev_jiffy = jiffies;
printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
in_atomic(), irqs_disabled(),
current->pid, current->comm);
debug_show_held_locks(current);
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_sleep);
#ifdef CONFIG_SMP
void __cant_migrate(const char *file, int line)
{
static unsigned long prev_jiffy;
if (irqs_disabled())
return;
if (is_migration_disabled(current))
return;
if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
return;
if (preempt_count() > 0)
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
prev_jiffy = jiffies;
pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
in_atomic(), irqs_disabled(), is_migration_disabled(current),
current->pid, current->comm);
debug_show_held_locks(current);
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_migrate);
#endif
#endif
#ifdef CONFIG_MAGIC_SYSRQ
void normalize_rt_tasks(void)
{
struct task_struct *g, *p;
struct sched_attr attr = {
.sched_policy = SCHED_NORMAL,
};
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
/*
* Only normalize user tasks:
*/
if (p->flags & PF_KTHREAD)
continue;
p->se.exec_start = 0;
schedstat_set(p->stats.wait_start, 0);
schedstat_set(p->stats.sleep_start, 0);
schedstat_set(p->stats.block_start, 0);
if (!dl_task(p) && !rt_task(p)) {
/*
* Renice negative nice level userspace
* tasks back to 0:
*/
if (task_nice(p) < 0)
set_user_nice(p, 0);
continue;
}
__sched_setscheduler(p, &attr, false, false);
}
read_unlock(&tasklist_lock);
}
#endif /* CONFIG_MAGIC_SYSRQ */
#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
* These functions are only useful for the IA64 MCA handling, or kdb.
*
* They can only be called when the whole system has been
* stopped - every CPU needs to be quiescent, and no scheduling
* activity can take place. Using them for anything else would
* be a serious bug, and as a result, they aren't even visible
* under any other configuration.
*/
/**
* curr_task - return the current task for a given CPU.
* @cpu: the processor in question.
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
*
* Return: The current task for @cpu.
*/
struct task_struct *curr_task(int cpu)
{
return cpu_curr(cpu);
}
#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
#ifdef CONFIG_IA64
/**
* ia64_set_curr_task - set the current task for a given CPU.
* @cpu: the processor in question.
* @p: the task pointer to set.
*
* Description: This function must only be used when non-maskable interrupts
* are serviced on a separate stack. It allows the architecture to switch the
* notion of the current task on a CPU in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled; the
 * caller must save the original value of the current task (see
* curr_task() above) and restore that value before reenabling interrupts and
* re-starting the system.
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
*/
void ia64_set_curr_task(int cpu, struct task_struct *p)
{
cpu_curr(cpu) = p;
}
#endif
#ifdef CONFIG_CGROUP_SCHED
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);
static inline void alloc_uclamp_sched_group(struct task_group *tg,
struct task_group *parent)
{
#ifdef CONFIG_UCLAMP_TASK_GROUP
enum uclamp_id clamp_id;
for_each_clamp_id(clamp_id) {
uclamp_se_set(&tg->uclamp_req[clamp_id],
uclamp_none(clamp_id), false);
tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
}
#endif
}
static void sched_free_group(struct task_group *tg)
{
free_fair_sched_group(tg);
free_rt_sched_group(tg);
autogroup_free(tg);
kmem_cache_free(task_group_cache, tg);
}
static void sched_free_group_rcu(struct rcu_head *rcu)
{
sched_free_group(container_of(rcu, struct task_group, rcu));
}
static void sched_unregister_group(struct task_group *tg)
{
unregister_fair_sched_group(tg);
unregister_rt_sched_group(tg);
/*
* We have to wait for yet another RCU grace period to expire, as
* print_cfs_stats() might run concurrently.
*/
call_rcu(&tg->rcu, sched_free_group_rcu);
}
/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
struct task_group *tg;
tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
if (!tg)
return ERR_PTR(-ENOMEM);
if (!alloc_fair_sched_group(tg, parent))
goto err;
if (!alloc_rt_sched_group(tg, parent))
goto err;
alloc_uclamp_sched_group(tg, parent);
return tg;
err:
sched_free_group(tg);
return ERR_PTR(-ENOMEM);
}
void sched_online_group(struct task_group *tg, struct task_group *parent)
{
unsigned long flags;
spin_lock_irqsave(&task_group_lock, flags);
list_add_rcu(&tg->list, &task_groups);
/* Root should already exist: */
WARN_ON(!parent);
tg->parent = parent;
INIT_LIST_HEAD(&tg->children);
list_add_rcu(&tg->siblings, &parent->children);
spin_unlock_irqrestore(&task_group_lock, flags);
online_fair_sched_group(tg);
}
/* rcu callback to free various structures associated with a task group */
static void sched_unregister_group_rcu(struct rcu_head *rhp)
{
/* Now it should be safe to free those cfs_rqs: */
sched_unregister_group(container_of(rhp, struct task_group, rcu));
}
void sched_destroy_group(struct task_group *tg)
{
	/* Wait for possible concurrent references to cfs_rqs to complete: */
call_rcu(&tg->rcu, sched_unregister_group_rcu);
}
void sched_release_group(struct task_group *tg)
{
unsigned long flags;
/*
 * Unlink first, to prevent walk_tg_tree_from() from finding us (via
* sched_cfs_period_timer()).
*
* For this to be effective, we have to wait for all pending users of
* this task group to leave their RCU critical section to ensure no new
* user will see our dying task group any more. Specifically ensure
* that tg_unthrottle_up() won't add decayed cfs_rq's to it.
*
* We therefore defer calling unregister_fair_sched_group() to
 * sched_unregister_group() which is guaranteed to get called only after the
* current RCU grace period has expired.
*/
spin_lock_irqsave(&task_group_lock, flags);
list_del_rcu(&tg->list);
list_del_rcu(&tg->siblings);
spin_unlock_irqrestore(&task_group_lock, flags);
}
static struct task_group *sched_get_task_group(struct task_struct *tsk)
{
struct task_group *tg;
/*
* All callers are synchronized by task_rq_lock(); we do not use RCU
* which is pointless here. Thus, we pass "true" to task_css_check()
* to prevent lockdep warnings.
*/
tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
struct task_group, css);
tg = autogroup_task_group(tsk, tg);
return tg;
}
static void sched_change_group(struct task_struct *tsk, struct task_group *group)
{
tsk->sched_task_group = group;
#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->task_change_group)
tsk->sched_class->task_change_group(tsk);
else
#endif
set_task_rq(tsk, task_cpu(tsk));
}
/*
* Change task's runqueue when it moves between groups.
*
* The caller of this function should have put the task in its new group by
* now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
* its new group.
*/
void sched_move_task(struct task_struct *tsk)
{
int queued, running, queue_flags =
DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
struct task_group *group;
struct rq_flags rf;
struct rq *rq;
rq = task_rq_lock(tsk, &rf);
/*
* Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
* group changes.
*/
group = sched_get_task_group(tsk);
if (group == tsk->sched_task_group)
goto unlock;
update_rq_clock(rq);
running = task_current(rq, tsk);
queued = task_on_rq_queued(tsk);
if (queued)
dequeue_task(rq, tsk, queue_flags);
if (running)
put_prev_task(rq, tsk);
sched_change_group(tsk, group);
if (queued)
enqueue_task(rq, tsk, queue_flags);
if (running) {
set_next_task(rq, tsk);
/*
* After changing group, the running task may have joined a
* throttled one but it's still the running task. Trigger a
* resched to make sure that task can still run.
*/
resched_curr(rq);
}
unlock:
task_rq_unlock(rq, tsk, &rf);
}
static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct task_group, css) : NULL;
}
static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct task_group *parent = css_tg(parent_css);
struct task_group *tg;
if (!parent) {
/* This is early initialization for the top cgroup */
return &root_task_group.css;
}
tg = sched_create_group(parent);
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
return &tg->css;
}
/* Expose task group only after completing cgroup initialization */
static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
struct task_group *parent = css_tg(css->parent);
if (parent)
sched_online_group(tg, parent);
#ifdef CONFIG_UCLAMP_TASK_GROUP
/* Propagate the effective uclamp value for the new group */
mutex_lock(&uclamp_mutex);
rcu_read_lock();
cpu_util_update_eff(css);
rcu_read_unlock();
mutex_unlock(&uclamp_mutex);
#endif
return 0;
}
static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
sched_release_group(tg);
}
static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
/*
* Relies on the RCU grace period between css_released() and this.
*/
sched_unregister_group(tg);
}
#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *css;
cgroup_taskset_for_each(task, css, tset) {
if (!sched_rt_can_attach(css_tg(css), task))
return -EINVAL;
}
return 0;
}
#endif
static void cpu_cgroup_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *css;
cgroup_taskset_for_each(task, css, tset)
sched_move_task(task);
}
#ifdef CONFIG_UCLAMP_TASK_GROUP
static void cpu_util_update_eff(struct cgroup_subsys_state *css)
{
struct cgroup_subsys_state *top_css = css;
struct uclamp_se *uc_parent = NULL;
struct uclamp_se *uc_se = NULL;
unsigned int eff[UCLAMP_CNT];
enum uclamp_id clamp_id;
unsigned int clamps;
lockdep_assert_held(&uclamp_mutex);
SCHED_WARN_ON(!rcu_read_lock_held());
css_for_each_descendant_pre(css, top_css) {
uc_parent = css_tg(css)->parent
? css_tg(css)->parent->uclamp : NULL;
for_each_clamp_id(clamp_id) {
/* Assume effective clamps match requested clamps */
eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
/* Cap effective clamps with parent's effective clamps */
if (uc_parent &&
eff[clamp_id] > uc_parent[clamp_id].value) {
eff[clamp_id] = uc_parent[clamp_id].value;
}
}
/* Ensure protection is always capped by limit */
eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
/* Propagate most restrictive effective clamps */
clamps = 0x0;
uc_se = css_tg(css)->uclamp;
for_each_clamp_id(clamp_id) {
if (eff[clamp_id] == uc_se[clamp_id].value)
continue;
uc_se[clamp_id].value = eff[clamp_id];
uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
clamps |= (0x1 << clamp_id);
}
if (!clamps) {
css = css_rightmost_descendant(css);
continue;
}
/* Immediately update descendants RUNNABLE tasks */
uclamp_update_active_tasks(css);
}
}
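/*
 * Illustrative walk-through: with a parent group whose effective
 * uclamp.max is 50% (~512 capacity units) and a child requesting 80%
 * (~819), the capping above yields an effective child value of ~512.
 * Subtrees whose effective clamps did not change are skipped entirely
 * via css_rightmost_descendant().
 */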
/*
* Integer 10^N with a given N exponent by casting to integer the literal "1eN"
* C expression. Since there is no way to convert a macro argument (N) into a
* character constant, use two levels of macros.
*/
#define _POW10(exp) ((unsigned int)1e##exp)
#define POW10(exp) _POW10(exp)
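/*
 * Worked example: POW10(2) expands to _POW10(2) and then to
 * ((unsigned int)1e2) == 100, so UCLAMP_PERCENT_SCALE below is
 * 100 * 100 == 10000, i.e. percentages carry two fixed decimal places.
 */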
struct uclamp_request {
#define UCLAMP_PERCENT_SHIFT 2
#define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
s64 percent;
u64 util;
int ret;
};
static inline struct uclamp_request
capacity_from_percent(char *buf)
{
struct uclamp_request req = {
.percent = UCLAMP_PERCENT_SCALE,
.util = SCHED_CAPACITY_SCALE,
.ret = 0,
};
buf = strim(buf);
if (strcmp(buf, "max")) {
req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
&req.percent);
if (req.ret)
return req;
if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
req.ret = -ERANGE;
return req;
}
req.util = req.percent << SCHED_CAPACITY_SHIFT;
req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
}
return req;
}
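/*
 * Worked example: writing "50" parses to req.percent == 5000 (two fixed
 * decimal places), so with SCHED_CAPACITY_SHIFT == 10 we get
 * req.util == (5000 << 10) / 10000 == 512, i.e. half of
 * SCHED_CAPACITY_SCALE (1024), rounded to the closest integer.
 */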
static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off,
enum uclamp_id clamp_id)
{
struct uclamp_request req;
struct task_group *tg;
req = capacity_from_percent(buf);
if (req.ret)
return req.ret;
static_branch_enable(&sched_uclamp_used);
mutex_lock(&uclamp_mutex);
rcu_read_lock();
tg = css_tg(of_css(of));
if (tg->uclamp_req[clamp_id].value != req.util)
uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
/*
* Because the conversion rounding is not recoverable, keep track of the
* exact requested value.
*/
tg->uclamp_pct[clamp_id] = req.percent;
/* Update effective clamps to track the most restrictive value */
cpu_util_update_eff(of_css(of));
rcu_read_unlock();
mutex_unlock(&uclamp_mutex);
return nbytes;
}
static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
}
static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
}
static inline void cpu_uclamp_print(struct seq_file *sf,
enum uclamp_id clamp_id)
{
struct task_group *tg;
u64 util_clamp;
u64 percent;
u32 rem;
rcu_read_lock();
tg = css_tg(seq_css(sf));
util_clamp = tg->uclamp_req[clamp_id].value;
rcu_read_unlock();
if (util_clamp == SCHED_CAPACITY_SCALE) {
seq_puts(sf, "max\n");
return;
}
percent = tg->uclamp_pct[clamp_id];
percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
}
static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
{
cpu_uclamp_print(sf, UCLAMP_MIN);
return 0;
}
static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
{
cpu_uclamp_print(sf, UCLAMP_MAX);
return 0;
}
#endif /* CONFIG_UCLAMP_TASK_GROUP */
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 shareval)
{
if (shareval > scale_load_down(ULONG_MAX))
shareval = MAX_SHARES;
return sched_group_set_shares(css_tg(css), scale_load(shareval));
}
static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct task_group *tg = css_tg(css);
return (u64) scale_load_down(tg->shares);
}
#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);
const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
/* More than 203 days if BW_SHIFT equals 20. */
static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
u64 burst)
{
int i, ret = 0, runtime_enabled, runtime_was_enabled;
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
if (tg == &root_task_group)
return -EINVAL;
/*
* Ensure we have at least some amount of bandwidth every period. This
* is to prevent reaching a state of large arrears when throttled via
* entity_tick(), resulting in prolonged exit starvation.
*/
if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
return -EINVAL;
/*
* Likewise, bound things on the other side by preventing insane quota
* periods. This also allows us to normalize in computing quota
* feasibility.
*/
if (period > max_cfs_quota_period)
return -EINVAL;
/*
* Bound quota to defend quota against overflow during bandwidth shift.
*/
if (quota != RUNTIME_INF && quota > max_cfs_runtime)
return -EINVAL;
if (quota != RUNTIME_INF && (burst > quota ||
burst + quota > max_cfs_runtime))
return -EINVAL;
/*
* Prevent race between setting of cfs_rq->runtime_enabled and
* unthrottle_offline_cfs_rqs().
*/
cpus_read_lock();
mutex_lock(&cfs_constraints_mutex);
ret = __cfs_schedulable(tg, period, quota);
if (ret)
goto out_unlock;
runtime_enabled = quota != RUNTIME_INF;
runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
/*
* If we need to toggle cfs_bandwidth_used, off->on must occur
* before making related changes, and on->off must occur afterwards
*/
if (runtime_enabled && !runtime_was_enabled)
cfs_bandwidth_usage_inc();
raw_spin_lock_irq(&cfs_b->lock);
cfs_b->period = ns_to_ktime(period);
cfs_b->quota = quota;
cfs_b->burst = burst;
__refill_cfs_bandwidth_runtime(cfs_b);
/* Restart the period timer (if active) to handle new period expiry: */
if (runtime_enabled)
start_cfs_bandwidth(cfs_b);
raw_spin_unlock_irq(&cfs_b->lock);
for_each_online_cpu(i) {
struct cfs_rq *cfs_rq = tg->cfs_rq[i];
struct rq *rq = cfs_rq->rq;
struct rq_flags rf;
rq_lock_irq(rq, &rf);
cfs_rq->runtime_enabled = runtime_enabled;
cfs_rq->runtime_remaining = 0;
if (cfs_rq->throttled)
unthrottle_cfs_rq(cfs_rq);
rq_unlock_irq(rq, &rf);
}
if (runtime_was_enabled && !runtime_enabled)
cfs_bandwidth_usage_dec();
out_unlock:
mutex_unlock(&cfs_constraints_mutex);
cpus_read_unlock();
return ret;
}
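/*
 * Usage sketch: on cgroup v1 this is reached through the
 * cpu.cfs_{period,quota,burst}_us files, e.g. from a shell:
 *
 *   echo 100000 > cpu.cfs_period_us   # 100ms period
 *   echo  50000 > cpu.cfs_quota_us    # 50ms quota: half a CPU per period
 *
 * Values are in microseconds and are converted to nanoseconds before
 * reaching tg_set_cfs_bandwidth().
 */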
static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
u64 quota, period, burst;
period = ktime_to_ns(tg->cfs_bandwidth.period);
burst = tg->cfs_bandwidth.burst;
if (cfs_quota_us < 0)
quota = RUNTIME_INF;
else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
quota = (u64)cfs_quota_us * NSEC_PER_USEC;
else
return -EINVAL;
return tg_set_cfs_bandwidth(tg, period, quota, burst);
}
static long tg_get_cfs_quota(struct task_group *tg)
{
u64 quota_us;
if (tg->cfs_bandwidth.quota == RUNTIME_INF)
return -1;
quota_us = tg->cfs_bandwidth.quota;
do_div(quota_us, NSEC_PER_USEC);
return quota_us;
}
static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
u64 quota, period, burst;
if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
return -EINVAL;
period = (u64)cfs_period_us * NSEC_PER_USEC;
quota = tg->cfs_bandwidth.quota;
burst = tg->cfs_bandwidth.burst;
return tg_set_cfs_bandwidth(tg, period, quota, burst);
}
static long tg_get_cfs_period(struct task_group *tg)
{
u64 cfs_period_us;
cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
do_div(cfs_period_us, NSEC_PER_USEC);
return cfs_period_us;
}
static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
{
u64 quota, period, burst;
if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
return -EINVAL;
burst = (u64)cfs_burst_us * NSEC_PER_USEC;
period = ktime_to_ns(tg->cfs_bandwidth.period);
quota = tg->cfs_bandwidth.quota;
return tg_set_cfs_bandwidth(tg, period, quota, burst);
}
static long tg_get_cfs_burst(struct task_group *tg)
{
u64 burst_us;
burst_us = tg->cfs_bandwidth.burst;
do_div(burst_us, NSEC_PER_USEC);
return burst_us;
}
static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
return tg_get_cfs_quota(css_tg(css));
}
static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
struct cftype *cftype, s64 cfs_quota_us)
{
return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
}
static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
return tg_get_cfs_period(css_tg(css));
}
static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 cfs_period_us)
{
return tg_set_cfs_period(css_tg(css), cfs_period_us);
}
static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
return tg_get_cfs_burst(css_tg(css));
}
static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 cfs_burst_us)
{
return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
}
struct cfs_schedulable_data {
struct task_group *tg;
u64 period, quota;
};
/*
* normalize group quota/period to be quota/max_period
* note: units are usecs
*/
static u64 normalize_cfs_quota(struct task_group *tg,
struct cfs_schedulable_data *d)
{
u64 quota, period;
if (tg == d->tg) {
period = d->period;
quota = d->quota;
} else {
period = tg_get_cfs_period(tg);
quota = tg_get_cfs_quota(tg);
}
/* note: these should typically be equivalent */
if (quota == RUNTIME_INF || quota == -1)
return RUNTIME_INF;
return to_ratio(period, quota);
}
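/*
 * Worked example: with period == 100000us and quota == 50000us,
 * to_ratio() returns the quota/period ratio in fixed point; assuming
 * BW_SHIFT == 20 as in sched.h, that is (50000 << 20) / 100000 ==
 * 524288, i.e. 0.5 of BW_UNIT.
 */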
static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
struct cfs_schedulable_data *d = data;
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
s64 quota = 0, parent_quota = -1;
if (!tg->parent) {
quota = RUNTIME_INF;
} else {
struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
quota = normalize_cfs_quota(tg, d);
parent_quota = parent_b->hierarchical_quota;
/*
* Ensure max(child_quota) <= parent_quota. On cgroup2,
* always take the non-RUNTIME_INF min. On cgroup1, only
* inherit when no limit is set. In both cases this is used
* by the scheduler to determine if a given CFS task has a
* bandwidth constraint at some higher level.
*/
if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
if (quota == RUNTIME_INF)
quota = parent_quota;
else if (parent_quota != RUNTIME_INF)
quota = min(quota, parent_quota);
} else {
if (quota == RUNTIME_INF)
quota = parent_quota;
else if (parent_quota != RUNTIME_INF && quota > parent_quota)
return -EINVAL;
}
}
cfs_b->hierarchical_quota = quota;
return 0;
}
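/*
 * Illustrative example of the two branches above: with a parent
 * hierarchical quota ratio of 0.5 and a child requesting 0.8, cgroup2
 * silently caps the child's hierarchical_quota at 0.5, while cgroup1
 * rejects the configuration with -EINVAL.
 */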
static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
int ret;
struct cfs_schedulable_data data = {
.tg = tg,
.period = period,
.quota = quota,
};
if (quota != RUNTIME_INF) {
do_div(data.period, NSEC_PER_USEC);
do_div(data.quota, NSEC_PER_USEC);
}
rcu_read_lock();
ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
rcu_read_unlock();
return ret;
}
static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
{
struct task_group *tg = css_tg(seq_css(sf));
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
if (schedstat_enabled() && tg != &root_task_group) {
struct sched_statistics *stats;
u64 ws = 0;
int i;
for_each_possible_cpu(i) {
stats = __schedstats_from_se(tg->se[i]);
ws += schedstat_val(stats->wait_sum);
}
seq_printf(sf, "wait_sum %llu\n", ws);
}
seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
return 0;
}
static u64 throttled_time_self(struct task_group *tg)
{
int i;
u64 total = 0;
for_each_possible_cpu(i) {
total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
}
return total;
}
static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
{
struct task_group *tg = css_tg(seq_css(sf));
seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
struct cftype *cft, s64 val)
{
return sched_group_set_rt_runtime(css_tg(css), val);
}
static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
return sched_group_rt_runtime(css_tg(css));
}
static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 rt_period_us)
{
return sched_group_set_rt_period(css_tg(css), rt_period_us);
}
static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
struct cftype *cft)
{
return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
return css_tg(css)->idle;
}
static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
struct cftype *cft, s64 idle)
{
return sched_group_set_idle(css_tg(css), idle);
}
#endif
static struct cftype cpu_legacy_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
{
.name = "shares",
.read_u64 = cpu_shares_read_u64,
.write_u64 = cpu_shares_write_u64,
},
{
.name = "idle",
.read_s64 = cpu_idle_read_s64,
.write_s64 = cpu_idle_write_s64,
},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
{
.name = "cfs_quota_us",
.read_s64 = cpu_cfs_quota_read_s64,
.write_s64 = cpu_cfs_quota_write_s64,
},
{
.name = "cfs_period_us",
.read_u64 = cpu_cfs_period_read_u64,
.write_u64 = cpu_cfs_period_write_u64,
},
{
.name = "cfs_burst_us",
.read_u64 = cpu_cfs_burst_read_u64,
.write_u64 = cpu_cfs_burst_write_u64,
},
{
.name = "stat",
.seq_show = cpu_cfs_stat_show,
},
{
.name = "stat.local",
.seq_show = cpu_cfs_local_stat_show,
},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
{
.name = "rt_runtime_us",
.read_s64 = cpu_rt_runtime_read,
.write_s64 = cpu_rt_runtime_write,
},
{
.name = "rt_period_us",
.read_u64 = cpu_rt_period_read_uint,
.write_u64 = cpu_rt_period_write_uint,
},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
{
.name = "uclamp.min",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cpu_uclamp_min_show,
.write = cpu_uclamp_min_write,
},
{
.name = "uclamp.max",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cpu_uclamp_max_show,
.write = cpu_uclamp_max_write,
},
#endif
{ } /* Terminate */
};
static int cpu_extra_stat_show(struct seq_file *sf,
struct cgroup_subsys_state *css)
{
#ifdef CONFIG_CFS_BANDWIDTH
{
struct task_group *tg = css_tg(css);
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
u64 throttled_usec, burst_usec;
throttled_usec = cfs_b->throttled_time;
do_div(throttled_usec, NSEC_PER_USEC);
burst_usec = cfs_b->burst_time;
do_div(burst_usec, NSEC_PER_USEC);
seq_printf(sf, "nr_periods %d\n"
"nr_throttled %d\n"
"throttled_usec %llu\n"
"nr_bursts %d\n"
"burst_usec %llu\n",
cfs_b->nr_periods, cfs_b->nr_throttled,
throttled_usec, cfs_b->nr_burst, burst_usec);
}
#endif
return 0;
}
static int cpu_local_stat_show(struct seq_file *sf,
struct cgroup_subsys_state *css)
{
#ifdef CONFIG_CFS_BANDWIDTH
{
struct task_group *tg = css_tg(css);
u64 throttled_self_usec;
throttled_self_usec = throttled_time_self(tg);
do_div(throttled_self_usec, NSEC_PER_USEC);
seq_printf(sf, "throttled_usec %llu\n",
throttled_self_usec);
}
#endif
return 0;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct task_group *tg = css_tg(css);
u64 weight = scale_load_down(tg->shares);
return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
}
static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
struct cftype *cft, u64 weight)
{
/*
* cgroup weight knobs should use the common MIN, DFL and MAX
* values which are 1, 100 and 10000 respectively. While it loses
* a bit of range on both ends, it maps pretty well onto the shares
* value used by the scheduler, and the round-trip conversions preserve
* the original value over the entire range.
*/
if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
return -ERANGE;
weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
return sched_group_set_shares(css_tg(css), scale_load(weight));
}
static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
unsigned long weight = scale_load_down(css_tg(css)->shares);
int last_delta = INT_MAX;
int prio, delta;
/* find the closest nice value to the current weight */
for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
delta = abs(sched_prio_to_weight[prio] - weight);
if (delta >= last_delta)
break;
last_delta = delta;
}
return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
}
static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
struct cftype *cft, s64 nice)
{
unsigned long weight;
int idx;
if (nice < MIN_NICE || nice > MAX_NICE)
return -ERANGE;
idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
idx = array_index_nospec(idx, 40);
weight = sched_prio_to_weight[idx];
return sched_group_set_shares(css_tg(css), scale_load(weight));
}
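/*
 * Worked example: nice == 0 gives idx == NICE_TO_PRIO(0) - MAX_RT_PRIO
 * == 20, so the shares written correspond to sched_prio_to_weight[20]
 * == 1024, which cpu_weight_read_u64() maps back to the default cgroup
 * weight of 100.
 */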
#endif
static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
long period, long quota)
{
if (quota < 0)
seq_puts(sf, "max");
else
seq_printf(sf, "%ld", quota);
seq_printf(sf, " %ld\n", period);
}
/* caller should put the current value in *@periodp before calling */
static int __maybe_unused cpu_period_quota_parse(char *buf,
u64 *periodp, u64 *quotap)
{
char tok[21]; /* U64_MAX */
if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
return -EINVAL;
*periodp *= NSEC_PER_USEC;
if (sscanf(tok, "%llu", quotap))
*quotap *= NSEC_PER_USEC;
else if (!strcmp(tok, "max"))
*quotap = RUNTIME_INF;
else
return -EINVAL;
return 0;
}
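/*
 * Input examples: "max 100000" yields *quotap == RUNTIME_INF with a
 * 100ms period; a bare "50000" yields a 50ms quota and leaves *periodp
 * at the caller-preloaded value (still converted to nanoseconds), since
 * sscanf() then matches only the first token.
 */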
#ifdef CONFIG_CFS_BANDWIDTH
static int cpu_max_show(struct seq_file *sf, void *v)
{
struct task_group *tg = css_tg(seq_css(sf));
cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
return 0;
}
static ssize_t cpu_max_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct task_group *tg = css_tg(of_css(of));
u64 period = tg_get_cfs_period(tg);
u64 burst = tg_get_cfs_burst(tg);
u64 quota;
int ret;
ret = cpu_period_quota_parse(buf, &period, &quota);
if (!ret)
ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
return ret ?: nbytes;
}
#endif
static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
{
.name = "weight",
.flags = CFTYPE_NOT_ON_ROOT,
.read_u64 = cpu_weight_read_u64,
.write_u64 = cpu_weight_write_u64,
},
{
.name = "weight.nice",
.flags = CFTYPE_NOT_ON_ROOT,
.read_s64 = cpu_weight_nice_read_s64,
.write_s64 = cpu_weight_nice_write_s64,
},
{
.name = "idle",
.flags = CFTYPE_NOT_ON_ROOT,
.read_s64 = cpu_idle_read_s64,
.write_s64 = cpu_idle_write_s64,
},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
{
.name = "max",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cpu_max_show,
.write = cpu_max_write,
},
{
.name = "max.burst",
.flags = CFTYPE_NOT_ON_ROOT,
.read_u64 = cpu_cfs_burst_read_u64,
.write_u64 = cpu_cfs_burst_write_u64,
},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
{
.name = "uclamp.min",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cpu_uclamp_min_show,
.write = cpu_uclamp_min_write,
},
{
.name = "uclamp.max",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cpu_uclamp_max_show,
.write = cpu_uclamp_max_write,
},
#endif
{ } /* terminate */
};
struct cgroup_subsys cpu_cgrp_subsys = {
.css_alloc = cpu_cgroup_css_alloc,
.css_online = cpu_cgroup_css_online,
.css_released = cpu_cgroup_css_released,
.css_free = cpu_cgroup_css_free,
.css_extra_stat_show = cpu_extra_stat_show,
.css_local_stat_show = cpu_local_stat_show,
#ifdef CONFIG_RT_GROUP_SCHED
.can_attach = cpu_cgroup_can_attach,
#endif
.attach = cpu_cgroup_attach,
.legacy_cftypes = cpu_legacy_files,
.dfl_cftypes = cpu_files,
.early_init = true,
.threaded = true,
};
#endif /* CONFIG_CGROUP_SCHED */
void dump_cpu_task(int cpu)
{
if (cpu == smp_processor_id() && in_hardirq()) {
struct pt_regs *regs;
regs = get_irq_regs();
if (regs) {
show_regs(regs);
return;
}
}
if (trigger_single_cpu_backtrace(cpu))
return;
pr_info("Task dump for CPU %d:\n", cpu);
sched_show_task(cpu_curr(cpu));
}
/*
* Nice levels are multiplicative, with a gentle 10% change for every
* nice level changed. I.e. when a CPU-bound task goes from nice 0 to
* nice 1, it will get ~10% less CPU time than another CPU-bound task
* that remained on nice 0.
*
* The "10% effect" is relative and cumulative: from _any_ nice level,
* if you go up 1 level, it's -10% CPU usage, if you go down 1 level
* it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
* If a task goes up by ~10% and another task goes down by ~10% then
* the relative distance between them is ~25%.)
*/
const int sched_prio_to_weight[40] = {
/* -20 */ 88761, 71755, 56483, 46273, 36291,
/* -15 */ 29154, 23254, 18705, 14949, 11916,
/* -10 */ 9548, 7620, 6100, 4904, 3906,
/* -5 */ 3121, 2501, 1991, 1586, 1277,
/* 0 */ 1024, 820, 655, 526, 423,
/* 5 */ 335, 272, 215, 172, 137,
/* 10 */ 110, 87, 70, 56, 45,
/* 15 */ 36, 29, 23, 18, 15,
};
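/*
 * Worked example: each entry is ~1.25x the next one, e.g. nice 0 maps
 * to 1024 and nice 1 to 820, and 1024 / 820 ~= 1.25. Two CPU-bound
 * tasks one nice level apart therefore split CPU time roughly 55%/45%,
 * the ~10% relative difference described above.
 */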
/*
* Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
*
* In cases where the weight does not change often, we can use the
* precalculated inverse to speed up arithmetic by turning divisions
* into multiplications:
*/
const u32 sched_prio_to_wmult[40] = {
/* -20 */ 48388, 59856, 76040, 92818, 118348,
/* -15 */ 147320, 184698, 229616, 287308, 360437,
/* -10 */ 449829, 563644, 704093, 875809, 1099582,
/* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
/* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
/* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
/* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
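/*
 * Usage sketch: with these inverses, dividing by a weight can be
 * replaced by a multiply-and-shift, roughly
 *
 *   delta ~= (delta_exec * weight * wmult) >> 32
 *
 * e.g. dividing by the nice-0 weight 1024 becomes multiplying by
 * 4194304 (== 2^32 / 1024) and shifting right by 32.
 */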
void call_trace_sched_update_nr_running(struct rq *rq, int count)
{
trace_sched_update_nr_running_tp(rq, count);
}
#ifdef CONFIG_SCHED_MM_CID
/*
* @cid_lock: Guarantee forward-progress of cid allocation.
*
* Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
* is only used when contention is detected by the lock-free allocation so
* forward progress can be guaranteed.
*/
DEFINE_RAW_SPINLOCK(cid_lock);
/*
* @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
*
* When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
* detected, it is set to 1 to ensure that all newly coming allocations are
* serialized by @cid_lock until the allocation which detected contention
* completes and sets @use_cid_lock back to 0. This guarantees forward progress
* of a cid allocation.
*/
int use_cid_lock;
/*
* mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
* concurrently with respect to the execution of the source runqueue context
* switch.
*
* There is one basic property we want to guarantee here:
*
* (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
* used by a task. That would lead to concurrent allocation of the cid and
* userspace corruption.
*
* Provide this guarantee by introducing a Dekker memory ordering to guarantee
* that a pair of loads observe at least one of a pair of stores, which can be
* shown as:
*
* X = Y = 0
*
* w[X]=1 w[Y]=1
* MB MB
* r[Y]=y r[X]=x
*
* Which guarantees that x==0 && y==0 is impossible. But rather than using
* values 0 and 1, this algorithm cares about specific state transitions of the
* runqueue current task (as updated by the scheduler context switch), and the
* per-mm/cpu cid value.
*
* Let's introduce task (Y) which has task->mm == mm and task (N) which has
* task->mm != mm for the rest of the discussion. There are two scheduler state
* transitions on context switch we care about:
*
* (TSA) Store to rq->curr with transition from (N) to (Y)
*
* (TSB) Store to rq->curr with transition from (Y) to (N)
*
* On the remote-clear side, there is one transition we care about:
*
* (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
*
* There is also a transition to UNSET state which can be performed from all
* sides (scheduler, remote-clear). It is always performed with a cmpxchg which
* guarantees that only a single thread will succeed:
*
* (TMB) cmpxchg to *pcpu_cid to mark UNSET
*
* Just to be clear, what we do _not_ want to happen is a transition to UNSET
* when a thread is actively using the cid (property (1)).
*
* Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
*
* Scenario A) (TSA)+(TMA) (from next task perspective)
*
* CPU0 CPU1
*
* Context switch CS-1 Remote-clear
* - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
* (implied barrier after cmpxchg)
* - switch_mm_cid()
* - memory barrier (see switch_mm_cid()
* comment explaining how this barrier
* is combined with other scheduler
* barriers)
* - mm_cid_get (next)
* - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
*
* This Dekker ensures that either task (Y) is observed by the
* rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
* observed.
*
* If task (Y) store is observed by rcu_dereference(), it means that there is
* still an active task on the cpu. Remote-clear will therefore not transition
* to UNSET, which fulfills property (1).
*
* If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
* it will move its state to UNSET, which clears the percpu cid perhaps
* uselessly (which is not an issue for correctness). Because task (Y) is not
* observed, CPU1 can move ahead to set the state to UNSET. Because moving
* state to UNSET is done with a cmpxchg expecting that the old state has the
* LAZY flag set, only one thread will successfully UNSET.
*
* If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
* will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
* CPU1 will observe task (Y) and do nothing more, which is fine.
*
* What we are effectively preventing with this Dekker is a scenario where
* neither LAZY flag nor store (Y) are observed, which would fail property (1)
* because this would UNSET a cid which is actively used.
*/
void sched_mm_cid_migrate_from(struct task_struct *t)
{
t->migrate_from_cpu = task_cpu(t);
}
static
int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
struct task_struct *t,
struct mm_cid *src_pcpu_cid)
{
struct mm_struct *mm = t->mm;
struct task_struct *src_task;
int src_cid, last_mm_cid;
if (!mm)
return -1;
last_mm_cid = t->last_mm_cid;
/*
* If the migrated task has no last cid, or if the current
* task on src rq uses the cid, it means the source cid does not need
* to be moved to the destination cpu.
*/
if (last_mm_cid == -1)
return -1;
src_cid = READ_ONCE(src_pcpu_cid->cid);
if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
return -1;
/*
* If we observe an active task using the mm on this rq, it means we
* are not the last task to be migrated from this cpu for this mm, so
* there is no need to move src_cid to the destination cpu.
*/
rcu_read_lock();
src_task = rcu_dereference(src_rq->curr);
if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
rcu_read_unlock();
t->last_mm_cid = -1;
return -1;
}
rcu_read_unlock();
return src_cid;
}
static
int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
struct task_struct *t,
struct mm_cid *src_pcpu_cid,
int src_cid)
{
struct task_struct *src_task;
struct mm_struct *mm = t->mm;
int lazy_cid;
if (src_cid == -1)
return -1;
/*
* Attempt to clear the source cpu cid to move it to the destination
* cpu.
*/
lazy_cid = mm_cid_set_lazy_put(src_cid);
if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
return -1;
/*
* The implicit barrier after cmpxchg per-mm/cpu cid before loading
* rq->curr->mm matches the scheduler barrier in context_switch()
* between store to rq->curr and load of prev and next task's
* per-mm/cpu cid.
*
* The implicit barrier after cmpxchg per-mm/cpu cid before loading
* rq->curr->mm_cid_active matches the barrier in
* sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
* sched_mm_cid_after_execve() between store to t->mm_cid_active and
* load of per-mm/cpu cid.
*/
/*
* If we observe an active task using the mm on this rq after setting
* the lazy-put flag, this task will be responsible for transitioning
* from lazy-put flag set to MM_CID_UNSET.
*/
rcu_read_lock();
src_task = rcu_dereference(src_rq->curr);
if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
rcu_read_unlock();
/*
* We observed an active task for this mm, there is therefore
* no point in moving this cid to the destination cpu.
*/
t->last_mm_cid = -1;
return -1;
}
rcu_read_unlock();
/*
* The src_cid is unused, so it can be unset.
*/
if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
return -1;
return src_cid;
}
/*
* Migration to dst cpu. Called with dst_rq lock held.
* Interrupts are disabled, which keeps the window of cid ownership without the
* source rq lock held small.
*/
void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
{
struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
struct mm_struct *mm = t->mm;
int src_cid, dst_cid, src_cpu;
struct rq *src_rq;
lockdep_assert_rq_held(dst_rq);
if (!mm)
return;
src_cpu = t->migrate_from_cpu;
if (src_cpu == -1) {
t->last_mm_cid = -1;
return;
}
/*
* Move the src cid if the dst cid is unset. This keeps id
* allocation closest to 0 in cases where few threads migrate around
* many cpus.
*
* If destination cid is already set, we may have to just clear
* the src cid to ensure compactness in frequent migrations
* scenarios.
*
* It is not useful to clear the src cid when the number of threads is
* greater than or equal to the number of allowed cpus, because user-space
* can expect that the number of allowed cids can reach the number of
* allowed cpus.
*/
dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
dst_cid = READ_ONCE(dst_pcpu_cid->cid);
if (!mm_cid_is_unset(dst_cid) &&
atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
return;
src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
src_rq = cpu_rq(src_cpu);
src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
if (src_cid == -1)
return;
src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
src_cid);
if (src_cid == -1)
return;
if (!mm_cid_is_unset(dst_cid)) {
__mm_cid_put(mm, src_cid);
return;
}
/* Move src_cid to dst cpu. */
mm_cid_snapshot_time(dst_rq, mm);
WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
}
static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct task_struct *t;
unsigned long flags;
int cid, lazy_cid;
cid = READ_ONCE(pcpu_cid->cid);
if (!mm_cid_is_valid(cid))
return;
/*
* Clear the cpu cid if it is set to keep cid allocation compact. If
* there happens to be other tasks left on the source cpu using this
* mm, the next task using this mm will reallocate its cid on context
* switch.
*/
lazy_cid = mm_cid_set_lazy_put(cid);
if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
return;
/*
* The implicit barrier after cmpxchg per-mm/cpu cid before loading
* rq->curr->mm matches the scheduler barrier in context_switch()
* between store to rq->curr and load of prev and next task's
* per-mm/cpu cid.
*
* The implicit barrier after cmpxchg per-mm/cpu cid before loading
* rq->curr->mm_cid_active matches the barrier in
* sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
* sched_mm_cid_after_execve() between store to t->mm_cid_active and
* load of per-mm/cpu cid.
*/
/*
* If we observe an active task using the mm on this rq after setting
* the lazy-put flag, that task will be responsible for transitioning
* from lazy-put flag set to MM_CID_UNSET.
*/
rcu_read_lock();
t = rcu_dereference(rq->curr);
if (READ_ONCE(t->mm_cid_active) && t->mm == mm) {
rcu_read_unlock();
return;
}
rcu_read_unlock();
/*
* The cid is unused, so it can be unset.
* Disable interrupts to keep the window of cid ownership without rq
* lock small.
*/
local_irq_save(flags);
if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
__mm_cid_put(mm, cid);
local_irq_restore(flags);
}
static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct mm_cid *pcpu_cid;
struct task_struct *curr;
u64 rq_clock;
/*
* rq->clock load is racy on 32-bit but one spurious clear once in a
* while is irrelevant.
*/
rq_clock = READ_ONCE(rq->clock);
pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
/*
* In order to take care of infrequently scheduled tasks, bump the time
* snapshot associated with this cid if an active task using the mm is
* observed on this rq.
*/
rcu_read_lock();
curr = rcu_dereference(rq->curr);
if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
WRITE_ONCE(pcpu_cid->time, rq_clock);
rcu_read_unlock();
return;
}
rcu_read_unlock();
if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
return;
sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}
static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
int weight)
{
struct mm_cid *pcpu_cid;
int cid;
pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
cid = READ_ONCE(pcpu_cid->cid);
if (!mm_cid_is_valid(cid) || cid < weight)
return;
sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}
static void task_mm_cid_work(struct callback_head *work)
{
unsigned long now = jiffies, old_scan, next_scan;
struct task_struct *t = current;
struct cpumask *cidmask;
struct mm_struct *mm;
int weight, cpu;
SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
work->next = work; /* Prevent double-add */
if (t->flags & PF_EXITING)
return;
mm = t->mm;
if (!mm)
return;
old_scan = READ_ONCE(mm->mm_cid_next_scan);
next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
if (!old_scan) {
unsigned long res;
res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
if (res != old_scan)
old_scan = res;
else
old_scan = next_scan;
}
if (time_before(now, old_scan))
return;
if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
return;
cidmask = mm_cidmask(mm);
/* Clear cids that were not recently used. */
for_each_possible_cpu(cpu)
sched_mm_cid_remote_clear_old(mm, cpu);
weight = cpumask_weight(cidmask);
/*
* Clear cids that are greater than or equal to the cidmask weight to
* recompact it.
*/
for_each_possible_cpu(cpu)
sched_mm_cid_remote_clear_weight(mm, cpu, weight);
}
void init_sched_mm_cid(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
int mm_users = 0;
if (mm) {
mm_users = atomic_read(&mm->mm_users);
if (mm_users == 1)
mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
}
t->cid_work.next = &t->cid_work; /* Protect against double add */
init_task_work(&t->cid_work, task_mm_cid_work);
}
void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
{
struct callback_head *work = &curr->cid_work;
unsigned long now = jiffies;
if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
work->next != work)
return;
if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
return;
task_work_add(curr, work, TWA_RESUME);
}
void sched_mm_cid_exit_signals(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
struct rq_flags rf;
struct rq *rq;
if (!mm)
return;
preempt_disable();
rq = this_rq();
rq_lock_irqsave(rq, &rf);
preempt_enable_no_resched(); /* holding spinlock */
WRITE_ONCE(t->mm_cid_active, 0);
/*
* Store t->mm_cid_active before loading per-mm/cpu cid.
* Matches barrier in sched_mm_cid_remote_clear_old().
*/
smp_mb();
mm_cid_put(mm);
t->last_mm_cid = t->mm_cid = -1;
rq_unlock_irqrestore(rq, &rf);
}
void sched_mm_cid_before_execve(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
struct rq_flags rf;
struct rq *rq;
if (!mm)
return;
preempt_disable();
rq = this_rq();
rq_lock_irqsave(rq, &rf);
preempt_enable_no_resched(); /* holding spinlock */
WRITE_ONCE(t->mm_cid_active, 0);
/*
* Store t->mm_cid_active before loading per-mm/cpu cid.
* Matches barrier in sched_mm_cid_remote_clear_old().
*/
smp_mb();
mm_cid_put(mm);
t->last_mm_cid = t->mm_cid = -1;
rq_unlock_irqrestore(rq, &rf);
}
void sched_mm_cid_after_execve(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
struct rq_flags rf;
struct rq *rq;
if (!mm)
return;
preempt_disable();
rq = this_rq();
rq_lock_irqsave(rq, &rf);
preempt_enable_no_resched(); /* holding spinlock */
WRITE_ONCE(t->mm_cid_active, 1);
/*
* Store t->mm_cid_active before loading per-mm/cpu cid.
* Matches barrier in sched_mm_cid_remote_clear_old().
*/
smp_mb();
t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
rq_unlock_irqrestore(rq, &rf);
rseq_set_notify_resume(t);
}
void sched_mm_cid_fork(struct task_struct *t)
{
WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
t->mm_cid_active = 1;
}
#endif
| linux-master | kernel/sched/core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* kernel/sched/debug.c
*
* Print the CFS rbtree and other debugging details
*
* Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
*/
/*
* This allows printing both to /proc/sched_debug and
* to the console
*/
#define SEQ_printf(m, x...) \
do { \
if (m) \
seq_printf(m, x); \
else \
pr_cont(x); \
} while (0)
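/*
 * Usage example: SEQ_printf(m, "cpu#%d\n", cpu) writes to the seq_file
 * when invoked via /proc or debugfs (m != NULL), and falls back to the
 * console via pr_cont() when invoked with m == NULL, as done from
 * sysrq_sched_debug_show() below.
 */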
/*
* Ease the printing of nsec fields:
*/
static long long nsec_high(unsigned long long nsec)
{
if ((long long)nsec < 0) {
nsec = -nsec;
do_div(nsec, 1000000);
return -nsec;
}
do_div(nsec, 1000000);
return nsec;
}
static unsigned long nsec_low(unsigned long long nsec)
{
if ((long long)nsec < 0)
nsec = -nsec;
return do_div(nsec, 1000000);
}
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
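/*
 * Worked example: for nsec == 1234567890, nsec_high() returns 1234 and
 * nsec_low() returns 567890, so a "%Ld.%06ld" format paired with
 * SPLIT_NS() prints "1234.567890": milliseconds with six fractional
 * digits.
 */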
#define SCHED_FEAT(name, enabled) \
#name ,
static const char * const sched_feat_names[] = {
#include "features.h"
};
#undef SCHED_FEAT
static int sched_feat_show(struct seq_file *m, void *v)
{
int i;
for (i = 0; i < __SCHED_FEAT_NR; i++) {
if (!(sysctl_sched_features & (1UL << i)))
seq_puts(m, "NO_");
seq_printf(m, "%s ", sched_feat_names[i]);
}
seq_puts(m, "\n");
return 0;
}
#ifdef CONFIG_JUMP_LABEL
#define jump_label_key__true STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE
#define SCHED_FEAT(name, enabled) \
jump_label_key__##enabled ,
struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};
#undef SCHED_FEAT
static void sched_feat_disable(int i)
{
static_key_disable_cpuslocked(&sched_feat_keys[i]);
}
static void sched_feat_enable(int i)
{
static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */
static int sched_feat_set(char *cmp)
{
int i;
int neg = 0;
if (strncmp(cmp, "NO_", 3) == 0) {
neg = 1;
cmp += 3;
}
i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
if (i < 0)
return i;
if (neg) {
sysctl_sched_features &= ~(1UL << i);
sched_feat_disable(i);
} else {
sysctl_sched_features |= (1UL << i);
sched_feat_enable(i);
}
return 0;
}
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
char *cmp;
int ret;
struct inode *inode;
if (cnt > 63)
cnt = 63;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
cmp = strstrip(buf);
/* Ensure the static_key remains in a consistent state */
inode = file_inode(filp);
cpus_read_lock();
inode_lock(inode);
ret = sched_feat_set(cmp);
inode_unlock(inode);
cpus_read_unlock();
if (ret < 0)
return ret;
*ppos += cnt;
return cnt;
}
static int sched_feat_open(struct inode *inode, struct file *filp)
{
return single_open(filp, sched_feat_show, NULL);
}
static const struct file_operations sched_feat_fops = {
.open = sched_feat_open,
.write = sched_feat_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#ifdef CONFIG_SMP
static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[16];
unsigned int scaling;
if (cnt > 15)
cnt = 15;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = '\0';
if (kstrtouint(buf, 10, &scaling))
return -EINVAL;
if (scaling >= SCHED_TUNABLESCALING_END)
return -EINVAL;
sysctl_sched_tunable_scaling = scaling;
if (sched_update_scaling())
return -EINVAL;
*ppos += cnt;
return cnt;
}
static int sched_scaling_show(struct seq_file *m, void *v)
{
seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
return 0;
}
static int sched_scaling_open(struct inode *inode, struct file *filp)
{
return single_open(filp, sched_scaling_show, NULL);
}
static const struct file_operations sched_scaling_fops = {
.open = sched_scaling_open,
.write = sched_scaling_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif /* CONFIG_SMP */
#ifdef CONFIG_PREEMPT_DYNAMIC
static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[16];
int mode;
if (cnt > 15)
cnt = 15;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
mode = sched_dynamic_mode(strstrip(buf));
if (mode < 0)
return mode;
sched_dynamic_update(mode);
*ppos += cnt;
return cnt;
}
static int sched_dynamic_show(struct seq_file *m, void *v)
{
static const char * preempt_modes[] = {
"none", "voluntary", "full"
};
int i;
for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
if (preempt_dynamic_mode == i)
seq_puts(m, "(");
seq_puts(m, preempt_modes[i]);
if (preempt_dynamic_mode == i)
seq_puts(m, ")");
seq_puts(m, " ");
}
seq_puts(m, "\n");
return 0;
}
static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
return single_open(filp, sched_dynamic_show, NULL);
}
static const struct file_operations sched_dynamic_fops = {
.open = sched_dynamic_open,
.write = sched_dynamic_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif /* CONFIG_PREEMPT_DYNAMIC */
__read_mostly bool sched_debug_verbose;
#ifdef CONFIG_SMP
static struct dentry *sd_dentry;
static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
ssize_t result;
bool orig;
cpus_read_lock();
mutex_lock(&sched_domains_mutex);
orig = sched_debug_verbose;
result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);
if (sched_debug_verbose && !orig)
update_sched_domain_debugfs();
else if (!sched_debug_verbose && orig) {
debugfs_remove(sd_dentry);
sd_dentry = NULL;
}
mutex_unlock(&sched_domains_mutex);
cpus_read_unlock();
return result;
}
#else
#define sched_verbose_write debugfs_write_file_bool
#endif
static const struct file_operations sched_verbose_fops = {
.read = debugfs_read_file_bool,
.write = sched_verbose_write,
.open = simple_open,
.llseek = default_llseek,
};
static const struct seq_operations sched_debug_sops;
static int sched_debug_open(struct inode *inode, struct file *filp)
{
return seq_open(filp, &sched_debug_sops);
}
static const struct file_operations sched_debug_fops = {
.open = sched_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static struct dentry *debugfs_sched;
static __init int sched_init_debug(void)
{
struct dentry __maybe_unused *numa;
debugfs_sched = debugfs_create_dir("sched", NULL);
debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
#ifdef CONFIG_PREEMPT_DYNAMIC
debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif
debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
#ifdef CONFIG_SMP
debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
mutex_lock(&sched_domains_mutex);
update_sched_domain_debugfs();
mutex_unlock(&sched_domains_mutex);
#endif
#ifdef CONFIG_NUMA_BALANCING
numa = debugfs_create_dir("numa_balancing", debugfs_sched);
debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
return 0;
}
late_initcall(sched_init_debug);
#ifdef CONFIG_SMP
static cpumask_var_t sd_sysctl_cpus;
static int sd_flags_show(struct seq_file *m, void *v)
{
unsigned long flags = *(unsigned int *)m->private;
int idx;
for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
seq_puts(m, sd_flag_debug[idx].name);
seq_puts(m, " ");
}
seq_puts(m, "\n");
return 0;
}
static int sd_flags_open(struct inode *inode, struct file *file)
{
return single_open(file, sd_flags_show, inode->i_private);
}
static const struct file_operations sd_flags_fops = {
.open = sd_flags_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member) \
debugfs_create_##type(#member, mode, parent, &sd->member)
SDM(ulong, 0644, min_interval);
SDM(ulong, 0644, max_interval);
SDM(u64, 0644, max_newidle_lb_cost);
SDM(u32, 0644, busy_factor);
SDM(u32, 0644, imbalance_pct);
SDM(u32, 0644, cache_nice_tries);
SDM(str, 0444, name);
#undef SDM
debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
}
void update_sched_domain_debugfs(void)
{
int cpu, i;
/*
* This can unfortunately be invoked before sched_init_debug() creates
* the debug directory. Don't touch sd_sysctl_cpus until then.
*/
if (!debugfs_sched)
return;
if (!sched_debug_verbose)
return;
if (!cpumask_available(sd_sysctl_cpus)) {
if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
return;
cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
}
if (!sd_dentry) {
sd_dentry = debugfs_create_dir("domains", debugfs_sched);
/* rebuild sd_sysctl_cpus if empty since it gets cleared below */
if (cpumask_empty(sd_sysctl_cpus))
cpumask_copy(sd_sysctl_cpus, cpu_online_mask);
}
for_each_cpu(cpu, sd_sysctl_cpus) {
struct sched_domain *sd;
struct dentry *d_cpu;
char buf[32];
snprintf(buf, sizeof(buf), "cpu%d", cpu);
debugfs_lookup_and_remove(buf, sd_dentry);
d_cpu = debugfs_create_dir(buf, sd_dentry);
i = 0;
for_each_domain(cpu, sd) {
struct dentry *d_sd;
snprintf(buf, sizeof(buf), "domain%d", i);
d_sd = debugfs_create_dir(buf, d_cpu);
register_sd(sd, d_sd);
i++;
}
__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
}
}
void dirty_sched_domain_sysctl(int cpu)
{
if (cpumask_available(sd_sysctl_cpus))
__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
struct sched_entity *se = tg->se[cpu];
#define P(F) SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld\n", \
#F, (long long)schedstat_val(stats->F))
#define PN(F) SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld.%06ld\n", \
#F, SPLIT_NS((long long)schedstat_val(stats->F)))
if (!se)
return;
PN(se->exec_start);
PN(se->vruntime);
PN(se->sum_exec_runtime);
if (schedstat_enabled()) {
struct sched_statistics *stats;
stats = __schedstats_from_se(se);
PN_SCHEDSTAT(wait_start);
PN_SCHEDSTAT(sleep_start);
PN_SCHEDSTAT(block_start);
PN_SCHEDSTAT(sleep_max);
PN_SCHEDSTAT(block_max);
PN_SCHEDSTAT(exec_max);
PN_SCHEDSTAT(slice_max);
PN_SCHEDSTAT(wait_max);
PN_SCHEDSTAT(wait_sum);
P_SCHEDSTAT(wait_count);
}
P(se->load.weight);
#ifdef CONFIG_SMP
P(se->avg.load_avg);
P(se->avg.util_avg);
P(se->avg.runnable_avg);
#endif
#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif
#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];
static void task_group_path(struct task_group *tg, char *path, int plen)
{
if (autogroup_path(tg, path, plen))
return;
cgroup_path(tg->css.cgroup, path, plen);
}
/*
* Only one SEQ_printf_task_group_path() caller at a time can use the
* full-length group_path[] for the cgroup path. Other simultaneous callers
* have to use a shorter stack buffer. A "..." suffix is appended at the end
* of the stack buffer so that, if the output fills the buffer, it shows up
* to indicate possible path-name truncation.
*/
#define SEQ_printf_task_group_path(m, tg, fmt...) \
{ \
if (spin_trylock(&sched_debug_lock)) { \
task_group_path(tg, group_path, sizeof(group_path)); \
SEQ_printf(m, fmt, group_path); \
spin_unlock(&sched_debug_lock); \
} else { \
char buf[128]; \
char *bufend = buf + sizeof(buf) - 3; \
task_group_path(tg, buf, bufend - buf); \
strcpy(bufend - 1, "..."); \
SEQ_printf(m, fmt, buf); \
} \
}
#endif
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
if (task_current(rq, p))
SEQ_printf(m, ">R");
else
SEQ_printf(m, " %c", task_state_to_char(p));
SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
p->comm, task_pid_nr(p),
SPLIT_NS(p->se.vruntime),
entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
SPLIT_NS(p->se.deadline),
SPLIT_NS(p->se.slice),
SPLIT_NS(p->se.sum_exec_runtime),
(long long)(p->nvcsw + p->nivcsw),
p->prio);
SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
SPLIT_NS(p->se.sum_exec_runtime),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
#ifdef CONFIG_NUMA_BALANCING
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif
SEQ_printf(m, "\n");
}
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
struct task_struct *g, *p;
SEQ_printf(m, "\n");
SEQ_printf(m, "runnable tasks:\n");
SEQ_printf(m, " S task PID tree-key switches prio"
" wait-time sum-exec sum-sleep\n");
SEQ_printf(m, "-------------------------------------------------------"
"------------------------------------------------------\n");
rcu_read_lock();
for_each_process_thread(g, p) {
if (task_cpu(p) != rq_cpu)
continue;
print_task(m, rq, p);
}
rcu_read_unlock();
}
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, spread;
struct sched_entity *last, *first;
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
#ifdef CONFIG_FAIR_GROUP_SCHED
SEQ_printf(m, "\n");
SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
SEQ_printf(m, "\n");
SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
SPLIT_NS(cfs_rq->exec_clock));
raw_spin_rq_lock_irqsave(rq, flags);
first = __pick_first_entity(cfs_rq);
if (first)
left_vruntime = first->vruntime;
last = __pick_last_entity(cfs_rq);
if (last)
right_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
raw_spin_rq_unlock_irqrestore(rq, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_vruntime",
SPLIT_NS(left_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
SPLIT_NS(min_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "avg_vruntime",
SPLIT_NS(avg_vruntime(cfs_rq)));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "right_vruntime",
SPLIT_NS(right_vruntime));
spread = right_vruntime - left_vruntime;
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
cfs_rq->nr_spread_over);
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
cfs_rq->idle_nr_running);
SEQ_printf(m, " .%-30s: %d\n", "idle_h_nr_running",
cfs_rq->idle_h_nr_running);
SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
cfs_rq->avg.load_avg);
SEQ_printf(m, " .%-30s: %lu\n", "runnable_avg",
cfs_rq->avg.runnable_avg);
SEQ_printf(m, " .%-30s: %lu\n", "util_avg",
cfs_rq->avg.util_avg);
SEQ_printf(m, " .%-30s: %u\n", "util_est_enqueued",
cfs_rq->avg.util_est.enqueued);
SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg",
cfs_rq->removed.load_avg);
SEQ_printf(m, " .%-30s: %ld\n", "removed.util_avg",
cfs_rq->removed.util_avg);
SEQ_printf(m, " .%-30s: %ld\n", "removed.runnable_avg",
cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
SEQ_printf(m, " .%-30s: %lu\n", "tg_load_avg_contrib",
cfs_rq->tg_load_avg_contrib);
SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
SEQ_printf(m, " .%-30s: %d\n", "throttled",
cfs_rq->throttled);
SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
cfs_rq->throttle_count);
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
SEQ_printf(m, "\n");
SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
SEQ_printf(m, "\n");
SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif
#define P(x) \
SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
PU(rt_nr_running);
#ifdef CONFIG_SMP
PU(rt_nr_migratory);
#endif
P(rt_throttled);
PN(rt_time);
PN(rt_runtime);
#undef PN
#undef PU
#undef P
}
void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
struct dl_bw *dl_bw;
SEQ_printf(m, "\n");
SEQ_printf(m, "dl_rq[%d]:\n", cpu);
#define PU(x) \
SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
PU(dl_nr_running);
#ifdef CONFIG_SMP
PU(dl_nr_migratory);
dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
dl_bw = &dl_rq->dl_bw;
#endif
SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
#undef PU
}
static void print_cpu(struct seq_file *m, int cpu)
{
struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_X86
{
unsigned int freq = cpu_khz ? : 1;
SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
cpu, freq / 1000, (freq % 1000));
}
#else
SEQ_printf(m, "cpu#%d\n", cpu);
#endif
#define P(x) \
do { \
if (sizeof(rq->x) == 4) \
SEQ_printf(m, " .%-30s: %d\n", #x, (int)(rq->x)); \
else \
SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)
#define PN(x) \
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
P(nr_running);
P(nr_switches);
P(nr_uninterruptible);
PN(next_balance);
SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
PN(clock);
PN(clock_task);
#undef P
#undef PN
#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
P64(avg_idle);
P64(max_idle_balance_cost);
#undef P64
#endif
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n));
if (schedstat_enabled()) {
P(yld_count);
P(sched_count);
P(sched_goidle);
P(ttwu_count);
P(ttwu_local);
}
#undef P
print_cfs_stats(m, cpu);
print_rt_stats(m, cpu);
print_dl_stats(m, cpu);
print_rq(m, rq, cpu);
SEQ_printf(m, "\n");
}
static const char *sched_tunable_scaling_names[] = {
"none",
"logarithmic",
"linear"
};
static void sched_debug_header(struct seq_file *m)
{
u64 ktime, sched_clk, cpu_clk;
unsigned long flags;
local_irq_save(flags);
ktime = ktime_to_ns(ktime_get());
sched_clk = sched_clock();
cpu_clk = local_clock();
local_irq_restore(flags);
SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
#define P(x) \
SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
PN(ktime);
PN(sched_clk);
PN(cpu_clk);
P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
P(sched_clock_stable());
#endif
#undef PN
#undef P
SEQ_printf(m, "\n");
SEQ_printf(m, "sysctl_sched\n");
#define P(x) \
SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
PN(sysctl_sched_base_slice);
P(sysctl_sched_child_runs_first);
P(sysctl_sched_features);
#undef PN
#undef P
SEQ_printf(m, " .%-40s: %d (%s)\n",
"sysctl_sched_tunable_scaling",
sysctl_sched_tunable_scaling,
sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
SEQ_printf(m, "\n");
}
static int sched_debug_show(struct seq_file *m, void *v)
{
int cpu = (unsigned long)(v - 2);
if (cpu != -1)
print_cpu(m, cpu);
else
sched_debug_header(m);
return 0;
}
void sysrq_sched_debug_show(void)
{
int cpu;
sched_debug_header(NULL);
for_each_online_cpu(cpu) {
/*
* Need to reset softlockup watchdogs on all CPUs, because
* another CPU might be blocked waiting for us to process
* an IPI or stop_machine.
*/
touch_nmi_watchdog();
touch_all_softlockup_watchdogs();
print_cpu(NULL, cpu);
}
}
/*
* This iterator needs some explanation.
* It returns 1 for the header position.
* This means 2 is CPU 0.
* In a hotplugged system some CPUs, including CPU 0, may be missing so we have
* to use cpumask_* to iterate over the CPUs.
*/
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
unsigned long n = *offset;
if (n == 0)
return (void *) 1;
n--;
if (n > 0)
n = cpumask_next(n - 1, cpu_online_mask);
else
n = cpumask_first(cpu_online_mask);
*offset = n + 1;
if (n < nr_cpu_ids)
return (void *)(unsigned long)(n + 2);
return NULL;
}
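/*
 * Worked example for the iterator above (hypothetical online mask {0,2}):
 * offset 0 maps to the header (returned as 1, shown as cpu == -1 by
 * sched_debug_show()), offset 1 maps to CPU 0 (returned as 2), the next
 * position maps to CPU 2 (returned as 4), and the one after that ends
 * the sequence with NULL.
 */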
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
(*offset)++;
return sched_debug_start(file, offset);
}
static void sched_debug_stop(struct seq_file *file, void *data)
{
}
static const struct seq_operations sched_debug_sops = {
.start = sched_debug_start,
.next = sched_debug_next,
.stop = sched_debug_stop,
.show = sched_debug_show,
};
#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define P(F) __PS(#F, p->F)
#define PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define PN(F) __PSN(#F, p->F)
#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
SEQ_printf(m, "numa_faults node=%d ", node);
SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
if (p->mm)
P(mm->numa_scan_seq);
P(numa_pages_migrated);
P(numa_preferred_nid);
P(total_numa_faults);
SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
task_node(p), task_numa_group_id(p));
show_numa_stats(p, m);
#endif
}
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
struct seq_file *m)
{
unsigned long nr_switches;
SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
get_nr_threads(p));
SEQ_printf(m,
"---------------------------------------------------------"
"----------\n");
#define P_SCHEDSTAT(F) __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))
PN(se.exec_start);
PN(se.vruntime);
PN(se.sum_exec_runtime);
nr_switches = p->nvcsw + p->nivcsw;
P(se.nr_migrations);
if (schedstat_enabled()) {
u64 avg_atom, avg_per_cpu;
PN_SCHEDSTAT(sum_sleep_runtime);
PN_SCHEDSTAT(sum_block_runtime);
PN_SCHEDSTAT(wait_start);
PN_SCHEDSTAT(sleep_start);
PN_SCHEDSTAT(block_start);
PN_SCHEDSTAT(sleep_max);
PN_SCHEDSTAT(block_max);
PN_SCHEDSTAT(exec_max);
PN_SCHEDSTAT(slice_max);
PN_SCHEDSTAT(wait_max);
PN_SCHEDSTAT(wait_sum);
P_SCHEDSTAT(wait_count);
PN_SCHEDSTAT(iowait_sum);
P_SCHEDSTAT(iowait_count);
P_SCHEDSTAT(nr_migrations_cold);
P_SCHEDSTAT(nr_failed_migrations_affine);
P_SCHEDSTAT(nr_failed_migrations_running);
P_SCHEDSTAT(nr_failed_migrations_hot);
P_SCHEDSTAT(nr_forced_migrations);
P_SCHEDSTAT(nr_wakeups);
P_SCHEDSTAT(nr_wakeups_sync);
P_SCHEDSTAT(nr_wakeups_migrate);
P_SCHEDSTAT(nr_wakeups_local);
P_SCHEDSTAT(nr_wakeups_remote);
P_SCHEDSTAT(nr_wakeups_affine);
P_SCHEDSTAT(nr_wakeups_affine_attempts);
P_SCHEDSTAT(nr_wakeups_passive);
P_SCHEDSTAT(nr_wakeups_idle);
avg_atom = p->se.sum_exec_runtime;
if (nr_switches)
avg_atom = div64_ul(avg_atom, nr_switches);
else
avg_atom = -1LL;
avg_per_cpu = p->se.sum_exec_runtime;
if (p->se.nr_migrations) {
avg_per_cpu = div64_u64(avg_per_cpu,
p->se.nr_migrations);
} else {
avg_per_cpu = -1LL;
}
__PN(avg_atom);
__PN(avg_per_cpu);
#ifdef CONFIG_SCHED_CORE
PN_SCHEDSTAT(core_forceidle_sum);
#endif
}
__P(nr_switches);
__PS("nr_voluntary_switches", p->nvcsw);
__PS("nr_involuntary_switches", p->nivcsw);
P(se.load.weight);
#ifdef CONFIG_SMP
P(se.avg.load_sum);
P(se.avg.runnable_sum);
P(se.avg.util_sum);
P(se.avg.load_avg);
P(se.avg.runnable_avg);
P(se.avg.util_avg);
P(se.avg.last_update_time);
P(se.avg.util_est.ewma);
PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
P(policy);
P(prio);
if (task_has_dl_policy(p)) {
P(dl.runtime);
P(dl.deadline);
}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT
{
unsigned int this_cpu = raw_smp_processor_id();
u64 t0, t1;
t0 = cpu_clock(this_cpu);
t1 = cpu_clock(this_cpu);
__PS("clock-delta", t1-t0);
}
sched_show_numa(p, m);
}
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
memset(&p->stats, 0, sizeof(p->stats));
#endif
}
void resched_latency_warn(int cpu, u64 latency)
{
static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);
WARN(__ratelimit(&latency_check_ratelimit),
"sched: CPU %d need_resched set for > %llu ns (%d ticks) "
"without schedule\n",
cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}
| linux-master | kernel/sched/debug.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic wait-for-completion handler;
*
 * It differs from semaphores in that the default state is the opposite:
 * wait_for_completion() blocks by default, whereas a semaphore's default
 * is not to block. The interface also makes it easy to 'complete' multiple
 * waiting threads, something which isn't entirely natural for semaphores.
 *
 * But more importantly, the primitive documents the usage. Semaphores would
 * typically be used for exclusion, which gives rise to priority inversion.
 * Waiting for completion is typically a synchronization point, but not an
 * exclusion point.
*/
static void complete_with_flags(struct completion *x, int wake_flags)
{
unsigned long flags;
raw_spin_lock_irqsave(&x->wait.lock, flags);
if (x->done != UINT_MAX)
x->done++;
swake_up_locked(&x->wait, wake_flags);
raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
void complete_on_current_cpu(struct completion *x)
{
return complete_with_flags(x, WF_CURRENT_CPU);
}
/**
* complete: - signals a single thread waiting on this completion
* @x: holds the state of this particular completion
*
* This will wake up a single thread waiting on this completion. Threads will be
* awakened in the same order in which they were queued.
*
* See also complete_all(), wait_for_completion() and related routines.
*
* If this function wakes up a task, it executes a full memory barrier before
* accessing the task state.
*/
void complete(struct completion *x)
{
complete_with_flags(x, 0);
}
EXPORT_SYMBOL(complete);
/**
* complete_all: - signals all threads waiting on this completion
* @x: holds the state of this particular completion
*
* This will wake up all threads waiting on this particular completion event.
*
* If this function wakes up a task, it executes a full memory barrier before
* accessing the task state.
*
* Since complete_all() sets the completion of @x permanently to done
* to allow multiple waiters to finish, a call to reinit_completion()
* must be used on @x if @x is to be used again. The code must make
* sure that all waiters have woken and finished before reinitializing
 * @x. Also note that the function completion_done() cannot be used
* to know if there are still waiters after complete_all() has been called.
*/
void complete_all(struct completion *x)
{
unsigned long flags;
lockdep_assert_RT_in_threaded_ctx();
raw_spin_lock_irqsave(&x->wait.lock, flags);
x->done = UINT_MAX;
swake_up_all_locked(&x->wait);
raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
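/*
 * Illustrative sketch (hypothetical, not part of this file) of the
 * reinit_completion() rule documented above: after complete_all(), the
 * completion stays done until it is explicitly re-armed.
 */
#ifdef COMPLETION_COMPLETE_ALL_SKETCH
static DECLARE_COMPLETION(batch_done);

static void finish_batch(void)
{
	complete_all(&batch_done);	/* wake every waiter currently queued */
}

static void rearm_batch(void)
{
	/*
	 * The caller must guarantee that all waiters have woken and
	 * finished before this point; only then is re-arming safe:
	 */
	reinit_completion(&batch_done);
}
#endif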
static inline long __sched
do_wait_for_common(struct completion *x,
long (*action)(long), long timeout, int state)
{
if (!x->done) {
DECLARE_SWAITQUEUE(wait);
do {
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
}
__prepare_to_swait(&x->wait, &wait);
__set_current_state(state);
raw_spin_unlock_irq(&x->wait.lock);
timeout = action(timeout);
raw_spin_lock_irq(&x->wait.lock);
} while (!x->done && timeout);
__finish_swait(&x->wait, &wait);
if (!x->done)
return timeout;
}
if (x->done != UINT_MAX)
x->done--;
return timeout ?: 1;
}
static inline long __sched
__wait_for_common(struct completion *x,
long (*action)(long), long timeout, int state)
{
might_sleep();
complete_acquire(x);
raw_spin_lock_irq(&x->wait.lock);
timeout = do_wait_for_common(x, action, timeout, state);
raw_spin_unlock_irq(&x->wait.lock);
complete_release(x);
return timeout;
}
static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
return __wait_for_common(x, schedule_timeout, timeout, state);
}
static long __sched
wait_for_common_io(struct completion *x, long timeout, int state)
{
return __wait_for_common(x, io_schedule_timeout, timeout, state);
}
/**
* wait_for_completion: - waits for completion of a task
* @x: holds the state of this particular completion
*
* This waits to be signaled for completion of a specific task. It is NOT
* interruptible and there is no timeout.
*
* See also similar routines (i.e. wait_for_completion_timeout()) with timeout
* and interrupt capability. Also see complete().
*/
void __sched wait_for_completion(struct completion *x)
{
wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);
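/*
 * Minimal usage sketch (hypothetical names, not part of this file):
 * one thread blocks on the completion while another signals it, in the
 * style the header comment of this file describes.
 */
#ifdef COMPLETION_USAGE_SKETCH
static DECLARE_COMPLETION(setup_done);

static int waiter_thread(void *unused)
{
	/* Sleeps uninterruptibly until complete() is called. */
	wait_for_completion(&setup_done);
	return 0;
}

static void signal_setup_done(void)
{
	/* Wakes exactly one waiter; waiters are woken in FIFO order. */
	complete(&setup_done);
}
#endif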
/**
* wait_for_completion_timeout: - waits for completion of a task (w/timeout)
* @x: holds the state of this particular completion
* @timeout: timeout value in jiffies
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible.
*
* Return: 0 if timed out, and positive (at least 1, or number of jiffies left
* till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
/**
* wait_for_completion_io: - waits for completion of a task
* @x: holds the state of this particular completion
*
* This waits to be signaled for completion of a specific task. It is NOT
* interruptible and there is no timeout. The caller is accounted as waiting
* for IO (which traditionally means blkio only).
*/
void __sched wait_for_completion_io(struct completion *x)
{
wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io);
/**
* wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
* @x: holds the state of this particular completion
* @timeout: timeout value in jiffies
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible. The caller is accounted as waiting for IO (which traditionally
* means blkio only).
*
* Return: 0 if timed out, and positive (at least 1, or number of jiffies left
* till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
{
return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io_timeout);
/**
* wait_for_completion_interruptible: - waits for completion of a task (w/intr)
* @x: holds the state of this particular completion
*
* This waits for completion of a specific task to be signaled. It is
* interruptible.
*
* Return: -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
if (t == -ERESTARTSYS)
return t;
return 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);
/**
* wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
* @x: holds the state of this particular completion
* @timeout: timeout value in jiffies
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. It is interruptible. The timeout is in jiffies.
*
* Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
* or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
unsigned long timeout)
{
return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
/**
* wait_for_completion_killable: - waits for completion of a task (killable)
* @x: holds the state of this particular completion
*
* This waits to be signaled for completion of a specific task. It can be
* interrupted by a kill signal.
*
* Return: -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_killable(struct completion *x)
{
long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
if (t == -ERESTARTSYS)
return t;
return 0;
}
EXPORT_SYMBOL(wait_for_completion_killable);
int __sched wait_for_completion_state(struct completion *x, unsigned int state)
{
long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, state);
if (t == -ERESTARTSYS)
return t;
return 0;
}
EXPORT_SYMBOL(wait_for_completion_state);
/**
* wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
* @x: holds the state of this particular completion
* @timeout: timeout value in jiffies
*
* This waits for either a completion of a specific task to be
* signaled or for a specified timeout to expire. It can be
* interrupted by a kill signal. The timeout is in jiffies.
*
* Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
* or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_killable_timeout(struct completion *x,
unsigned long timeout)
{
return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);
/**
* try_wait_for_completion - try to decrement a completion without blocking
* @x: completion structure
*
* Return: 0 if a decrement cannot be done without blocking
* 1 if a decrement succeeded.
*
* If a completion is being used as a counting completion,
* attempt to decrement the counter without blocking. This
* enables us to avoid waiting if the resource the completion
* is protecting is not available.
*/
bool try_wait_for_completion(struct completion *x)
{
unsigned long flags;
bool ret = true;
/*
* Since x->done will need to be locked only
* in the non-blocking case, we check x->done
* first without taking the lock so we can
* return early in the blocking case.
*/
if (!READ_ONCE(x->done))
return false;
raw_spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = false;
else if (x->done != UINT_MAX)
x->done--;
raw_spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
/**
* completion_done - Test to see if a completion has any waiters
* @x: completion structure
*
* Return: 0 if there are waiters (wait_for_completion() in progress)
* 1 if there are no waiters.
*
 * Note, this will always return true if complete_all() was called on @x.
*/
bool completion_done(struct completion *x)
{
unsigned long flags;
if (!READ_ONCE(x->done))
return false;
/*
* If ->done, we need to wait for complete() to release ->wait.lock
* otherwise we can end up freeing the completion before complete()
* is done referencing it.
*/
raw_spin_lock_irqsave(&x->wait.lock, flags);
raw_spin_unlock_irqrestore(&x->wait.lock, flags);
return true;
}
EXPORT_SYMBOL(completion_done);
| linux-master | kernel/sched/completion.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* These are the scheduling policy related scheduler files, built
* in a single compilation unit for build efficiency reasons.
*
* ( Incidentally, the size of the compilation unit is roughly
* comparable to core.c and fair.c, the other two big
* compilation units. This helps balance build time, while
* coalescing source files to amortize header inclusion
* cost. )
*
* core.c and fair.c are built separately.
*/
/* Headers: */
#include <linux/sched/clock.h>
#include <linux/sched/cputime.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/posix-timers.h>
#include <linux/sched/rt.h>
#include <linux/cpuidle.h>
#include <linux/jiffies.h>
#include <linux/livepatch.h>
#include <linux/psi.h>
#include <linux/seqlock_api.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/tsacct_kern.h>
#include <linux/vtime.h>
#include <uapi/linux/sched/types.h>
#include "sched.h"
#include "smp.h"
#include "autogroup.h"
#include "stats.h"
#include "pelt.h"
/* Source code modules: */
#include "idle.c"
#include "rt.c"
#ifdef CONFIG_SMP
# include "cpudeadline.c"
# include "pelt.c"
#endif
#include "cputime.c"
#include "deadline.c"
| linux-master | kernel/sched/build_policy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Scheduler code and data structures related to cpufreq.
*
* Copyright (C) 2016, Intel Corporation
* Author: Rafael J. Wysocki <[email protected]>
*/
DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
/**
* cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer.
* @cpu: The CPU to set the pointer for.
* @data: New pointer value.
* @func: Callback function to set for the CPU.
*
* Set and publish the update_util_data pointer for the given CPU.
*
* The update_util_data pointer of @cpu is set to @data and the callback
* function pointer in the target struct update_util_data is set to @func.
* That function will be called by cpufreq_update_util() from RCU-sched
* read-side critical sections, so it must not sleep. @data will always be
* passed to it as the first argument which allows the function to get to the
* target update_util_data structure and its container.
*
* The update_util_data pointer of @cpu must be NULL when this function is
* called or it will WARN() and return with no effect.
*/
void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
void (*func)(struct update_util_data *data, u64 time,
unsigned int flags))
{
if (WARN_ON(!data || !func))
return;
if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
return;
data->func = func;
rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
}
EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook);
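/*
 * Registration sketch (hypothetical governor, not part of this file):
 * embed a struct update_util_data in per-CPU governor state and recover
 * the container from the callback, as the kernel-doc above describes.
 */
#ifdef CPUFREQ_HOOK_SKETCH
struct sketch_gov_cpu {
	struct update_util_data update_util;
	unsigned int cpu;
};

static DEFINE_PER_CPU(struct sketch_gov_cpu, sketch_gov_cpu_data);

static void sketch_gov_update(struct update_util_data *data, u64 time,
			      unsigned int flags)
{
	struct sketch_gov_cpu *sg = container_of(data, struct sketch_gov_cpu,
						 update_util);
	/* Runs in an RCU-sched read-side critical section: must not sleep. */
}

static void sketch_gov_start(unsigned int cpu)
{
	struct sketch_gov_cpu *sg = &per_cpu(sketch_gov_cpu_data, cpu);

	sg->cpu = cpu;
	cpufreq_add_update_util_hook(cpu, &sg->update_util, sketch_gov_update);
}
#endif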
/**
* cpufreq_remove_update_util_hook - Clear the CPU's update_util_data pointer.
* @cpu: The CPU to clear the pointer for.
*
* Clear the update_util_data pointer for the given CPU.
*
* Callers must use RCU callbacks to free any memory that might be
* accessed via the old update_util_data pointer or invoke synchronize_rcu()
* right after this function to avoid use-after-free.
*/
void cpufreq_remove_update_util_hook(int cpu)
{
rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
/**
* cpufreq_this_cpu_can_update - Check if cpufreq policy can be updated.
* @policy: cpufreq policy to check.
*
* Return 'true' if:
* - the local and remote CPUs share @policy,
* - dvfs_possible_from_any_cpu is set in @policy and the local CPU is not going
* offline (in which case it is not expected to run cpufreq updates any more).
*/
bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
{
return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||
(policy->dvfs_possible_from_any_cpu &&
rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)));
}
| linux-master | kernel/sched/cpufreq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Per Entity Load Tracking
*
* Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <[email protected]>
*
* Interactivity improvements by Mike Galbraith
* (C) 2007 Mike Galbraith <[email protected]>
*
* Various enhancements by Dmitry Adamushko.
* (C) 2007 Dmitry Adamushko <[email protected]>
*
* Group scheduling enhancements by Srivatsa Vaddagiri
* Copyright IBM Corporation, 2007
* Author: Srivatsa Vaddagiri <[email protected]>
*
* Scaled math optimizations by Thomas Gleixner
* Copyright (C) 2007, Thomas Gleixner <[email protected]>
*
* Adaptive scheduling granularity, math enhancements by Peter Zijlstra
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* Move PELT related code from fair.c into this pelt.c file
* Author: Vincent Guittot <[email protected]>
*/
/*
* Approximate:
* val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
*/
static u64 decay_load(u64 val, u64 n)
{
unsigned int local_n;
if (unlikely(n > LOAD_AVG_PERIOD * 63))
return 0;
/* after bounds checking we can collapse to 32-bit */
local_n = n;
/*
* As y^PERIOD = 1/2, we can combine
* y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
* With a look-up table which covers y^n (n<PERIOD)
*
* To achieve constant time decay_load.
*/
if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
val >>= local_n / LOAD_AVG_PERIOD;
local_n %= LOAD_AVG_PERIOD;
}
val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
return val;
}
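/*
 * Worked example (hypothetical self-check, not part of this file): one
 * full LOAD_AVG_PERIOD of decay halves the value, and anything older
 * than 63 full periods is treated as fully decayed.
 */
#ifdef PELT_DECAY_SKETCH
static void pelt_decay_example(void)
{
	/* 1024 * y^32 ~= 512 (511 after fixed-point truncation). */
	u64 half = decay_load(1024, LOAD_AVG_PERIOD);

	/* n > LOAD_AVG_PERIOD * 63 short-circuits to zero. */
	WARN_ON(decay_load(1024, LOAD_AVG_PERIOD * 63 + 1) != 0);
	(void)half;
}
#endif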
static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
u32 c1, c2, c3 = d3; /* y^0 == 1 */
/*
* c1 = d1 y^p
*/
c1 = decay_load((u64)d1, periods);
/*
* p-1
* c2 = 1024 \Sum y^n
* n=1
*
* inf inf
* = 1024 ( \Sum y^n - \Sum y^n - y^0 )
* n=0 n=p
*/
c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
return c1 + c2 + c3;
}
/*
* Accumulate the three separate parts of the sum; d1 the remainder
* of the last (incomplete) period, d2 the span of full periods and d3
* the remainder of the (incomplete) current period.
*
* d1 d2 d3
* ^ ^ ^
* | | |
* |<->|<----------------->|<--->|
* ... |---x---|------| ... |------|-----x (now)
*
* p-1
* u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
* n=1
*
* = u y^p + (Step 1)
*
* p-1
* d1 y^p + 1024 \Sum y^n + d3 y^0 (Step 2)
* n=1
*/
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
unsigned long load, unsigned long runnable, int running)
{
u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
u64 periods;
delta += sa->period_contrib;
periods = delta / 1024; /* A period is 1024us (~1ms) */
/*
* Step 1: decay old *_sum if we crossed period boundaries.
*/
if (periods) {
sa->load_sum = decay_load(sa->load_sum, periods);
sa->runnable_sum =
decay_load(sa->runnable_sum, periods);
sa->util_sum = decay_load((u64)(sa->util_sum), periods);
/*
* Step 2
*/
delta %= 1024;
if (load) {
/*
* This relies on the:
*
* if (!load)
* runnable = running = 0;
*
* clause from ___update_load_sum(); this results in
* the below usage of @contrib to disappear entirely,
* so no point in calculating it.
*/
contrib = __accumulate_pelt_segments(periods,
1024 - sa->period_contrib, delta);
}
}
sa->period_contrib = delta;
if (load)
sa->load_sum += load * contrib;
if (runnable)
sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;
if (running)
sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;
return periods;
}
/*
* We can represent the historical contribution to runnable average as the
* coefficients of a geometric series. To do this we sub-divide our runnable
* history into segments of approximately 1ms (1024us); label the segment that
* occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
*
* [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
* p0 p1 p2
* (now) (~1ms ago) (~2ms ago)
*
* Let u_i denote the fraction of p_i that the entity was runnable.
*
 * We then designate the fractions u_i as our coefficients, yielding the
* following representation of historical load:
* u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
*
 * We choose y based on the width of a reasonable scheduling period, fixing:
* y^32 = 0.5
*
* This means that the contribution to load ~32ms ago (u_32) will be weighted
* approximately half as much as the contribution to load within the last ms
* (u_0).
*
* When a period "rolls over" and we have new u_0`, multiplying the previous
* sum again by y is sufficient to update:
* load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
* = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
*/
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
unsigned long load, unsigned long runnable, int running)
{
u64 delta;
delta = now - sa->last_update_time;
/*
* This should only happen when time goes backwards, which it
* unfortunately does during sched clock init when we swap over to TSC.
*/
if ((s64)delta < 0) {
sa->last_update_time = now;
return 0;
}
/*
* Use 1024ns as the unit of measurement since it's a reasonable
* approximation of 1us and fast to compute.
*/
delta >>= 10;
if (!delta)
return 0;
sa->last_update_time += delta << 10;
/*
* running is a subset of runnable (weight) so running can't be set if
* runnable is clear. But there are some corner cases where the current
 * se has already been dequeued but cfs_rq->curr still points to it.
* This means that weight will be 0 but not running for a sched_entity
* but also for a cfs_rq if the latter becomes idle. As an example,
* this happens during idle_balance() which calls
* update_blocked_averages().
*
* Also see the comment in accumulate_sum().
*/
if (!load)
runnable = running = 0;
/*
* Now we know we crossed measurement unit boundaries. The *_avg
* accrues by two steps:
*
* Step 1: accumulate *_sum since last_update_time. If we haven't
* crossed period boundaries, finish.
*/
if (!accumulate_sum(delta, sa, load, runnable, running))
return 0;
return 1;
}
/*
* When syncing *_avg with *_sum, we must take into account the current
* position in the PELT segment otherwise the remaining part of the segment
* will be considered as idle time whereas it's not yet elapsed and this will
 * generate unwanted oscillation in the range [1002..1024).
 *
 * The max value of *_sum varies with the position in the time segment and is
 * equal to:
*
* LOAD_AVG_MAX*y + sa->period_contrib
*
* which can be simplified into:
*
* LOAD_AVG_MAX - 1024 + sa->period_contrib
*
* because LOAD_AVG_MAX*y == LOAD_AVG_MAX-1024
*
* The same care must be taken when a sched entity is added, updated or
* removed from a cfs_rq and we need to update sched_avg. Scheduler entities
* and the cfs rq, to which they are attached, have the same position in the
* time segment because they use the same clock. This means that we can use
* the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
* if it's more convenient.
*/
static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load)
{
u32 divider = get_pelt_divider(sa);
/*
* Step 2: update *_avg.
*/
sa->load_avg = div_u64(load * sa->load_sum, divider);
sa->runnable_avg = div_u64(sa->runnable_sum, divider);
WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
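/*
 * Worked example for the divider above: halfway through a segment,
 * period_contrib == 512 and the divider is LOAD_AVG_MAX - 1024 + 512.
 * A fully saturated util_sum at that point equals the divider times
 * SCHED_CAPACITY_SCALE, so util_avg still comes out at ~1024 instead of
 * oscillating with the position inside the 1024us segment.
 */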
/*
* sched_entity:
*
* task:
* se_weight() = se->load.weight
* se_runnable() = !!on_rq
*
* group: [ see update_cfs_group() ]
* se_weight() = tg->weight * grq->load_avg / tg->load_avg
* se_runnable() = grq->h_nr_running
*
* runnable_sum = se_runnable() * runnable = grq->runnable_sum
* runnable_avg = runnable_sum
*
* load_sum := runnable
* load_avg = se_weight(se) * load_sum
*
 * cfs_rq:
*
* runnable_sum = \Sum se->avg.runnable_sum
* runnable_avg = \Sum se->avg.runnable_avg
*
* load_sum = \Sum se_weight(se) * se->avg.load_sum
* load_avg = \Sum se->avg.load_avg
*/
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
___update_load_avg(&se->avg, se_weight(se));
trace_pelt_se_tp(se);
return 1;
}
return 0;
}
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
cfs_rq->curr == se)) {
___update_load_avg(&se->avg, se_weight(se));
cfs_se_util_change(&se->avg);
trace_pelt_se_tp(se);
return 1;
}
return 0;
}
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
if (___update_load_sum(now, &cfs_rq->avg,
scale_load_down(cfs_rq->load.weight),
cfs_rq->h_nr_running,
cfs_rq->curr != NULL)) {
___update_load_avg(&cfs_rq->avg, 1);
trace_pelt_cfs_tp(cfs_rq);
return 1;
}
return 0;
}
/*
* rt_rq:
*
* util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
* util_sum = cpu_scale * load_sum
* runnable_sum = util_sum
*
* load_avg and runnable_avg are not supported and meaningless.
*
*/
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
if (___update_load_sum(now, &rq->avg_rt,
running,
running,
running)) {
___update_load_avg(&rq->avg_rt, 1);
trace_pelt_rt_tp(rq);
return 1;
}
return 0;
}
/*
* dl_rq:
*
* util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
* util_sum = cpu_scale * load_sum
* runnable_sum = util_sum
*
* load_avg and runnable_avg are not supported and meaningless.
*
*/
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
if (___update_load_sum(now, &rq->avg_dl,
running,
running,
running)) {
___update_load_avg(&rq->avg_dl, 1);
trace_pelt_dl_tp(rq);
return 1;
}
return 0;
}
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
/*
* thermal:
*
* load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked
*
* util_avg and runnable_load_avg are not supported and meaningless.
*
* Unlike rt/dl utilization tracking that track time spent by a cpu
* running a rt/dl task through util_avg, the average thermal pressure is
 * tracked through load_avg. This is because the thermal pressure signal is
 * a time-weighted "delta" capacity, unlike util_avg which is binary.
 * "delta capacity" = actual capacity -
 * capped capacity of a cpu due to a thermal event.
*/
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
if (___update_load_sum(now, &rq->avg_thermal,
capacity,
capacity,
capacity)) {
___update_load_avg(&rq->avg_thermal, 1);
trace_pelt_thermal_tp(rq);
return 1;
}
return 0;
}
#endif
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
* irq:
*
* util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
* util_sum = cpu_scale * load_sum
* runnable_sum = util_sum
*
* load_avg and runnable_avg are not supported and meaningless.
*
*/
int update_irq_load_avg(struct rq *rq, u64 running)
{
int ret = 0;
/*
* We can't use clock_pelt because irq time is not accounted in
* clock_task. Instead we directly scale the running time to
* reflect the real amount of computation
*/
running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
/*
 * We know the time that has been used by interrupts since the last update
 * but we don't know when. Let's be pessimistic and assume the interrupt
 * happened just before the update. This is not far from reality because
 * the interrupt will most probably wake up a task and trigger an update
 * of the rq clock, during which the metric is updated.
* We start to decay with normal context time and then we add the
* interrupt context time.
* We can safely remove running from rq->clock because
* rq->clock += delta with delta >= running
*/
ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
0,
0,
0);
ret += ___update_load_sum(rq->clock, &rq->avg_irq,
1,
1,
1);
if (ret) {
___update_load_avg(&rq->avg_irq, 1);
trace_pelt_irq_tp(rq);
}
return ret;
}
#endif
| linux-master | kernel/sched/pelt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* The implementation of the wait_bit*() and related waiting APIs:
*/
#define WAIT_TABLE_BITS 8
#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
const int shift = BITS_PER_LONG == 32 ? 5 : 6;
unsigned long val = (unsigned long)word << shift | bit;
return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
}
EXPORT_SYMBOL(bit_waitqueue);
int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
{
struct wait_bit_key *key = arg;
struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
if (wait_bit->key.flags != key->flags ||
wait_bit->key.bit_nr != key->bit_nr ||
test_bit(key->bit_nr, key->flags))
return 0;
return autoremove_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);
/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the @action callbacks passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return nonzero codes; a nonzero return halts
 * the wait and is propagated back to the caller.
*/
int __sched
__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
wait_bit_action_f *action, unsigned mode)
{
int ret = 0;
do {
prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
ret = (*action)(&wbq_entry->key, mode);
} while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
finish_wait(wq_head, &wbq_entry->wq_entry);
return ret;
}
EXPORT_SYMBOL(__wait_on_bit);
int __sched out_of_line_wait_on_bit(void *word, int bit,
wait_bit_action_f *action, unsigned mode)
{
struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
DEFINE_WAIT_BIT(wq_entry, word, bit);
return __wait_on_bit(wq_head, &wq_entry, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
int __sched out_of_line_wait_on_bit_timeout(
void *word, int bit, wait_bit_action_f *action,
unsigned mode, unsigned long timeout)
{
struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
DEFINE_WAIT_BIT(wq_entry, word, bit);
wq_entry.key.timeout = jiffies + timeout;
return __wait_on_bit(wq_head, &wq_entry, action, mode);
}
EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
int __sched
__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
wait_bit_action_f *action, unsigned mode)
{
int ret = 0;
for (;;) {
prepare_to_wait_exclusive(wq_head, &wbq_entry->wq_entry, mode);
if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
ret = action(&wbq_entry->key, mode);
/*
* See the comment in prepare_to_wait_event().
 * finish_wait() does not necessarily take wq_head->lock,
* but test_and_set_bit() implies mb() which pairs with
* smp_mb__after_atomic() before wake_up_page().
*/
if (ret)
finish_wait(wq_head, &wbq_entry->wq_entry);
}
if (!test_and_set_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
if (!ret)
finish_wait(wq_head, &wbq_entry->wq_entry);
return 0;
} else if (ret) {
return ret;
}
}
}
EXPORT_SYMBOL(__wait_on_bit_lock);
int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
wait_bit_action_f *action, unsigned mode)
{
struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
DEFINE_WAIT_BIT(wq_entry, word, bit);
return __wait_on_bit_lock(wq_head, &wq_entry, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
{
struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
if (waitqueue_active(wq_head))
__wake_up(wq_head, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);
/**
* wake_up_bit - wake up a waiter on a bit
* @word: the word being waited on, a kernel virtual address
* @bit: the bit of the word being waited on
*
* There is a standard hashed waitqueue table for generic use. This
* is the part of the hashtable's accessor API that wakes up waiters
* on a bit. For instance, if one were to have waiters on a bitflag,
* one would call wake_up_bit() after clearing the bit.
*
* In order for this to function properly, as it uses waitqueue_active()
* internally, some kind of memory barrier must be done prior to calling
* this. Typically, this will be smp_mb__after_atomic(), but in some
* cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
* because spin_unlock() does not guarantee a memory barrier.
*/
void wake_up_bit(void *word, int bit)
{
__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
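/*
 * Pairing sketch (hypothetical flag word, not part of this file): the
 * waker clears the bit, issues the barrier the kernel-doc above calls
 * for, and then wakes; the sleeper uses the matching wait_on_bit().
 */
#ifdef WAIT_BIT_SKETCH
static unsigned long sketch_flags;
#define SKETCH_BUSY_BIT	0

static void sketch_release(void)
{
	clear_bit_unlock(SKETCH_BUSY_BIT, &sketch_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sketch_flags, SKETCH_BUSY_BIT);
}

static int sketch_wait(void)
{
	return wait_on_bit(&sketch_flags, SKETCH_BUSY_BIT,
			   TASK_UNINTERRUPTIBLE);
}
#endif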
wait_queue_head_t *__var_waitqueue(void *p)
{
return bit_wait_table + hash_ptr(p, WAIT_TABLE_BITS);
}
EXPORT_SYMBOL(__var_waitqueue);
static int
var_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
int sync, void *arg)
{
struct wait_bit_key *key = arg;
struct wait_bit_queue_entry *wbq_entry =
container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
if (wbq_entry->key.flags != key->flags ||
wbq_entry->key.bit_nr != key->bit_nr)
return 0;
return autoremove_wake_function(wq_entry, mode, sync, key);
}
void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags)
{
*wbq_entry = (struct wait_bit_queue_entry){
.key = {
.flags = (var),
.bit_nr = -1,
},
.wq_entry = {
.flags = flags,
.private = current,
.func = var_wake_function,
.entry = LIST_HEAD_INIT(wbq_entry->wq_entry.entry),
},
};
}
EXPORT_SYMBOL(init_wait_var_entry);
void wake_up_var(void *var)
{
__wake_up_bit(__var_waitqueue(var), var, -1);
}
EXPORT_SYMBOL(wake_up_var);
__sched int bit_wait(struct wait_bit_key *word, int mode)
{
schedule();
if (signal_pending_state(mode, current))
return -EINTR;
return 0;
}
EXPORT_SYMBOL(bit_wait);
__sched int bit_wait_io(struct wait_bit_key *word, int mode)
{
io_schedule();
if (signal_pending_state(mode, current))
return -EINTR;
return 0;
}
EXPORT_SYMBOL(bit_wait_io);
__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{
unsigned long now = READ_ONCE(jiffies);
if (time_after_eq(now, word->timeout))
return -EAGAIN;
schedule_timeout(word->timeout - now);
if (signal_pending_state(mode, current))
return -EINTR;
return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_timeout);
__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
{
unsigned long now = READ_ONCE(jiffies);
if (time_after_eq(now, word->timeout))
return -EAGAIN;
io_schedule_timeout(word->timeout - now);
if (signal_pending_state(mode, current))
return -EINTR;
return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
void __init wait_bit_init(void)
{
int i;
for (i = 0; i < WAIT_TABLE_SIZE; i++)
init_waitqueue_head(bit_wait_table + i);
}
| linux-master | kernel/sched/wait_bit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
* policies)
*/
int sched_rr_timeslice = RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
struct rt_bandwidth def_rt_bandwidth;
/*
* period over which we measure -rt task CPU usage in us.
* default: 1s
*/
unsigned int sysctl_sched_rt_period = 1000000;
/*
* part of the period that we allow rt tasks to run in us.
* default: 0.95s
*/
int sysctl_sched_rt_runtime = 950000;
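/*
 * Worked example: with the defaults above, realtime tasks may consume at
 * most 950000us of every 1000000us period, i.e. 95% of each CPU, leaving
 * 5% for non-RT tasks even under a runaway RT load (modulo runtime
 * sharing between CPUs, see RT_RUNTIME_SHARE below).
 */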
#ifdef CONFIG_SYSCTL
static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
static struct ctl_table sched_rt_sysctls[] = {
{
.procname = "sched_rt_period_us",
.data = &sysctl_sched_rt_period,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_rt_handler,
},
{
.procname = "sched_rt_runtime_us",
.data = &sysctl_sched_rt_runtime,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = sched_rt_handler,
},
{
.procname = "sched_rr_timeslice_ms",
.data = &sysctl_sched_rr_timeslice,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = sched_rr_handler,
},
{}
};
static int __init sched_rt_sysctl_init(void)
{
register_sysctl_init("kernel", sched_rt_sysctls);
return 0;
}
late_initcall(sched_rt_sysctl_init);
#endif
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
struct rt_bandwidth *rt_b =
container_of(timer, struct rt_bandwidth, rt_period_timer);
int idle = 0;
int overrun;
raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
overrun = hrtimer_forward_now(timer, rt_b->rt_period);
if (!overrun)
break;
raw_spin_unlock(&rt_b->rt_runtime_lock);
idle = do_sched_rt_period_timer(rt_b, overrun);
raw_spin_lock(&rt_b->rt_runtime_lock);
}
if (idle)
rt_b->rt_period_active = 0;
raw_spin_unlock(&rt_b->rt_runtime_lock);
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_HARD);
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
raw_spin_lock(&rt_b->rt_runtime_lock);
if (!rt_b->rt_period_active) {
rt_b->rt_period_active = 1;
/*
 * SCHED_DEADLINE updates the bandwidth, as a runaway
* RT task with a DL task could hog a CPU. But DL does
* not reset the period. If a deadline task was running
* without an RT task running, it can cause RT tasks to
* throttle when they start up. Kick the timer right away
* to update the period.
*/
hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
hrtimer_start_expires(&rt_b->rt_period_timer,
HRTIMER_MODE_ABS_PINNED_HARD);
}
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
return;
do_start_rt_bandwidth(rt_b);
}
void init_rt_rq(struct rt_rq *rt_rq)
{
struct rt_prio_array *array;
int i;
array = &rt_rq->active;
for (i = 0; i < MAX_RT_PRIO; i++) {
INIT_LIST_HEAD(array->queue + i);
__clear_bit(i, array->bitmap);
}
/* delimiter for bitsearch: */
__set_bit(MAX_RT_PRIO, array->bitmap);
#if defined CONFIG_SMP
rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
rt_rq->highest_prio.next = MAX_RT_PRIO-1;
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
/* We start in dequeued state, because no RT tasks are queued */
rt_rq->rt_queued = 0;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
hrtimer_cancel(&rt_b->rt_period_timer);
}
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
return container_of(rt_se, struct task_struct, rt);
}
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return rt_rq->rq;
}
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
return rt_se->rt_rq;
}
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_se->rt_rq;
return rt_rq->rq;
}
void unregister_rt_sched_group(struct task_group *tg)
{
if (tg->rt_se)
destroy_rt_bandwidth(&tg->rt_bandwidth);
}
void free_rt_sched_group(struct task_group *tg)
{
int i;
for_each_possible_cpu(i) {
if (tg->rt_rq)
kfree(tg->rt_rq[i]);
if (tg->rt_se)
kfree(tg->rt_se[i]);
}
kfree(tg->rt_rq);
kfree(tg->rt_se);
}
void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
struct sched_rt_entity *rt_se, int cpu,
struct sched_rt_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
rt_rq->rt_nr_boosted = 0;
rt_rq->rq = rq;
rt_rq->tg = tg;
tg->rt_rq[cpu] = rt_rq;
tg->rt_se[cpu] = rt_se;
if (!rt_se)
return;
if (!parent)
rt_se->rt_rq = &rq->rt;
else
rt_se->rt_rq = parent->my_q;
rt_se->my_q = rt_rq;
rt_se->parent = parent;
INIT_LIST_HEAD(&rt_se->run_list);
}
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
struct rt_rq *rt_rq;
struct sched_rt_entity *rt_se;
int i;
tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
if (!tg->rt_rq)
goto err;
tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
if (!tg->rt_se)
goto err;
init_rt_bandwidth(&tg->rt_bandwidth,
ktime_to_ns(def_rt_bandwidth.rt_period), 0);
for_each_possible_cpu(i) {
rt_rq = kzalloc_node(sizeof(struct rt_rq),
GFP_KERNEL, cpu_to_node(i));
if (!rt_rq)
goto err;
rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
GFP_KERNEL, cpu_to_node(i));
if (!rt_se)
goto err_free_rq;
init_rt_rq(rt_rq);
rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
}
return 1;
err_free_rq:
kfree(rt_rq);
err:
return 0;
}
#else /* CONFIG_RT_GROUP_SCHED */
#define rt_entity_is_task(rt_se) (1)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
return container_of(rt_se, struct task_struct, rt);
}
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return container_of(rt_rq, struct rq, rt);
}
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
struct task_struct *p = rt_task_of(rt_se);
return task_rq(p);
}
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
struct rq *rq = rq_of_rt_se(rt_se);
return &rq->rt;
}
void unregister_rt_sched_group(struct task_group *tg) { }
void free_rt_sched_group(struct task_group *tg) { }
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
return rq->online && rq->rt.highest_prio.curr > prev->prio;
}
static inline int rt_overloaded(struct rq *rq)
{
return atomic_read(&rq->rd->rto_count);
}
static inline void rt_set_overload(struct rq *rq)
{
if (!rq->online)
return;
cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
/*
* Make sure the mask is visible before we set
* the overload count. That is checked to determine
* if we should look at the mask. It would be a shame
* if we looked at the mask, but the mask was not
* updated yet.
*
* Matched by the barrier in pull_rt_task().
*/
smp_wmb();
atomic_inc(&rq->rd->rto_count);
}
static inline void rt_clear_overload(struct rq *rq)
{
if (!rq->online)
return;
/* the order here really doesn't matter */
atomic_dec(&rq->rd->rto_count);
cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
static void update_rt_migration(struct rt_rq *rt_rq)
{
if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
if (!rt_rq->overloaded) {
rt_set_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 1;
}
} else if (rt_rq->overloaded) {
rt_clear_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 0;
}
}
static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
struct task_struct *p;
if (!rt_entity_is_task(rt_se))
return;
p = rt_task_of(rt_se);
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total++;
if (p->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory++;
update_rt_migration(rt_rq);
}
static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
struct task_struct *p;
if (!rt_entity_is_task(rt_se))
return;
p = rt_task_of(rt_se);
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total--;
if (p->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory--;
update_rt_migration(rt_rq);
}
static inline int has_pushable_tasks(struct rq *rq)
{
return !plist_head_empty(&rq->rt.pushable_tasks);
}
static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);
static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);
static inline void rt_queue_push_tasks(struct rq *rq)
{
if (!has_pushable_tasks(rq))
return;
queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}
static inline void rt_queue_pull_task(struct rq *rq)
{
queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
plist_node_init(&p->pushable_tasks, p->prio);
plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the highest prio pushable task */
if (p->prio < rq->rt.highest_prio.next)
rq->rt.highest_prio.next = p->prio;
}
static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the new highest prio pushable task */
if (has_pushable_tasks(rq)) {
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
rq->rt.highest_prio.next = p->prio;
} else {
rq->rt.highest_prio.next = MAX_RT_PRIO-1;
}
}
#else
static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}
static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}
static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}
static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}
static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
return rt_se->on_rq;
}
#ifdef CONFIG_UCLAMP_TASK
/*
* Verify the fitness of task @p to run on @cpu taking into account the uclamp
* settings.
*
 * This check is only important for heterogeneous systems where the uclamp_min
 * value is higher than the capacity of a @cpu. For non-heterogeneous systems
 * this function will always return true.
*
* The function will return true if the capacity of the @cpu is >= the
* uclamp_min and false otherwise.
*
* Note that uclamp_min will be clamped to uclamp_max if uclamp_min
* > uclamp_max.
*/
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
unsigned int min_cap;
unsigned int max_cap;
unsigned int cpu_cap;
/* Only heterogeneous systems can benefit from this check */
if (!sched_asym_cpucap_active())
return true;
min_cap = uclamp_eff_value(p, UCLAMP_MIN);
max_cap = uclamp_eff_value(p, UCLAMP_MAX);
cpu_cap = capacity_orig_of(cpu);
return cpu_cap >= min(min_cap, max_cap);
}
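/*
 * Example for the check above (hypothetical capacities): on an asymmetric
 * system where a little CPU has capacity_orig 512, a task with
 * uclamp_min == 768 (and uclamp_max >= 768) does not fit there, while any
 * CPU with capacity >= 768 does.
 */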
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
return true;
}
#endif
#ifdef CONFIG_RT_GROUP_SCHED
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
if (!rt_rq->tg)
return RUNTIME_INF;
return rt_rq->rt_runtime;
}
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}
typedef struct task_group *rt_rq_iter_t;
static inline struct task_group *next_task_group(struct task_group *tg)
{
do {
tg = list_entry_rcu(tg->list.next,
typeof(struct task_group), list);
} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
if (&tg->list == &task_groups)
tg = NULL;
return tg;
}
#define for_each_rt_rq(rt_rq, iter, rq) \
for (iter = container_of(&task_groups, typeof(*iter), list); \
(iter = next_task_group(iter)) && \
(rt_rq = iter->rt_rq[cpu_of(rq)]);)
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = rt_se->parent)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
return rt_se->my_q;
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
struct rq *rq = rq_of_rt_rq(rt_rq);
struct sched_rt_entity *rt_se;
int cpu = cpu_of(rq);
rt_se = rt_rq->tg->rt_se[cpu];
if (rt_rq->rt_nr_running) {
if (!rt_se)
enqueue_top_rt_rq(rt_rq);
else if (!on_rt_rq(rt_se))
enqueue_rt_entity(rt_se, 0);
if (rt_rq->highest_prio.curr < curr->prio)
resched_curr(rq);
}
}
static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
struct sched_rt_entity *rt_se;
int cpu = cpu_of(rq_of_rt_rq(rt_rq));
rt_se = rt_rq->tg->rt_se[cpu];
if (!rt_se) {
dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
}
else if (on_rt_rq(rt_se))
dequeue_rt_entity(rt_se, 0);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}
static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = group_rt_rq(rt_se);
struct task_struct *p;
if (rt_rq)
return !!rt_rq->rt_nr_boosted;
p = rt_task_of(rt_se);
return p->prio != p->normal_prio;
}
#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
return cpu_online_mask;
}
#endif
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
return &rt_rq->tg->rt_bandwidth;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
return rt_rq->rt_runtime;
}
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
return ktime_to_ns(def_rt_bandwidth.rt_period);
}
typedef struct rt_rq *rt_rq_iter_t;
#define for_each_rt_rq(rt_rq, iter, rq) \
for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = NULL)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
return NULL;
}
static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
if (!rt_rq->rt_nr_running)
return;
enqueue_top_rt_rq(rt_rq);
resched_curr(rq);
}
static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled;
}
static inline const struct cpumask *sched_rt_period_mask(void)
{
return cpu_online_mask;
}
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
return &cpu_rq(cpu)->rt;
}
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
return &def_rt_bandwidth;
}
#endif /* CONFIG_RT_GROUP_SCHED */
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
return (hrtimer_active(&rt_b->rt_period_timer) ||
rt_rq->rt_time < rt_b->rt_runtime);
}
#ifdef CONFIG_SMP
/*
* We ran out of runtime, see if we can borrow some from our neighbours.
*/
static void do_balance_runtime(struct rt_rq *rt_rq)
{
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
int i, weight;
u64 rt_period;
weight = cpumask_weight(rd->span);
raw_spin_lock(&rt_b->rt_runtime_lock);
rt_period = ktime_to_ns(rt_b->rt_period);
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
s64 diff;
if (iter == rt_rq)
continue;
raw_spin_lock(&iter->rt_runtime_lock);
/*
* Either all rqs have inf runtime and there's nothing to steal
* or __disable_runtime() below sets a specific rq to inf to
 * indicate it's been disabled and disallow stealing.
*/
if (iter->rt_runtime == RUNTIME_INF)
goto next;
/*
* From runqueues with spare time, take 1/n part of their
* spare time, but no more than our period.
*/
diff = iter->rt_runtime - iter->rt_time;
if (diff > 0) {
diff = div_u64((u64)diff, weight);
if (rt_rq->rt_runtime + diff > rt_period)
diff = rt_period - rt_rq->rt_runtime;
iter->rt_runtime -= diff;
rt_rq->rt_runtime += diff;
if (rt_rq->rt_runtime == rt_period) {
raw_spin_unlock(&iter->rt_runtime_lock);
break;
}
}
next:
raw_spin_unlock(&iter->rt_runtime_lock);
}
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
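/*
 * Illustrative example (editor's sketch, not from the source): in a
 * root domain spanning 4 CPUs (weight = 4) with rt_period = 1s, a
 * neighbour holding rt_runtime = 950ms that has consumed
 * rt_time = 150ms has diff = 800ms of spare time, so we borrow
 * div_u64(800ms, 4) = 200ms from it, clamped so that our own
 * rt_runtime never exceeds rt_period.
 */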
/*
* Ensure this RQ takes back all the runtime it lent to its neighbours.
*/
static void __disable_runtime(struct rq *rq)
{
struct root_domain *rd = rq->rd;
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
if (unlikely(!scheduler_running))
return;
for_each_rt_rq(rt_rq, iter, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
s64 want;
int i;
raw_spin_lock(&rt_b->rt_runtime_lock);
raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* Either we're all inf and nobody needs to borrow, or we're
* already disabled and thus have nothing to do, or we have
* exactly the right amount of runtime to take out.
*/
if (rt_rq->rt_runtime == RUNTIME_INF ||
rt_rq->rt_runtime == rt_b->rt_runtime)
goto balanced;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
/*
* Calculate the difference between what we started out with
* and what we currently have; that's the amount of runtime
* we lent out and now have to reclaim.
*/
want = rt_b->rt_runtime - rt_rq->rt_runtime;
/*
* Greedy reclaim, take back as much as we can.
*/
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
s64 diff;
/*
* Can't reclaim from ourselves or disabled runqueues.
*/
if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
continue;
raw_spin_lock(&iter->rt_runtime_lock);
if (want > 0) {
diff = min_t(s64, iter->rt_runtime, want);
iter->rt_runtime -= diff;
want -= diff;
} else {
iter->rt_runtime -= want;
want -= want;
}
raw_spin_unlock(&iter->rt_runtime_lock);
if (!want)
break;
}
raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* We cannot be left wanting - that would mean some runtime
* leaked out of the system.
*/
WARN_ON_ONCE(want);
balanced:
/*
* Disable all the borrow logic by pretending we have inf
* runtime - in which case borrowing doesn't make sense.
*/
rt_rq->rt_runtime = RUNTIME_INF;
rt_rq->rt_throttled = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
raw_spin_unlock(&rt_b->rt_runtime_lock);
/* Make rt_rq available for pick_next_task() */
sched_rt_rq_enqueue(rt_rq);
}
}
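/*
 * Inverse of __disable_runtime() above: invoked from rq_online_rt()
 * when the rq comes back online, it restores each rt_rq's runtime to
 * the configured rt_b->rt_runtime and clears any accrued rt_time and
 * throttle state.
 */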
static void __enable_runtime(struct rq *rq)
{
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
if (unlikely(!scheduler_running))
return;
/*
* Reset each runqueue's bandwidth settings
*/
for_each_rt_rq(rt_rq, iter, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
raw_spin_lock(&rt_b->rt_runtime_lock);
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_b->rt_runtime;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
}
static void balance_runtime(struct rt_rq *rt_rq)
{
if (!sched_feat(RT_RUNTIME_SHARE))
return;
if (rt_rq->rt_time > rt_rq->rt_runtime) {
raw_spin_unlock(&rt_rq->rt_runtime_lock);
do_balance_runtime(rt_rq);
raw_spin_lock(&rt_rq->rt_runtime_lock);
}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
int i, idle = 1, throttled = 0;
const struct cpumask *span;
span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
/*
* FIXME: isolated CPUs should really leave the root task group,
* whether they are isolcpus or were isolated via cpusets, lest
* the timer run on a CPU which does not service all runqueues,
* potentially leaving other CPUs indefinitely throttled. If
* isolation is really required, the user will turn the throttle
* off to kill the perturbations it causes anyway. Meanwhile,
* this maintains functionality for boot and/or troubleshooting.
*/
if (rt_b == &root_task_group.rt_bandwidth)
span = cpu_online_mask;
#endif
for_each_cpu(i, span) {
int enqueue = 0;
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
struct rq *rq = rq_of_rt_rq(rt_rq);
struct rq_flags rf;
int skip;
/*
* When span == cpu_online_mask, taking each rq->lock
* can be time-consuming. Try to avoid it when possible.
*/
raw_spin_lock(&rt_rq->rt_runtime_lock);
if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
rt_rq->rt_runtime = rt_b->rt_runtime;
skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
if (skip)
continue;
rq_lock(rq, &rf);
update_rq_clock(rq);
if (rt_rq->rt_time) {
u64 runtime;
raw_spin_lock(&rt_rq->rt_runtime_lock);
if (rt_rq->rt_throttled)
balance_runtime(rt_rq);
runtime = rt_rq->rt_runtime;
rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
rt_rq->rt_throttled = 0;
enqueue = 1;
/*
* When we're idle and a woken (rt) task is
* throttled, check_preempt_curr() will set
* skip_update and the time between the wakeup
* and this unthrottle will get accounted as
* 'runtime'.
*/
if (rt_rq->rt_nr_running && rq->curr == rq->idle)
rq_clock_cancel_skipupdate(rq);
}
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
} else if (rt_rq->rt_nr_running) {
idle = 0;
if (!rt_rq_throttled(rt_rq))
enqueue = 1;
}
if (rt_rq->rt_throttled)
throttled = 1;
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
rq_unlock(rq, &rf);
}
if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
return 1;
return idle;
}
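/*
 * Replenishment example (editor's sketch): with runtime = 950ms and a
 * single missed period (overrun = 1), a throttled rt_rq that accrued
 * rt_time = 1200ms is decremented by min(1200ms, 1 * 950ms) = 950ms,
 * leaving 250ms; since 250ms < runtime, the rt_rq is unthrottled and
 * re-enqueued via sched_rt_rq_enqueue().
 */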
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq)
return rt_rq->highest_prio.curr;
#endif
return rt_task_of(rt_se)->prio;
}
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
u64 runtime = sched_rt_runtime(rt_rq);
if (rt_rq->rt_throttled)
return rt_rq_throttled(rt_rq);
if (runtime >= sched_rt_period(rt_rq))
return 0;
balance_runtime(rt_rq);
runtime = sched_rt_runtime(rt_rq);
if (runtime == RUNTIME_INF)
return 0;
if (rt_rq->rt_time > runtime) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
/*
* Don't actually throttle groups that have no runtime assigned
* but accrue some time due to boosting.
*/
if (likely(rt_b->rt_runtime)) {
rt_rq->rt_throttled = 1;
printk_deferred_once("sched: RT throttling activated\n");
} else {
/*
* In case we did anyway, make it go away,
* replenishment is a joke, since it will replenish us
* with exactly 0 ns.
*/
rt_rq->rt_time = 0;
}
if (rt_rq_throttled(rt_rq)) {
sched_rt_rq_dequeue(rt_rq);
return 1;
}
}
return 0;
}
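/*
 * Example (editor's sketch, assuming the usual defaults of
 * rt_runtime = 950ms per rt_period = 1s): once an rt_rq accrues more
 * than 950ms of rt_time within a period and cannot borrow enough from
 * its neighbours, it is marked throttled and dequeued, and "sched: RT
 * throttling activated" is printed once.
 */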
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
*/
static void update_curr_rt(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_rt_entity *rt_se = &curr->rt;
u64 delta_exec;
u64 now;
if (curr->sched_class != &rt_sched_class)
return;
now = rq_clock_task(rq);
delta_exec = now - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0))
return;
schedstat_set(curr->stats.exec_max,
max(curr->stats.exec_max, delta_exec));
trace_sched_stat_runtime(curr, delta_exec, 0);
update_current_exec_runtime(curr, now, delta_exec);
if (!rt_bandwidth_enabled())
return;
for_each_sched_rt_entity(rt_se) {
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
int exceeded;
if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_time += delta_exec;
exceeded = sched_rt_runtime_exceeded(rt_rq);
if (exceeded)
resched_curr(rq);
raw_spin_unlock(&rt_rq->rt_runtime_lock);
if (exceeded)
do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
}
}
}
static void
dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
BUG_ON(&rq->rt != rt_rq);
if (!rt_rq->rt_queued)
return;
BUG_ON(!rq->nr_running);
sub_nr_running(rq, count);
rt_rq->rt_queued = 0;
}
static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
BUG_ON(&rq->rt != rt_rq);
if (rt_rq->rt_queued)
return;
if (rt_rq_throttled(rt_rq))
return;
if (rt_rq->rt_nr_running) {
add_nr_running(rq, rt_rq->rt_nr_running);
rt_rq->rt_queued = 1;
}
/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
cpufreq_update_util(rq, 0);
}
#if defined CONFIG_SMP
static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Change rq's cpupri only if rt_rq is the top queue.
*/
if (&rq->rt != rt_rq)
return;
#endif
if (rq->online && prio < prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}
static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Change rq's cpupri only if rt_rq is the top queue.
*/
if (&rq->rt != rt_rq)
return;
#endif
if (rq->online && rt_rq->highest_prio.curr != prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}
#else /* CONFIG_SMP */
static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
#endif /* CONFIG_SMP */
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
int prev_prio = rt_rq->highest_prio.curr;
if (prio < prev_prio)
rt_rq->highest_prio.curr = prio;
inc_rt_prio_smp(rt_rq, prio, prev_prio);
}
static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
int prev_prio = rt_rq->highest_prio.curr;
if (rt_rq->rt_nr_running) {
WARN_ON(prio < prev_prio);
/*
* This may have been our highest task, and therefore
* we may have some recomputation to do
*/
if (prio == prev_prio) {
struct rt_prio_array *array = &rt_rq->active;
rt_rq->highest_prio.curr =
sched_find_first_bit(array->bitmap);
}
} else {
rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
}
dec_rt_prio_smp(rt_rq, prio, prev_prio);
}
#else
static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
if (rt_se_boosted(rt_se))
rt_rq->rt_nr_boosted++;
if (rt_rq->tg)
start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}
static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
if (rt_se_boosted(rt_se))
rt_rq->rt_nr_boosted--;
WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}
#else /* CONFIG_RT_GROUP_SCHED */
static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
start_rt_bandwidth(&def_rt_bandwidth);
}
static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
#endif /* CONFIG_RT_GROUP_SCHED */
static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
struct rt_rq *group_rq = group_rt_rq(rt_se);
if (group_rq)
return group_rq->rt_nr_running;
else
return 1;
}
static inline
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
struct rt_rq *group_rq = group_rt_rq(rt_se);
struct task_struct *tsk;
if (group_rq)
return group_rq->rr_nr_running;
tsk = rt_task_of(rt_se);
return (tsk->policy == SCHED_RR) ? 1 : 0;
}
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
int prio = rt_se_prio(rt_se);
WARN_ON(!rt_prio(prio));
rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
inc_rt_prio(rt_rq, prio);
inc_rt_migration(rt_se, rt_rq);
inc_rt_group(rt_se, rt_rq);
}
static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
WARN_ON(!rt_prio(rt_se_prio(rt_se)));
WARN_ON(!rt_rq->rt_nr_running);
rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
dec_rt_prio(rt_rq, rt_se_prio(rt_se));
dec_rt_migration(rt_se, rt_rq);
dec_rt_group(rt_se, rt_rq);
}
/*
* Change rt_se->run_list location unless SAVE && !MOVE
*
* assumes ENQUEUE/DEQUEUE flags match
*/
static inline bool move_entity(unsigned int flags)
{
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
return false;
return true;
}
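/*
 * move_entity() truth table (editor's note; the ENQUEUE_* flags mirror
 * the DEQUEUE_* values, as assumed above):
 *
 *   SAVE  MOVE  ->  reposition run_list?
 *    0     0        yes (plain enqueue/dequeue)
 *    1     0        no  (save/restore without repositioning)
 *    x     1        yes (explicit move requested)
 */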
static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
list_del_init(&rt_se->run_list);
if (list_empty(array->queue + rt_se_prio(rt_se)))
__clear_bit(rt_se_prio(rt_se), array->bitmap);
rt_se->on_list = 0;
}
static inline struct sched_statistics *
__schedstats_from_rt_se(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
/* schedstats is not supported for rt group. */
if (!rt_entity_is_task(rt_se))
return NULL;
#endif
return &rt_task_of(rt_se)->stats;
}
static inline void
update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
struct sched_statistics *stats;
struct task_struct *p = NULL;
if (!schedstat_enabled())
return;
if (rt_entity_is_task(rt_se))
p = rt_task_of(rt_se);
stats = __schedstats_from_rt_se(rt_se);
if (!stats)
return;
__update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
}
static inline void
update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
struct sched_statistics *stats;
struct task_struct *p = NULL;
if (!schedstat_enabled())
return;
if (rt_entity_is_task(rt_se))
p = rt_task_of(rt_se);
stats = __schedstats_from_rt_se(rt_se);
if (!stats)
return;
__update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
}
static inline void
update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
int flags)
{
if (!schedstat_enabled())
return;
if (flags & ENQUEUE_WAKEUP)
update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
}
static inline void
update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
struct sched_statistics *stats;
struct task_struct *p = NULL;
if (!schedstat_enabled())
return;
if (rt_entity_is_task(rt_se))
p = rt_task_of(rt_se);
stats = __schedstats_from_rt_se(rt_se);
if (!stats)
return;
__update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
}
static inline void
update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
int flags)
{
struct task_struct *p = NULL;
if (!schedstat_enabled())
return;
if (rt_entity_is_task(rt_se))
p = rt_task_of(rt_se);
if ((flags & DEQUEUE_SLEEP) && p) {
unsigned int state;
state = READ_ONCE(p->__state);
if (state & TASK_INTERRUPTIBLE)
__schedstat_set(p->stats.sleep_start,
rq_clock(rq_of_rt_rq(rt_rq)));
if (state & TASK_UNINTERRUPTIBLE)
__schedstat_set(p->stats.block_start,
rq_clock(rq_of_rt_rq(rt_rq)));
}
}
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
struct rt_rq *group_rq = group_rt_rq(rt_se);
struct list_head *queue = array->queue + rt_se_prio(rt_se);
/*
* Don't enqueue the group if it's throttled, or when empty.
* The latter is a consequence of the former when a child group
* gets throttled and the current group doesn't have any other
* active members.
*/
if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
if (rt_se->on_list)
__delist_rt_entity(rt_se, array);
return;
}
if (move_entity(flags)) {
WARN_ON_ONCE(rt_se->on_list);
if (flags & ENQUEUE_HEAD)
list_add(&rt_se->run_list, queue);
else
list_add_tail(&rt_se->run_list, queue);
__set_bit(rt_se_prio(rt_se), array->bitmap);
rt_se->on_list = 1;
}
rt_se->on_rq = 1;
inc_rt_tasks(rt_se, rt_rq);
}
static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
if (move_entity(flags)) {
WARN_ON_ONCE(!rt_se->on_list);
__delist_rt_entity(rt_se, array);
}
rt_se->on_rq = 0;
dec_rt_tasks(rt_se, rt_rq);
}
/*
* Because the prio of an upper entry depends on the lower
* entries, we must remove entries top - down.
*/
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct sched_rt_entity *back = NULL;
unsigned int rt_nr_running;
for_each_sched_rt_entity(rt_se) {
rt_se->back = back;
back = rt_se;
}
rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
for (rt_se = back; rt_se; rt_se = rt_se->back) {
if (on_rt_rq(rt_se))
__dequeue_rt_entity(rt_se, flags);
}
dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
}
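/*
 * Example (editor's sketch): for a task in group B nested inside group
 * A, the rt_se chain is task -> B -> A. The first loop records ->back
 * pointers so the second loop can dequeue A's entity first, then B's,
 * then the task's, honouring the top-down requirement stated above.
 */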
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rq *rq = rq_of_rt_se(rt_se);
update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
dequeue_rt_stack(rt_se, flags);
for_each_sched_rt_entity(rt_se)
__enqueue_rt_entity(rt_se, flags);
enqueue_top_rt_rq(&rq->rt);
}
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rq *rq = rq_of_rt_se(rt_se);
update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
dequeue_rt_stack(rt_se, flags);
for_each_sched_rt_entity(rt_se) {
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq && rt_rq->rt_nr_running)
__enqueue_rt_entity(rt_se, flags);
}
enqueue_top_rt_rq(&rq->rt);
}
/*
* Adding/removing a task to/from a priority array:
*/
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
if (flags & ENQUEUE_WAKEUP)
rt_se->timeout = 0;
check_schedstat_required();
update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
enqueue_rt_entity(rt_se, flags);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
dequeue_rt_entity(rt_se, flags);
dequeue_pushable_task(rq, p);
}
/*
* Put the task at the head or the end of the run list without the overhead of
* dequeue followed by enqueue.
*/
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
if (on_rt_rq(rt_se)) {
struct rt_prio_array *array = &rt_rq->active;
struct list_head *queue = array->queue + rt_se_prio(rt_se);
if (head)
list_move(&rt_se->run_list, queue);
else
list_move_tail(&rt_se->run_list, queue);
}
}
static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq;
for_each_sched_rt_entity(rt_se) {
rt_rq = rt_rq_of_se(rt_se);
requeue_rt_entity(rt_rq, rt_se, head);
}
}
static void yield_task_rt(struct rq *rq)
{
requeue_task_rt(rq, rq->curr, 0);
}
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);
static int
select_task_rq_rt(struct task_struct *p, int cpu, int flags)
{
struct task_struct *curr;
struct rq *rq;
bool test;
/* For anything but wake ups, just return the task_cpu */
if (!(flags & (WF_TTWU | WF_FORK)))
goto out;
rq = cpu_rq(cpu);
rcu_read_lock();
curr = READ_ONCE(rq->curr); /* unlocked access */
/*
* If the current task on @p's runqueue is an RT task, then
* try to see if we can wake this RT task up on another
* runqueue. Otherwise simply start this RT task
* on its current runqueue.
*
* We want to avoid overloading runqueues. If the woken
* task is a higher priority, then it will stay on this CPU
* and the lower prio task should be moved to another CPU.
* Even though this will probably make the lower prio task
* lose its cache, we do not want to bounce a higher priority task
* around just because it gave up its CPU, perhaps for a
* lock?
*
* For equal prio tasks, we just let the scheduler sort it out.
*
* Otherwise, just let it ride on the affined RQ and the
* post-schedule router will push the preempted task away
*
* This test is optimistic, if we get it wrong the load-balancer
* will have to sort it out.
*
* We take into account the capacity of the CPU to ensure it fits the
* requirement of the task - which is only important on heterogeneous
* systems like big.LITTLE.
*/
test = curr &&
unlikely(rt_task(curr)) &&
(curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
if (test || !rt_task_fits_capacity(p, cpu)) {
int target = find_lowest_rq(p);
/*
* Bail out if we were forcing a migration to find a better
* fitting CPU but our search failed.
*/
if (!test && target != -1 && !rt_task_fits_capacity(p, target))
goto out_unlock;
/*
* Don't bother moving it if the destination CPU is
* not running a lower priority task.
*/
if (target != -1 &&
p->prio < cpu_rq(target)->rt.highest_prio.curr)
cpu = target;
}
out_unlock:
rcu_read_unlock();
out:
return cpu;
}
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
/*
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
return;
/*
* p is migratable, so let's not schedule it and
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1 &&
cpupri_find(&rq->rd->cpupri, p, NULL))
return;
/*
* There appear to be other CPUs that can accept
* the current task but none can run 'p', so let's reschedule
* to try and push the current task away:
*/
requeue_task_rt(rq, p, 1);
resched_curr(rq);
}
static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
/*
* This is OK, because current is on_cpu, which avoids it being
* picked for load-balance and preemption/IRQs are still
* disabled avoiding further scheduler activity on it and we've
* not yet started the picking loop.
*/
rq_unpin_lock(rq, rf);
pull_rt_task(rq);
rq_repin_lock(rq, rf);
}
return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
}
#endif /* CONFIG_SMP */
/*
* Preempt the current task with a newly woken task if needed:
*/
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
if (p->prio < rq->curr->prio) {
resched_curr(rq);
return;
}
#ifdef CONFIG_SMP
/*
* If:
*
* - the newly woken task is of equal priority to the current task
* - the newly woken task is non-migratable while current is migratable
* - current will be preempted on the next reschedule
*
* we should check to see if current can readily move to a different
* cpu. If so, we will reschedule to allow the push logic to try
* to move current somewhere else, making room for our non-migratable
* task.
*/
if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
check_preempt_equal_prio(rq, p);
#endif
}
static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq = &rq->rt;
p->se.exec_start = rq_clock_task(rq);
if (on_rt_rq(&p->rt))
update_stats_wait_end_rt(rt_rq, rt_se);
/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
if (!first)
return;
/*
* If prev task was rt, put_prev_task() has already updated the
* utilization. We only care about the case where we start to schedule
* an rt task.
*/
if (rq->curr->sched_class != &rt_sched_class)
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
rt_queue_push_tasks(rq);
}
static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
{
struct rt_prio_array *array = &rt_rq->active;
struct sched_rt_entity *next = NULL;
struct list_head *queue;
int idx;
idx = sched_find_first_bit(array->bitmap);
BUG_ON(idx >= MAX_RT_PRIO);
queue = array->queue + idx;
if (SCHED_WARN_ON(list_empty(queue)))
return NULL;
next = list_entry(queue->next, struct sched_rt_entity, run_list);
return next;
}
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
struct sched_rt_entity *rt_se;
struct rt_rq *rt_rq = &rq->rt;
do {
rt_se = pick_next_rt_entity(rt_rq);
if (unlikely(!rt_se))
return NULL;
rt_rq = group_rt_rq(rt_se);
} while (rt_rq);
return rt_task_of(rt_se);
}
static struct task_struct *pick_task_rt(struct rq *rq)
{
struct task_struct *p;
if (!sched_rt_runnable(rq))
return NULL;
p = _pick_next_task_rt(rq);
return p;
}
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
struct task_struct *p = pick_task_rt(rq);
if (p)
set_next_task_rt(rq, p, true);
return p;
}
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq = &rq->rt;
if (on_rt_rq(&p->rt))
update_stats_wait_start_rt(rt_rq, rt_se);
update_curr_rt(rq);
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
/*
* The previous task needs to be made eligible for pushing
* if it is still active
*/
if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
}
#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_on_cpu(rq, p) &&
cpumask_test_cpu(cpu, &p->cpus_mask))
return 1;
return 0;
}
/*
* Return the highest-priority pushable task on this rq that is suitable
* to be executed on the given CPU, or NULL if there is none.
*/
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
struct plist_head *head = &rq->rt.pushable_tasks;
struct task_struct *p;
if (!has_pushable_tasks(rq))
return NULL;
plist_for_each_entry(p, head, pushable_tasks) {
if (pick_rt_task(rq, p, cpu))
return p;
}
return NULL;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
static int find_lowest_rq(struct task_struct *task)
{
struct sched_domain *sd;
struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
int ret;
/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
return -1;
if (task->nr_cpus_allowed == 1)
return -1; /* No other targets possible */
/*
* If we're on an asym system, ensure we consider the different capacities
* of the CPUs when searching for the lowest_mask.
*/
if (sched_asym_cpucap_active()) {
ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
task, lowest_mask,
rt_task_fits_capacity);
} else {
ret = cpupri_find(&task_rq(task)->rd->cpupri,
task, lowest_mask);
}
if (!ret)
return -1; /* No targets found */
/*
* At this point we have built a mask of CPUs representing the
* lowest priority tasks in the system. Now we want to elect
* the best one based on our affinity and topology.
*
* We prioritize the last CPU that the task executed on since
* it is most likely cache-hot in that location.
*/
if (cpumask_test_cpu(cpu, lowest_mask))
return cpu;
/*
* Otherwise, we consult the sched_domains span maps to figure
* out which CPU is logically closest to our hot cache data.
*/
if (!cpumask_test_cpu(this_cpu, lowest_mask))
this_cpu = -1; /* Skip this_cpu opt if not among lowest */
rcu_read_lock();
for_each_domain(cpu, sd) {
if (sd->flags & SD_WAKE_AFFINE) {
int best_cpu;
/*
* "this_cpu" is cheaper to preempt than a
* remote processor.
*/
if (this_cpu != -1 &&
cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
rcu_read_unlock();
return this_cpu;
}
best_cpu = cpumask_any_and_distribute(lowest_mask,
sched_domain_span(sd));
if (best_cpu < nr_cpu_ids) {
rcu_read_unlock();
return best_cpu;
}
}
}
rcu_read_unlock();
/*
* And finally, if there were no matches within the domains
* just give the caller *something* to work with from the compatible
* locations.
*/
if (this_cpu != -1)
return this_cpu;
cpu = cpumask_any_distribute(lowest_mask);
if (cpu < nr_cpu_ids)
return cpu;
return -1;
}
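/*
 * Selection order used above, in decreasing preference (editor's
 * summary), once cpupri has produced a non-empty lowest_mask:
 *
 *  1. task_cpu(task) itself, if it is in lowest_mask (cache-hot);
 *  2. this_cpu, when it is in lowest_mask and shares a SD_WAKE_AFFINE
 *     domain with task_cpu(task);
 *  3. any lowest_mask CPU within such a domain;
 *  4. this_cpu regardless of domains, else any lowest_mask CPU.
 */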
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
struct rq *lowest_rq = NULL;
int tries;
int cpu;
for (tries = 0; tries < RT_MAX_TRIES; tries++) {
cpu = find_lowest_rq(task);
if ((cpu == -1) || (cpu == rq->cpu))
break;
lowest_rq = cpu_rq(cpu);
if (lowest_rq->rt.highest_prio.curr <= task->prio) {
/*
* Target rq has tasks of equal or higher priority,
* retrying does not release any lock and is unlikely
* to yield a different result.
*/
lowest_rq = NULL;
break;
}
/* if the prio of this runqueue changed, try again */
if (double_lock_balance(rq, lowest_rq)) {
/*
* We had to unlock the run queue. In
* the meantime, the task could have
* migrated already or had its affinity changed.
* Also make sure that it wasn't scheduled on its rq.
* It is possible the task was scheduled, set
* "migrate_disabled" and then got preempted, so we must
* check the task migration disable flag here too.
*/
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
task_on_cpu(rq, task) ||
!rt_task(task) ||
is_migration_disabled(task) ||
!task_on_rq_queued(task))) {
double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
break;
}
}
/* If this rq is still suitable use it. */
if (lowest_rq->rt.highest_prio.curr > task->prio)
break;
/* try again */
double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
}
return lowest_rq;
}
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
struct task_struct *p;
if (!has_pushable_tasks(rq))
return NULL;
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
BUG_ON(p->nr_cpus_allowed <= 1);
BUG_ON(!task_on_rq_queued(p));
BUG_ON(!rt_task(p));
return p;
}
/*
* If the current CPU has more than one RT task, see if the non-running
* task can migrate over to a CPU that is running a task
* of lesser priority.
*/
static int push_rt_task(struct rq *rq, bool pull)
{
struct task_struct *next_task;
struct rq *lowest_rq;
int ret = 0;
if (!rq->rt.overloaded)
return 0;
next_task = pick_next_pushable_task(rq);
if (!next_task)
return 0;
retry:
/*
* It's possible that next_task slipped in with a
* higher priority than current. If that's the case,
* just reschedule current.
*/
if (unlikely(next_task->prio < rq->curr->prio)) {
resched_curr(rq);
return 0;
}
if (is_migration_disabled(next_task)) {
struct task_struct *push_task = NULL;
int cpu;
if (!pull || rq->push_busy)
return 0;
/*
* Invoking find_lowest_rq() on anything but an RT task doesn't
* make sense. Per the above priority check, curr has to
* be of higher priority than next_task, so no need to
* reschedule when bailing out.
*
* Note that the stoppers are masqueraded as SCHED_FIFO
* (cf. sched_set_stop_task()), so we can't rely on rt_task().
*/
if (rq->curr->sched_class != &rt_sched_class)
return 0;
cpu = find_lowest_rq(rq->curr);
if (cpu == -1 || cpu == rq->cpu)
return 0;
/*
* Given that we found a CPU with lower priority than @next_task,
* it should be running there. However, we cannot migrate
* @next_task to that CPU while its migration is disabled;
* instead, attempt to push the current running task on this
* CPU away.
*/
push_task = get_push_task(rq);
if (push_task) {
raw_spin_rq_unlock(rq);
stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
push_task, &rq->push_work);
raw_spin_rq_lock(rq);
}
return 0;
}
if (WARN_ON(next_task == rq->curr))
return 0;
/* We might release rq lock */
get_task_struct(next_task);
/* find_lock_lowest_rq locks the rq if found */
lowest_rq = find_lock_lowest_rq(next_task, rq);
if (!lowest_rq) {
struct task_struct *task;
/*
* find_lock_lowest_rq releases rq->lock
* so it is possible that next_task has migrated.
*
* We need to make sure that the task is still on the same
* run-queue and is also still the next task eligible for
* pushing.
*/
task = pick_next_pushable_task(rq);
if (task == next_task) {
/*
* The task hasn't migrated, and is still the next
* eligible task, but we failed to find a run-queue
* to push it to. Do not retry in this case, since
* other CPUs will pull from us when ready.
*/
goto out;
}
if (!task)
/* No more tasks, just exit */
goto out;
/*
* Something has shifted, try again.
*/
put_task_struct(next_task);
next_task = task;
goto retry;
}
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, lowest_rq->cpu);
activate_task(lowest_rq, next_task, 0);
resched_curr(lowest_rq);
ret = 1;
double_unlock_balance(rq, lowest_rq);
out:
put_task_struct(next_task);
return ret;
}
static void push_rt_tasks(struct rq *rq)
{
/* push_rt_task will return true if it moved an RT */
while (push_rt_task(rq, false))
;
}
#ifdef HAVE_RT_PUSH_IPI
/*
* When a high priority task schedules out from a CPU and a lower priority
* task is scheduled in, a check is made to see if there's any RT tasks
* on other CPUs that are waiting to run because a higher priority RT task
* is currently running on its CPU. In this case, the CPU with multiple RT
* tasks queued on it (overloaded) needs to be notified that a CPU has opened
* up that may be able to run one of its non-running queued RT tasks.
*
* All CPUs with overloaded RT tasks need to be notified as there is currently
* no way to know which of these CPUs have the highest priority task waiting
* to run. Instead of trying to take a spinlock on each of these CPUs,
* which has been shown to cause large latency when done on machines with
* many CPUs, an IPI is sent to the CPUs to have them push off the
* overloaded RT tasks waiting to run.
*
* Just sending an IPI to each of the CPUs is also an issue, as on
* machines with a large CPU count this can cause an IPI storm on a CPU,
* especially if it's the only CPU with multiple RT tasks queued, and a
* large number of CPUs are scheduling a lower priority task at the same
* time.
*
* Each root domain has its own irq work function that can iterate over
* all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
* tasks must be checked whenever one or many CPUs are lowering their
* priority, there's a single irq work iterator that will try to push
* off the RT tasks that are waiting to run.
*
* When a CPU schedules a lower priority task, it will kick off the
* irq work iterator that will jump to each CPU with overloaded RT tasks.
* As it only takes the first CPU that schedules a lower priority task
* to start the process, the rto_start variable is incremented and if
* the atomic result is one, then that CPU will try to take the rto_lock.
* This prevents high contention on the lock as the process handles all
* CPUs scheduling lower priority tasks.
*
* All CPUs that are scheduling a lower priority task will increment the
* rto_loop_next variable. This will make sure that the irq work iterator
* checks all RT overloaded CPUs whenever a CPU schedules a new lower
* priority task, even if the iterator is in the middle of a scan. Incrementing
* the rto_loop_next will cause the iterator to perform another scan.
*
*/
static int rto_next_cpu(struct root_domain *rd)
{
int next;
int cpu;
/*
* When starting the IPI RT pushing, the rto_cpu is set to -1,
* rto_next_cpu() will simply return the first CPU found in
* the rto_mask.
*
* If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
* will return the next CPU found in the rto_mask.
*
* If there are no more CPUs left in the rto_mask, then a check is made
* against rto_loop and rto_loop_next. rto_loop is only updated with
* the rto_lock held, but any CPU may increment the rto_loop_next
* without any locking.
*/
for (;;) {
/* When rto_cpu is -1 this acts like cpumask_first() */
cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
rd->rto_cpu = cpu;
if (cpu < nr_cpu_ids)
return cpu;
rd->rto_cpu = -1;
/*
* ACQUIRE ensures we see the @rto_mask changes
* made prior to the @next value observed.
*
* Matches WMB in rt_set_overload().
*/
next = atomic_read_acquire(&rd->rto_loop_next);
if (rd->rto_loop == next)
break;
rd->rto_loop = next;
}
return -1;
}
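/*
 * Loop example (editor's sketch): if CPU 3 lowers its priority while
 * the iterator has already passed it in the rto_mask, CPU 3's
 * increment of rto_loop_next makes the rd->rto_loop == next test fail,
 * so the iterator wraps around for another full pass instead of
 * terminating with -1.
 */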
static inline bool rto_start_trylock(atomic_t *v)
{
return !atomic_cmpxchg_acquire(v, 0, 1);
}
static inline void rto_start_unlock(atomic_t *v)
{
atomic_set_release(v, 0);
}
static void tell_cpu_to_push(struct rq *rq)
{
int cpu = -1;
/* Keep the loop going if the IPI is currently active */
atomic_inc(&rq->rd->rto_loop_next);
/* Only one CPU can initiate a loop at a time */
if (!rto_start_trylock(&rq->rd->rto_loop_start))
return;
raw_spin_lock(&rq->rd->rto_lock);
/*
* The rto_cpu is updated under the lock, if it has a valid CPU
* then the IPI is still running and will continue due to the
* update to loop_next, and nothing needs to be done here.
* Otherwise it is finishing up and an IPI needs to be sent.
*/
if (rq->rd->rto_cpu < 0)
cpu = rto_next_cpu(rq->rd);
raw_spin_unlock(&rq->rd->rto_lock);
rto_start_unlock(&rq->rd->rto_loop_start);
if (cpu >= 0) {
/* Make sure the rd does not get freed while pushing */
sched_get_rd(rq->rd);
irq_work_queue_on(&rq->rd->rto_push_work, cpu);
}
}
/* Called from hardirq context */
void rto_push_irq_work_func(struct irq_work *work)
{
struct root_domain *rd =
container_of(work, struct root_domain, rto_push_work);
struct rq *rq;
int cpu;
rq = this_rq();
/*
* We do not need to grab the lock to check for has_pushable_tasks.
* When it gets updated, a check is made if a push is possible.
*/
if (has_pushable_tasks(rq)) {
raw_spin_rq_lock(rq);
while (push_rt_task(rq, true))
;
raw_spin_rq_unlock(rq);
}
raw_spin_lock(&rd->rto_lock);
/* Pass the IPI to the next rt overloaded queue */
cpu = rto_next_cpu(rd);
raw_spin_unlock(&rd->rto_lock);
if (cpu < 0) {
sched_put_rd(rd);
return;
}
/* Try the next RT overloaded CPU */
irq_work_queue_on(&rd->rto_push_work, cpu);
}
#endif /* HAVE_RT_PUSH_IPI */
static void pull_rt_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
bool resched = false;
struct task_struct *p, *push_task;
struct rq *src_rq;
int rt_overload_count = rt_overloaded(this_rq);
if (likely(!rt_overload_count))
return;
/*
* Match the barrier from rt_set_overload(); this guarantees that if we
* see overloaded we must also see the rto_mask bit.
*/
smp_rmb();
/* If we are the only overloaded CPU do nothing */
if (rt_overload_count == 1 &&
cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
return;
#ifdef HAVE_RT_PUSH_IPI
if (sched_feat(RT_PUSH_IPI)) {
tell_cpu_to_push(this_rq);
return;
}
#endif
for_each_cpu(cpu, this_rq->rd->rto_mask) {
if (this_cpu == cpu)
continue;
src_rq = cpu_rq(cpu);
/*
* Don't bother taking the src_rq->lock if the next highest
* task is known to be lower-priority than our current task.
* This may look racy, but if this value is about to go
* logically higher, the src_rq will push this task away.
* And if it's going logically lower, we do not care.
*/
if (src_rq->rt.highest_prio.next >=
this_rq->rt.highest_prio.curr)
continue;
/*
* We can potentially drop this_rq's lock in
* double_lock_balance, and another CPU could
* alter this_rq
*/
push_task = NULL;
double_lock_balance(this_rq, src_rq);
/*
* We can only pull a task that is pushable
* on its rq, and no others.
*/
p = pick_highest_pushable_task(src_rq, this_cpu);
/*
* Do we have an RT task that preempts
* the to-be-scheduled task?
*/
if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
WARN_ON(p == src_rq->curr);
WARN_ON(!task_on_rq_queued(p));
/*
* There's a chance that p is higher in priority
* than what's currently running on its CPU.
* This is just that p is waking up and hasn't
* had a chance to schedule. We only pull
* p if it is lower in priority than the
* current task on the run queue
*/
if (p->prio < src_rq->curr->prio)
goto skip;
if (is_migration_disabled(p)) {
push_task = get_push_task(src_rq);
} else {
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
resched = true;
}
/*
* We continue with the search, just in
* case there's an even higher prio task
* in another runqueue. (low likelihood
* but possible)
*/
}
skip:
double_unlock_balance(this_rq, src_rq);
if (push_task) {
raw_spin_rq_unlock(this_rq);
stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
push_task, &src_rq->push_work);
raw_spin_rq_lock(this_rq);
}
}
if (resched)
resched_curr(this_rq);
}
/*
* If we are not running and we are not going to reschedule soon, we should
* try to push tasks away now
*/
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
bool need_to_push = !task_on_cpu(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
p->nr_cpus_allowed > 1 &&
(dl_task(rq->curr) || rt_task(rq->curr)) &&
(rq->curr->nr_cpus_allowed < 2 ||
rq->curr->prio <= p->prio);
if (need_to_push)
push_rt_tasks(rq);
}
/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_set_overload(rq);
__enable_runtime(rq);
cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}
/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_clear_overload(rq);
__disable_runtime(rq);
cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}
/*
* When switching from the rt queue, we bring ourselves to a position
* that we might want to pull RT tasks from other runqueues.
*/
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
/*
* If there are other RT tasks then we will reschedule
* and the scheduling of the other RT tasks will handle
* the balancing. But if we are the last RT task
* we may need to handle the pulling of RT tasks
* now.
*/
if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
return;
rt_queue_pull_task(rq);
}
void __init init_sched_rt_class(void)
{
unsigned int i;
for_each_possible_cpu(i) {
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
GFP_KERNEL, cpu_to_node(i));
}
}
#endif /* CONFIG_SMP */
/*
* When switching a task to RT, we may overload the runqueue
* with RT tasks. In this case we try to push them off to
* other runqueues.
*/
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
/*
* If we are running, update the avg_rt tracking, as the running time
* will from now on be accounted into the latter.
*/
if (task_current(rq, p)) {
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
return;
}
/*
* If we are not running we may need to preempt the current
* running task. If that current running task is also an RT task
* then see if we can move to another run queue.
*/
if (task_on_rq_queued(p)) {
#ifdef CONFIG_SMP
if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
rt_queue_push_tasks(rq);
#endif /* CONFIG_SMP */
if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
resched_curr(rq);
}
}
/*
* Priority of the task has changed. This may cause
* us to initiate a push or pull.
*/
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
if (!task_on_rq_queued(p))
return;
if (task_current(rq, p)) {
#ifdef CONFIG_SMP
/*
* If our priority decreases while running, we
* may need to pull tasks to this runqueue.
*/
if (oldprio < p->prio)
rt_queue_pull_task(rq);
/*
* If there's a higher priority task waiting to run
* then reschedule.
*/
if (p->prio > rq->rt.highest_prio.curr)
resched_curr(rq);
#else
/* For UP simply resched on drop of prio */
if (oldprio < p->prio)
resched_curr(rq);
#endif /* CONFIG_SMP */
} else {
/*
* This task is not running, but if its priority is
* higher than that of the current running task,
* then reschedule.
*/
if (p->prio < rq->curr->prio)
resched_curr(rq);
}
}
#ifdef CONFIG_POSIX_TIMERS
static void watchdog(struct rq *rq, struct task_struct *p)
{
unsigned long soft, hard;
/* max may change after cur was read, this will be fixed next tick */
soft = task_rlimit(p, RLIMIT_RTTIME);
hard = task_rlimit_max(p, RLIMIT_RTTIME);
if (soft != RLIM_INFINITY) {
unsigned long next;
if (p->rt.watchdog_stamp != jiffies) {
p->rt.timeout++;
p->rt.watchdog_stamp = jiffies;
}
next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
if (p->rt.timeout > next) {
posix_cputimers_rt_watchdog(&p->posix_cputimers,
p->se.sum_exec_runtime);
}
}
}
#else
static inline void watchdog(struct rq *rq, struct task_struct *p) { }
#endif
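/*
 * Watchdog arithmetic example (editor's sketch): with an RLIMIT_RTTIME
 * soft limit of 950000us and HZ = 1000, next = DIV_ROUND_UP(950000,
 * 1000000/1000) = 950 ticks; once p->rt.timeout exceeds that, the
 * posix-cputimers RT watchdog is armed against p->se.sum_exec_runtime.
 */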
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
watchdog(rq, p);
/*
* RR tasks need a special form of timeslice management.
* FIFO tasks have no timeslices.
*/
if (p->policy != SCHED_RR)
return;
if (--p->rt.time_slice)
return;
p->rt.time_slice = sched_rr_timeslice;
/*
* Requeue to the end of queue if we (and all of our ancestors) are not
* the only element on the queue
*/
for_each_sched_rt_entity(rt_se) {
if (rt_se->run_list.prev != rt_se->run_list.next) {
requeue_task_rt(rq, p, 0);
resched_curr(rq);
return;
}
}
}
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
/*
* Time slice is 0 for SCHED_FIFO tasks
*/
if (task->policy == SCHED_RR)
return sched_rr_timeslice;
else
return 0;
}
#ifdef CONFIG_SCHED_CORE
static int task_is_throttled_rt(struct task_struct *p, int cpu)
{
struct rt_rq *rt_rq;
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq = task_group(p)->rt_rq[cpu];
#else
rt_rq = &cpu_rq(cpu)->rt;
#endif
return rt_rq_throttled(rt_rq);
}
#endif
DEFINE_SCHED_CLASS(rt) = {
.enqueue_task = enqueue_task_rt,
.dequeue_task = dequeue_task_rt,
.yield_task = yield_task_rt,
.check_preempt_curr = check_preempt_curr_rt,
.pick_next_task = pick_next_task_rt,
.put_prev_task = put_prev_task_rt,
.set_next_task = set_next_task_rt,
#ifdef CONFIG_SMP
.balance = balance_rt,
.pick_task = pick_task_rt,
.select_task_rq = select_task_rq_rt,
.set_cpus_allowed = set_cpus_allowed_common,
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
.task_woken = task_woken_rt,
.switched_from = switched_from_rt,
.find_lock_rq = find_lock_lowest_rq,
#endif
.task_tick = task_tick_rt,
.get_rr_interval = get_rr_interval_rt,
.prio_changed = prio_changed_rt,
.switched_to = switched_to_rt,
.update_curr = update_curr_rt,
#ifdef CONFIG_SCHED_CORE
.task_is_throttled = task_is_throttled_rt,
#endif
#ifdef CONFIG_UCLAMP_TASK
.uclamp_enabled = 1,
#endif
};
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Ensure that the real time constraints are schedulable.
*/
static DEFINE_MUTEX(rt_constraints_mutex);
static inline int tg_has_rt_tasks(struct task_group *tg)
{
struct task_struct *task;
struct css_task_iter it;
int ret = 0;
/*
* Autogroups do not have RT tasks; see autogroup_create().
*/
if (task_group_is_autogroup(tg))
return 0;
css_task_iter_start(&tg->css, 0, &it);
while (!ret && (task = css_task_iter_next(&it)))
ret |= rt_task(task);
css_task_iter_end(&it);
return ret;
}
struct rt_schedulable_data {
struct task_group *tg;
u64 rt_period;
u64 rt_runtime;
};
static int tg_rt_schedulable(struct task_group *tg, void *data)
{
struct rt_schedulable_data *d = data;
struct task_group *child;
unsigned long total, sum = 0;
u64 period, runtime;
period = ktime_to_ns(tg->rt_bandwidth.rt_period);
runtime = tg->rt_bandwidth.rt_runtime;
if (tg == d->tg) {
period = d->rt_period;
runtime = d->rt_runtime;
}
/*
* Cannot have more runtime than the period.
*/
if (runtime > period && runtime != RUNTIME_INF)
return -EINVAL;
/*
* Ensure we don't starve existing RT tasks if runtime turns zero.
*/
if (rt_bandwidth_enabled() && !runtime &&
tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
return -EBUSY;
total = to_ratio(period, runtime);
/*
* Nobody can have more than the global setting allows.
*/
if (total > to_ratio(global_rt_period(), global_rt_runtime()))
return -EINVAL;
/*
* The sum of our children's runtime should not exceed our own.
*/
list_for_each_entry_rcu(child, &tg->children, siblings) {
period = ktime_to_ns(child->rt_bandwidth.rt_period);
runtime = child->rt_bandwidth.rt_runtime;
if (child == d->tg) {
period = d->rt_period;
runtime = d->rt_runtime;
}
sum += to_ratio(period, runtime);
}
if (sum > total)
return -EINVAL;
return 0;
}
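/*
 * Worked example (editor's sketch): a group with rt_period = 1s and
 * rt_runtime = 200ms contributes to_ratio(1s, 200ms), i.e. 20% of a
 * CPU. Two children configured at 150ms/1s each sum to 30%, which
 * exceeds the parent's 20% and makes this function return -EINVAL.
 */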
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
int ret;
struct rt_schedulable_data data = {
.tg = tg,
.rt_period = period,
.rt_runtime = runtime,
};
rcu_read_lock();
ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
rcu_read_unlock();
return ret;
}
static int tg_set_rt_bandwidth(struct task_group *tg,
u64 rt_period, u64 rt_runtime)
{
int i, err = 0;
/*
* Disallowing the root group RT runtime is BAD; it would disallow the
* kernel from creating (and/or operating) RT threads.
*/
if (tg == &root_task_group && rt_runtime == 0)
return -EINVAL;
/* No period doesn't make any sense. */
if (rt_period == 0)
return -EINVAL;
/*
* Bound the quota to defend against overflow during bandwidth shift.
*/
if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
return -EINVAL;
mutex_lock(&rt_constraints_mutex);
err = __rt_schedulable(tg, rt_period, rt_runtime);
if (err)
goto unlock;
raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
mutex_unlock(&rt_constraints_mutex);
return err;
}
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
u64 rt_runtime, rt_period;
rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
if (rt_runtime_us < 0)
rt_runtime = RUNTIME_INF;
else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
return -EINVAL;
return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}
long sched_group_rt_runtime(struct task_group *tg)
{
u64 rt_runtime_us;
if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
return -1;
rt_runtime_us = tg->rt_bandwidth.rt_runtime;
do_div(rt_runtime_us, NSEC_PER_USEC);
return rt_runtime_us;
}
int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
{
u64 rt_runtime, rt_period;
if (rt_period_us > U64_MAX / NSEC_PER_USEC)
return -EINVAL;
rt_period = rt_period_us * NSEC_PER_USEC;
rt_runtime = tg->rt_bandwidth.rt_runtime;
return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}
long sched_group_rt_period(struct task_group *tg)
{
u64 rt_period_us;
rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
do_div(rt_period_us, NSEC_PER_USEC);
return rt_period_us;
}
#ifdef CONFIG_SYSCTL
static int sched_rt_global_constraints(void)
{
int ret = 0;
mutex_lock(&rt_constraints_mutex);
ret = __rt_schedulable(NULL, 0, 0);
mutex_unlock(&rt_constraints_mutex);
return ret;
}
#endif /* CONFIG_SYSCTL */
int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
/* Don't accept realtime tasks when there is no way for them to run */
if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
return 0;
return 1;
}
#else /* !CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SYSCTL
static int sched_rt_global_constraints(void)
{
unsigned long flags;
int i;
raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = global_rt_runtime();
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SYSCTL
static int sched_rt_global_validate(void)
{
if (sysctl_sched_rt_period <= 0)
return -EINVAL;
if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
((u64)sysctl_sched_rt_runtime *
NSEC_PER_USEC > max_rt_runtime)))
return -EINVAL;
return 0;
}
static void sched_rt_do_global(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
def_rt_bandwidth.rt_runtime = global_rt_runtime();
def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
}
static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int old_period, old_runtime;
static DEFINE_MUTEX(mutex);
int ret;
mutex_lock(&mutex);
old_period = sysctl_sched_rt_period;
old_runtime = sysctl_sched_rt_runtime;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
ret = sched_rt_global_validate();
if (ret)
goto undo;
ret = sched_dl_global_validate();
if (ret)
goto undo;
ret = sched_rt_global_constraints();
if (ret)
goto undo;
sched_rt_do_global();
sched_dl_do_global();
}
if (0) {
undo:
sysctl_sched_rt_period = old_period;
sysctl_sched_rt_runtime = old_runtime;
}
mutex_unlock(&mutex);
return ret;
}
static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int ret;
static DEFINE_MUTEX(mutex);
mutex_lock(&mutex);
ret = proc_dointvec(table, write, buffer, lenp, ppos);
/*
* Make sure that internally we keep jiffies.
* Also, writing zero resets the timeslice to default:
*/
if (!ret && write) {
sched_rr_timeslice =
sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
msecs_to_jiffies(sysctl_sched_rr_timeslice);
if (sysctl_sched_rr_timeslice <= 0)
sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
}
mutex_unlock(&mutex);
return ret;
}
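/*
 * Usage note (editor's sketch, assuming the usual sysctl wiring to
 * /proc/sys/kernel/sched_rr_timeslice_ms): writing e.g. 100 stores
 * msecs_to_jiffies(100) in sched_rr_timeslice, while writing a value
 * <= 0 restores the RR_TIMESLICE default and reports it back in
 * milliseconds.
 */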
#endif /* CONFIG_SYSCTL */
#ifdef CONFIG_SCHED_DEBUG
void print_rt_stats(struct seq_file *m, int cpu)
{
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
rcu_read_lock();
for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
print_rt_rq(m, cpu, rt_rq);
rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */
| linux-master | kernel/sched/rt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* kernel/sched/cpupri.c
*
* CPU priority management
*
* Copyright (C) 2007-2008 Novell
*
* Author: Gregory Haskins <[email protected]>
*
* This code tracks the priority of each CPU so that global migration
* decisions are easy to calculate. Each CPU can be in a state as follows:
*
* (INVALID), NORMAL, RT1, ... RT99, HIGHER
*
* going from the lowest priority to the highest. CPUs in the INVALID state
* are not eligible for routing. The system maintains this state with
* a 2 dimensional bitmap (the first for priority class, the second for CPUs
* in that class). Therefore a typical application without affinity
* restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
* searches). For tasks with affinity restrictions, the algorithm has a
* worst case complexity of O(min(101, nr_domcpus)), though the scenario that
* yields the worst case search is fairly contrived.
*/
/*
* p->rt_priority p->prio newpri cpupri
*
* -1 -1 (CPUPRI_INVALID)
*
* 99 0 (CPUPRI_NORMAL)
*
* 1 98 98 1
* ...
* 49 50 50 49
* 50 49 49 50
* ...
* 99 0 0 99
*
* 100 100 (CPUPRI_HIGHER)
*/
static int convert_prio(int prio)
{
int cpupri;
switch (prio) {
case CPUPRI_INVALID:
cpupri = CPUPRI_INVALID; /* -1 */
break;
case 0 ... 98:
cpupri = MAX_RT_PRIO-1 - prio; /* 1 ... 99 */
break;
case MAX_RT_PRIO-1:
cpupri = CPUPRI_NORMAL; /* 0 */
break;
case MAX_RT_PRIO:
cpupri = CPUPRI_HIGHER; /* 100 */
break;
}
return cpupri;
}
static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
struct cpumask *lowest_mask, int idx)
{
struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
int skip = 0;
if (!atomic_read(&(vec)->count))
skip = 1;
/*
* When looking at the vector, we need to read the counter,
* do a memory barrier, then read the mask.
*
* Note: This is still all racy, but we can deal with it.
* Ideally, we only want to look at masks that are set.
*
* If a mask is not set, then the only thing wrong is that we
* did a little more work than necessary.
*
* If we read a zero count but the mask is set, because of the
* memory barriers, that can only happen when the highest prio
* task for a run queue has left the run queue, in which case,
* it will be followed by a pull. If the task we are processing
* fails to find a proper place to go, that pull request will
* pull this task if the run queue is running at a lower
* priority.
*/
smp_rmb();
/* Need to do the rmb for every iteration */
if (skip)
return 0;
if (cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids)
return 0;
if (lowest_mask) {
cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);
/*
* We have to ensure that we have at least one bit
* still set in the array, since the map could have
* been concurrently emptied between the first and
* second reads of vec->mask. If we hit this
* condition, simply act as though we never hit this
* priority level and continue on.
*/
if (cpumask_empty(lowest_mask))
return 0;
}
return 1;
}
int cpupri_find(struct cpupri *cp, struct task_struct *p,
struct cpumask *lowest_mask)
{
return cpupri_find_fitness(cp, p, lowest_mask, NULL);
}
/**
* cpupri_find_fitness - find the best (lowest-pri) CPU in the system
* @cp: The cpupri context
* @p: The task
* @lowest_mask: A mask to fill in with selected CPUs (or NULL)
* @fitness_fn: A pointer to a function to do custom checks whether the CPU
* fits a specific criteria so that we only return those CPUs.
*
* Note: This function returns the recommended CPUs as calculated during the
* current invocation. By the time the call returns, the CPUs may have in
* fact changed priorities any number of times. While not ideal, it is not
* an issue of correctness since the normal rebalancer logic will correct
* any discrepancies created by racing against the uncertainty of the current
* priority configuration.
*
* Return: (int)bool - CPUs were found
*/
int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
struct cpumask *lowest_mask,
bool (*fitness_fn)(struct task_struct *p, int cpu))
{
int task_pri = convert_prio(p->prio);
int idx, cpu;
WARN_ON_ONCE(task_pri >= CPUPRI_NR_PRIORITIES);
for (idx = 0; idx < task_pri; idx++) {
if (!__cpupri_find(cp, p, lowest_mask, idx))
continue;
if (!lowest_mask || !fitness_fn)
return 1;
/* Ensure the capacity of the CPUs fit the task */
for_each_cpu(cpu, lowest_mask) {
if (!fitness_fn(p, cpu))
cpumask_clear_cpu(cpu, lowest_mask);
}
/*
* If no CPU at the current priority can fit the task
* continue looking
*/
if (cpumask_empty(lowest_mask))
continue;
return 1;
}
/*
* If we failed to find a fitting lowest_mask, kick off a new search
* but without taking into account any fitness criteria this time.
*
* This rule favours honouring priority over fitting the task in the
* correct CPU (Capacity Awareness being the only user now).
* The idea is that if a higher priority task can run, then it should
* run even if it ends up on an unfitting CPU.
*
* The cost of this trade-off is not entirely clear and will probably
* be good for some workloads and bad for others.
*
* The main idea here is that if some CPUs were over-committed, we try
* to spread which is what the scheduler traditionally did. Sys admins
* must do proper RT planning to avoid overloading the system if they
* really care.
*/
if (fitness_fn)
return cpupri_find(cp, p, lowest_mask);
return 0;
}
/**
* cpupri_set - update the CPU priority setting
* @cp: The cpupri context
* @cpu: The target CPU
* @newpri: The priority (INVALID,NORMAL,RT1-RT99,HIGHER) to assign to this CPU
*
* Note: Assumes cpu_rq(cpu)->lock is locked
*
* Returns: (void)
*/
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
int *currpri = &cp->cpu_to_pri[cpu];
int oldpri = *currpri;
int do_mb = 0;
newpri = convert_prio(newpri);
BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);
if (newpri == oldpri)
return;
/*
* If the CPU was currently mapped to a different value, we
* need to map it to the new value then remove the old value.
* Note, we must add the new value first, otherwise we risk the
* cpu being missed by the priority loop in cpupri_find.
*/
if (likely(newpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
cpumask_set_cpu(cpu, vec->mask);
/*
* When adding a new vector, we update the mask first,
* do a write memory barrier, and then update the count, to
* make sure the vector is visible when count is set.
*/
smp_mb__before_atomic();
atomic_inc(&(vec)->count);
do_mb = 1;
}
if (likely(oldpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
/*
* Because the order of modification of the vec->count
* is important, we must make sure that the update
* of the new prio is seen before we decrement the
* old prio. This makes sure that the loop sees
* one or the other when we raise the priority of
* the run queue. We don't care about when we lower the
* priority, as that will trigger an rt pull anyway.
*
* We only need to do a memory barrier if we updated
* the new priority vec.
*/
if (do_mb)
smp_mb__after_atomic();
/*
* When removing from the vector, we decrement the counter first,
* do a memory barrier, and then clear the mask.
*/
atomic_dec(&(vec)->count);
smp_mb__after_atomic();
cpumask_clear_cpu(cpu, vec->mask);
}
*currpri = newpri;
}
/**
* cpupri_init - initialize the cpupri structure
* @cp: The cpupri context
*
* Return: -ENOMEM on memory allocation failure.
*/
int cpupri_init(struct cpupri *cp)
{
int i;
for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
struct cpupri_vec *vec = &cp->pri_to_cpu[i];
atomic_set(&vec->count, 0);
if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
goto cleanup;
}
cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
if (!cp->cpu_to_pri)
goto cleanup;
for_each_possible_cpu(i)
cp->cpu_to_pri[i] = CPUPRI_INVALID;
return 0;
cleanup:
for (i--; i >= 0; i--)
free_cpumask_var(cp->pri_to_cpu[i].mask);
return -ENOMEM;
}
/**
* cpupri_cleanup - clean up the cpupri structure
* @cp: The cpupri context
*/
void cpupri_cleanup(struct cpupri *cp)
{
int i;
kfree(cp->cpu_to_pri);
for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
free_cpumask_var(cp->pri_to_cpu[i].mask);
}
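/*
 * Illustrative sketch (not built): the typical cpupri lifecycle as driven
 * by the RT scheduler's root domain. Locking and task setup are elided,
 * and cpupri_example() is a hypothetical helper, not kernel code.
 */
#if 0
static void cpupri_example(struct cpupri *cp, struct task_struct *p)
{
struct cpumask lowest_mask;
if (cpupri_init(cp)) /* allocates one vector per priority */
return;
cpupri_set(cp, 0, 50); /* CPU 0 now runs at RT prio 50 */
/* Fill lowest_mask with CPUs running below p's priority, if any. */
if (cpupri_find(cp, p, &lowest_mask))
pr_info("lowest-pri CPUs: %*pbl\n", cpumask_pr_args(&lowest_mask));
cpupri_cleanup(cp);
}
#endif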
| linux-master | kernel/sched/cpupri.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Housekeeping management. Manage the targets for routine code that can run on
* any CPU: unbound workqueues, timers, kthreads and any offloadable work.
*
* Copyright (C) 2017 Red Hat, Inc., Frederic Weisbecker
* Copyright (C) 2017-2018 SUSE, Frederic Weisbecker
*
*/
enum hk_flags {
HK_FLAG_TIMER = BIT(HK_TYPE_TIMER),
HK_FLAG_RCU = BIT(HK_TYPE_RCU),
HK_FLAG_MISC = BIT(HK_TYPE_MISC),
HK_FLAG_SCHED = BIT(HK_TYPE_SCHED),
HK_FLAG_TICK = BIT(HK_TYPE_TICK),
HK_FLAG_DOMAIN = BIT(HK_TYPE_DOMAIN),
HK_FLAG_WQ = BIT(HK_TYPE_WQ),
HK_FLAG_MANAGED_IRQ = BIT(HK_TYPE_MANAGED_IRQ),
HK_FLAG_KTHREAD = BIT(HK_TYPE_KTHREAD),
};
DEFINE_STATIC_KEY_FALSE(housekeeping_overridden);
EXPORT_SYMBOL_GPL(housekeeping_overridden);
struct housekeeping {
cpumask_var_t cpumasks[HK_TYPE_MAX];
unsigned long flags;
};
static struct housekeeping housekeeping;
bool housekeeping_enabled(enum hk_type type)
{
return !!(housekeeping.flags & BIT(type));
}
EXPORT_SYMBOL_GPL(housekeeping_enabled);
int housekeeping_any_cpu(enum hk_type type)
{
int cpu;
if (static_branch_unlikely(&housekeeping_overridden)) {
if (housekeeping.flags & BIT(type)) {
cpu = sched_numa_find_closest(housekeeping.cpumasks[type], smp_processor_id());
if (cpu < nr_cpu_ids)
return cpu;
return cpumask_any_and(housekeeping.cpumasks[type], cpu_online_mask);
}
}
return smp_processor_id();
}
EXPORT_SYMBOL_GPL(housekeeping_any_cpu);
const struct cpumask *housekeeping_cpumask(enum hk_type type)
{
if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping.flags & BIT(type))
return housekeeping.cpumasks[type];
return cpu_possible_mask;
}
EXPORT_SYMBOL_GPL(housekeeping_cpumask);
void housekeeping_affine(struct task_struct *t, enum hk_type type)
{
if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping.flags & BIT(type))
set_cpus_allowed_ptr(t, housekeeping.cpumasks[type]);
}
EXPORT_SYMBOL_GPL(housekeeping_affine);
bool housekeeping_test_cpu(int cpu, enum hk_type type)
{
if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping.flags & BIT(type))
return cpumask_test_cpu(cpu, housekeeping.cpumasks[type]);
return true;
}
EXPORT_SYMBOL_GPL(housekeeping_test_cpu);
void __init housekeeping_init(void)
{
enum hk_type type;
if (!housekeeping.flags)
return;
static_branch_enable(&housekeeping_overridden);
if (housekeeping.flags & HK_FLAG_TICK)
sched_tick_offload_init();
for_each_set_bit(type, &housekeeping.flags, HK_TYPE_MAX) {
/* We need at least one CPU to handle housekeeping work */
WARN_ON_ONCE(cpumask_empty(housekeeping.cpumasks[type]));
}
}
static void __init housekeeping_setup_type(enum hk_type type,
cpumask_var_t housekeeping_staging)
{
alloc_bootmem_cpumask_var(&housekeeping.cpumasks[type]);
cpumask_copy(housekeeping.cpumasks[type],
housekeeping_staging);
}
static int __init housekeeping_setup(char *str, unsigned long flags)
{
cpumask_var_t non_housekeeping_mask, housekeeping_staging;
int err = 0;
if ((flags & HK_FLAG_TICK) && !(housekeeping.flags & HK_FLAG_TICK)) {
if (!IS_ENABLED(CONFIG_NO_HZ_FULL)) {
pr_warn("Housekeeping: nohz unsupported."
" Build with CONFIG_NO_HZ_FULL\n");
return 0;
}
}
alloc_bootmem_cpumask_var(&non_housekeeping_mask);
if (cpulist_parse(str, non_housekeeping_mask) < 0) {
pr_warn("Housekeeping: nohz_full= or isolcpus= incorrect CPU range\n");
goto free_non_housekeeping_mask;
}
alloc_bootmem_cpumask_var(&housekeeping_staging);
cpumask_andnot(housekeeping_staging,
cpu_possible_mask, non_housekeeping_mask);
if (!cpumask_intersects(cpu_present_mask, housekeeping_staging)) {
__cpumask_set_cpu(smp_processor_id(), housekeeping_staging);
__cpumask_clear_cpu(smp_processor_id(), non_housekeeping_mask);
if (!housekeeping.flags) {
pr_warn("Housekeeping: must include one present CPU, "
"using boot CPU:%d\n", smp_processor_id());
}
}
if (!housekeeping.flags) {
/* First setup call ("nohz_full=" or "isolcpus=") */
enum hk_type type;
for_each_set_bit(type, &flags, HK_TYPE_MAX)
housekeeping_setup_type(type, housekeeping_staging);
} else {
/* Second setup call ("nohz_full=" after "isolcpus=" or the reverse) */
enum hk_type type;
unsigned long iter_flags = flags & housekeeping.flags;
for_each_set_bit(type, &iter_flags, HK_TYPE_MAX) {
if (!cpumask_equal(housekeeping_staging,
housekeeping.cpumasks[type])) {
pr_warn("Housekeeping: nohz_full= must match isolcpus=\n");
goto free_housekeeping_staging;
}
}
iter_flags = flags & ~housekeeping.flags;
for_each_set_bit(type, &iter_flags, HK_TYPE_MAX)
housekeeping_setup_type(type, housekeeping_staging);
}
if ((flags & HK_FLAG_TICK) && !(housekeeping.flags & HK_FLAG_TICK))
tick_nohz_full_setup(non_housekeeping_mask);
housekeeping.flags |= flags;
err = 1;
free_housekeeping_staging:
free_bootmem_cpumask_var(housekeeping_staging);
free_non_housekeeping_mask:
free_bootmem_cpumask_var(non_housekeeping_mask);
return err;
}
static int __init housekeeping_nohz_full_setup(char *str)
{
unsigned long flags;
flags = HK_FLAG_TICK | HK_FLAG_WQ | HK_FLAG_TIMER | HK_FLAG_RCU |
HK_FLAG_MISC | HK_FLAG_KTHREAD;
return housekeeping_setup(str, flags);
}
__setup("nohz_full=", housekeeping_nohz_full_setup);
static int __init housekeeping_isolcpus_setup(char *str)
{
unsigned long flags = 0;
bool illegal = false;
char *par;
int len;
while (isalpha(*str)) {
if (!strncmp(str, "nohz,", 5)) {
str += 5;
flags |= HK_FLAG_TICK;
continue;
}
if (!strncmp(str, "domain,", 7)) {
str += 7;
flags |= HK_FLAG_DOMAIN;
continue;
}
if (!strncmp(str, "managed_irq,", 12)) {
str += 12;
flags |= HK_FLAG_MANAGED_IRQ;
continue;
}
/*
* Skip an unknown sub-parameter and validate that it does not
* contain an invalid character.
*/
for (par = str, len = 0; *str && *str != ','; str++, len++) {
if (!isalpha(*str) && *str != '_')
illegal = true;
}
if (illegal) {
pr_warn("isolcpus: Invalid flag %.*s\n", len, par);
return 0;
}
pr_info("isolcpus: Skipped unknown flag %.*s\n", len, par);
str++;
}
/* Default behaviour for isolcpus without flags */
if (!flags)
flags |= HK_FLAG_DOMAIN;
return housekeeping_setup(str, flags);
}
__setup("isolcpus=", housekeeping_isolcpus_setup);
| linux-master | kernel/sched/isolation.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CPU accounting code for task groups.
*
* Based on the work by Paul Menage ([email protected]) and Balbir Singh
* ([email protected]).
*/
/* Time spent by the tasks of the CPU accounting group executing in ... */
enum cpuacct_stat_index {
CPUACCT_STAT_USER, /* ... user mode */
CPUACCT_STAT_SYSTEM, /* ... kernel mode */
CPUACCT_STAT_NSTATS,
};
static const char * const cpuacct_stat_desc[] = {
[CPUACCT_STAT_USER] = "user",
[CPUACCT_STAT_SYSTEM] = "system",
};
/* track CPU usage of a group of tasks and its child groups */
struct cpuacct {
struct cgroup_subsys_state css;
/* cpuusage holds pointer to a u64-type object on every CPU */
u64 __percpu *cpuusage;
struct kernel_cpustat __percpu *cpustat;
};
static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct cpuacct, css) : NULL;
}
/* Return CPU accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
return css_ca(task_css(tsk, cpuacct_cgrp_id));
}
static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
return css_ca(ca->css.parent);
}
static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
static struct cpuacct root_cpuacct = {
.cpustat = &kernel_cpustat,
.cpuusage = &root_cpuacct_cpuusage,
};
/* Create a new CPU accounting group */
static struct cgroup_subsys_state *
cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cpuacct *ca;
if (!parent_css)
return &root_cpuacct.css;
ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
goto out;
ca->cpuusage = alloc_percpu(u64);
if (!ca->cpuusage)
goto out_free_ca;
ca->cpustat = alloc_percpu(struct kernel_cpustat);
if (!ca->cpustat)
goto out_free_cpuusage;
return &ca->css;
out_free_cpuusage:
free_percpu(ca->cpuusage);
out_free_ca:
kfree(ca);
out:
return ERR_PTR(-ENOMEM);
}
/* Destroy an existing CPU accounting group */
static void cpuacct_css_free(struct cgroup_subsys_state *css)
{
struct cpuacct *ca = css_ca(css);
free_percpu(ca->cpustat);
free_percpu(ca->cpuusage);
kfree(ca);
}
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
enum cpuacct_stat_index index)
{
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
u64 data;
/*
* We allow index == CPUACCT_STAT_NSTATS here to read
* the sum of usages.
*/
if (WARN_ON_ONCE(index > CPUACCT_STAT_NSTATS))
return 0;
#ifndef CONFIG_64BIT
/*
* Take rq->lock to make 64-bit read safe on 32-bit platforms.
*/
raw_spin_rq_lock_irq(cpu_rq(cpu));
#endif
switch (index) {
case CPUACCT_STAT_USER:
data = cpustat[CPUTIME_USER] + cpustat[CPUTIME_NICE];
break;
case CPUACCT_STAT_SYSTEM:
data = cpustat[CPUTIME_SYSTEM] + cpustat[CPUTIME_IRQ] +
cpustat[CPUTIME_SOFTIRQ];
break;
case CPUACCT_STAT_NSTATS:
data = *cpuusage;
break;
}
#ifndef CONFIG_64BIT
raw_spin_rq_unlock_irq(cpu_rq(cpu));
#endif
return data;
}
static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu)
{
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
/* Don't allow resetting the global kernel_cpustat */
if (ca == &root_cpuacct)
return;
#ifndef CONFIG_64BIT
/*
* Take rq->lock to make 64-bit write safe on 32-bit platforms.
*/
raw_spin_rq_lock_irq(cpu_rq(cpu));
#endif
*cpuusage = 0;
cpustat[CPUTIME_USER] = cpustat[CPUTIME_NICE] = 0;
cpustat[CPUTIME_SYSTEM] = cpustat[CPUTIME_IRQ] = 0;
cpustat[CPUTIME_SOFTIRQ] = 0;
#ifndef CONFIG_64BIT
raw_spin_rq_unlock_irq(cpu_rq(cpu));
#endif
}
/* Return total CPU usage (in nanoseconds) of a group */
static u64 __cpuusage_read(struct cgroup_subsys_state *css,
enum cpuacct_stat_index index)
{
struct cpuacct *ca = css_ca(css);
u64 totalcpuusage = 0;
int i;
for_each_possible_cpu(i)
totalcpuusage += cpuacct_cpuusage_read(ca, i, index);
return totalcpuusage;
}
static u64 cpuusage_user_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
return __cpuusage_read(css, CPUACCT_STAT_USER);
}
static u64 cpuusage_sys_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
return __cpuusage_read(css, CPUACCT_STAT_SYSTEM);
}
static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
return __cpuusage_read(css, CPUACCT_STAT_NSTATS);
}
static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
u64 val)
{
struct cpuacct *ca = css_ca(css);
int cpu;
/*
* Only allow '0' here to do a reset.
*/
if (val)
return -EINVAL;
for_each_possible_cpu(cpu)
cpuacct_cpuusage_write(ca, cpu);
return 0;
}
static int __cpuacct_percpu_seq_show(struct seq_file *m,
enum cpuacct_stat_index index)
{
struct cpuacct *ca = css_ca(seq_css(m));
u64 percpu;
int i;
for_each_possible_cpu(i) {
percpu = cpuacct_cpuusage_read(ca, i, index);
seq_printf(m, "%llu ", (unsigned long long) percpu);
}
seq_printf(m, "\n");
return 0;
}
static int cpuacct_percpu_user_seq_show(struct seq_file *m, void *V)
{
return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_USER);
}
static int cpuacct_percpu_sys_seq_show(struct seq_file *m, void *V)
{
return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_SYSTEM);
}
static int cpuacct_percpu_seq_show(struct seq_file *m, void *V)
{
return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_NSTATS);
}
static int cpuacct_all_seq_show(struct seq_file *m, void *V)
{
struct cpuacct *ca = css_ca(seq_css(m));
int index;
int cpu;
seq_puts(m, "cpu");
for (index = 0; index < CPUACCT_STAT_NSTATS; index++)
seq_printf(m, " %s", cpuacct_stat_desc[index]);
seq_puts(m, "\n");
for_each_possible_cpu(cpu) {
seq_printf(m, "%d", cpu);
for (index = 0; index < CPUACCT_STAT_NSTATS; index++)
seq_printf(m, " %llu",
cpuacct_cpuusage_read(ca, cpu, index));
seq_puts(m, "\n");
}
return 0;
}
static int cpuacct_stats_show(struct seq_file *sf, void *v)
{
struct cpuacct *ca = css_ca(seq_css(sf));
struct task_cputime cputime;
u64 val[CPUACCT_STAT_NSTATS];
int cpu;
int stat;
memset(&cputime, 0, sizeof(cputime));
for_each_possible_cpu(cpu) {
u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
cputime.utime += cpustat[CPUTIME_USER];
cputime.utime += cpustat[CPUTIME_NICE];
cputime.stime += cpustat[CPUTIME_SYSTEM];
cputime.stime += cpustat[CPUTIME_IRQ];
cputime.stime += cpustat[CPUTIME_SOFTIRQ];
cputime.sum_exec_runtime += *per_cpu_ptr(ca->cpuusage, cpu);
}
cputime_adjust(&cputime, &seq_css(sf)->cgroup->prev_cputime,
&val[CPUACCT_STAT_USER], &val[CPUACCT_STAT_SYSTEM]);
for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
seq_printf(sf, "%s %llu\n", cpuacct_stat_desc[stat],
nsec_to_clock_t(val[stat]));
}
return 0;
}
static struct cftype files[] = {
{
.name = "usage",
.read_u64 = cpuusage_read,
.write_u64 = cpuusage_write,
},
{
.name = "usage_user",
.read_u64 = cpuusage_user_read,
},
{
.name = "usage_sys",
.read_u64 = cpuusage_sys_read,
},
{
.name = "usage_percpu",
.seq_show = cpuacct_percpu_seq_show,
},
{
.name = "usage_percpu_user",
.seq_show = cpuacct_percpu_user_seq_show,
},
{
.name = "usage_percpu_sys",
.seq_show = cpuacct_percpu_sys_seq_show,
},
{
.name = "usage_all",
.seq_show = cpuacct_all_seq_show,
},
{
.name = "stat",
.seq_show = cpuacct_stats_show,
},
{ } /* terminate */
};
/*
* charge this task's execution time to its accounting group.
*
* called with rq->lock held.
*/
void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
unsigned int cpu = task_cpu(tsk);
struct cpuacct *ca;
lockdep_assert_rq_held(cpu_rq(cpu));
for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
*per_cpu_ptr(ca->cpuusage, cpu) += cputime;
}
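/*
 * Illustrative worked example (hypothetical hierarchy and numbers): for a
 * task in cgroup a/b, one cpuacct_charge() call walks b -> a -> root, so
 * an ancestor's usage always includes its descendants:
 *
 * cpuacct_charge(tsk, 1000000); // b, a and root each gain 1 ms
 * // in their counter for task_cpu(tsk)
 */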
/*
* Add user/system time to cpuacct.
*
* Note: it's the caller that updates the account of the root cgroup.
*/
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val)
{
struct cpuacct *ca;
for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca))
__this_cpu_add(ca->cpustat->cpustat[index], val);
}
struct cgroup_subsys cpuacct_cgrp_subsys = {
.css_alloc = cpuacct_css_alloc,
.css_free = cpuacct_css_free,
.legacy_cftypes = files,
.early_init = true,
};
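/*
 * Illustrative sketch (hypothetical paths): with the legacy hierarchy
 * mounted, the cftypes above appear as files such as
 *
 * /sys/fs/cgroup/cpuacct/<grp>/cpuacct.usage total ns across all CPUs
 * /sys/fs/cgroup/cpuacct/<grp>/cpuacct.usage_percpu one ns value per CPU
 * /sys/fs/cgroup/cpuacct/<grp>/cpuacct.stat user/system in USER_HZ ticks
 *
 * Writing "0" to cpuacct.usage resets the counters via cpuusage_write().
 */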
| linux-master | kernel/sched/cpuacct.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2010-2017 Mathieu Desnoyers <[email protected]>
*
* membarrier system call
*/
/*
* For documentation purposes, here are some membarrier ordering
* scenarios to keep in mind:
*
* A) Userspace thread execution after IPI vs membarrier's memory
* barrier before sending the IPI
*
* Userspace variables:
*
* int x = 0, y = 0;
*
* The memory barrier at the start of membarrier() on CPU0 is necessary in
* order to enforce the guarantee that any writes occurring on CPU0 before
* the membarrier() is executed will be visible to any code executing on
* CPU1 after the IPI-induced memory barrier:
*
* CPU0 CPU1
*
* x = 1
* membarrier():
* a: smp_mb()
* b: send IPI IPI-induced mb
* c: smp_mb()
* r2 = y
* y = 1
* barrier()
* r1 = x
*
* BUG_ON(r1 == 0 && r2 == 0)
*
* The write to y and load from x by CPU1 are unordered by the hardware,
* so it's possible to have "r1 = x" reordered before "y = 1" at any
* point after (b). If the memory barrier at (a) is omitted, then "x = 1"
* can be reordered after (a) (although not after (c)), so we get r1 == 0
* and r2 == 0. This violates the guarantee that membarrier() is
* supposed to provide.
*
* The timing of the memory barrier at (a) has to ensure that it executes
* before the IPI-induced memory barrier on CPU1.
*
* B) Userspace thread execution before IPI vs membarrier's memory
* barrier after completing the IPI
*
* Userspace variables:
*
* int x = 0, y = 0;
*
* The memory barrier at the end of membarrier() on CPU0 is necessary in
* order to enforce the guarantee that any writes occurring on CPU1 before
* the membarrier() is executed will be visible to any code executing on
* CPU0 after the membarrier():
*
* CPU0 CPU1
*
* x = 1
* barrier()
* y = 1
* r2 = y
* membarrier():
* a: smp_mb()
* b: send IPI IPI-induced mb
* c: smp_mb()
* r1 = x
* BUG_ON(r1 == 0 && r2 == 1)
*
* The writes to x and y are unordered by the hardware, so it's possible to
* have "r2 = 1" even though the write to x doesn't execute until (b). If
* the memory barrier at (c) is omitted then "r1 = x" can be reordered
* before (b) (although not before (a)), so we get "r1 = 0". This violates
* the guarantee that membarrier() is supposed to provide.
*
* The timing of the memory barrier at (c) has to ensure that it executes
* after the IPI-induced memory barrier on CPU1.
*
* C) Scheduling userspace thread -> kthread -> userspace thread vs membarrier
*
* CPU0 CPU1
*
* membarrier():
* a: smp_mb()
* d: switch to kthread (includes mb)
* b: read rq->curr->mm == NULL
* e: switch to user (includes mb)
* c: smp_mb()
*
* Using the scenario from (A), we can show that (a) needs to be paired
* with (e). Using the scenario from (B), we can show that (c) needs to
* be paired with (d).
*
* D) exit_mm vs membarrier
*
* Two thread groups are created, A and B. Thread group B is created by
* issuing clone from group A with flag CLONE_VM set, but not CLONE_THREAD.
* Let's assume we have a single thread within each thread group (Thread A
* and Thread B). Thread A runs on CPU0, Thread B runs on CPU1.
*
* CPU0 CPU1
*
* membarrier():
* a: smp_mb()
* exit_mm():
* d: smp_mb()
* e: current->mm = NULL
* b: read rq->curr->mm == NULL
* c: smp_mb()
*
* Using scenario (B), we can show that (c) needs to be paired with (d).
*
* E) kthread_{use,unuse}_mm vs membarrier
*
* CPU0 CPU1
*
* membarrier():
* a: smp_mb()
* kthread_unuse_mm()
* d: smp_mb()
* e: current->mm = NULL
* b: read rq->curr->mm == NULL
* kthread_use_mm()
* f: current->mm = mm
* g: smp_mb()
* c: smp_mb()
*
* Using the scenario from (A), we can show that (a) needs to be paired
* with (g). Using the scenario from (B), we can show that (c) needs to
* be paired with (d).
*/
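/*
 * Illustrative sketch (not built): minimal userspace use of the private
 * expedited command implemented below. Error handling is elided and the
 * membarrier() wrapper is hypothetical; the command constants come from
 * the UAPI header <linux/membarrier.h>.
 */
#if 0
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
return syscall(__NR_membarrier, cmd, flags, cpu_id);
}
static void example(void)
{
/* Register once per process, then issue barriers as needed. */
membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0);
/* All running threads of this mm now pass through a full barrier. */
membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
}
#endif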
/*
* Bitmask made from a "or" of all commands within enum membarrier_cmd,
* except MEMBARRIER_CMD_QUERY.
*/
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE \
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK 0
#endif
#ifdef CONFIG_RSEQ
#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \
(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ \
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
#endif
#define MEMBARRIER_CMD_BITMASK \
(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED \
| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
| MEMBARRIER_CMD_PRIVATE_EXPEDITED \
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
| MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \
| MEMBARRIER_CMD_GET_REGISTRATIONS)
static void ipi_mb(void *info)
{
smp_mb(); /* IPIs should be serializing but paranoid. */
}
static void ipi_sync_core(void *info)
{
/*
* The smp_mb() in membarrier after all the IPIs is supposed to
* ensure that memory on remote CPUs that occur before the IPI
* become visible to membarrier()'s caller -- see scenario B in
* the big comment at the top of this file.
*
* A sync_core() would provide this guarantee, but
* sync_core_before_usermode() might end up being deferred until
* after membarrier()'s smp_mb().
*/
smp_mb(); /* IPIs should be serializing but paranoid. */
sync_core_before_usermode();
}
static void ipi_rseq(void *info)
{
/*
* Ensure that all stores done by the calling thread are visible
* to the current task before the current task resumes. We could
* probably optimize this away on most architectures, but by the
* time we've already sent an IPI, the cost of the extra smp_mb()
* is negligible.
*/
smp_mb();
rseq_preempt(current);
}
static void ipi_sync_rq_state(void *info)
{
struct mm_struct *mm = (struct mm_struct *) info;
if (current->mm != mm)
return;
this_cpu_write(runqueues.membarrier_state,
atomic_read(&mm->membarrier_state));
/*
* Issue a memory barrier after setting
* MEMBARRIER_STATE_GLOBAL_EXPEDITED in the current runqueue to
* guarantee that no memory access following registration is reordered
* before registration.
*/
smp_mb();
}
void membarrier_exec_mmap(struct mm_struct *mm)
{
/*
* Issue a memory barrier before clearing membarrier_state to
* guarantee that no memory access prior to exec is reordered after
* clearing this state.
*/
smp_mb();
atomic_set(&mm->membarrier_state, 0);
/*
* Keep the runqueue membarrier_state in sync with this mm
* membarrier_state.
*/
this_cpu_write(runqueues.membarrier_state, 0);
}
void membarrier_update_current_mm(struct mm_struct *next_mm)
{
struct rq *rq = this_rq();
int membarrier_state = 0;
if (next_mm)
membarrier_state = atomic_read(&next_mm->membarrier_state);
if (READ_ONCE(rq->membarrier_state) == membarrier_state)
return;
WRITE_ONCE(rq->membarrier_state, membarrier_state);
}
static int membarrier_global_expedited(void)
{
int cpu;
cpumask_var_t tmpmask;
if (num_online_cpus() == 1)
return 0;
/*
* Matches memory barriers around rq->curr modification in
* scheduler.
*/
smp_mb(); /* system call entry is not a mb. */
if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
return -ENOMEM;
cpus_read_lock();
rcu_read_lock();
for_each_online_cpu(cpu) {
struct task_struct *p;
/*
* Skipping the current CPU is OK even though we can be
* migrated at any point. The current CPU, at the point
* where we read raw_smp_processor_id(), is ensured to
* be in program order with respect to the caller
* thread. Therefore, we can skip this CPU from the
* iteration.
*/
if (cpu == raw_smp_processor_id())
continue;
if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &
MEMBARRIER_STATE_GLOBAL_EXPEDITED))
continue;
/*
* Skip the CPU if it runs a kernel thread which is not using
* a task mm.
*/
p = rcu_dereference(cpu_rq(cpu)->curr);
if (!p->mm)
continue;
__cpumask_set_cpu(cpu, tmpmask);
}
rcu_read_unlock();
preempt_disable();
smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
preempt_enable();
free_cpumask_var(tmpmask);
cpus_read_unlock();
/*
* Memory barrier on the caller thread _after_ we finished
* waiting for the last IPI. Matches memory barriers around
* rq->curr modification in scheduler.
*/
smp_mb(); /* exit from system call is not a mb */
return 0;
}
static int membarrier_private_expedited(int flags, int cpu_id)
{
cpumask_var_t tmpmask;
struct mm_struct *mm = current->mm;
smp_call_func_t ipi_func = ipi_mb;
if (flags == MEMBARRIER_FLAG_SYNC_CORE) {
if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
return -EINVAL;
if (!(atomic_read(&mm->membarrier_state) &
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
return -EPERM;
ipi_func = ipi_sync_core;
} else if (flags == MEMBARRIER_FLAG_RSEQ) {
if (!IS_ENABLED(CONFIG_RSEQ))
return -EINVAL;
if (!(atomic_read(&mm->membarrier_state) &
MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY))
return -EPERM;
ipi_func = ipi_rseq;
} else {
WARN_ON_ONCE(flags);
if (!(atomic_read(&mm->membarrier_state) &
MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
return -EPERM;
}
if (flags != MEMBARRIER_FLAG_SYNC_CORE &&
(atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1))
return 0;
/*
* Matches memory barriers around rq->curr modification in
* scheduler.
*/
smp_mb(); /* system call entry is not a mb. */
if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
return -ENOMEM;
cpus_read_lock();
if (cpu_id >= 0) {
struct task_struct *p;
if (cpu_id >= nr_cpu_ids || !cpu_online(cpu_id))
goto out;
rcu_read_lock();
p = rcu_dereference(cpu_rq(cpu_id)->curr);
if (!p || p->mm != mm) {
rcu_read_unlock();
goto out;
}
rcu_read_unlock();
} else {
int cpu;
rcu_read_lock();
for_each_online_cpu(cpu) {
struct task_struct *p;
p = rcu_dereference(cpu_rq(cpu)->curr);
if (p && p->mm == mm)
__cpumask_set_cpu(cpu, tmpmask);
}
rcu_read_unlock();
}
if (cpu_id >= 0) {
/*
* smp_call_function_single() will call ipi_func() if cpu_id
* is the calling CPU.
*/
smp_call_function_single(cpu_id, ipi_func, NULL, 1);
} else {
/*
* For regular membarrier, we can save a few cycles by
* skipping the current cpu -- we're about to do smp_mb()
* below, and if we migrate to a different cpu, this cpu
* and the new cpu will execute a full barrier in the
* scheduler.
*
* For SYNC_CORE, we do need a barrier on the current cpu --
* otherwise, if we are migrated and replaced by a different
* task in the same mm just before, during, or after
* membarrier, we will end up with some thread in the mm
* running without a core sync.
*
* For RSEQ, don't rseq_preempt() the caller. User code
* is not supposed to issue syscalls at all from inside an
* rseq critical section.
*/
if (flags != MEMBARRIER_FLAG_SYNC_CORE) {
preempt_disable();
smp_call_function_many(tmpmask, ipi_func, NULL, true);
preempt_enable();
} else {
on_each_cpu_mask(tmpmask, ipi_func, NULL, true);
}
}
out:
if (cpu_id < 0)
free_cpumask_var(tmpmask);
cpus_read_unlock();
/*
* Memory barrier on the caller thread _after_ we finished
* waiting for the last IPI. Matches memory barriers around
* rq->curr modification in scheduler.
*/
smp_mb(); /* exit from system call is not a mb */
return 0;
}
static int sync_runqueues_membarrier_state(struct mm_struct *mm)
{
int membarrier_state = atomic_read(&mm->membarrier_state);
cpumask_var_t tmpmask;
int cpu;
if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) {
this_cpu_write(runqueues.membarrier_state, membarrier_state);
/*
* For single mm user, we can simply issue a memory barrier
* after setting MEMBARRIER_STATE_GLOBAL_EXPEDITED in the
* mm and in the current runqueue to guarantee that no memory
* access following registration is reordered before
* registration.
*/
smp_mb();
return 0;
}
if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
return -ENOMEM;
/*
* For mm with multiple users, we need to ensure all future
* scheduler executions will observe @mm's new membarrier
* state.
*/
synchronize_rcu();
/*
* For each cpu runqueue, if the task's mm matches @mm, ensure that all
* @mm's membarrier state set bits are also set in the runqueue's
* membarrier state. This ensures that a runqueue scheduling
* between threads which are users of @mm has its membarrier state
* updated.
*/
cpus_read_lock();
rcu_read_lock();
for_each_online_cpu(cpu) {
struct rq *rq = cpu_rq(cpu);
struct task_struct *p;
p = rcu_dereference(rq->curr);
if (p && p->mm == mm)
__cpumask_set_cpu(cpu, tmpmask);
}
rcu_read_unlock();
on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true);
free_cpumask_var(tmpmask);
cpus_read_unlock();
return 0;
}
static int membarrier_register_global_expedited(void)
{
struct task_struct *p = current;
struct mm_struct *mm = p->mm;
int ret;
if (atomic_read(&mm->membarrier_state) &
MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
return 0;
atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
ret = sync_runqueues_membarrier_state(mm);
if (ret)
return ret;
atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
&mm->membarrier_state);
return 0;
}
static int membarrier_register_private_expedited(int flags)
{
struct task_struct *p = current;
struct mm_struct *mm = p->mm;
int ready_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
set_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED,
ret;
if (flags == MEMBARRIER_FLAG_SYNC_CORE) {
if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
return -EINVAL;
ready_state =
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
} else if (flags == MEMBARRIER_FLAG_RSEQ) {
if (!IS_ENABLED(CONFIG_RSEQ))
return -EINVAL;
ready_state =
MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY;
} else {
WARN_ON_ONCE(flags);
}
/*
* We need to consider threads belonging to different thread
* groups, which use the same mm. (CLONE_VM but not
* CLONE_THREAD).
*/
if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state)
return 0;
if (flags & MEMBARRIER_FLAG_SYNC_CORE)
set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE;
if (flags & MEMBARRIER_FLAG_RSEQ)
set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ;
atomic_or(set_state, &mm->membarrier_state);
ret = sync_runqueues_membarrier_state(mm);
if (ret)
return ret;
atomic_or(ready_state, &mm->membarrier_state);
return 0;
}
static int membarrier_get_registrations(void)
{
struct task_struct *p = current;
struct mm_struct *mm = p->mm;
int registrations_mask = 0, membarrier_state, i;
static const int states[] = {
MEMBARRIER_STATE_GLOBAL_EXPEDITED |
MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
MEMBARRIER_STATE_PRIVATE_EXPEDITED |
MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE |
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY,
MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ |
MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY
};
static const int registration_cmds[] = {
MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED,
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED,
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE,
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ
};
BUILD_BUG_ON(ARRAY_SIZE(states) != ARRAY_SIZE(registration_cmds));
membarrier_state = atomic_read(&mm->membarrier_state);
for (i = 0; i < ARRAY_SIZE(states); ++i) {
if (membarrier_state & states[i]) {
registrations_mask |= registration_cmds[i];
membarrier_state &= ~states[i];
}
}
WARN_ON_ONCE(membarrier_state != 0);
return registrations_mask;
}
/**
* sys_membarrier - issue memory barriers on a set of threads
* @cmd: Takes command values defined in enum membarrier_cmd.
* @flags: Currently needs to be 0 for all commands other than
* MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: in the latter
* case it can be MEMBARRIER_CMD_FLAG_CPU, indicating that @cpu_id
* contains the CPU on which to interrupt (= restart)
* the RSEQ critical section.
* @cpu_id: if @flags == MEMBARRIER_CMD_FLAG_CPU, indicates the cpu on which
* RSEQ CS should be interrupted (@cmd must be
* MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ).
*
* If this system call is not implemented, -ENOSYS is returned. If the
* command specified does not exist, not available on the running
* kernel, or if the command argument is invalid, this system call
* returns -EINVAL. For a given command, with flags argument set to 0,
* if this system call returns -ENOSYS or -EINVAL, it is guaranteed to
* always return the same value until reboot. In addition, it can return
* -ENOMEM if there is not enough memory available to perform the system
* call.
*
* All memory accesses performed in program order from each targeted thread
* is guaranteed to be ordered with respect to sys_membarrier(). If we use
* the semantic "barrier()" to represent a compiler barrier forcing memory
* accesses to be performed in program order across the barrier, and
* smp_mb() to represent explicit memory barriers forcing full memory
* ordering across the barrier, we have the following ordering table for
* each pair of barrier(), sys_membarrier() and smp_mb():
*
* The pair ordering is detailed as (O: ordered, X: not ordered):
*
* barrier() smp_mb() sys_membarrier()
* barrier() X X O
* smp_mb() X O O
* sys_membarrier() O O O
*/
SYSCALL_DEFINE3(membarrier, int, cmd, unsigned int, flags, int, cpu_id)
{
switch (cmd) {
case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
if (unlikely(flags && flags != MEMBARRIER_CMD_FLAG_CPU))
return -EINVAL;
break;
default:
if (unlikely(flags))
return -EINVAL;
}
if (!(flags & MEMBARRIER_CMD_FLAG_CPU))
cpu_id = -1;
switch (cmd) {
case MEMBARRIER_CMD_QUERY:
{
int cmd_mask = MEMBARRIER_CMD_BITMASK;
if (tick_nohz_full_enabled())
cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
return cmd_mask;
}
case MEMBARRIER_CMD_GLOBAL:
/* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
if (tick_nohz_full_enabled())
return -EINVAL;
if (num_online_cpus() > 1)
synchronize_rcu();
return 0;
case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
return membarrier_global_expedited();
case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
return membarrier_register_global_expedited();
case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
return membarrier_private_expedited(0, cpu_id);
case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
return membarrier_register_private_expedited(0);
case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE, cpu_id);
case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
return membarrier_private_expedited(MEMBARRIER_FLAG_RSEQ, cpu_id);
case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:
return membarrier_register_private_expedited(MEMBARRIER_FLAG_RSEQ);
case MEMBARRIER_CMD_GET_REGISTRATIONS:
return membarrier_get_registrations();
default:
return -EINVAL;
}
}
| linux-master | kernel/sched/membarrier.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic waiting primitives.
*
* (C) 2004 Nadia Yvette Chambers, Oracle
*/
void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
spin_lock_init(&wq_head->lock);
lockdep_set_class_and_name(&wq_head->lock, key, name);
INIT_LIST_HEAD(&wq_head->head);
}
EXPORT_SYMBOL(__init_waitqueue_head);
void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
unsigned long flags;
wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&wq_head->lock, flags);
__add_wait_queue(wq_head, wq_entry);
spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
unsigned long flags;
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&wq_head->lock, flags);
__add_wait_queue_entry_tail(wq_head, wq_entry);
spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
unsigned long flags;
wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
spin_lock_irqsave(&wq_head->lock, flags);
__add_wait_queue(wq_head, wq_entry);
spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);
void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
unsigned long flags;
spin_lock_irqsave(&wq_head->lock, flags);
__remove_wait_queue(wq_head, wq_entry);
spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
/*
* Scan threshold to break wait queue walk.
* This allows a waker to take a break from holding the
* wait queue lock during the wait queue walk.
*/
#define WAITQUEUE_WALK_BREAK_CNT 64
/*
* The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
* wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
* number) then we wake that number of exclusive tasks, and potentially all
* the non-exclusive tasks. Normally, exclusive tasks will be at the end of
* the list and any non-exclusive tasks will be woken first. A priority task
* may be at the head of the list, and can consume the event without any other
* tasks being woken.
*
* There are circumstances in which we can try to wake a task which has already
* started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
int nr_exclusive, int wake_flags, void *key,
wait_queue_entry_t *bookmark)
{
wait_queue_entry_t *curr, *next;
int cnt = 0;
lockdep_assert_held(&wq_head->lock);
if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
curr = list_next_entry(bookmark, entry);
list_del(&bookmark->entry);
bookmark->flags = 0;
} else
curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
if (&curr->entry == &wq_head->head)
return nr_exclusive;
list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
unsigned flags = curr->flags;
int ret;
if (flags & WQ_FLAG_BOOKMARK)
continue;
ret = curr->func(curr, mode, wake_flags, key);
if (ret < 0)
break;
if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
break;
if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
(&next->entry != &wq_head->head)) {
bookmark->flags = WQ_FLAG_BOOKMARK;
list_add_tail(&bookmark->entry, &next->entry);
break;
}
}
return nr_exclusive;
}
static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
int nr_exclusive, int wake_flags, void *key)
{
unsigned long flags;
wait_queue_entry_t bookmark;
int remaining = nr_exclusive;
bookmark.flags = 0;
bookmark.private = NULL;
bookmark.func = NULL;
INIT_LIST_HEAD(&bookmark.entry);
do {
spin_lock_irqsave(&wq_head->lock, flags);
remaining = __wake_up_common(wq_head, mode, remaining,
wake_flags, key, &bookmark);
spin_unlock_irqrestore(&wq_head->lock, flags);
} while (bookmark.flags & WQ_FLAG_BOOKMARK);
return nr_exclusive - remaining;
}
/**
* __wake_up - wake up threads blocked on a waitqueue.
* @wq_head: the waitqueue
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
* @key: is directly passed to the wakeup function
*
* If this function wakes up a task, it executes a full memory barrier
* before accessing the task state. Returns the number of exclusive
* tasks that were woken.
*/
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
int nr_exclusive, void *key)
{
return __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
__wake_up_common_lock(wq_head, mode, 1, WF_CURRENT_CPU, key);
}
/*
* Same as __wake_up but called with the spinlock in wait_queue_head_t held.
*/
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
/**
* __wake_up_sync_key - wake up threads blocked on a waitqueue.
* @wq_head: the waitqueue
* @mode: which threads
* @key: opaque value to be passed to wakeup targets
*
* The sync wakeup differs in that the waker knows that it will schedule
* away soon, so while the target thread will be woken up, it will not
* be migrated to another CPU - ie. the two threads are 'synchronized'
* with each other. This can prevent needless bouncing between CPUs.
*
* On UP it can prevent extra preemption.
*
* If this function wakes up a task, it executes a full memory barrier before
* accessing the task state.
*/
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
void *key)
{
if (unlikely(!wq_head))
return;
__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
/**
* __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
* @wq_head: the waitqueue
* @mode: which threads
* @key: opaque value to be passed to wakeup targets
*
* The sync wakeup differs in that the waker knows that it will schedule
* away soon, so while the target thread will be woken up, it will not
* be migrated to another CPU - ie. the two threads are 'synchronized'
* with each other. This can prevent needless bouncing between CPUs.
*
* On UP it can prevent extra preemption.
*
* If this function wakes up a task, it executes a full memory barrier before
* accessing the task state.
*/
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
unsigned int mode, void *key)
{
__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);
/*
* __wake_up_sync - see __wake_up_sync_key()
*/
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
/* POLLFREE must have cleared the queue. */
WARN_ON_ONCE(waitqueue_active(wq_head));
}
/*
* Note: we use "set_current_state()" _after_ the wait-queue add,
* because we need a memory barrier there on SMP, so that any
* wake-function that tests for the wait-queue being active
* will be guaranteed to see waitqueue addition _or_ subsequent
* tests in this thread will see the wakeup having taken place.
*
* The spin_unlock() itself is semi-permeable and only protects
* one way (it only protects stuff inside the critical region and
* stops them from bleeding out - it would still allow subsequent
* loads to move into the critical region).
*/
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
unsigned long flags;
wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&wq_head->lock, flags);
if (list_empty(&wq_entry->entry))
__add_wait_queue(wq_head, wq_entry);
set_current_state(state);
spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
unsigned long flags;
bool was_empty = false;
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&wq_head->lock, flags);
if (list_empty(&wq_entry->entry)) {
was_empty = list_empty(&wq_head->head);
__add_wait_queue_entry_tail(wq_head, wq_entry);
}
set_current_state(state);
spin_unlock_irqrestore(&wq_head->lock, flags);
return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
wq_entry->flags = flags;
wq_entry->private = current;
wq_entry->func = autoremove_wake_function;
INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
unsigned long flags;
long ret = 0;
spin_lock_irqsave(&wq_head->lock, flags);
if (signal_pending_state(state, current)) {
/*
* An exclusive waiter must not fail if it was selected by wakeup;
* it should "consume" the condition we were waiting for.
*
* The caller will recheck the condition and return success if
* we were already woken up; we cannot miss the event because
* wakeup locks/unlocks the same wq_head->lock.
*
* But we need to ensure that set-condition + wakeup after that
* can't see us, it should wake up another exclusive waiter if
* we fail.
*/
list_del_init(&wq_entry->entry);
ret = -ERESTARTSYS;
} else {
if (list_empty(&wq_entry->entry)) {
if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
__add_wait_queue_entry_tail(wq_head, wq_entry);
else
__add_wait_queue(wq_head, wq_entry);
}
set_current_state(state);
}
spin_unlock_irqrestore(&wq_head->lock, flags);
return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
/*
* Note! These two wait functions are entered with the
* wait-queue lock held (and interrupts off in the _irq
* case), so there is no race with testing the wakeup
* condition in the caller before they add the wait
* entry to the wake queue.
*/
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
if (likely(list_empty(&wait->entry)))
__add_wait_queue_entry_tail(wq, wait);
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current))
return -ERESTARTSYS;
spin_unlock(&wq->lock);
schedule();
spin_lock(&wq->lock);
return 0;
}
EXPORT_SYMBOL(do_wait_intr);
int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
if (likely(list_empty(&wait->entry)))
__add_wait_queue_entry_tail(wq, wait);
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current))
return -ERESTARTSYS;
spin_unlock_irq(&wq->lock);
schedule();
spin_lock_irq(&wq->lock);
return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
/**
* finish_wait - clean up after waiting in a queue
* @wq_head: waitqueue waited on
* @wq_entry: wait descriptor
*
* Sets current thread back to running state and removes
* the wait descriptor from the given waitqueue if still
* queued.
*/
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
unsigned long flags;
__set_current_state(TASK_RUNNING);
/*
* We can check for list emptiness outside the lock
* IFF:
* - we use the "careful" check that verifies both
* the next and prev pointers, so that there cannot
* be any half-pending updates in progress on other
* CPUs that we haven't seen yet (and that might
* still change the stack area)
* and
* - all other users take the lock (ie we can only
* have _one_ other CPU that looks at or modifies
* the list).
*/
if (!list_empty_careful(&wq_entry->entry)) {
spin_lock_irqsave(&wq_head->lock, flags);
list_del_init(&wq_entry->entry);
spin_unlock_irqrestore(&wq_head->lock, flags);
}
}
EXPORT_SYMBOL(finish_wait);
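/*
 * Illustrative sketch (not built): the canonical open-coded wait loop the
 * prepare_to_wait()/finish_wait() pair above is designed for. @wq and
 * @condition are placeholders for the caller's waitqueue and wakeup
 * condition; the wait_event*() macros wrap this same pattern.
 */
#if 0
static int example_wait(struct wait_queue_head *wq, bool *condition)
{
DEFINE_WAIT(wait);
int ret = 0;
for (;;) {
prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
if (READ_ONCE(*condition))
break;
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
schedule();
}
finish_wait(wq, &wait);
return ret;
}
#endif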
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
int ret = default_wake_function(wq_entry, mode, sync, key);
if (ret)
list_del_init_careful(&wq_entry->entry);
return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
/*
* DEFINE_WAIT_FUNC(wait, woken_wake_func);
*
* add_wait_queue(&wq_head, &wait);
* for (;;) {
* if (condition)
* break;
*
* // in wait_woken() // in woken_wake_function()
*
* p->state = mode; wq_entry->flags |= WQ_FLAG_WOKEN;
* smp_mb(); // A try_to_wake_up():
* if (!(wq_entry->flags & WQ_FLAG_WOKEN)) <full barrier>
* schedule() if (p->state & mode)
* p->state = TASK_RUNNING; p->state = TASK_RUNNING;
* wq_entry->flags &= ~WQ_FLAG_WOKEN; ~~~~~~~~~~~~~~~~~~
* smp_mb(); // B condition = true;
* } smp_mb(); // C
* remove_wait_queue(&wq_head, &wait); wq_entry->flags |= WQ_FLAG_WOKEN;
*/
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
/*
* The below executes an smp_mb(), which matches with the full barrier
* executed by the try_to_wake_up() in woken_wake_function() such that
* either we see the store to wq_entry->flags in woken_wake_function()
* or woken_wake_function() sees our store to current->state.
*/
set_current_state(mode); /* A */
if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !kthread_should_stop_or_park())
timeout = schedule_timeout(timeout);
__set_current_state(TASK_RUNNING);
/*
* The below executes an smp_mb(), which matches with the smp_mb() (C)
* in woken_wake_function() such that either we see the wait condition
* being true or the store to wq_entry->flags in woken_wake_function()
* follows ours in the coherence order.
*/
smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
return timeout;
}
EXPORT_SYMBOL(wait_woken);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
/* Pairs with the smp_store_mb() in wait_woken(). */
smp_mb(); /* C */
wq_entry->flags |= WQ_FLAG_WOKEN;
return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
| linux-master | kernel/sched/wait.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
*
* Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <[email protected]>
*
* Interactivity improvements by Mike Galbraith
* (C) 2007 Mike Galbraith <[email protected]>
*
* Various enhancements by Dmitry Adamushko.
* (C) 2007 Dmitry Adamushko <[email protected]>
*
* Group scheduling enhancements by Srivatsa Vaddagiri
* Copyright IBM Corporation, 2007
* Author: Srivatsa Vaddagiri <[email protected]>
*
* Scaled math optimizations by Thomas Gleixner
* Copyright (C) 2007, Thomas Gleixner <[email protected]>
*
* Adaptive scheduling granularity, math enhancements by Peter Zijlstra
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
#include <linux/hugetlb_inline.h>
#include <linux/jiffies.h>
#include <linux/mm_api.h>
#include <linux/highmem.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/isolation.h>
#include <linux/sched/nohz.h>
#include <linux/cpuidle.h>
#include <linux/interrupt.h>
#include <linux/memory-tiers.h>
#include <linux/mempolicy.h>
#include <linux/mutex_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/rbtree_augmented.h>
#include <asm/switch_to.h>
#include "sched.h"
#include "stats.h"
#include "autogroup.h"
/*
* The initial- and re-scaling of tunables is configurable
*
* Options are:
*
* SCHED_TUNABLESCALING_NONE - unscaled, always *1
* SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
* SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
*
* (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
*/
unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
/*
* Minimal preemption granularity for CPU-bound tasks:
*
* (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
unsigned int sysctl_sched_base_slice = 750000ULL;
static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
/*
* After fork, child runs first. If set to 0 (default) then
* parent will (try to) run first.
*/
unsigned int sysctl_sched_child_runs_first __read_mostly;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
int _shift = 0;
if (kstrtoint(str, 0, &_shift))
pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");
sched_thermal_decay_shift = clamp(_shift, 0, 10);
return 1;
}
__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
#ifdef CONFIG_SMP
/*
* For asym packing, by default the lower numbered CPU has higher priority.
*/
int __weak arch_asym_cpu_priority(int cpu)
{
return -cpu;
}
/*
* The margin used when comparing utilization with CPU capacity.
*
* (default: ~20%)
*/
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
/*
* The margin used when comparing CPU capacities.
* is 'cap1' noticeably greater than 'cap2'
*
* (default: ~5%)
*/
#define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
#endif
#ifdef CONFIG_CFS_BANDWIDTH
/*
* Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
* each time a cfs_rq requests quota.
*
* Note: in the case that the slice exceeds the runtime remaining (either due
* to consumption or the quota being specified to be smaller than the slice)
* we will always only issue the remaining available time.
*
* (default: 5 msec, units: microseconds)
*/
static unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
#ifdef CONFIG_NUMA_BALANCING
/* Restrict the NUMA promotion throughput (MB/s) for each target node. */
static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
#endif
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_fair_sysctls[] = {
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#ifdef CONFIG_CFS_BANDWIDTH
{
.procname = "sched_cfs_bandwidth_slice_us",
.data = &sysctl_sched_cfs_bandwidth_slice,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE,
},
#endif
#ifdef CONFIG_NUMA_BALANCING
{
.procname = "numa_balancing_promote_rate_limit_MBps",
.data = &sysctl_numa_balancing_promote_rate_limit,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
},
#endif /* CONFIG_NUMA_BALANCING */
{}
};
static int __init sched_fair_sysctl_init(void)
{
register_sysctl_init("kernel", sched_fair_sysctls);
return 0;
}
late_initcall(sched_fair_sysctl_init);
#endif
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
lw->inv_weight = 0;
}
static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
lw->weight -= dec;
lw->inv_weight = 0;
}
static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
lw->weight = w;
lw->inv_weight = 0;
}
/*
* Increase the granularity value when there are more CPUs,
* because with more CPUs the 'effective latency' as visible
* to users decreases. But the relationship is not linear,
* so pick a second-best guess by going with the log2 of the
* number of CPUs.
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
static unsigned int get_update_sysctl_factor(void)
{
unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
unsigned int factor;
switch (sysctl_sched_tunable_scaling) {
case SCHED_TUNABLESCALING_NONE:
factor = 1;
break;
case SCHED_TUNABLESCALING_LINEAR:
factor = cpus;
break;
case SCHED_TUNABLESCALING_LOG:
default:
factor = 1 + ilog2(cpus);
break;
}
return factor;
}
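/*
 * Illustrative worked example (hypothetical CPU count): with 8 or more
 * online CPUs and the default SCHED_TUNABLESCALING_LOG policy, the factor
 * is 1 + ilog2(8) = 4, so update_sysctl() below scales the 0.75 msec
 * base slice up to 3 msec. The CPU count is clamped to 8, so larger
 * machines get the same factor.
 */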
static void update_sysctl(void)
{
unsigned int factor = get_update_sysctl_factor();
#define SET_SYSCTL(name) \
(sysctl_##name = (factor) * normalized_sysctl_##name)
SET_SYSCTL(sched_base_slice);
#undef SET_SYSCTL
}
void __init sched_init_granularity(void)
{
update_sysctl();
}
#define WMULT_CONST (~0U)
#define WMULT_SHIFT 32
static void __update_inv_weight(struct load_weight *lw)
{
unsigned long w;
if (likely(lw->inv_weight))
return;
w = scale_load_down(lw->weight);
if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
lw->inv_weight = 1;
else if (unlikely(!w))
lw->inv_weight = WMULT_CONST;
else
lw->inv_weight = WMULT_CONST / w;
}
/*
* delta_exec * weight / lw.weight
* OR
* (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
*
* Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
* we're guaranteed shift stays positive because inv_weight is guaranteed to
* fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
*
* Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
* weight/lw.weight <= 1, and therefore our shift will also be positive.
*/
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
u64 fact = scale_load_down(weight);
u32 fact_hi = (u32)(fact >> 32);
int shift = WMULT_SHIFT;
int fs;
__update_inv_weight(lw);
if (unlikely(fact_hi)) {
fs = fls(fact_hi);
shift -= fs;
fact >>= fs;
}
fact = mul_u32_u32(fact, lw->inv_weight);
fact_hi = (u32)(fact >> 32);
if (fact_hi) {
fs = fls(fact_hi);
shift -= fs;
fact >>= fs;
}
return mul_u64_u32_shr(delta_exec, fact, shift);
}
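/*
* Illustrative standalone sketch, not kernel code: the fixed-point trick
* __calc_delta() builds on. Division by lw->weight is replaced with a
* multiplication by a precomputed ~2^32 reciprocal and a right shift.
* ex_calc_delta() is a simplified, hypothetical version that skips the
* 'fact' normalization done above, so it can overflow for large inputs;
* that is exactly what the fls()-based shift adjustment protects against.
*/
static unsigned long long ex_calc_delta(unsigned long long delta_exec,
unsigned long weight, unsigned long lw_weight)
{
unsigned long long inv = 0xffffffffULL / lw_weight; /* ~2^32 / lw_weight */
return (delta_exec * weight * inv) >> 32;
}
/*
* ex_calc_delta(1000000, 1024, 2048) == 499999, i.e. almost exactly
* 1000000 * 1024 / 2048; the off-by-one comes from truncating the
* reciprocal, mirroring __update_inv_weight() above.
*/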
/*
* delta /= w
*/
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
if (unlikely(se->load.weight != NICE_0_LOAD))
delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
return delta;
}
const struct sched_class fair_sched_class;
/**************************************************************
* CFS operations on generic schedulable entities:
*/
#ifdef CONFIG_FAIR_GROUP_SCHED
/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
for (; se; se = se->parent)
static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
int cpu = cpu_of(rq);
if (cfs_rq->on_list)
return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
cfs_rq->on_list = 1;
/*
* Ensure we either appear before our parent (if already
* enqueued) or force our parent to appear after us when it is
* enqueued. The fact that we always enqueue bottom-up
* reduces this to two cases and a special case for the root
* cfs_rq. Furthermore, it also means that we will always reset
* tmp_alone_branch either when the branch is connected
* to a tree or when we reach the top of the tree.
*/
if (cfs_rq->tg->parent &&
cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
/*
* If parent is already on the list, we add the child
* just before. Thanks to circular linked property of
* the list, this means to put the child at the tail
* of the list that starts by parent.
*/
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
/*
* The branch is now connected to its tree so we can
* reset tmp_alone_branch to the beginning of the
* list.
*/
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
return true;
}
if (!cfs_rq->tg->parent) {
/*
* cfs rq without parent should be put
* at the tail of the list.
*/
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
&rq->leaf_cfs_rq_list);
/*
* We have reached the top of a tree, so we can reset
* tmp_alone_branch to the beginning of the list.
*/
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
return true;
}
/*
* The parent has not been added yet, so we want to
* make sure that it will be put after us.
* tmp_alone_branch points to the beginning of the branch
* where we will add the parent.
*/
list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
/*
* Update tmp_alone_branch to point to the new beginning
* of the branch.
*/
rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
return false;
}
static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
if (cfs_rq->on_list) {
struct rq *rq = rq_of(cfs_rq);
/*
* With cfs_rq being unthrottled/throttled during an enqueue,
* it can happen that tmp_alone_branch points to the leaf that
* we finally want to delete. In this case, tmp_alone_branch moves
* to the prev element, but it will point to rq->leaf_cfs_rq_list
* at the end of the enqueue.
*/
if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
cfs_rq->on_list = 0;
}
}
static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
}
/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
leaf_cfs_rq_list)
/* Do the two (enqueued) entities belong to the same group? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
if (se->cfs_rq == pse->cfs_rq)
return se->cfs_rq;
return NULL;
}
static inline struct sched_entity *parent_entity(const struct sched_entity *se)
{
return se->parent;
}
static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
int se_depth, pse_depth;
/*
* A preemption test can only be made between sibling entities that are
* in the same cfs_rq, i.e. that have a common parent. Walk up the
* hierarchy of both tasks until we find ancestors that are siblings
* with a common parent.
*/
/* First walk up until both entities are at same depth */
se_depth = (*se)->depth;
pse_depth = (*pse)->depth;
while (se_depth > pse_depth) {
se_depth--;
*se = parent_entity(*se);
}
while (pse_depth > se_depth) {
pse_depth--;
*pse = parent_entity(*pse);
}
while (!is_same_group(*se, *pse)) {
*se = parent_entity(*se);
*pse = parent_entity(*pse);
}
}
static int tg_is_idle(struct task_group *tg)
{
return tg->idle > 0;
}
static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
{
return cfs_rq->idle > 0;
}
static int se_is_idle(struct sched_entity *se)
{
if (entity_is_task(se))
return task_has_idle_policy(task_of(se));
return cfs_rq_is_idle(group_cfs_rq(se));
}
#else /* !CONFIG_FAIR_GROUP_SCHED */
#define for_each_sched_entity(se) \
for (; se; se = NULL)
static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
return true;
}
static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}
static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
}
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
return NULL;
}
static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}
static inline int tg_is_idle(struct task_group *tg)
{
return 0;
}
static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
{
return 0;
}
static int se_is_idle(struct sched_entity *se)
{
return 0;
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
*/
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - max_vruntime);
if (delta > 0)
max_vruntime = vruntime;
return max_vruntime;
}
static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - min_vruntime);
if (delta < 0)
min_vruntime = vruntime;
return min_vruntime;
}
static inline bool entity_before(const struct sched_entity *a,
const struct sched_entity *b)
{
return (s64)(a->vruntime - b->vruntime) < 0;
}
static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return (s64)(se->vruntime - cfs_rq->min_vruntime);
}
#define __node_2_se(node) \
rb_entry((node), struct sched_entity, run_node)
/*
* Compute virtual time from the per-task service numbers:
*
* Fair schedulers conserve lag:
*
* \Sum lag_i = 0
*
* Where lag_i is given by:
*
* lag_i = S - s_i = w_i * (V - v_i)
*
* Where S is the ideal service time and V is its virtual time counterpart.
* Therefore:
*
* \Sum lag_i = 0
* \Sum w_i * (V - v_i) = 0
* \Sum w_i * V - w_i * v_i = 0
*
* From which we can solve an expression for V in v_i (which we have in
* se->vruntime):
*
* \Sum v_i * w_i \Sum v_i * w_i
* V = -------------- = --------------
* \Sum w_i W
*
* Specifically, this is the weighted average of all entity virtual runtimes.
*
* [[ NOTE: this is only equal to the ideal scheduler under the condition
* that join/leave operations happen at lag_i = 0, otherwise the
* virtual time has non-contiguous motion equivalent to:
*
* V +-= lag_i / W
*
* Also see the comment in place_entity() that deals with this. ]]
*
* However, since v_i is u64 and the multiplication could easily
* overflow, transform it into a relative form that uses smaller
* quantities:
*
* Substitute: v_i == (v_i - v0) + v0
*
* \Sum ((v_i - v0) + v0) * w_i \Sum (v_i - v0) * w_i
* V = ---------------------------- = --------------------- + v0
* W W
*
* Which we track using:
*
* v0 := cfs_rq->min_vruntime
* \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
* \Sum w_i := cfs_rq->avg_load
*
* Since min_vruntime is a monotonically increasing variable that closely tracks
* the per-task service, these deltas: (v_i - v), will be in the order of the
* maximal (virtual) lag induced in the system due to quantisation.
*
* Also, we use scale_load_down() to reduce the size.
*
* As measured, the max (key * weight) value was ~44 bits for a kernel build.
*/
static void
avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
unsigned long weight = scale_load_down(se->load.weight);
s64 key = entity_key(cfs_rq, se);
cfs_rq->avg_vruntime += key * weight;
cfs_rq->avg_load += weight;
}
static void
avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
unsigned long weight = scale_load_down(se->load.weight);
s64 key = entity_key(cfs_rq, se);
cfs_rq->avg_vruntime -= key * weight;
cfs_rq->avg_load -= weight;
}
static inline
void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
{
/*
* v' = v + d ==> avg_vruntime' = avg_vruntime - d*avg_load
*/
cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
}
u64 avg_vruntime(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
s64 avg = cfs_rq->avg_vruntime;
long load = cfs_rq->avg_load;
if (curr && curr->on_rq) {
unsigned long weight = scale_load_down(curr->load.weight);
avg += entity_key(cfs_rq, curr) * weight;
load += weight;
}
if (load)
avg = div_s64(avg, load);
return cfs_rq->min_vruntime + avg;
}
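/*
* Illustrative standalone sketch, not kernel code: the relative form of
* the weighted average derived in the comment above. The ex_* names are
* hypothetical.
*/
struct ex_entity { unsigned long long vruntime; unsigned long weight; };
static unsigned long long ex_avg_vruntime(const struct ex_entity *e, int n,
unsigned long long v0)
{
long long key_sum = 0; /* \Sum (v_i - v0) * w_i */
long load = 0; /* \Sum w_i */
int i;
for (i = 0; i < n; i++) {
key_sum += (long long)(e[i].vruntime - v0) * e[i].weight;
load += e[i].weight;
}
return load ? v0 + key_sum / load : v0;
}
/*
* With v0 = 1000 and entities {vruntime=1000, w=2}, {vruntime=1600, w=1}:
* key_sum = 0*2 + 600*1 = 600, load = 3, V = 1000 + 200 = 1200, matching
* the direct average (1000*2 + 1600*1) / 3.
*/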
/*
* lag_i = S - s_i = w_i * (V - v_i)
*
* However, since V is approximated by the weighted average of all entities it
* is possible -- by addition/removal/reweight to the tree -- to move V around
* and end up with a larger lag than we started with.
*
* Limit this to double the slice length, with a minimum of TICK_NSEC,
* since that is the timing granularity.
*
* EEVDF gives the following limit for a steady state system:
*
* -r_max < lag < max(r_max, q)
*
* XXX could add max_slice to the augmented data to track this.
*/
static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
s64 lag, limit;
SCHED_WARN_ON(!se->on_rq);
lag = avg_vruntime(cfs_rq) - se->vruntime;
limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
se->vlag = clamp(lag, -limit, limit);
}
/*
* An entity is eligible once it has received less service than it
* ought to have, i.e. lag >= 0.
*
* lag_i = S - s_i = w_i*(V - v_i)
*
* lag_i >= 0 -> V >= v_i
*
* \Sum (v_i - v)*w_i
* V = ------------------ + v
* \Sum w_i
*
* lag_i >= 0 -> \Sum (v_i - v)*w_i >= (v_i - v)*(\Sum w_i)
*
* Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
* to the loss in precision caused by the division.
*/
int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct sched_entity *curr = cfs_rq->curr;
s64 avg = cfs_rq->avg_vruntime;
long load = cfs_rq->avg_load;
if (curr && curr->on_rq) {
unsigned long weight = scale_load_down(curr->load.weight);
avg += entity_key(cfs_rq, curr) * weight;
load += weight;
}
return avg >= entity_key(cfs_rq, se) * load;
}
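/*
* Illustrative worked example, not kernel code: why eligibility is tested
* as avg >= key * load rather than by dividing avg by load first. With
* avg = 5, load = 3 and key = 1, the divided form truncates 5/3 to 1 and
* the comparison 1 > 1 wrongly reports the entity as ineligible, while
* 5 >= 1 * 3 correctly reports it eligible without any rounding loss.
*/
static int ex_entity_eligible(long long avg, long load, long long key)
{
return avg >= key * load; /* division-free lag_i >= 0 test */
}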
static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
{
u64 min_vruntime = cfs_rq->min_vruntime;
/*
* open coded max_vruntime() to allow updating avg_vruntime
*/
s64 delta = (s64)(vruntime - min_vruntime);
if (delta > 0) {
avg_vruntime_update(cfs_rq, delta);
min_vruntime = vruntime;
}
return min_vruntime;
}
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
struct sched_entity *se = __pick_first_entity(cfs_rq);
struct sched_entity *curr = cfs_rq->curr;
u64 vruntime = cfs_rq->min_vruntime;
if (curr) {
if (curr->on_rq)
vruntime = curr->vruntime;
else
curr = NULL;
}
if (se) {
if (!curr)
vruntime = se->vruntime;
else
vruntime = min_vruntime(vruntime, se->vruntime);
}
/* ensure we never gain time by being placed backwards. */
u64_u32_store(cfs_rq->min_vruntime,
__update_min_vruntime(cfs_rq, vruntime));
}
static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
{
return entity_before(__node_2_se(a), __node_2_se(b));
}
#define deadline_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
static inline void __update_min_deadline(struct sched_entity *se, struct rb_node *node)
{
if (node) {
struct sched_entity *rse = __node_2_se(node);
if (deadline_gt(min_deadline, se, rse))
se->min_deadline = rse->min_deadline;
}
}
/*
* se->min_deadline = min(se->deadline, left->min_deadline, right->min_deadline)
*/
static inline bool min_deadline_update(struct sched_entity *se, bool exit)
{
u64 old_min_deadline = se->min_deadline;
struct rb_node *node = &se->run_node;
se->min_deadline = se->deadline;
__update_min_deadline(se, node->rb_right);
__update_min_deadline(se, node->rb_left);
return se->min_deadline == old_min_deadline;
}
RB_DECLARE_CALLBACKS(static, min_deadline_cb, struct sched_entity,
run_node, min_deadline, min_deadline_update);
/*
* Enqueue an entity into the rb-tree:
*/
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
avg_vruntime_add(cfs_rq, se);
se->min_deadline = se->deadline;
rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
__entity_less, &min_deadline_cb);
}
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
&min_deadline_cb);
avg_vruntime_sub(cfs_rq, se);
}
struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
if (!left)
return NULL;
return __node_2_se(left);
}
/*
* Earliest Eligible Virtual Deadline First
*
* In order to provide latency guarantees for different request sizes
* EEVDF selects the best runnable task from two criteria:
*
* 1) the task must be eligible (must be owed service)
*
* 2) from those tasks that meet 1), we select the one
* with the earliest virtual deadline.
*
* We can do this in O(log n) time due to an augmented RB-tree. The
* tree keeps the entries sorted on service, but also functions as a
* heap based on the deadline by keeping:
*
* se->min_deadline = min(se->deadline, se->{left,right}->min_deadline)
*
* Which allows an EDF like search on (sub)trees.
*/
static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
{
struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
struct sched_entity *curr = cfs_rq->curr;
struct sched_entity *best = NULL;
if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
curr = NULL;
/*
* Once selected, run a task until it either becomes non-eligible or
* until it gets a new slice. See the HACK in set_next_entity().
*/
if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
return curr;
while (node) {
struct sched_entity *se = __node_2_se(node);
/*
* If this entity is not eligible, try the left subtree.
*/
if (!entity_eligible(cfs_rq, se)) {
node = node->rb_left;
continue;
}
/*
* If this entity has an earlier deadline than the previous
* best, take this one. If it also has the earliest deadline
* of its subtree, we're done.
*/
if (!best || deadline_gt(deadline, best, se)) {
best = se;
if (best->deadline == best->min_deadline)
break;
}
/*
* If the earliest deadline in this subtree is in the fully
* eligible left half of our space, go there.
*/
if (node->rb_left &&
__node_2_se(node->rb_left)->min_deadline == se->min_deadline) {
node = node->rb_left;
continue;
}
node = node->rb_right;
}
if (!best || (curr && deadline_gt(deadline, best, curr)))
best = curr;
if (unlikely(!best)) {
struct sched_entity *left = __pick_first_entity(cfs_rq);
if (left) {
pr_err("EEVDF scheduling fail, picking leftmost\n");
return left;
}
}
return best;
}
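/*
* Illustrative standalone sketch, not kernel code: the augmented-tree
* invariant pick_eevdf() walks. Every node caches the minimum deadline
* of its subtree, so whole subtrees can be skipped in O(log n). The
* ex_* names are hypothetical.
*/
struct ex_node {
unsigned long long deadline, min_deadline;
struct ex_node *left, *right;
};
static unsigned long long ex_min_deadline(const struct ex_node *n)
{
unsigned long long min = n->deadline;
if (n->left && n->left->min_deadline < min)
min = n->left->min_deadline;
if (n->right && n->right->min_deadline < min)
min = n->right->min_deadline;
return min; /* the value min_deadline_update() maintains per node */
}
/*
* During the search above, if the left child's min_deadline equals the
* node's min_deadline, the earliest deadline must live in the (fully
* eligible) left subtree, so the walk descends left.
*/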
#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
if (!last)
return NULL;
return __node_2_se(last);
}
/**************************************************************
* Scheduling class statistics methods:
*/
#ifdef CONFIG_SMP
int sched_update_scaling(void)
{
unsigned int factor = get_update_sysctl_factor();
#define WRT_SYSCTL(name) \
(normalized_sysctl_##name = sysctl_##name / (factor))
WRT_SYSCTL(sched_base_slice);
#undef WRT_SYSCTL
return 0;
}
#endif
#endif
static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
/*
* XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i;
* this is probably good enough.
*/
static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if ((s64)(se->vruntime - se->deadline) < 0)
return;
/*
* For EEVDF the virtual time slope is determined by w_i (iow.
* nice) while the request time r_i is determined by
* sysctl_sched_base_slice.
*/
se->slice = sysctl_sched_base_slice;
/*
* EEVDF: vd_i = ve_i + r_i / w_i
*/
se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
/*
* The task has consumed its request, reschedule.
*/
if (cfs_rq->nr_running > 1) {
resched_curr(rq_of(cfs_rq));
clear_buddies(cfs_rq, se);
}
}
#include "pelt.h"
#ifdef CONFIG_SMP
static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);
static unsigned long capacity_of(int cpu);
/* Give a new sched_entity initial runnable values so its load appears heavy during its infancy */
void init_entity_runnable_average(struct sched_entity *se)
{
struct sched_avg *sa = &se->avg;
memset(sa, 0, sizeof(*sa));
/*
* Tasks are initialized with full load to be seen as heavy tasks until
* they get a chance to stabilize to their real load level.
* Group entities are initialized with zero load to reflect the fact that
* nothing has been attached to the task group yet.
*/
if (entity_is_task(se))
sa->load_avg = scale_load_down(se->load.weight);
/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}
/*
* With new tasks being created, their initial util_avgs are extrapolated
* based on the cfs_rq's current util_avg:
*
* util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
*
* However, in many cases, the above util_avg does not give a desired
* value. Moreover, the sum of the util_avgs may be divergent, such
* as when the series is a harmonic series.
*
* To solve this problem, we also cap the util_avg of successive tasks to
* only 1/2 of the left utilization budget:
*
* util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
*
* where n denotes the nth task and cpu_scale the CPU capacity.
*
* For example, for a CPU with 1024 of capacity, the simplest series
* from the beginning would look like:
*
* task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
* cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
*
* Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
* if util_avg > util_avg_cap.
*/
void post_init_entity_util_avg(struct task_struct *p)
{
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct sched_avg *sa = &se->avg;
long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
if (p->sched_class != &fair_sched_class) {
/*
* For !fair tasks do:
*
update_cfs_rq_load_avg(now, cfs_rq);
attach_entity_load_avg(cfs_rq, se);
switched_from_fair(rq, p);
*
* such that the next switched_to_fair() has the
* expected state.
*/
se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
return;
}
if (cap > 0) {
if (cfs_rq->avg.util_avg != 0) {
sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
sa->util_avg /= (cfs_rq->avg.load_avg + 1);
if (sa->util_avg > cap)
sa->util_avg = cap;
} else {
sa->util_avg = cap;
}
}
sa->runnable_avg = sa->util_avg;
}
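/*
* Illustrative standalone sketch, not kernel code: the capped series from
* the comment above post_init_entity_util_avg(). Each new task may claim
* at most half of the remaining utilization budget, so the cfs_rq util
* converges to cpu_scale instead of diverging. The ex_* names are
* hypothetical.
*/
static void ex_util_series(long cpu_scale, int n, long *task_util)
{
long cfs_util = 0;
int i;
for (i = 0; i < n; i++) {
long cap = (cpu_scale - cfs_util) / 2; /* util_avg_cap */
task_util[i] = cap;
cfs_util += cap;
}
}
/*
* With cpu_scale = 1024, task_util becomes {512, 256, 128, 64, ...} and
* the accumulated cfs_util walks 512, 768, 896, 960, ... as documented.
*/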
#else /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct task_struct *p)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_SMP */
/*
* Update the current task's runtime statistics.
*/
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
u64 now = rq_clock_task(rq_of(cfs_rq));
u64 delta_exec;
if (unlikely(!curr))
return;
delta_exec = now - curr->exec_start;
if (unlikely((s64)delta_exec <= 0))
return;
curr->exec_start = now;
if (schedstat_enabled()) {
struct sched_statistics *stats;
stats = __schedstats_from_se(curr);
__schedstat_set(stats->exec_max,
max(delta_exec, stats->exec_max));
}
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
cgroup_account_cputime(curtask, delta_exec);
account_group_exec_runtime(curtask, delta_exec);
}
account_cfs_rq_runtime(cfs_rq, delta_exec);
}
static void update_curr_fair(struct rq *rq)
{
update_curr(cfs_rq_of(&rq->curr->se));
}
static inline void
update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct sched_statistics *stats;
struct task_struct *p = NULL;
if (!schedstat_enabled())
return;
stats = __schedstats_from_se(se);
if (entity_is_task(se))
p = task_of(se);
__update_stats_wait_start(rq_of(cfs_rq), p, stats);
}
static inline void
update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct sched_statistics *stats;
struct task_struct *p = NULL;
if (!schedstat_enabled())
return;
stats = __schedstats_from_se(se);
/*
* When sched_schedstat changes from 0 to 1, some sched entities
* may already be in the runqueue with se->statistics.wait_start
* equal to 0, which would make the computed delta wrong. We need
* to avoid this scenario.
*/
if (unlikely(!schedstat_val(stats->wait_start)))
return;
if (entity_is_task(se))
p = task_of(se);
__update_stats_wait_end(rq_of(cfs_rq), p, stats);
}
static inline void
update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct sched_statistics *stats;
struct task_struct *tsk = NULL;
if (!schedstat_enabled())
return;
stats = __schedstats_from_se(se);
if (entity_is_task(se))
tsk = task_of(se);
__update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats);
}
/*
* Task is being enqueued - update stats:
*/
static inline void
update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
if (!schedstat_enabled())
return;
/*
* Are we enqueueing a waiting task? (for current tasks
* a dequeue/enqueue event is a NOP)
*/
if (se != cfs_rq->curr)
update_stats_wait_start_fair(cfs_rq, se);
if (flags & ENQUEUE_WAKEUP)
update_stats_enqueue_sleeper_fair(cfs_rq, se);
}
static inline void
update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
if (!schedstat_enabled())
return;
/*
* Mark the end of the wait period if dequeueing a
* waiting task:
*/
if (se != cfs_rq->curr)
update_stats_wait_end_fair(cfs_rq, se);
if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
struct task_struct *tsk = task_of(se);
unsigned int state;
/* XXX racy against TTWU */
state = READ_ONCE(tsk->__state);
if (state & TASK_INTERRUPTIBLE)
__schedstat_set(tsk->stats.sleep_start,
rq_clock(rq_of(cfs_rq)));
if (state & TASK_UNINTERRUPTIBLE)
__schedstat_set(tsk->stats.block_start,
rq_clock(rq_of(cfs_rq)));
}
}
/*
* We are picking a new current task - update its stats:
*/
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
/*
* We are starting a new run period:
*/
se->exec_start = rq_clock_task(rq_of(cfs_rq));
}
/**************************************************
* Scheduling class queueing methods:
*/
static inline bool is_core_idle(int cpu)
{
#ifdef CONFIG_SCHED_SMT
int sibling;
for_each_cpu(sibling, cpu_smt_mask(cpu)) {
if (cpu == sibling)
continue;
if (!idle_cpu(sibling))
return false;
}
#endif
return true;
}
#ifdef CONFIG_NUMA
#define NUMA_IMBALANCE_MIN 2
static inline long
adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
{
/*
* Allow a NUMA imbalance if the number of busy CPUs is less than the
* maximum threshold. Above this threshold, individual tasks may be
* contending for both memory bandwidth and any shared HT resources.
* This is an approximation, as the number of running tasks may not be
* related to the number of busy CPUs due to sched_setaffinity.
*/
if (dst_running > imb_numa_nr)
return imbalance;
/*
* Allow a small imbalance based on a simple pair of communicating
* tasks that remain local when the destination is lightly loaded.
*/
if (imbalance <= NUMA_IMBALANCE_MIN)
return 0;
return imbalance;
}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_NUMA_BALANCING
/*
* Approximate time to scan a full NUMA task in ms. The task scan period is
* calculated based on the task's virtual memory size and
* numa_balancing_scan_size.
*/
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;
/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;
/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;
/* A page whose hint page fault latency (in ms) is below the threshold is considered hot */
unsigned int sysctl_numa_balancing_hot_threshold = MSEC_PER_SEC;
struct numa_group {
refcount_t refcount;
spinlock_t lock; /* nr_tasks, tasks */
int nr_tasks;
pid_t gid;
int active_nodes;
struct rcu_head rcu;
unsigned long total_faults;
unsigned long max_faults_cpu;
/*
* faults[] array is split into two regions: faults_mem and faults_cpu.
*
* Faults_cpu is used to decide whether memory should move
* towards the CPU. As a consequence, these stats are weighted
* more by CPU use than by memory faults.
*/
unsigned long faults[];
};
/*
* For functions that can be called in multiple contexts that permit reading
* ->numa_group (see struct task_struct for locking rules).
*/
static struct numa_group *deref_task_numa_group(struct task_struct *p)
{
return rcu_dereference_check(p->numa_group, p == current ||
(lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
}
static struct numa_group *deref_curr_numa_group(struct task_struct *p)
{
return rcu_dereference_protected(p->numa_group, p == current);
}
static inline unsigned long group_faults_priv(struct numa_group *ng);
static inline unsigned long group_faults_shared(struct numa_group *ng);
static unsigned int task_nr_scan_windows(struct task_struct *p)
{
unsigned long rss = 0;
unsigned long nr_scan_pages;
/*
* Calculations are based on RSS, as non-present and empty pages are
* skipped by the PTE scanner and NUMA hinting faults should be
* trapped based on resident pages.
*/
nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
rss = get_mm_rss(p->mm);
if (!rss)
rss = nr_scan_pages;
rss = round_up(rss, nr_scan_pages);
return rss / nr_scan_pages;
}
/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560
static unsigned int task_scan_min(struct task_struct *p)
{
unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
unsigned int scan, floor;
unsigned int windows = 1;
if (scan_size < MAX_SCAN_WINDOW)
windows = MAX_SCAN_WINDOW / scan_size;
floor = 1000 / windows;
scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
return max_t(unsigned int, floor, scan);
}
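/*
* Illustrative standalone sketch, not kernel code: the floor arithmetic
* in task_scan_min() with the defaults above. ex_task_scan_min() is a
* hypothetical, simplified copy using scan_period_min = 1000ms.
*/
static unsigned int ex_task_scan_min(unsigned int scan_size_mb,
unsigned int nr_scan_windows)
{
unsigned int windows = 1;
unsigned int floor, scan;
if (scan_size_mb < 2560) /* MAX_SCAN_WINDOW */
windows = 2560 / scan_size_mb;
floor = 1000 / windows;
scan = 1000 / nr_scan_windows;
return scan > floor ? scan : floor;
}
/*
* With scan_size = 256MB, windows = 10 and floor = 100ms; a task whose
* RSS spans 4 scan windows gets max(100, 250) = 250ms, while one spanning
* 20 windows is clamped up to the 100ms floor.
*/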
static unsigned int task_scan_start(struct task_struct *p)
{
unsigned long smin = task_scan_min(p);
unsigned long period = smin;
struct numa_group *ng;
/* Scale the maximum scan period with the amount of shared memory. */
rcu_read_lock();
ng = rcu_dereference(p->numa_group);
if (ng) {
unsigned long shared = group_faults_shared(ng);
unsigned long private = group_faults_priv(ng);
period *= refcount_read(&ng->refcount);
period *= shared + 1;
period /= private + shared + 1;
}
rcu_read_unlock();
return max(smin, period);
}
static unsigned int task_scan_max(struct task_struct *p)
{
unsigned long smin = task_scan_min(p);
unsigned long smax;
struct numa_group *ng;
/* Watch for max being lower than min due to floor calculations */
smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
/* Scale the maximum scan period with the amount of shared memory. */
ng = deref_curr_numa_group(p);
if (ng) {
unsigned long shared = group_faults_shared(ng);
unsigned long private = group_faults_priv(ng);
unsigned long period = smax;
period *= refcount_read(&ng->refcount);
period *= shared + 1;
period /= private + shared + 1;
smax = max(smax, period);
}
return max(smin, smax);
}
static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}
static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}
/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2
/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
pid_t task_numa_group_id(struct task_struct *p)
{
struct numa_group *ng;
pid_t gid = 0;
rcu_read_lock();
ng = rcu_dereference(p->numa_group);
if (ng)
gid = ng->gid;
rcu_read_unlock();
return gid;
}
/*
* The averaged statistics, shared & private, memory & CPU,
* occupy the first half of the array. The second half of the
* array is for current counters, which are averaged into the
* first set by task_numa_placement.
*/
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}
static inline unsigned long task_faults(struct task_struct *p, int nid)
{
if (!p->numa_faults)
return 0;
return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}
static inline unsigned long group_faults(struct task_struct *p, int nid)
{
struct numa_group *ng = deref_task_numa_group(p);
if (!ng)
return 0;
return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}
static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] +
group->faults[task_faults_idx(NUMA_CPU, nid, 1)];
}
static inline unsigned long group_faults_priv(struct numa_group *ng)
{
unsigned long faults = 0;
int node;
for_each_online_node(node) {
faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
}
return faults;
}
static inline unsigned long group_faults_shared(struct numa_group *ng)
{
unsigned long faults = 0;
int node;
for_each_online_node(node) {
faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
}
return faults;
}
/*
* A node triggering more than 1/3 as many NUMA faults as the maximum is
* considered part of a numa group's pseudo-interleaving set. Migrations
* between these nodes are slowed down, to allow things to settle down.
*/
#define ACTIVE_NODE_FRACTION 3
static bool numa_is_active_node(int nid, struct numa_group *ng)
{
return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}
/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
int lim_dist, bool task)
{
unsigned long score = 0;
int node, max_dist;
/*
* All nodes are directly connected, and the same distance
* from each other. No need for fancy placement algorithms.
*/
if (sched_numa_topology_type == NUMA_DIRECT)
return 0;
/* sched_max_numa_distance may be changed in parallel. */
max_dist = READ_ONCE(sched_max_numa_distance);
/*
* This code is called for each node, introducing N^2 complexity,
* which should be ok given the number of nodes rarely exceeds 8.
*/
for_each_online_node(node) {
unsigned long faults;
int dist = node_distance(nid, node);
/*
* The furthest away nodes in the system are not interesting
* for placement; nid was already counted.
*/
if (dist >= max_dist || node == nid)
continue;
/*
* On systems with a backplane NUMA topology, compare groups
* of nodes, and move tasks towards the group with the most
* memory accesses. When comparing two nodes at distance
* "hoplimit", only nodes closer by than "hoplimit" are part
* of each group. Skip other nodes.
*/
if (sched_numa_topology_type == NUMA_BACKPLANE && dist >= lim_dist)
continue;
/* Add up the faults from nearby nodes. */
if (task)
faults = task_faults(p, node);
else
faults = group_faults(p, node);
/*
* On systems with a glueless mesh NUMA topology, there are
* no fixed "groups of nodes". Instead, nodes that are not
* directly connected bounce traffic through intermediate
* nodes; a numa_group can occupy any set of nodes.
* The further away a node is, the less the faults count.
* This seems to result in good task placement.
*/
if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
faults *= (max_dist - dist);
faults /= (max_dist - LOCAL_DISTANCE);
}
score += faults;
}
return score;
}
/*
* These return the fraction of accesses done by a particular task, or
* task group, on a particular numa node. The group weight is given a
* larger multiplier, in order to group tasks together that are almost
* evenly spread out between numa nodes.
*/
static inline unsigned long task_weight(struct task_struct *p, int nid,
int dist)
{
unsigned long faults, total_faults;
if (!p->numa_faults)
return 0;
total_faults = p->total_numa_faults;
if (!total_faults)
return 0;
faults = task_faults(p, nid);
faults += score_nearby_nodes(p, nid, dist, true);
return 1000 * faults / total_faults;
}
static inline unsigned long group_weight(struct task_struct *p, int nid,
int dist)
{
struct numa_group *ng = deref_task_numa_group(p);
unsigned long faults, total_faults;
if (!ng)
return 0;
total_faults = ng->total_faults;
if (!total_faults)
return 0;
faults = group_faults(p, nid);
faults += score_nearby_nodes(p, nid, dist, false);
return 1000 * faults / total_faults;
}
/*
* If memory tiering mode is enabled, the cpupid of a slow memory page
* is used to record scan time instead of CPU and PID. When tiering
* mode is disabled at run time, the scan time (in cpupid) will be
* interpreted as CPU and PID, so the CPU needs to be checked to
* avoid out-of-bounds array access.
*/
static inline bool cpupid_valid(int cpupid)
{
return cpupid_to_cpu(cpupid) < nr_cpu_ids;
}
/*
* For memory tiering mode, if there are enough free pages (more than
* the enough-watermark defined here) in the fast memory node, then to
* take full advantage of fast memory capacity, all recently accessed
* slow memory pages will be migrated to the fast memory node without
* considering the hot threshold.
*/
static bool pgdat_free_space_enough(struct pglist_data *pgdat)
{
int z;
unsigned long enough_wmark;
enough_wmark = max(1UL * 1024 * 1024 * 1024 >> PAGE_SHIFT,
pgdat->node_present_pages >> 4);
for (z = pgdat->nr_zones - 1; z >= 0; z--) {
struct zone *zone = pgdat->node_zones + z;
if (!populated_zone(zone))
continue;
if (zone_watermark_ok(zone, 0,
wmark_pages(zone, WMARK_PROMO) + enough_wmark,
ZONE_MOVABLE, 0))
return true;
}
return false;
}
/*
* For memory tiering mode, when page tables are scanned, the scan
* time will be recorded in struct page in addition to making slow
* memory pages PROT_NONE. So when the page is accessed, in the hint
* page fault handler, the hint page fault latency is calculated via:
*
* hint page fault latency = hint page fault time - scan time
*
* The smaller the hint page fault latency, the higher the possibility
* for the page to be hot.
*/
static int numa_hint_fault_latency(struct page *page)
{
int last_time, time;
time = jiffies_to_msecs(jiffies);
last_time = xchg_page_access_time(page, time);
return (time - last_time) & PAGE_ACCESS_TIME_MASK;
}
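/*
* Illustrative standalone sketch, not kernel code: the masked subtraction
* above stays correct when the millisecond counter wraps. With a
* hypothetical 16-bit mask, a page scanned at time 65530 and accessed at
* time 5 yields the true latency: (5 - 65530) & 0xffff == 11.
*/
static int ex_fault_latency(int now, int scan_time, int mask)
{
return (now - scan_time) & mask;
}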
/*
* For memory tiering mode, too high a promotion/demotion throughput
* may hurt application latency. So we provide a mechanism to rate
* limit the number of pages we try to promote.
*/
static bool numa_promotion_rate_limit(struct pglist_data *pgdat,
unsigned long rate_limit, int nr)
{
unsigned long nr_cand;
unsigned int now, start;
now = jiffies_to_msecs(jiffies);
mod_node_page_state(pgdat, PGPROMOTE_CANDIDATE, nr);
nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
start = pgdat->nbp_rl_start;
if (now - start > MSEC_PER_SEC &&
cmpxchg(&pgdat->nbp_rl_start, start, now) == start)
pgdat->nbp_rl_nr_cand = nr_cand;
if (nr_cand - pgdat->nbp_rl_nr_cand >= rate_limit)
return true;
return false;
}
#define NUMA_MIGRATION_ADJUST_STEPS 16
static void numa_promotion_adjust_threshold(struct pglist_data *pgdat,
unsigned long rate_limit,
unsigned int ref_th)
{
unsigned int now, start, th_period, unit_th, th;
unsigned long nr_cand, ref_cand, diff_cand;
now = jiffies_to_msecs(jiffies);
th_period = sysctl_numa_balancing_scan_period_max;
start = pgdat->nbp_th_start;
if (now - start > th_period &&
cmpxchg(&pgdat->nbp_th_start, start, now) == start) {
ref_cand = rate_limit *
sysctl_numa_balancing_scan_period_max / MSEC_PER_SEC;
nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
diff_cand = nr_cand - pgdat->nbp_th_nr_cand;
unit_th = ref_th * 2 / NUMA_MIGRATION_ADJUST_STEPS;
th = pgdat->nbp_threshold ? : ref_th;
if (diff_cand > ref_cand * 11 / 10)
th = max(th - unit_th, unit_th);
else if (diff_cand < ref_cand * 9 / 10)
th = min(th + unit_th, ref_th * 2);
pgdat->nbp_th_nr_cand = nr_cand;
pgdat->nbp_threshold = th;
}
}
bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
int src_nid, int dst_cpu)
{
struct numa_group *ng = deref_curr_numa_group(p);
int dst_nid = cpu_to_node(dst_cpu);
int last_cpupid, this_cpupid;
/*
* The pages in the slow memory node should be migrated according
* to hot/cold instead of private/shared.
*/
if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
!node_is_toptier(src_nid)) {
struct pglist_data *pgdat;
unsigned long rate_limit;
unsigned int latency, th, def_th;
pgdat = NODE_DATA(dst_nid);
if (pgdat_free_space_enough(pgdat)) {
/* workload changed, reset hot threshold */
pgdat->nbp_threshold = 0;
return true;
}
def_th = sysctl_numa_balancing_hot_threshold;
rate_limit = sysctl_numa_balancing_promote_rate_limit <<
(20 - PAGE_SHIFT);
numa_promotion_adjust_threshold(pgdat, rate_limit, def_th);
th = pgdat->nbp_threshold ? : def_th;
latency = numa_hint_fault_latency(page);
if (latency >= th)
return false;
return !numa_promotion_rate_limit(pgdat, rate_limit,
thp_nr_pages(page));
}
this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
!node_is_toptier(src_nid) && !cpupid_valid(last_cpupid))
return false;
/*
* Allow first faults or private faults to migrate immediately early in
* the lifetime of a task. The magic number 4 is based on waiting for
* two full passes of the "multi-stage node selection" test that is
* executed below.
*/
if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
(cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
return true;
/*
* Multi-stage node selection is used in conjunction with a periodic
* migration fault to build a temporal task<->page relation. By using
* a two-stage filter we remove short/unlikely relations.
*
* Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
* a task's usage of a particular page (n_p) per total usage of this
* page (n_t) (in a given time-span) to a probability.
*
* Our periodic faults will sample this probability and getting the
* same result twice in a row, given these samples are fully
* independent, is then given by P(n)^2, provided our sample period
* is sufficiently short compared to the usage pattern.
*
* This quadratic squishes small probabilities, making it less likely we
* act on an unlikely task<->page relation.
*/
if (!cpupid_pid_unset(last_cpupid) &&
cpupid_to_nid(last_cpupid) != dst_nid)
return false;
/* Always allow migrate on private faults */
if (cpupid_match_pid(p, last_cpupid))
return true;
/* A shared fault, but p->numa_group has not been set up yet. */
if (!ng)
return true;
/*
* Destination node is much more heavily used than the source
* node? Allow migration.
*/
if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
ACTIVE_NODE_FRACTION)
return true;
/*
* Distribute memory according to CPU & memory use on each node,
* with 3/4 hysteresis to avoid unnecessary memory migrations:
*
* faults_cpu(dst) 3 faults_cpu(src)
* --------------- * - > ---------------
* faults_mem(dst) 4 faults_mem(src)
*/
return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}
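/*
* Illustrative worked example, not kernel code, of the 3/4 hysteresis
* above with hypothetical fault counts: faults_cpu(dst) = 60,
* faults_mem(dst) = 40, faults_cpu(src) = 50, faults_mem(src) = 40.
* Then 60 * 40 * 3 = 7200 is not greater than 50 * 40 * 4 = 8000, so the
* page stays put: dst's CPU/memory fault ratio must beat src's by more
* than 4/3 before a migration is allowed.
*/
static int ex_should_migrate(unsigned long dst_cpu_f, unsigned long src_mem_f,
unsigned long src_cpu_f, unsigned long dst_mem_f)
{
return dst_cpu_f * src_mem_f * 3 > src_cpu_f * dst_mem_f * 4;
}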
/*
* 'numa_type' describes the node at the moment of load balancing.
*/
enum numa_type {
/* The node has spare capacity that can be used to run more tasks. */
node_has_spare = 0,
/*
* The node is fully used and the tasks don't compete for more CPU
* cycles. Nevertheless, some tasks might wait before running.
*/
node_fully_busy,
/*
* The node is overloaded and can't provide expected CPU cycles to all
* tasks.
*/
node_overloaded
};
/* Cached statistics for all CPUs within a node */
struct numa_stats {
unsigned long load;
unsigned long runnable;
unsigned long util;
/* Total compute capacity of CPUs on a node */
unsigned long compute_capacity;
unsigned int nr_running;
unsigned int weight;
enum numa_type node_type;
int idle_cpu;
};
struct task_numa_env {
struct task_struct *p;
int src_cpu, src_nid;
int dst_cpu, dst_nid;
int imb_numa_nr;
struct numa_stats src_stats, dst_stats;
int imbalance_pct;
int dist;
struct task_struct *best_task;
long best_imp;
int best_cpu;
};
static unsigned long cpu_load(struct rq *rq);
static unsigned long cpu_runnable(struct rq *rq);
static inline enum
numa_type numa_classify(unsigned int imbalance_pct,
struct numa_stats *ns)
{
if ((ns->nr_running > ns->weight) &&
(((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
return node_overloaded;
if ((ns->nr_running < ns->weight) ||
(((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
return node_has_spare;
return node_fully_busy;
}
#ifdef CONFIG_SCHED_SMT
/* Forward declarations of select_idle_sibling helpers */
static inline bool test_idle_cores(int cpu);
static inline int numa_idle_core(int idle_core, int cpu)
{
if (!static_branch_likely(&sched_smt_present) ||
idle_core >= 0 || !test_idle_cores(cpu))
return idle_core;
/*
* Prefer cores instead of packing HT siblings
* and triggering future load balancing.
*/
if (is_core_idle(cpu))
idle_core = cpu;
return idle_core;
}
#else
static inline int numa_idle_core(int idle_core, int cpu)
{
return idle_core;
}
#endif
/*
* Gather all necessary information to make NUMA balancing placement
* decisions that are compatible with the standard load balancer. This
* borrows code and logic from update_sg_lb_stats but sharing a
* common implementation is impractical.
*/
static void update_numa_stats(struct task_numa_env *env,
struct numa_stats *ns, int nid,
bool find_idle)
{
int cpu, idle_core = -1;
memset(ns, 0, sizeof(*ns));
ns->idle_cpu = -1;
rcu_read_lock();
for_each_cpu(cpu, cpumask_of_node(nid)) {
struct rq *rq = cpu_rq(cpu);
ns->load += cpu_load(rq);
ns->runnable += cpu_runnable(rq);
ns->util += cpu_util_cfs(cpu);
ns->nr_running += rq->cfs.h_nr_running;
ns->compute_capacity += capacity_of(cpu);
if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
if (READ_ONCE(rq->numa_migrate_on) ||
!cpumask_test_cpu(cpu, env->p->cpus_ptr))
continue;
if (ns->idle_cpu == -1)
ns->idle_cpu = cpu;
idle_core = numa_idle_core(idle_core, cpu);
}
}
rcu_read_unlock();
ns->weight = cpumask_weight(cpumask_of_node(nid));
ns->node_type = numa_classify(env->imbalance_pct, ns);
if (idle_core >= 0)
ns->idle_cpu = idle_core;
}
static void task_numa_assign(struct task_numa_env *env,
struct task_struct *p, long imp)
{
struct rq *rq = cpu_rq(env->dst_cpu);
/* Check if the run-queue is part of an active NUMA balance. */
if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
int cpu;
int start = env->dst_cpu;
/* Find alternative idle CPU. */
for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start + 1) {
if (cpu == env->best_cpu || !idle_cpu(cpu) ||
!cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
continue;
}
env->dst_cpu = cpu;
rq = cpu_rq(env->dst_cpu);
if (!xchg(&rq->numa_migrate_on, 1))
goto assign;
}
/* Failed to find an alternative idle CPU */
return;
}
assign:
/*
* Clear the previous best_cpu/rq numa-migrate flag, since the task
* has now found a better CPU to move/swap to.
*/
if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
rq = cpu_rq(env->best_cpu);
WRITE_ONCE(rq->numa_migrate_on, 0);
}
if (env->best_task)
put_task_struct(env->best_task);
if (p)
get_task_struct(p);
env->best_task = p;
env->best_imp = imp;
env->best_cpu = env->dst_cpu;
}
static bool load_too_imbalanced(long src_load, long dst_load,
struct task_numa_env *env)
{
long imb, old_imb;
long orig_src_load, orig_dst_load;
long src_capacity, dst_capacity;
/*
* The load is corrected for the CPU capacity available on each node.
*
* src_load dst_load
* ------------ vs ---------
* src_capacity dst_capacity
*/
src_capacity = env->src_stats.compute_capacity;
dst_capacity = env->dst_stats.compute_capacity;
imb = abs(dst_load * src_capacity - src_load * dst_capacity);
orig_src_load = env->src_stats.load;
orig_dst_load = env->dst_stats.load;
old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
/* Would this change make things worse? */
return (imb > old_imb);
}
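/*
* Illustrative worked example, not kernel code, of the capacity-corrected
* cross-multiplication above. Hypothetical values: src_capacity = 1024,
* dst_capacity = 512, original loads 500/200, proposed loads 400/300:
* imb = |300*1024 - 400*512| = 102400 exceeds
* old_imb = |200*1024 - 500*512| = 51200, so the move would worsen the
* capacity-relative imbalance and is rejected.
*/
static int ex_too_imbalanced(long src_load, long dst_load,
long src_cap, long dst_cap, long old_src, long old_dst)
{
long imb = dst_load * src_cap - src_load * dst_cap;
long old = old_dst * src_cap - old_src * dst_cap;
if (imb < 0)
imb = -imb;
if (old < 0)
old = -old;
return imb > old;
}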
/*
* Maximum NUMA importance can be 1998 (2*999);
* SMALLIMP @ 30 would be close to 1998/64.
* Used to deter task migration.
*/
#define SMALLIMP 30
/*
* This checks if the overall compute and NUMA accesses of the system
* would be improved if the source task were migrated to the target
* dst_cpu, taking into account that it might be best to exchange the
* task running on the dst_cpu with the source task.
*/
static bool task_numa_compare(struct task_numa_env *env,
long taskimp, long groupimp, bool maymove)
{
struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
struct rq *dst_rq = cpu_rq(env->dst_cpu);
long imp = p_ng ? groupimp : taskimp;
struct task_struct *cur;
long src_load, dst_load;
int dist = env->dist;
long moveimp = imp;
long load;
bool stopsearch = false;
if (READ_ONCE(dst_rq->numa_migrate_on))
return false;
rcu_read_lock();
cur = rcu_dereference(dst_rq->curr);
if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
cur = NULL;
/*
* Because we have preemption enabled we can get migrated around and
* end up trying to select ourselves (current == env->p) as a swap
* candidate.
*/
if (cur == env->p) {
stopsearch = true;
goto unlock;
}
if (!cur) {
if (maymove && moveimp >= env->best_imp)
goto assign;
else
goto unlock;
}
/* Skip this swap candidate if it cannot move to the source CPU. */
if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
goto unlock;
/*
* Skip this swap candidate if it is not moving to its preferred
* node and the best task is.
*/
if (env->best_task &&
env->best_task->numa_preferred_nid == env->src_nid &&
cur->numa_preferred_nid != env->src_nid) {
goto unlock;
}
/*
* "imp" is the fault differential for the source task between the
* source and destination node. Calculate the total differential for
* the source task and potential destination task. The more negative
* the value is, the more remote accesses would be expected to
* be incurred if the tasks were swapped.
*
* If dst and source tasks are in the same NUMA group, or not
* in any group then look only at task weights.
*/
cur_ng = rcu_dereference(cur->numa_group);
if (cur_ng == p_ng) {
/*
* Do not swap within a group or between tasks that have
* no group if there is spare capacity. Swapping does
* not address the load imbalance and helps one task at
* the cost of punishing another.
*/
if (env->dst_stats.node_type == node_has_spare)
goto unlock;
imp = taskimp + task_weight(cur, env->src_nid, dist) -
task_weight(cur, env->dst_nid, dist);
/*
* Add some hysteresis to prevent swapping the
* tasks within a group over tiny differences.
*/
if (cur_ng)
imp -= imp / 16;
} else {
/*
* Compare the group weights. If a task is all by itself
* (not part of a group), use the task weight instead.
*/
if (cur_ng && p_ng)
imp += group_weight(cur, env->src_nid, dist) -
group_weight(cur, env->dst_nid, dist);
else
imp += task_weight(cur, env->src_nid, dist) -
task_weight(cur, env->dst_nid, dist);
}
/* Discourage picking a task already on its preferred node */
if (cur->numa_preferred_nid == env->dst_nid)
imp -= imp / 16;
/*
* Encourage picking a task that moves to its preferred node.
* This potentially makes imp larger than its maximum of
* 1998 (see SMALLIMP and task_weight for why) but in this
* case, it does not matter.
*/
if (cur->numa_preferred_nid == env->src_nid)
imp += imp / 8;
if (maymove && moveimp > imp && moveimp > env->best_imp) {
imp = moveimp;
cur = NULL;
goto assign;
}
/*
* Prefer swapping with a task moving to its preferred node over a
* task that is not.
*/
if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
env->best_task->numa_preferred_nid != env->src_nid) {
goto assign;
}
/*
* If the NUMA importance is less than SMALLIMP,
* task migration might only result in ping pong
* of tasks and also hurt performance due to cache
* misses.
*/
if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
goto unlock;
/*
* In the overloaded case, try and keep the load balanced.
*/
load = task_h_load(env->p) - task_h_load(cur);
if (!load)
goto assign;
dst_load = env->dst_stats.load + load;
src_load = env->src_stats.load - load;
if (load_too_imbalanced(src_load, dst_load, env))
goto unlock;
assign:
/* Evaluate an idle CPU for a task numa move. */
if (!cur) {
int cpu = env->dst_stats.idle_cpu;
/* Nothing cached so current CPU went idle since the search. */
if (cpu < 0)
cpu = env->dst_cpu;
/*
* If the CPU is no longer truly idle and the previous best CPU
* is, keep using it.
*/
if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
idle_cpu(env->best_cpu)) {
cpu = env->best_cpu;
}
env->dst_cpu = cpu;
}
task_numa_assign(env, cur, imp);
/*
* If a move to idle is allowed because there is capacity or load
* balance improves then stop the search. While a better swap
* candidate may exist, a search is not free.
*/
if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
stopsearch = true;
/*
* If a swap candidate must be identified and the current best task
* moves its preferred node then stop the search.
*/
if (!maymove && env->best_task &&
env->best_task->numa_preferred_nid == env->src_nid) {
stopsearch = true;
}
unlock:
rcu_read_unlock();
return stopsearch;
}
static void task_numa_find_cpu(struct task_numa_env *env,
long taskimp, long groupimp)
{
bool maymove = false;
int cpu;
/*
* If dst node has spare capacity, then check if there is an
* imbalance that would be overruled by the load balancer.
*/
if (env->dst_stats.node_type == node_has_spare) {
unsigned int imbalance;
int src_running, dst_running;
/*
* Would movement cause an imbalance? Note that if src has
* more running tasks then the imbalance is ignored as the
* move improves the imbalance from the perspective of the
* CPU load balancer.
*/
src_running = env->src_stats.nr_running - 1;
dst_running = env->dst_stats.nr_running + 1;
imbalance = max(0, dst_running - src_running);
imbalance = adjust_numa_imbalance(imbalance, dst_running,
env->imb_numa_nr);
/* Use idle CPU if there is no imbalance */
if (!imbalance) {
maymove = true;
if (env->dst_stats.idle_cpu >= 0) {
env->dst_cpu = env->dst_stats.idle_cpu;
task_numa_assign(env, NULL, 0);
return;
}
}
} else {
long src_load, dst_load, load;
/*
* If the improvement from just moving env->p (one direction) is better
* than swapping tasks around, check if a move is possible.
*/
load = task_h_load(env->p);
dst_load = env->dst_stats.load + load;
src_load = env->src_stats.load - load;
maymove = !load_too_imbalanced(src_load, dst_load, env);
}
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
continue;
env->dst_cpu = cpu;
if (task_numa_compare(env, taskimp, groupimp, maymove))
break;
}
}
static int task_numa_migrate(struct task_struct *p)
{
struct task_numa_env env = {
.p = p,
.src_cpu = task_cpu(p),
.src_nid = task_node(p),
.imbalance_pct = 112,
.best_task = NULL,
.best_imp = 0,
.best_cpu = -1,
};
unsigned long taskweight, groupweight;
struct sched_domain *sd;
long taskimp, groupimp;
struct numa_group *ng;
struct rq *best_rq;
int nid, ret, dist;
/*
* Pick the lowest SD_NUMA domain, as that would have the smallest
* imbalance and would be the first to start moving tasks about.
*
* And we want to avoid any moving of tasks about, as that would create
* random movement of tasks -- counter to the NUMA conditions we're trying
* to satisfy here.
*/
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
if (sd) {
env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
env.imb_numa_nr = sd->imb_numa_nr;
}
rcu_read_unlock();
/*
* Cpusets can break the scheduler domain tree into smaller
* balance domains, some of which do not cross NUMA boundaries.
* Tasks that are "trapped" in such domains cannot be migrated
* elsewhere, so there is no point in (re)trying.
*/
if (unlikely(!sd)) {
sched_setnuma(p, task_node(p));
return -EINVAL;
}
env.dst_nid = p->numa_preferred_nid;
dist = env.dist = node_distance(env.src_nid, env.dst_nid);
taskweight = task_weight(p, env.src_nid, dist);
groupweight = group_weight(p, env.src_nid, dist);
update_numa_stats(&env, &env.src_stats, env.src_nid, false);
taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
/* Try to find a spot on the preferred nid. */
task_numa_find_cpu(&env, taskimp, groupimp);
/*
* Look at other nodes in these cases:
* - there is no space available on the preferred_nid
* - the task is part of a numa_group that is interleaved across
* multiple NUMA nodes; in order to better consolidate the group,
* we need to check other locations.
*/
ng = deref_curr_numa_group(p);
if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
for_each_node_state(nid, N_CPU) {
if (nid == env.src_nid || nid == p->numa_preferred_nid)
continue;
dist = node_distance(env.src_nid, env.dst_nid);
if (sched_numa_topology_type == NUMA_BACKPLANE &&
dist != env.dist) {
taskweight = task_weight(p, env.src_nid, dist);
groupweight = group_weight(p, env.src_nid, dist);
}
/* Only consider nodes where both task and groups benefit */
taskimp = task_weight(p, nid, dist) - taskweight;
groupimp = group_weight(p, nid, dist) - groupweight;
if (taskimp < 0 && groupimp < 0)
continue;
env.dist = dist;
env.dst_nid = nid;
update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
task_numa_find_cpu(&env, taskimp, groupimp);
}
}
/*
* If the task is part of a workload that spans multiple NUMA nodes,
* and is migrating into one of the workload's active nodes, remember
* this node as the task's preferred numa node, so the workload can
* settle down.
* A task that migrated to a second choice node will be better off
* trying for a better one later. Do not set the preferred node here.
*/
if (ng) {
if (env.best_cpu == -1)
nid = env.src_nid;
else
nid = cpu_to_node(env.best_cpu);
if (nid != p->numa_preferred_nid)
sched_setnuma(p, nid);
}
/* No better CPU than the current one was found. */
if (env.best_cpu == -1) {
trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
return -EAGAIN;
}
best_rq = cpu_rq(env.best_cpu);
if (env.best_task == NULL) {
ret = migrate_task_to(p, env.best_cpu);
WRITE_ONCE(best_rq->numa_migrate_on, 0);
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu);
return ret;
}
ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
WRITE_ONCE(best_rq->numa_migrate_on, 0);
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu);
put_task_struct(env.best_task);
return ret;
}
/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
unsigned long interval = HZ;
/* This task has no NUMA fault statistics yet */
if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
return;
/* Periodically retry migrating the task to the preferred node */
interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
p->numa_migrate_retry = jiffies + interval;
/* Success if task is already running on preferred CPU */
if (task_node(p) == p->numa_preferred_nid)
return;
/* Otherwise, try migrate to a CPU on the preferred node */
task_numa_migrate(p);
}
/*
* Find out how many nodes the workload is actively running on. Do this by
* tracking the nodes from which NUMA hinting faults are triggered. This can
* be different from the set of nodes where the workload's memory is currently
* located.
*/
static void numa_group_count_active_nodes(struct numa_group *numa_group)
{
unsigned long faults, max_faults = 0;
int nid, active_nodes = 0;
for_each_node_state(nid, N_CPU) {
faults = group_faults_cpu(numa_group, nid);
if (faults > max_faults)
max_faults = faults;
}
for_each_node_state(nid, N_CPU) {
faults = group_faults_cpu(numa_group, nid);
if (faults * ACTIVE_NODE_FRACTION > max_faults)
active_nodes++;
}
numa_group->max_faults_cpu = max_faults;
numa_group->active_nodes = active_nodes;
}
/*
* When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
* increments. The more local the fault statistics are, the higher the scan
* period will be for the next scan window. If local/(local+remote) ratio is
* below NUMA_PERIOD_THRESHOLD (the ratio ranges over 0..NUMA_PERIOD_SLOTS)
* the scan period will decrease. Aim for 70% local accesses.
*/
#define NUMA_PERIOD_SLOTS 10
#define NUMA_PERIOD_THRESHOLD 7
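/*
 * Worked example (illustrative numbers only, using the defaults above):
 * with a scan period of 1000ms, period_slot = DIV_ROUND_UP(1000, 10) =
 * 100ms. If 9 out of 10 faults are private (ps_ratio = 9), the period
 * grows by (9 - 7) * 100ms = 200ms (scan slower). If instead
 * lr_ratio = 4 and ps_ratio = 5, the period shrinks by
 * (7 - 5) * 100ms = 200ms (scan faster), as computed in
 * update_task_scan_period() below.
 */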
/*
* Increase the scan period (slow down scanning) if the majority of
* our memory is already on our local node, or if the majority of
* the page accesses are shared with other processes.
* Otherwise, decrease the scan period.
*/
static void update_task_scan_period(struct task_struct *p,
unsigned long shared, unsigned long private)
{
unsigned int period_slot;
int lr_ratio, ps_ratio;
int diff;
unsigned long remote = p->numa_faults_locality[0];
unsigned long local = p->numa_faults_locality[1];
/*
* If there were no recorded hinting faults then either the task is
* completely idle or all activity is in areas that are not of interest
* to automatic NUMA balancing. Related to that, if there were failed
* migrations then it implies we are migrating too quickly or the local
* node is overloaded. In either case, scan slower.
*/
if (local + shared == 0 || p->numa_faults_locality[2]) {
p->numa_scan_period = min(p->numa_scan_period_max,
p->numa_scan_period << 1);
p->mm->numa_next_scan = jiffies +
msecs_to_jiffies(p->numa_scan_period);
return;
}
/*
* Prepare to scale scan period relative to the current period.
* == NUMA_PERIOD_THRESHOLD scan period stays the same
* < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
* >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
*/
period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
/*
* Most memory accesses are local. There is no need to
* do fast NUMA scanning, since memory is already local.
*/
int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
if (!slot)
slot = 1;
diff = slot * period_slot;
} else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
/*
* Most memory accesses are shared with other tasks.
* There is no point in continuing fast NUMA scanning,
* since other tasks may just move the memory elsewhere.
*/
int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
if (!slot)
slot = 1;
diff = slot * period_slot;
} else {
/*
* Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
* yet they are not on the local NUMA node. Speed up
* NUMA scanning to get the memory moved over.
*/
int ratio = max(lr_ratio, ps_ratio);
diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
}
p->numa_scan_period = clamp(p->numa_scan_period + diff,
task_scan_min(p), task_scan_max(p));
memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
/*
* Get the fraction of time the task has been running since the last
* NUMA placement cycle. The scheduler keeps similar statistics, but
* decays those on a 32ms period, which is orders of magnitude off
* from the dozens-of-seconds NUMA balancing period. Use the scheduler
* stats only if the task is so new there are no NUMA statistics yet.
*/
static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
{
u64 runtime, delta, now;
/* Use the start of this time slice to avoid calculations. */
now = p->se.exec_start;
runtime = p->se.sum_exec_runtime;
if (p->last_task_numa_placement) {
delta = runtime - p->last_sum_exec_runtime;
*period = now - p->last_task_numa_placement;
/* Avoid time going backwards, prevent potential divide error: */
if (unlikely((s64)*period < 0))
*period = 0;
} else {
delta = p->se.avg.load_sum;
*period = LOAD_AVG_MAX;
}
p->last_sum_exec_runtime = runtime;
p->last_task_numa_placement = now;
return delta;
}
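/*
 * Example (illustrative): a task that ran 2 seconds of CPU time over a
 * 10 second placement interval returns delta = 2 * NSEC_PER_SEC with
 * *period = 10 * NSEC_PER_SEC; callers such as task_numa_placement()
 * then weigh its faults by roughly runtime/period = 1/5.
 */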
/*
* Determine the preferred nid for a task in a numa_group. This needs to
* be done in a way that produces consistent results with group_weight,
* otherwise workloads might not converge.
*/
static int preferred_group_nid(struct task_struct *p, int nid)
{
nodemask_t nodes;
int dist;
/* Direct connections between all NUMA nodes. */
if (sched_numa_topology_type == NUMA_DIRECT)
return nid;
/*
* On a system with glueless mesh NUMA topology, group_weight
* scores nodes according to the number of NUMA hinting faults on
* both the node itself, and on nearby nodes.
*/
if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
unsigned long score, max_score = 0;
int node, max_node = nid;
dist = sched_max_numa_distance;
for_each_node_state(node, N_CPU) {
score = group_weight(p, node, dist);
if (score > max_score) {
max_score = score;
max_node = node;
}
}
return max_node;
}
/*
* Finding the preferred nid in a system with NUMA backplane
* interconnect topology is more involved. The goal is to locate
* tasks from numa_groups near each other in the system, and
* untangle workloads from different sides of the system. This requires
* searching down the hierarchy of node groups, recursively searching
* inside the highest scoring group of nodes. The nodemask tricks
* keep the complexity of the search down.
*/
nodes = node_states[N_CPU];
for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
unsigned long max_faults = 0;
nodemask_t max_group = NODE_MASK_NONE;
int a, b;
/* Are there nodes at this distance from each other? */
if (!find_numa_distance(dist))
continue;
for_each_node_mask(a, nodes) {
unsigned long faults = 0;
nodemask_t this_group;
nodes_clear(this_group);
/* Sum group's NUMA faults; includes a==b case. */
for_each_node_mask(b, nodes) {
if (node_distance(a, b) < dist) {
faults += group_faults(p, b);
node_set(b, this_group);
node_clear(b, nodes);
}
}
/* Remember the top group. */
if (faults > max_faults) {
max_faults = faults;
max_group = this_group;
/*
* subtle: at the smallest distance there is
* just one node left in each "group", the
* winner is the preferred nid.
*/
nid = a;
}
}
/* Next round, evaluate the nodes within max_group. */
if (!max_faults)
break;
nodes = max_group;
}
return nid;
}
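/*
 * Sketch of the backplane search above, on an assumed 4-node topology:
 * nodes {0,1} and {2,3} form two halves at distance 20 internally and
 * 40 across. The first pass (dist = 40) groups nodes whose mutual
 * distance is below 40, i.e. {0,1} and {2,3}, and keeps whichever half
 * has more group faults; the next pass (dist = 20) then leaves a single
 * node per "group" inside that half, and the one with the most faults
 * becomes the preferred nid.
 */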
static void task_numa_placement(struct task_struct *p)
{
int seq, nid, max_nid = NUMA_NO_NODE;
unsigned long max_faults = 0;
unsigned long fault_types[2] = { 0, 0 };
unsigned long total_faults;
u64 runtime, period;
spinlock_t *group_lock = NULL;
struct numa_group *ng;
/*
* The p->mm->numa_scan_seq field gets updated without
* exclusive access. Use READ_ONCE() here to ensure
* that the field is read in a single access:
*/
seq = READ_ONCE(p->mm->numa_scan_seq);
if (p->numa_scan_seq == seq)
return;
p->numa_scan_seq = seq;
p->numa_scan_period_max = task_scan_max(p);
total_faults = p->numa_faults_locality[0] +
p->numa_faults_locality[1];
runtime = numa_get_avg_runtime(p, &period);
/* If the task is part of a group prevent parallel updates to group stats */
ng = deref_curr_numa_group(p);
if (ng) {
group_lock = &ng->lock;
spin_lock_irq(group_lock);
}
/* Find the node with the highest number of faults */
for_each_online_node(nid) {
/* Keep track of the offsets in numa_faults array */
int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
unsigned long faults = 0, group_faults = 0;
int priv;
for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
long diff, f_diff, f_weight;
mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
/* Decay existing window, copy faults since last scan */
diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
fault_types[priv] += p->numa_faults[membuf_idx];
p->numa_faults[membuf_idx] = 0;
/*
* Normalize the faults_from, so all tasks in a group
* count according to CPU use, instead of by the raw
* number of faults. Tasks with little runtime have
* little over-all impact on throughput, and thus their
* faults are less important.
*/
f_weight = div64_u64(runtime << 16, period + 1);
f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
(total_faults + 1);
f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
p->numa_faults[cpubuf_idx] = 0;
p->numa_faults[mem_idx] += diff;
p->numa_faults[cpu_idx] += f_diff;
faults += p->numa_faults[mem_idx];
p->total_numa_faults += diff;
if (ng) {
/*
* safe because we can only change our own group
*
* mem_idx represents the offset for a given
* nid and priv in a specific region because it
* is at the beginning of the numa_faults array.
*/
ng->faults[mem_idx] += diff;
ng->faults[cpu_idx] += f_diff;
ng->total_faults += diff;
group_faults += ng->faults[mem_idx];
}
}
if (!ng) {
if (faults > max_faults) {
max_faults = faults;
max_nid = nid;
}
} else if (group_faults > max_faults) {
max_faults = group_faults;
max_nid = nid;
}
}
/* Cannot migrate task to CPU-less node */
if (max_nid != NUMA_NO_NODE && !node_state(max_nid, N_CPU)) {
int near_nid = max_nid;
int distance, near_distance = INT_MAX;
for_each_node_state(nid, N_CPU) {
distance = node_distance(max_nid, nid);
if (distance < near_distance) {
near_nid = nid;
near_distance = distance;
}
}
max_nid = near_nid;
}
if (ng) {
numa_group_count_active_nodes(ng);
spin_unlock_irq(group_lock);
max_nid = preferred_group_nid(p, max_nid);
}
if (max_faults) {
/* Set the new preferred node */
if (max_nid != p->numa_preferred_nid)
sched_setnuma(p, max_nid);
}
update_task_scan_period(p, fault_types[0], fault_types[1]);
}
static inline int get_numa_group(struct numa_group *grp)
{
return refcount_inc_not_zero(&grp->refcount);
}
static inline void put_numa_group(struct numa_group *grp)
{
if (refcount_dec_and_test(&grp->refcount))
kfree_rcu(grp, rcu);
}
static void task_numa_group(struct task_struct *p, int cpupid, int flags,
int *priv)
{
struct numa_group *grp, *my_grp;
struct task_struct *tsk;
bool join = false;
int cpu = cpupid_to_cpu(cpupid);
int i;
if (unlikely(!deref_curr_numa_group(p))) {
unsigned int size = sizeof(struct numa_group) +
NR_NUMA_HINT_FAULT_STATS *
nr_node_ids * sizeof(unsigned long);
grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (!grp)
return;
refcount_set(&grp->refcount, 1);
grp->active_nodes = 1;
grp->max_faults_cpu = 0;
spin_lock_init(&grp->lock);
grp->gid = p->pid;
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
grp->faults[i] = p->numa_faults[i];
grp->total_faults = p->total_numa_faults;
grp->nr_tasks++;
rcu_assign_pointer(p->numa_group, grp);
}
rcu_read_lock();
tsk = READ_ONCE(cpu_rq(cpu)->curr);
if (!cpupid_match_pid(tsk, cpupid))
goto no_join;
grp = rcu_dereference(tsk->numa_group);
if (!grp)
goto no_join;
my_grp = deref_curr_numa_group(p);
if (grp == my_grp)
goto no_join;
/*
* Only join the other group if it's bigger; if we're the bigger group,
* the other task will join us.
*/
if (my_grp->nr_tasks > grp->nr_tasks)
goto no_join;
/*
* Tie-break on the grp address.
*/
if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
goto no_join;
/* Always join threads in the same process. */
if (tsk->mm == current->mm)
join = true;
/* Simple filter to avoid false positives due to PID collisions */
if (flags & TNF_SHARED)
join = true;
/* Update priv based on whether false sharing was detected */
*priv = !join;
if (join && !get_numa_group(grp))
goto no_join;
rcu_read_unlock();
if (!join)
return;
WARN_ON_ONCE(irqs_disabled());
double_lock_irq(&my_grp->lock, &grp->lock);
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
my_grp->faults[i] -= p->numa_faults[i];
grp->faults[i] += p->numa_faults[i];
}
my_grp->total_faults -= p->total_numa_faults;
grp->total_faults += p->total_numa_faults;
my_grp->nr_tasks--;
grp->nr_tasks++;
spin_unlock(&my_grp->lock);
spin_unlock_irq(&grp->lock);
rcu_assign_pointer(p->numa_group, grp);
put_numa_group(my_grp);
return;
no_join:
rcu_read_unlock();
return;
}
/*
* Get rid of NUMA statistics associated with a task (either current or dead).
* If @final is set, the task is dead and has reached refcount zero, so we can
* safely free all relevant data structures. Otherwise, there might be
* concurrent reads from places like load balancing and procfs, and we should
* reset the data back to default state without freeing ->numa_faults.
*/
void task_numa_free(struct task_struct *p, bool final)
{
/* safe: p either is current or is being freed by current */
struct numa_group *grp = rcu_dereference_raw(p->numa_group);
unsigned long *numa_faults = p->numa_faults;
unsigned long flags;
int i;
if (!numa_faults)
return;
if (grp) {
spin_lock_irqsave(&grp->lock, flags);
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
grp->faults[i] -= p->numa_faults[i];
grp->total_faults -= p->total_numa_faults;
grp->nr_tasks--;
spin_unlock_irqrestore(&grp->lock, flags);
RCU_INIT_POINTER(p->numa_group, NULL);
put_numa_group(grp);
}
if (final) {
p->numa_faults = NULL;
kfree(numa_faults);
} else {
p->total_numa_faults = 0;
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
numa_faults[i] = 0;
}
}
/*
* Got a PROT_NONE fault for a page on @node.
*/
void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
{
struct task_struct *p = current;
bool migrated = flags & TNF_MIGRATED;
int cpu_node = task_node(current);
int local = !!(flags & TNF_FAULT_LOCAL);
struct numa_group *ng;
int priv;
if (!static_branch_likely(&sched_numa_balancing))
return;
/* for example, ksmd faulting in a user's mm */
if (!p->mm)
return;
/*
* NUMA faults statistics are unnecessary for the slow memory
* node for memory tiering mode.
*/
if (!node_is_toptier(mem_node) &&
(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ||
!cpupid_valid(last_cpupid)))
return;
/* Allocate buffer to track faults on a per-node basis */
if (unlikely(!p->numa_faults)) {
int size = sizeof(*p->numa_faults) *
NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
if (!p->numa_faults)
return;
p->total_numa_faults = 0;
memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
/*
* First accesses are treated as private; otherwise, consider accesses
* to be private if the accessing pid has not changed.
*/
if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
priv = 1;
} else {
priv = cpupid_match_pid(p, last_cpupid);
if (!priv && !(flags & TNF_NO_GROUP))
task_numa_group(p, last_cpupid, flags, &priv);
}
/*
* If a workload spans multiple NUMA nodes, a shared fault that
* occurs wholly within the set of nodes that the workload is
* actively using should be counted as local. This allows the
* scan rate to slow down when a workload has settled down.
*/
ng = deref_curr_numa_group(p);
if (!priv && !local && ng && ng->active_nodes > 1 &&
numa_is_active_node(cpu_node, ng) &&
numa_is_active_node(mem_node, ng))
local = 1;
/*
* Retry to migrate task to preferred node periodically, in case it
* previously failed, or the scheduler moved us.
*/
if (time_after(jiffies, p->numa_migrate_retry)) {
task_numa_placement(p);
numa_migrate_preferred(p);
}
if (migrated)
p->numa_pages_migrated += pages;
if (flags & TNF_MIGRATE_FAIL)
p->numa_faults_locality[2] += pages;
p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
p->numa_faults_locality[local] += pages;
}
static void reset_ptenuma_scan(struct task_struct *p)
{
/*
* We only did a read acquisition of the mmap sem, so
* p->mm->numa_scan_seq is written to without exclusive access
* and the update is not guaranteed to be atomic. That's not
* much of an issue though, since this is just used for
* statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
* expensive, to avoid any form of compiler optimizations:
*/
WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
p->mm->numa_scan_offset = 0;
}
static bool vma_is_accessed(struct vm_area_struct *vma)
{
unsigned long pids;
/*
* Allow unconditional access on the first two scans, so that all the
* pages of all VMAs get prot_none faults introduced irrespective of the
* access pattern. This also avoids any side effect of task scanning
* amplifying the unfairness between disjoint sets of VMAs' accesses.
*/
if (READ_ONCE(current->mm->numa_scan_seq) < 2)
return true;
pids = vma->numab_state->access_pids[0] | vma->numab_state->access_pids[1];
return test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids);
}
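/*
 * Illustrative example (assumed values): on a 64-bit kernel,
 * BITS_PER_LONG = 64, so each task is hashed to one of 64 bits via
 * hash_32(current->pid, ilog2(64)) = hash_32(pid, 6). A VMA is scanned
 * by this task only if that bit is set in one of the two access_pids
 * windows, i.e. if a task hashing to the same bit accessed the VMA
 * recently.
 */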
#define VMA_PID_RESET_PERIOD (4 * sysctl_numa_balancing_scan_delay)
/*
* The expensive part of numa migration is done from task_work context.
* Triggered from task_tick_numa().
*/
static void task_numa_work(struct callback_head *work)
{
unsigned long migrate, next_scan, now = jiffies;
struct task_struct *p = current;
struct mm_struct *mm = p->mm;
u64 runtime = p->se.sum_exec_runtime;
struct vm_area_struct *vma;
unsigned long start, end;
unsigned long nr_pte_updates = 0;
long pages, virtpages;
struct vma_iterator vmi;
SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
work->next = work;
/*
* Who cares about NUMA placement when they're dying.
*
* NOTE: make sure not to dereference p->mm before this check,
* exit_task_work() happens _after_ exit_mm() so we could be called
* without p->mm even though we still had it when we enqueued this
* work.
*/
if (p->flags & PF_EXITING)
return;
if (!mm->numa_next_scan) {
mm->numa_next_scan = now +
msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
}
/*
* Enforce maximal scan/migration frequency..
*/
migrate = mm->numa_next_scan;
if (time_before(now, migrate))
return;
if (p->numa_scan_period == 0) {
p->numa_scan_period_max = task_scan_max(p);
p->numa_scan_period = task_scan_start(p);
}
next_scan = now + msecs_to_jiffies(p->numa_scan_period);
if (!try_cmpxchg(&mm->numa_next_scan, &migrate, next_scan))
return;
/*
* Delay this task enough that another task of this mm will likely win
* the next time around.
*/
p->node_stamp += 2 * TICK_NSEC;
start = mm->numa_scan_offset;
pages = sysctl_numa_balancing_scan_size;
pages <<= 20 - PAGE_SHIFT; /* MB in pages */
virtpages = pages * 8; /* Scan up to this much virtual space */
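/*
 * Example with assumed defaults: a sysctl_numa_balancing_scan_size of
 * 256 (MB) and 4K pages gives pages = 256 << (20 - 12) = 65536 pages,
 * and virtpages = 8 * 65536, bounding how much virtual space a single
 * pass may walk through.
 */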
if (!pages)
return;
if (!mmap_read_trylock(mm))
return;
vma_iter_init(&vmi, mm, start);
vma = vma_next(&vmi);
if (!vma) {
reset_ptenuma_scan(p);
start = 0;
vma_iter_set(&vmi, start);
vma = vma_next(&vmi);
}
do {
if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
continue;
}
/*
* Shared library pages mapped by multiple processes are not
* migrated as it is expected they are cache replicated. Avoid
* hinting faults in read-only file-backed mappings or the vdso
* as migrating the pages will be of marginal benefit.
*/
if (!vma->vm_mm ||
(vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
continue;
/*
* Skip inaccessible VMAs to avoid any confusion between
* PROT_NONE and NUMA hinting ptes
*/
if (!vma_is_accessible(vma))
continue;
/* Initialise new per-VMA NUMAB state. */
if (!vma->numab_state) {
vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
GFP_KERNEL);
if (!vma->numab_state)
continue;
vma->numab_state->next_scan = now +
msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
/* Reset happens after 4 times scan delay of scan start */
vma->numab_state->next_pid_reset = vma->numab_state->next_scan +
msecs_to_jiffies(VMA_PID_RESET_PERIOD);
}
/*
* Scanning the VMAs of short-lived tasks adds more overhead, so
* delay the scan for new VMAs.
*/
if (mm->numa_scan_seq && time_before(jiffies,
vma->numab_state->next_scan))
continue;
/* Do not scan the VMA if task has not accessed */
if (!vma_is_accessed(vma))
continue;
/*
* Reset the access PIDs regularly for old VMAs. The reset happens after
* checking the VMA for recent access, to avoid clearing the PID info
* before the access has been recorded.
*/
if (mm->numa_scan_seq &&
time_after(jiffies, vma->numab_state->next_pid_reset)) {
vma->numab_state->next_pid_reset = vma->numab_state->next_pid_reset +
msecs_to_jiffies(VMA_PID_RESET_PERIOD);
vma->numab_state->access_pids[0] = READ_ONCE(vma->numab_state->access_pids[1]);
vma->numab_state->access_pids[1] = 0;
}
do {
start = max(start, vma->vm_start);
end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
end = min(end, vma->vm_end);
nr_pte_updates = change_prot_numa(vma, start, end);
/*
* Try to scan sysctl_numa_balancing_scan_size worth of
* hpages that have at least one present PTE that
* is not already pte-numa. If the VMA contains
* areas that are unused or already full of prot_numa
* PTEs, scan up to virtpages, to skip through those
* areas faster.
*/
if (nr_pte_updates)
pages -= (end - start) >> PAGE_SHIFT;
virtpages -= (end - start) >> PAGE_SHIFT;
start = end;
if (pages <= 0 || virtpages <= 0)
goto out;
cond_resched();
} while (end != vma->vm_end);
} for_each_vma(vmi, vma);
out:
/*
* It is possible to reach the end of the VMA list, but the last few
* VMAs are not guaranteed to be migratable. If they are not, we
* would find the !migratable VMA on the next scan but would not reset
* the scanner to the start, so check it now.
*/
if (vma)
mm->numa_scan_offset = start;
else
reset_ptenuma_scan(p);
mmap_read_unlock(mm);
/*
* Make sure tasks use at least 32x as much time to run other code
* than they used here, to limit NUMA PTE scanning overhead to 3% max.
* Usually update_task_scan_period slows down scanning enough; on an
* overloaded system we need to limit overhead on a per task basis.
*/
if (unlikely(p->se.sum_exec_runtime != runtime)) {
u64 diff = p->se.sum_exec_runtime - runtime;
p->node_stamp += 32 * diff;
}
}
void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
int mm_users = 0;
struct mm_struct *mm = p->mm;
if (mm) {
mm_users = atomic_read(&mm->mm_users);
if (mm_users == 1) {
mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
mm->numa_scan_seq = 0;
}
}
p->node_stamp = 0;
p->numa_scan_seq = mm ? mm->numa_scan_seq : 0;
p->numa_scan_period = sysctl_numa_balancing_scan_delay;
p->numa_migrate_retry = 0;
/* Protect against double add, see task_tick_numa and task_numa_work */
p->numa_work.next = &p->numa_work;
p->numa_faults = NULL;
p->numa_pages_migrated = 0;
p->total_numa_faults = 0;
RCU_INIT_POINTER(p->numa_group, NULL);
p->last_task_numa_placement = 0;
p->last_sum_exec_runtime = 0;
init_task_work(&p->numa_work, task_numa_work);
/* New address space, reset the preferred nid */
if (!(clone_flags & CLONE_VM)) {
p->numa_preferred_nid = NUMA_NO_NODE;
return;
}
/*
* New thread: keep the existing numa_preferred_nid, which should have
* been copied already by arch_dup_task_struct(), but stagger when the
* scans start.
*/
if (mm) {
unsigned int delay;
delay = min_t(unsigned int, task_scan_max(current),
current->numa_scan_period * mm_users * NSEC_PER_MSEC);
delay += 2 * TICK_NSEC;
p->node_stamp = delay;
}
}
/*
* Drive the periodic memory faults..
*/
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
struct callback_head *work = &curr->numa_work;
u64 period, now;
/*
* We don't care about NUMA placement if we don't have memory.
*/
if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
return;
/*
* Using runtime rather than walltime has the dual advantage that
* we (mostly) drive the selection from busy threads and that the
* task needs to have done some actual work before we bother with
* NUMA placement.
*/
now = curr->se.sum_exec_runtime;
period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
if (now > curr->node_stamp + period) {
if (!curr->node_stamp)
curr->numa_scan_period = task_scan_start(curr);
curr->node_stamp += period;
if (!time_before(jiffies, curr->mm->numa_next_scan))
task_work_add(curr, work, TWA_RESUME);
}
}
static void update_scan_period(struct task_struct *p, int new_cpu)
{
int src_nid = cpu_to_node(task_cpu(p));
int dst_nid = cpu_to_node(new_cpu);
if (!static_branch_likely(&sched_numa_balancing))
return;
if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
return;
if (src_nid == dst_nid)
return;
/*
* Allow resets if faults have been trapped before one scan
* has completed. This is most likely due to a new task that
* is pulled cross-node due to wakeups or load balancing.
*/
if (p->numa_scan_seq) {
/*
* Avoid scan adjustments if moving to the preferred
* node or if the task was not previously running on
* the preferred node.
*/
if (dst_nid == p->numa_preferred_nid ||
(p->numa_preferred_nid != NUMA_NO_NODE &&
src_nid != p->numa_preferred_nid))
return;
}
p->numa_scan_period = task_scan_start(p);
}
#else
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
}
static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
}
static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
}
static inline void update_scan_period(struct task_struct *p, int new_cpu)
{
}
#endif /* CONFIG_NUMA_BALANCING */
static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_add(&cfs_rq->load, se->load.weight);
#ifdef CONFIG_SMP
if (entity_is_task(se)) {
struct rq *rq = rq_of(cfs_rq);
account_numa_enqueue(rq, task_of(se));
list_add(&se->group_node, &rq->cfs_tasks);
}
#endif
cfs_rq->nr_running++;
if (se_is_idle(se))
cfs_rq->idle_nr_running++;
}
static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_sub(&cfs_rq->load, se->load.weight);
#ifdef CONFIG_SMP
if (entity_is_task(se)) {
account_numa_dequeue(rq_of(cfs_rq), task_of(se));
list_del_init(&se->group_node);
}
#endif
cfs_rq->nr_running--;
if (se_is_idle(se))
cfs_rq->idle_nr_running--;
}
/*
* Signed add and clamp on underflow.
*
* Explicitly do a load-store to ensure the intermediate value never hits
* memory. This allows lockless observations without ever seeing the negative
* values.
*/
#define add_positive(_ptr, _val) do { \
typeof(_ptr) ptr = (_ptr); \
typeof(_val) val = (_val); \
typeof(*ptr) res, var = READ_ONCE(*ptr); \
\
res = var + val; \
\
if (val < 0 && res > var) \
res = 0; \
\
WRITE_ONCE(*ptr, res); \
} while (0)
/*
* Unsigned subtract and clamp on underflow.
*
* Explicitly do a load-store to ensure the intermediate value never hits
* memory. This allows lockless observations without ever seeing the negative
* values.
*/
#define sub_positive(_ptr, _val) do { \
typeof(_ptr) ptr = (_ptr); \
typeof(*ptr) val = (_val); \
typeof(*ptr) res, var = READ_ONCE(*ptr); \
res = var - val; \
if (res > var) \
res = 0; \
WRITE_ONCE(*ptr, res); \
} while (0)
/*
* Remove and clamp on negative, from a local variable.
*
* A variant of sub_positive(), which does not use explicit load-store
* and is thus optimized for local variable updates.
*/
#define lsub_positive(_ptr, _val) do { \
typeof(_ptr) ptr = (_ptr); \
*ptr -= min_t(typeof(*ptr), *ptr, _val); \
} while (0)
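/*
 * Usage sketch for the helpers above (illustrative only, not kernel
 * code):
 *
 *	unsigned long avg = 5;
 *	sub_positive(&avg, 8);		// avg becomes 0, not a wrapped value
 *	add_positive(&signed_avg, -delta);	// clamps at 0 on underflow
 *
 * The READ_ONCE()/WRITE_ONCE() pair guarantees concurrent readers never
 * observe the intermediate negative/wrapped value.
 */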
#ifdef CONFIG_SMP
static inline void
enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
cfs_rq->avg.load_avg += se->avg.load_avg;
cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
}
static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
/* See update_cfs_rq_load_avg() */
cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
}
#else
static inline void
enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
#endif
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight)
{
unsigned long old_weight = se->load.weight;
if (se->on_rq) {
/* commit outstanding execution time */
if (cfs_rq->curr == se)
update_curr(cfs_rq);
else
avg_vruntime_sub(cfs_rq, se);
update_load_sub(&cfs_rq->load, se->load.weight);
}
dequeue_load_avg(cfs_rq, se);
update_load_set(&se->load, weight);
if (!se->on_rq) {
/*
* Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
* we need to scale se->vlag when w_i changes.
*/
se->vlag = div_s64(se->vlag * old_weight, weight);
} else {
s64 deadline = se->deadline - se->vruntime;
/*
* When the weight changes, the virtual time slope changes and
* we should adjust the relative virtual deadline accordingly.
*/
deadline = div_s64(deadline * old_weight, weight);
se->deadline = se->vruntime + deadline;
}
#ifdef CONFIG_SMP
do {
u32 divider = get_pelt_divider(&se->avg);
se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
} while (0);
#endif
enqueue_load_avg(cfs_rq, se);
if (se->on_rq) {
update_load_add(&cfs_rq->load, se->load.weight);
if (cfs_rq->curr != se)
avg_vruntime_add(cfs_rq, se);
}
}
void reweight_task(struct task_struct *p, int prio)
{
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct load_weight *load = &se->load;
unsigned long weight = scale_load(sched_prio_to_weight[prio]);
reweight_entity(cfs_rq, se, weight);
load->inv_weight = sched_prio_to_wmult[prio];
}
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
/*
* All this does is approximate the hierarchical proportion which includes that
* global sum we all love to hate.
*
* That is, the weight of a group entity, is the proportional share of the
* group weight based on the group runqueue weights. That is:
*
* tg->weight * grq->load.weight
* ge->load.weight = ----------------------------- (1)
* \Sum grq->load.weight
*
* Now, because that sum is prohibitively expensive to compute (been
* there, done that), we approximate it with this average stuff. The average
* moves slower and therefore the approximation is cheaper and more stable.
*
* So instead of the above, we substitute:
*
* grq->load.weight -> grq->avg.load_avg (2)
*
* which yields the following:
*
* tg->weight * grq->avg.load_avg
* ge->load.weight = ------------------------------ (3)
* tg->load_avg
*
* Where: tg->load_avg ~= \Sum grq->avg.load_avg
*
* That is shares_avg, and it is right (given the approximation (2)).
*
* The problem with it is that because the average is slow -- it was designed
* to be exactly that of course -- this leads to transients in boundary
* conditions. Specifically, the case where the group was idle and we start
* one task. It takes time for our CPU's grq->avg.load_avg to build up,
* yielding bad latency etc..
*
* Now, in that special case (1) reduces to:
*
* tg->weight * grq->load.weight
* ge->load.weight = ----------------------------- = tg->weight (4)
* grq->load.weight
*
* That is, the sum collapses because all other CPUs are idle; the UP scenario.
*
* So what we do is modify our approximation (3) to approach (4) in the (near)
* UP case, like:
*
* ge->load.weight =
*
* tg->weight * grq->load.weight
* --------------------------------------------------- (5)
* tg->load_avg - grq->avg.load_avg + grq->load.weight
*
* But because grq->load.weight can drop to 0, resulting in a divide by zero,
* we need to use grq->avg.load_avg as its lower bound, which then gives:
*
*
* tg->weight * grq->load.weight
* ge->load.weight = ----------------------------- (6)
* tg_load_avg'
*
* Where:
*
* tg_load_avg' = tg->load_avg - grq->avg.load_avg +
* max(grq->load.weight, grq->avg.load_avg)
*
* And that is shares_weight and is icky. In the (near) UP case it approaches
* (4) while in the normal case it approaches (3). It consistently
* overestimates the ge->load.weight and therefore:
*
* \Sum ge->load.weight >= tg->weight
*
* hence icky!
*/
static long calc_group_shares(struct cfs_rq *cfs_rq)
{
long tg_weight, tg_shares, load, shares;
struct task_group *tg = cfs_rq->tg;
tg_shares = READ_ONCE(tg->shares);
load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
tg_weight = atomic_long_read(&tg->load_avg);
/* Ensure tg_weight >= load */
tg_weight -= cfs_rq->tg_load_avg_contrib;
tg_weight += load;
shares = (tg_shares * load);
if (tg_weight)
shares /= tg_weight;
/*
* MIN_SHARES has to be unscaled here to support per-CPU partitioning
* of a group with small tg->shares value. It is a floor value which is
* assigned as a minimum load.weight to the sched_entity representing
* the group on a CPU.
*
* E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
* on an 8-core system with 8 tasks each runnable on one CPU shares has
* to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
* case no task is runnable on a CPU MIN_SHARES=2 should be returned
* instead of 0.
*/
return clamp_t(long, shares, MIN_SHARES, tg_shares);
}
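/*
 * Worked example for the math above (illustrative numbers, treating
 * scale_load_down() as a no-op for simplicity, as on 32-bit): with
 * tg->shares = 1024, a local grq->load.weight of 512, a local
 * grq->avg.load_avg (and tg_load_avg_contrib) of 256 and tg->load_avg
 * of 1024, we get load = max(512, 256) = 512 and
 * tg_weight = 1024 - 256 + 512 = 1280, so
 * shares = 1024 * 512 / 1280 = 409, then clamped to
 * [MIN_SHARES, tg_shares].
 */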
#endif /* CONFIG_SMP */
/*
* Recomputes the group entity based on the current state of its group
* runqueue.
*/
static void update_cfs_group(struct sched_entity *se)
{
struct cfs_rq *gcfs_rq = group_cfs_rq(se);
long shares;
if (!gcfs_rq)
return;
if (throttled_hierarchy(gcfs_rq))
return;
#ifndef CONFIG_SMP
shares = READ_ONCE(gcfs_rq->tg->shares);
if (likely(se->load.weight == shares))
return;
#else
shares = calc_group_shares(gcfs_rq);
#endif
reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void update_cfs_group(struct sched_entity *se)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
{
struct rq *rq = rq_of(cfs_rq);
if (&rq->cfs == cfs_rq) {
/*
* There are a few boundary cases this might miss but it should
* get called often enough that that should (hopefully) not be
* a real problem.
*
* It will not get called when we go idle, because the idle
* thread is a different class (!fair), nor will the utilization
* number include things like RT tasks.
*
* As is, the util number is not freq-invariant (we'd have to
* implement arch_scale_freq_capacity() for that).
*
* See cpu_util_cfs().
*/
cpufreq_update_util(rq, flags);
}
}
#ifdef CONFIG_SMP
static inline bool load_avg_is_decayed(struct sched_avg *sa)
{
if (sa->load_sum)
return false;
if (sa->util_sum)
return false;
if (sa->runnable_sum)
return false;
/*
* _avg must be null when _sum are null because _avg = _sum / divider
* Make sure that rounding and/or propagation of PELT values never
* break this.
*/
SCHED_WARN_ON(sa->load_avg ||
sa->util_avg ||
sa->runnable_avg);
return true;
}
static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
{
return u64_u32_load_copy(cfs_rq->avg.last_update_time,
cfs_rq->last_update_time_copy);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
* immediately before a parent cfs_rq, and cfs_rqs are removed from the list
* bottom-up, we only have to test whether the cfs_rq before us on the list
* is our child.
* If cfs_rq is not on the list, test whether a child needs to be added
* to connect a branch to the tree (see list_add_leaf_cfs_rq() for details).
*/
static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
{
struct cfs_rq *prev_cfs_rq;
struct list_head *prev;
if (cfs_rq->on_list) {
prev = cfs_rq->leaf_cfs_rq_list.prev;
} else {
struct rq *rq = rq_of(cfs_rq);
prev = rq->tmp_alone_branch;
}
prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
return (prev_cfs_rq->tg->parent == cfs_rq->tg);
}
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
{
if (cfs_rq->load.weight)
return false;
if (!load_avg_is_decayed(&cfs_rq->avg))
return false;
if (child_cfs_rq_on_list(cfs_rq))
return false;
return true;
}
/**
* update_tg_load_avg - update the tg's load avg
* @cfs_rq: the cfs_rq whose avg changed
*
* This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
* However, because tg->load_avg is a global value there are performance
* considerations.
*
* In order to avoid having to look at the other cfs_rq's, we use a
* differential update where we store the last value we propagated. This in
* turn allows skipping updates if the differential is 'small'.
*
* Updating tg's load_avg is necessary before update_cfs_share().
*/
static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
/*
* No need to update load_avg for root_task_group as it is not used.
*/
if (cfs_rq->tg == &root_task_group)
return;
if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
atomic_long_add(delta, &cfs_rq->tg->load_avg);
cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
}
}
/*
* Called within set_task_rq() right before setting a task's CPU. The
* caller only guarantees p->pi_lock is held; no other assumptions,
* including the state of rq->lock, should be made.
*/
void set_task_rq_fair(struct sched_entity *se,
struct cfs_rq *prev, struct cfs_rq *next)
{
u64 p_last_update_time;
u64 n_last_update_time;
if (!sched_feat(ATTACH_AGE_LOAD))
return;
/*
* We are supposed to update the task to "current" time, then its up to
* date and ready to go to new CPU/cfs_rq. But we have difficulty in
* getting what current time is, so simply throw away the out-of-date
* time. This will result in the wakee task is less decayed, but giving
* the wakee more load sounds not bad.
*/
if (!(se->avg.last_update_time && prev))
return;
p_last_update_time = cfs_rq_last_update_time(prev);
n_last_update_time = cfs_rq_last_update_time(next);
__update_load_avg_blocked_se(p_last_update_time, se);
se->avg.last_update_time = n_last_update_time;
}
/*
* When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
* propagate its contribution. The key to this propagation is the invariant
* that for each group:
*
* ge->avg == grq->avg (1)
*
* _IFF_ we look at the pure running and runnable sums. Because they
* represent the very same entity, just at different points in the hierarchy.
*
* Per the above update_tg_cfs_util() and update_tg_cfs_runnable() are trivial
* and simply copies the running/runnable sum over (but still wrong, because
* the group entity and group rq do not have their PELT windows aligned).
*
* However, update_tg_cfs_load() is more complex. So we have:
*
* ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
*
* And since, like util, the runnable part should be directly transferable,
* the following would _appear_ to be the straightforward approach:
*
* grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
*
* And per (1) we have:
*
* ge->avg.runnable_avg == grq->avg.runnable_avg
*
* Which gives:
*
* ge->load.weight * grq->avg.load_avg
* ge->avg.load_avg = ----------------------------------- (4)
* grq->load.weight
*
* Except that is wrong!
*
* Because while for entities historical weight is not important and we
* really only care about our future and therefore can consider a pure
* runnable sum, runqueues can NOT do this.
*
* We specifically want runqueues to have a load_avg that includes
* historical weights. Those represent the blocked load, the load we expect
* to (shortly) return to us. This only works by keeping the weights as
* integral part of the sum. We therefore cannot decompose as per (3).
*
* Another reason this doesn't work is that runnable isn't a 0-sum entity.
* Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
* rq itself is runnable anywhere between 2/3 and 1 depending on how the
* runnable section of these tasks overlap (or not). If they were to perfectly
* align the rq as a whole would be runnable 2/3 of the time. If however we
* always have at least 1 runnable task, the rq as a whole is always runnable.
*
* So we'll have to approximate.. :/
*
* Given the constraint:
*
* ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
*
* We can construct a rule that adds runnable to a rq by assuming minimal
* overlap.
*
* On removal, we'll assume each task is equally runnable; which yields:
*
* grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
*
* XXX: only do this for the part of runnable > running ?
*
*/
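/*
 * Illustrative example of the removal rule above: a gcfs_rq with two
 * equally weighted tasks (total load.weight = 2048, i.e. 2 after
 * scale_load_down()) and grq->avg.load_sum = 1000 yields an estimated
 * unweighted runnable_sum of 1000 / 2 = 500, i.e. each task is assumed
 * to account for half of the runnable time.
 */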
static inline void
update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
u32 new_sum, divider;
/* Nothing to update */
if (!delta_avg)
return;
/*
* cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
* See ___update_load_avg() for details.
*/
divider = get_pelt_divider(&cfs_rq->avg);
/* Set new sched_entity's utilization */
se->avg.util_avg = gcfs_rq->avg.util_avg;
new_sum = se->avg.util_avg * divider;
delta_sum = (long)new_sum - (long)se->avg.util_sum;
se->avg.util_sum = new_sum;
/* Update parent cfs_rq utilization */
add_positive(&cfs_rq->avg.util_avg, delta_avg);
add_positive(&cfs_rq->avg.util_sum, delta_sum);
/* See update_cfs_rq_load_avg() */
cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
}
static inline void
update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
u32 new_sum, divider;
/* Nothing to update */
if (!delta_avg)
return;
/*
* cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
* See ___update_load_avg() for details.
*/
divider = get_pelt_divider(&cfs_rq->avg);
/* Set new sched_entity's runnable */
se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
new_sum = se->avg.runnable_avg * divider;
delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
se->avg.runnable_sum = new_sum;
/* Update parent cfs_rq runnable */
add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
/* See update_cfs_rq_load_avg() */
cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
}
static inline void
update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
unsigned long load_avg;
u64 load_sum = 0;
s64 delta_sum;
u32 divider;
if (!runnable_sum)
return;
gcfs_rq->prop_runnable_sum = 0;
/*
* cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
* See ___update_load_avg() for details.
*/
divider = get_pelt_divider(&cfs_rq->avg);
if (runnable_sum >= 0) {
/*
* Add runnable; clip at LOAD_AVG_MAX. Reflects that until
* the CPU is saturated running == runnable.
*/
runnable_sum += se->avg.load_sum;
runnable_sum = min_t(long, runnable_sum, divider);
} else {
/*
* Estimate the new unweighted runnable_sum of the gcfs_rq by
* assuming all tasks are equally runnable.
*/
if (scale_load_down(gcfs_rq->load.weight)) {
load_sum = div_u64(gcfs_rq->avg.load_sum,
scale_load_down(gcfs_rq->load.weight));
}
/* But make sure to not inflate se's runnable */
runnable_sum = min(se->avg.load_sum, load_sum);
}
/*
* runnable_sum can't be lower than running_sum
* Rescale running sum to be in the same range as runnable sum
* running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT]
* runnable_sum is in [0 : LOAD_AVG_MAX]
*/
running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
runnable_sum = max(runnable_sum, running_sum);
load_sum = se_weight(se) * runnable_sum;
load_avg = div_u64(load_sum, divider);
delta_avg = load_avg - se->avg.load_avg;
if (!delta_avg)
return;
delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
se->avg.load_sum = runnable_sum;
se->avg.load_avg = load_avg;
add_positive(&cfs_rq->avg.load_avg, delta_avg);
add_positive(&cfs_rq->avg.load_sum, delta_sum);
/* See update_cfs_rq_load_avg() */
cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
}
static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
{
cfs_rq->propagate = 1;
cfs_rq->prop_runnable_sum += runnable_sum;
}
/* Update task and its cfs_rq load average */
static inline int propagate_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq, *gcfs_rq;
if (entity_is_task(se))
return 0;
gcfs_rq = group_cfs_rq(se);
if (!gcfs_rq->propagate)
return 0;
gcfs_rq->propagate = 0;
cfs_rq = cfs_rq_of(se);
add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
update_tg_cfs_util(cfs_rq, se, gcfs_rq);
update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
update_tg_cfs_load(cfs_rq, se, gcfs_rq);
trace_pelt_cfs_tp(cfs_rq);
trace_pelt_se_tp(se);
return 1;
}
/*
* Check if we need to update the load and the utilization of a blocked
* group_entity:
*/
static inline bool skip_blocked_update(struct sched_entity *se)
{
struct cfs_rq *gcfs_rq = group_cfs_rq(se);
/*
* If the sched_entity still has non-zero load or utilization, we have to
* decay it:
*/
if (se->avg.load_avg || se->avg.util_avg)
return false;
/*
* If there is a pending propagation, we have to update the load and
* the utilization of the sched_entity:
*/
if (gcfs_rq->propagate)
return false;
/*
* Otherwise, the load and the utilization of the sched_entity are
* already zero and there is no pending propagation, so it will be a
* waste of time to try to decay it:
*/
return true;
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
static inline int propagate_entity_load_avg(struct sched_entity *se)
{
return 0;
}
static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_NO_HZ_COMMON
static inline void migrate_se_pelt_lag(struct sched_entity *se)
{
u64 throttled = 0, now, lut;
struct cfs_rq *cfs_rq;
struct rq *rq;
bool is_idle;
if (load_avg_is_decayed(&se->avg))
return;
cfs_rq = cfs_rq_of(se);
rq = rq_of(cfs_rq);
rcu_read_lock();
is_idle = is_idle_task(rcu_dereference(rq->curr));
rcu_read_unlock();
/*
* The lag estimation comes with a cost we don't want to pay all the
* time. Hence, limiting to the case where the source CPU is idle and
* we know we are at the greatest risk to have an outdated clock.
*/
if (!is_idle)
return;
/*
* Estimated "now" is: last_update_time + cfs_idle_lag + rq_idle_lag, where:
*
* last_update_time (the cfs_rq's last_update_time)
* = cfs_rq_clock_pelt()@cfs_rq_idle
* = rq_clock_pelt()@cfs_rq_idle
* - cfs->throttled_clock_pelt_time@cfs_rq_idle
*
* cfs_idle_lag (delta between rq's update and cfs_rq's update)
* = rq_clock_pelt()@rq_idle - rq_clock_pelt()@cfs_rq_idle
*
* rq_idle_lag (delta between now and rq's update)
* = sched_clock_cpu() - rq_clock()@rq_idle
*
* We can then write:
*
* now = rq_clock_pelt()@rq_idle - cfs->throttled_clock_pelt_time +
* sched_clock_cpu() - rq_clock()@rq_idle
* Where:
* rq_clock_pelt()@rq_idle is rq->clock_pelt_idle
* rq_clock()@rq_idle is rq->clock_idle
* cfs->throttled_clock_pelt_time@cfs_rq_idle
* is cfs_rq->throttled_pelt_idle
*/
#ifdef CONFIG_CFS_BANDWIDTH
throttled = u64_u32_load(cfs_rq->throttled_pelt_idle);
/* The clock has been stopped for throttling */
if (throttled == U64_MAX)
return;
#endif
now = u64_u32_load(rq->clock_pelt_idle);
/*
* Paired with _update_idle_rq_clock_pelt(). It ensures that, in the
* worst case, we observe the old clock_pelt_idle value together with
* the new clock_idle, which leads to an underestimation. The opposite
* would lead to an overestimation.
*/
smp_rmb();
lut = cfs_rq_last_update_time(cfs_rq);
now -= throttled;
if (now < lut)
/*
* cfs_rq->avg.last_update_time is more recent than our
* estimation, let's use it.
*/
now = lut;
else
now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);
__update_load_avg_blocked_se(now, se);
}
#else
static void migrate_se_pelt_lag(struct sched_entity *se) {}
#endif
/**
* update_cfs_rq_load_avg - update the cfs_rq's load/util averages
* @now: current time, as per cfs_rq_clock_pelt()
* @cfs_rq: cfs_rq to update
*
* The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
* avg. The immediate corollary is that all (fair) tasks must be attached.
*
* cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
*
* Return: true if the load decayed or we removed load.
*
* Since both these conditions indicate a changed cfs_rq->avg.load we should
* call update_tg_load_avg() when this function returns true.
*/
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
struct sched_avg *sa = &cfs_rq->avg;
int decayed = 0;
if (cfs_rq->removed.nr) {
unsigned long r;
u32 divider = get_pelt_divider(&cfs_rq->avg);
raw_spin_lock(&cfs_rq->removed.lock);
swap(cfs_rq->removed.util_avg, removed_util);
swap(cfs_rq->removed.load_avg, removed_load);
swap(cfs_rq->removed.runnable_avg, removed_runnable);
cfs_rq->removed.nr = 0;
raw_spin_unlock(&cfs_rq->removed.lock);
r = removed_load;
sub_positive(&sa->load_avg, r);
sub_positive(&sa->load_sum, r * divider);
/* See sa->util_sum below */
sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
r = removed_util;
sub_positive(&sa->util_avg, r);
sub_positive(&sa->util_sum, r * divider);
/*
* Because of rounding, se->util_sum might end up being +1 more than
* cfs->util_sum. Although this is not a problem by itself, detaching
* a lot of tasks with the rounding problem between 2 updates of
* util_avg (~1ms) can make cfs->util_sum become null whereas
* cfs_util_avg is not.
* Check that util_sum is still above its lower bound for the new
* util_avg. Given that period_contrib might have moved since the last
* sync, we are only sure that util_sum must be above or equal to
* util_avg * minimum possible divider
*/
sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
r = removed_runnable;
sub_positive(&sa->runnable_avg, r);
sub_positive(&sa->runnable_sum, r * divider);
/* See sa->util_sum above */
sa->runnable_sum = max_t(u32, sa->runnable_sum,
sa->runnable_avg * PELT_MIN_DIVIDER);
/*
* removed_runnable is the unweighted version of removed_load so we
* can use it to estimate removed_load_sum.
*/
add_tg_cfs_propagate(cfs_rq,
-(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
decayed = 1;
}
decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
u64_u32_store_copy(sa->last_update_time,
cfs_rq->last_update_time_copy,
sa->last_update_time);
return decayed;
}
/**
* attach_entity_load_avg - attach this entity to its cfs_rq load avg
* @cfs_rq: cfs_rq to attach to
* @se: sched_entity to attach
*
* Must call update_cfs_rq_load_avg() before this, since we rely on
* cfs_rq->avg.last_update_time being current.
*/
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
/*
* cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
* See ___update_load_avg() for details.
*/
u32 divider = get_pelt_divider(&cfs_rq->avg);
/*
* When we attach the @se to the @cfs_rq, we must align the decay
* window because without that, really weird and wonderful things can
* happen.
*
* XXX illustrate
*/
se->avg.last_update_time = cfs_rq->avg.last_update_time;
se->avg.period_contrib = cfs_rq->avg.period_contrib;
/*
* Hell(o) Nasty stuff.. we need to recompute _sum based on the new
* period_contrib. This isn't strictly correct, but since we're
* entirely outside of the PELT hierarchy, nobody cares if we truncate
* _sum a little.
*/
se->avg.util_sum = se->avg.util_avg * divider;
se->avg.runnable_sum = se->avg.runnable_avg * divider;
se->avg.load_sum = se->avg.load_avg * divider;
if (se_weight(se) < se->avg.load_sum)
se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
else
se->avg.load_sum = 1;
enqueue_load_avg(cfs_rq, se);
cfs_rq->avg.util_avg += se->avg.util_avg;
cfs_rq->avg.util_sum += se->avg.util_sum;
cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
cfs_rq_util_change(cfs_rq, 0);
trace_pelt_cfs_tp(cfs_rq);
}
/**
* detach_entity_load_avg - detach this entity from its cfs_rq load avg
* @cfs_rq: cfs_rq to detach from
* @se: sched_entity to detach
*
* Must call update_cfs_rq_load_avg() before this, since we rely on
* cfs_rq->avg.last_update_time being current.
*/
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
dequeue_load_avg(cfs_rq, se);
sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
/* See update_cfs_rq_load_avg() */
cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
/* See update_cfs_rq_load_avg() */
cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
cfs_rq_util_change(cfs_rq, 0);
trace_pelt_cfs_tp(cfs_rq);
}
/*
* Optional action to be done while updating the load average
*/
#define UPDATE_TG 0x1
#define SKIP_AGE_LOAD 0x2
#define DO_ATTACH 0x4
#define DO_DETACH 0x8
/* Update task and its cfs_rq load average */
static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
u64 now = cfs_rq_clock_pelt(cfs_rq);
int decayed;
/*
* Track the task load average for carrying it to the new CPU after
* migration, and track the group sched_entity load average for
* task_h_load() calculation in migration.
*/
if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
__update_load_avg_se(now, cfs_rq, se);
decayed = update_cfs_rq_load_avg(now, cfs_rq);
decayed |= propagate_entity_load_avg(se);
if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
/*
* DO_ATTACH means we're here from enqueue_entity().
* !last_update_time means we've passed through
* migrate_task_rq_fair() indicating we migrated.
*
* IOW we're enqueueing a task on a new CPU.
*/
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq);
} else if (flags & DO_DETACH) {
/*
* DO_DETACH means we're here from dequeue_entity()
* and we are migrating task out of the CPU.
*/
detach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq);
} else if (decayed) {
cfs_rq_util_change(cfs_rq, 0);
if (flags & UPDATE_TG)
update_tg_load_avg(cfs_rq);
}
}
/*
* Synchronize entity load avg of dequeued entity without locking
* the previous rq.
*/
static void sync_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 last_update_time;
last_update_time = cfs_rq_last_update_time(cfs_rq);
__update_load_avg_blocked_se(last_update_time, se);
}
/*
* Task first catches up with cfs_rq, and then subtracts
* itself from the cfs_rq (task must be off the queue now).
*/
static void remove_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
unsigned long flags;
/*
* tasks cannot exit without having gone through wake_up_new_task() ->
* enqueue_task_fair() which will have added things to the cfs_rq,
* so we can remove unconditionally.
*/
sync_entity_load_avg(se);
raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
++cfs_rq->removed.nr;
cfs_rq->removed.util_avg += se->avg.util_avg;
cfs_rq->removed.load_avg += se->avg.load_avg;
cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
}
static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
{
return cfs_rq->avg.runnable_avg;
}
static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
{
return cfs_rq->avg.load_avg;
}
static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
static inline unsigned long task_util(struct task_struct *p)
{
return READ_ONCE(p->se.avg.util_avg);
}
static inline unsigned long _task_util_est(struct task_struct *p)
{
struct util_est ue = READ_ONCE(p->se.avg.util_est);
return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
}
static inline unsigned long task_util_est(struct task_struct *p)
{
return max(task_util(p), _task_util_est(p));
}
#ifdef CONFIG_UCLAMP_TASK
static inline unsigned long uclamp_task_util(struct task_struct *p,
unsigned long uclamp_min,
unsigned long uclamp_max)
{
return clamp(task_util_est(p), uclamp_min, uclamp_max);
}
#else
static inline unsigned long uclamp_task_util(struct task_struct *p,
unsigned long uclamp_min,
unsigned long uclamp_max)
{
return task_util_est(p);
}
#endif
static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
struct task_struct *p)
{
unsigned int enqueued;
if (!sched_feat(UTIL_EST))
return;
/* Update root cfs_rq's estimated utilization */
enqueued = cfs_rq->avg.util_est.enqueued;
enqueued += _task_util_est(p);
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
trace_sched_util_est_cfs_tp(cfs_rq);
}
static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
struct task_struct *p)
{
unsigned int enqueued;
if (!sched_feat(UTIL_EST))
return;
/* Update root cfs_rq's estimated utilization */
enqueued = cfs_rq->avg.util_est.enqueued;
enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
trace_sched_util_est_cfs_tp(cfs_rq);
}
#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
/*
* Check if a (signed) value is within a specified (unsigned) margin,
* based on the observation that:
*
* abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
*
* NOTE: this only works when value + margin < INT_MAX.
*/
static inline bool within_margin(int value, int margin)
{
return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
}
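/*
 * Illustrative walk-through of the trick above (made-up numbers): with
 * margin = 10 the test becomes (unsigned)(value + 9) < 19.
 *
 *   value =   5: (unsigned)14 < 19           -> true  (|5|  < 10)
 *   value =  -5: (unsigned)4  < 19           -> true  (|-5| < 10)
 *   value =  12: (unsigned)21 < 19           -> false
 *   value = -12: (unsigned)-3 wraps to ~4e9  -> false
 *
 * Values at or below -margin wrap around to huge unsigned numbers and
 * fail the comparison, giving the abs() semantics without a branch.
 */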
static inline void util_est_update(struct cfs_rq *cfs_rq,
struct task_struct *p,
bool task_sleep)
{
long last_ewma_diff, last_enqueued_diff;
struct util_est ue;
if (!sched_feat(UTIL_EST))
return;
/*
* Skip update of task's estimated utilization when the task has not
* yet completed an activation, e.g. being migrated.
*/
if (!task_sleep)
return;
/*
* If the PELT values haven't changed since enqueue time,
* skip the util_est update.
*/
ue = p->se.avg.util_est;
if (ue.enqueued & UTIL_AVG_UNCHANGED)
return;
last_enqueued_diff = ue.enqueued;
/*
* Reset EWMA on utilization increases, the moving average is used only
* to smooth utilization decreases.
*/
ue.enqueued = task_util(p);
if (sched_feat(UTIL_EST_FASTUP)) {
if (ue.ewma < ue.enqueued) {
ue.ewma = ue.enqueued;
goto done;
}
}
/*
 * Skip update of task's estimated utilization when its members are
 * already within ~1% of its last activation value.
 */
last_ewma_diff = ue.enqueued - ue.ewma;
last_enqueued_diff -= ue.enqueued;
if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
goto done;
return;
}
/*
* To avoid overestimation of actual task utilization, skip updates if
* we cannot guarantee there is idle time on this CPU.
*/
if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
return;
/*
* Update Task's estimated utilization
*
* When *p completes an activation we can consolidate another sample
* of the task size. This is done by storing the current PELT value
* as ue.enqueued and by using this value to update the Exponential
* Weighted Moving Average (EWMA):
*
* ewma(t) = w * task_util(p) + (1-w) * ewma(t-1)
* = w * task_util(p) + ewma(t-1) - w * ewma(t-1)
* = w * (task_util(p) - ewma(t-1)) + ewma(t-1)
* = w * ( last_ewma_diff ) + ewma(t-1)
* = w * (last_ewma_diff + ewma(t-1) / w)
*
* Where 'w' is the weight of new samples, which is configured to be
* 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
*/
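/*
 * Worked example with made-up values: ewma(t-1) = 200, new sample
 * task_util(p) = 100, hence last_ewma_diff = -100 and w = 1/4.
 * The fixed-point sequence below computes:
 *
 *   ewma <<= 2    ->  800    (ewma(t-1) / w)
 *   ewma += -100  ->  700    (add last_ewma_diff)
 *   ewma >>= 2    ->  175    (scale back by w)
 *
 * matching ewma(t) = 200 + (1/4) * (100 - 200) = 175.
 */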
ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
ue.ewma += last_ewma_diff;
ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
done:
ue.enqueued |= UTIL_AVG_UNCHANGED;
WRITE_ONCE(p->se.avg.util_est, ue);
trace_sched_util_est_se_tp(&p->se);
}
static inline int util_fits_cpu(unsigned long util,
unsigned long uclamp_min,
unsigned long uclamp_max,
int cpu)
{
unsigned long capacity_orig, capacity_orig_thermal;
unsigned long capacity = capacity_of(cpu);
bool fits, uclamp_max_fits;
/*
* Check if the real util fits without any uclamp boost/cap applied.
*/
fits = fits_capacity(util, capacity);
if (!uclamp_is_used())
return fits;
/*
* We must use capacity_orig_of() for comparing against uclamp_min and
* uclamp_max. We only care about capacity pressure (by using
* capacity_of()) for comparing against the real util.
*
* If a task is boosted to 1024 for example, we don't want a tiny
* pressure to skew the check whether it fits a CPU or not.
*
* Similarly if a task is capped to capacity_orig_of(little_cpu), it
* should fit a little cpu even if there's some pressure.
*
* Only exception is for thermal pressure since it has a direct impact
* on available OPP of the system.
*
* We honour it for uclamp_min only as a drop in performance level
* could result in not getting the requested minimum performance level.
*
* For uclamp_max, we can tolerate a drop in performance level as the
* goal is to cap the task. So it's okay if it's getting less.
*/
capacity_orig = capacity_orig_of(cpu);
capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
/*
* We want to force a task to fit a cpu as implied by uclamp_max.
* But we do have some corner cases to cater for..
*
*
* C=z
* | ___
* | C=y | |
* |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
* | C=x | | | |
* | ___ | | | |
* | | | | | | | (util somewhere in this region)
* | | | | | | |
* | | | | | | |
* +----------------------------------------
* cpu0 cpu1 cpu2
*
* In the above example if a task is capped to a specific performance
* point, y, then when:
*
* * util = 80% of x then it does not fit on cpu0 and should migrate
* to cpu1
* * util = 80% of y then it is forced to fit on cpu1 to honour
* uclamp_max request.
*
* which is what we're enforcing here. A task always fits if
* uclamp_max <= capacity_orig. But when uclamp_max > capacity_orig,
* the normal upmigration rules should still apply.
*
* Only exception is when we are on max capacity, then we need to be
* careful not to block overutilized state. This is so because:
*
* 1. There's no concept of capping at max_capacity! We can't go
* beyond this performance level anyway.
* 2. The system is being saturated when we're operating near
* max capacity, it doesn't make sense to block overutilized.
*/
uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE);
uclamp_max_fits = !uclamp_max_fits && (uclamp_max <= capacity_orig);
fits = fits || uclamp_max_fits;
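/*
 * Truth table for the two-step assignment above:
 *
 *   capacity_orig == uclamp_max == SCHED_CAPACITY_SCALE
 *                                  -> uclamp_max_fits = false
 *                                     (don't block overutilized at max)
 *   otherwise, uclamp_max <= capacity_orig
 *                                  -> uclamp_max_fits = true
 *   otherwise (uclamp_max > capacity_orig)
 *                                  -> uclamp_max_fits = false
 */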
/*
*
* C=z
* | ___ (region a, capped, util >= uclamp_max)
* | C=y | |
* |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
* | C=x | | | |
* | ___ | | | | (region b, uclamp_min <= util <= uclamp_max)
* |_ _ _|_ _|_ _ _ _| _ | _ _ _| _ | _ _ _ _ _ uclamp_min
* | | | | | | |
* | | | | | | | (region c, boosted, util < uclamp_min)
* +----------------------------------------
* cpu0 cpu1 cpu2
*
* a) If util > uclamp_max, then we're capped, we don't care about
* actual fitness value here. We only care if uclamp_max fits
* capacity without taking margin/pressure into account.
* See comment above.
*
* b) If uclamp_min <= util <= uclamp_max, then the normal
* fits_capacity() rules apply. Except we need to ensure that we
* remain within uclamp_max, see comment above.
*
* c) If util < uclamp_min, then we are boosted. Same as (b) but we
* need to ensure that the boosted value fits the CPU without
* taking margin/pressure into account.
*
* Cases (a) and (b) are handled in the 'fits' variable already. We
* just need to consider an extra check for case (c) after ensuring we
* handle the case uclamp_min > uclamp_max.
*/
uclamp_min = min(uclamp_min, uclamp_max);
if (fits && (util < uclamp_min) && (uclamp_min > capacity_orig_thermal))
return -1;
return fits;
}
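/*
 * Illustrative big.LITTLE example (made-up numbers): a little CPU with
 * capacity_orig = 512 and negligible thermal pressure runs a task with
 * util = 200, uclamp_min = 700 and the default uclamp_max = 1024.
 *
 *   - fits_capacity(200, capacity)            -> true, raw util fits
 *   - uclamp_max (1024) > capacity_orig (512) -> uclamp_max_fits = false
 *   - util (200) < uclamp_min (700) and
 *     uclamp_min > capacity_orig_thermal      -> return -1
 *
 * IOW the CPU can absorb the raw utilization but cannot deliver the
 * requested minimum performance level.
 */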
static inline int task_fits_cpu(struct task_struct *p, int cpu)
{
unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
unsigned long util = task_util_est(p);
/*
* Return true only if the cpu fully fits the task requirements, which
* include the utilization but also the performance hints.
*/
return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0);
}
static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
{
if (!sched_asym_cpucap_active())
return;
if (!p || p->nr_cpus_allowed == 1) {
rq->misfit_task_load = 0;
return;
}
if (task_fits_cpu(p, cpu_of(rq))) {
rq->misfit_task_load = 0;
return;
}
/*
* Make sure that misfit_task_load will not be zero even if
* task_h_load() returns 0.
*/
rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
}
#else /* CONFIG_SMP */
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
{
return true;
}
#define UPDATE_TG 0x0
#define SKIP_AGE_LOAD 0x0
#define DO_ATTACH 0x0
#define DO_DETACH 0x0
static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
{
cfs_rq_util_change(cfs_rq, 0);
}
static inline void remove_entity_load_avg(struct sched_entity *se) {}
static inline void
attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline void
detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
{
return 0;
}
static inline void
util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
static inline void
util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
static inline void
util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
bool task_sleep) {}
static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
#endif /* CONFIG_SMP */
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
u64 vslice = calc_delta_fair(se->slice, se);
u64 vruntime = avg_vruntime(cfs_rq);
s64 lag = 0;
/*
* Due to how V is constructed as the weighted average of entities,
* adding tasks with positive lag, or removing tasks with negative lag
* will move 'time' backwards; this can screw around with the lag of
* other tasks.
*
* EEVDF: placement strategy #1 / #2
*/
if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
lag = se->vlag;
/*
* If we want to place a task and preserve lag, we have to
* consider the effect of the new entity on the weighted
* average and compensate for this, otherwise lag can quickly
* evaporate.
*
* Lag is defined as:
*
* lag_i = S - s_i = w_i * (V - v_i)
*
* To avoid the 'w_i' term all over the place, we only track
* the virtual lag:
*
* vl_i = V - v_i <=> v_i = V - vl_i
*
* And we take V to be the weighted average of all v:
*
* V = (\Sum w_j*v_j) / W
*
* Where W is: \Sum w_j
*
* Then, the weighted average after adding an entity with lag
* vl_i is given by:
*
* V' = (\Sum w_j*v_j + w_i*v_i) / (W + w_i)
* = (W*V + w_i*(V - vl_i)) / (W + w_i)
* = (W*V + w_i*V - w_i*vl_i) / (W + w_i)
* = (V*(W + w_i) - w_i*vl_i) / (W + w_i)
* = V - w_i*vl_i / (W + w_i)
*
* And the actual lag after adding an entity with vl_i is:
*
* vl'_i = V' - v_i
* = V - w_i*vl_i / (W + w_i) - (V - vl_i)
* = vl_i - w_i*vl_i / (W + w_i)
*
* Which is strictly less than vl_i. So in order to preserve lag
* we should inflate the lag before placement such that the
* effective lag after placement comes out right.
*
* As such, invert the above relation for vl'_i to get the vl_i
* we need to use such that the lag after placement is the lag
* we computed before dequeue.
*
* vl'_i = vl_i - w_i*vl_i / (W + w_i)
* = ((W + w_i)*vl_i - w_i*vl_i) / (W + w_i)
*
* (W + w_i)*vl'_i = (W + w_i)*vl_i - w_i*vl_i
* = W*vl_i
*
* vl_i = (W + w_i)*vl'_i / W
*/
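/*
 * Worked example with made-up weights: W = 2048, w_i = 1024 and a
 * desired post-placement lag vl'_i = 10. The inflated lag is:
 *
 *   vl_i = (W + w_i) * vl'_i / W = 3072 * 10 / 2048 = 15
 *
 * which is what the multiply/divide below computes, with 'load'
 * standing in for W (plus curr's weight when curr is on_rq).
 */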
load = cfs_rq->avg_load;
if (curr && curr->on_rq)
load += scale_load_down(curr->load.weight);
lag *= load + scale_load_down(se->load.weight);
if (WARN_ON_ONCE(!load))
load = 1;
lag = div_s64(lag, load);
}
se->vruntime = vruntime - lag;
/*
 * When joining the competition, the existing tasks will be,
 * on average, halfway through their slice; as such, start tasks
 * off with half a slice to ease into the competition.
 */
if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
vslice /= 2;
/*
* EEVDF: vd_i = ve_i + r_i/w_i
*/
se->deadline = se->vruntime + vslice;
}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
static inline bool cfs_bandwidth_used(void);
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
bool curr = cfs_rq->curr == se;
/*
* If we're the current task, we must renormalise before calling
* update_curr().
*/
if (curr)
place_entity(cfs_rq, se, flags);
update_curr(cfs_rq);
/*
* When enqueuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
* - For group_entity, update its runnable_weight to reflect the new
* h_nr_running of its group cfs_rq.
* - For group_entity, update its weight to reflect the new share of
* its group cfs_rq
* - Add its new weight to cfs_rq->load.weight
*/
update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
se_update_runnable(se);
/*
* XXX update_load_avg() above will have attached us to the pelt sum;
* but update_cfs_group() here will re-adjust the weight and have to
* undo/redo all that. Seems wasteful.
*/
update_cfs_group(se);
/*
* XXX now that the entity has been re-weighted, and its lag adjusted,
* we can place the entity.
*/
if (!curr)
place_entity(cfs_rq, se, flags);
account_entity_enqueue(cfs_rq, se);
/* Entity has migrated, no longer consider this task hot */
if (flags & ENQUEUE_MIGRATED)
se->exec_start = 0;
check_schedstat_required();
update_stats_enqueue_fair(cfs_rq, se, flags);
if (!curr)
__enqueue_entity(cfs_rq, se);
se->on_rq = 1;
if (cfs_rq->nr_running == 1) {
check_enqueue_throttle(cfs_rq);
if (!throttled_hierarchy(cfs_rq)) {
list_add_leaf_cfs_rq(cfs_rq);
} else {
#ifdef CONFIG_CFS_BANDWIDTH
struct rq *rq = rq_of(cfs_rq);
if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock)
cfs_rq->throttled_clock = rq_clock(rq);
if (!cfs_rq->throttled_clock_self)
cfs_rq->throttled_clock_self = rq_clock(rq);
#endif
}
}
}
static void __clear_buddies_next(struct sched_entity *se)
{
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
if (cfs_rq->next != se)
break;
cfs_rq->next = NULL;
}
}
static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (cfs_rq->next == se)
__clear_buddies_next(se);
}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
int action = UPDATE_TG;
if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
action |= DO_DETACH;
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
/*
* When dequeuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
* - For group_entity, update its runnable_weight to reflect the new
* h_nr_running of its group cfs_rq.
* - Subtract its previous weight from cfs_rq->load.weight.
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
*/
update_load_avg(cfs_rq, se, action);
se_update_runnable(se);
update_stats_dequeue_fair(cfs_rq, se, flags);
clear_buddies(cfs_rq, se);
update_entity_lag(cfs_rq, se);
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
update_cfs_group(se);
/*
* Now advance min_vruntime if @se was the entity holding it back,
* except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
* put back on, and if we advance min_vruntime, we'll be placed back
* further than we started -- ie. we'll be penalized.
*/
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
if (cfs_rq->nr_running == 0)
update_idle_cfs_rq_clock_pelt(cfs_rq);
}
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
clear_buddies(cfs_rq, se);
/* 'current' is not kept within the tree. */
if (se->on_rq) {
/*
* Any task has to be enqueued before it gets to execute on
* a CPU. So account for the time it spent waiting on the
* runqueue.
*/
update_stats_wait_end_fair(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
update_load_avg(cfs_rq, se, UPDATE_TG);
/*
* HACK, stash a copy of deadline at the point of pick in vlag,
* which isn't used until dequeue.
*/
se->vlag = se->deadline;
}
update_stats_curr_start(cfs_rq, se);
cfs_rq->curr = se;
/*
* Track our maximum slice length, if the CPU's load is at
* least twice that of our own weight (i.e. don't track it
* when there are only lesser-weight tasks around):
*/
if (schedstat_enabled() &&
rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
struct sched_statistics *stats;
stats = __schedstats_from_se(se);
__schedstat_set(stats->slice_max,
max((u64)stats->slice_max,
se->sum_exec_runtime - se->prev_sum_exec_runtime));
}
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
/*
 * Pick the next process, keeping these things in mind, in this order:
 * 1) keep things fair between processes/task groups
 * 2) pick the "next" buddy, since someone really wants that to run
 * 3) otherwise pick the eligible entity selected by the EEVDF policy
 */
static struct sched_entity *
pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
/*
* Enabling NEXT_BUDDY will affect latency but not fairness.
*/
if (sched_feat(NEXT_BUDDY) &&
cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
return cfs_rq->next;
return pick_eevdf(cfs_rq);
}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
/*
* If still on the runqueue then deactivate_task()
* was not called and update_curr() has to be done:
*/
if (prev->on_rq)
update_curr(cfs_rq);
/* throttle cfs_rqs exceeding runtime */
check_cfs_rq_runtime(cfs_rq);
if (prev->on_rq) {
update_stats_wait_start_fair(cfs_rq, prev);
/* Put 'current' back into the tree. */
__enqueue_entity(cfs_rq, prev);
/* in !on_rq case, update occurred at dequeue */
update_load_avg(cfs_rq, prev, 0);
}
cfs_rq->curr = NULL;
}
static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
/*
* Ensure that runnable average is periodically updated.
*/
update_load_avg(cfs_rq, curr, UPDATE_TG);
update_cfs_group(curr);
#ifdef CONFIG_SCHED_HRTICK
/*
* queued ticks are scheduled to match the slice, so don't bother
* validating it and just reschedule.
*/
if (queued) {
resched_curr(rq_of(cfs_rq));
return;
}
/*
* don't let the period tick interfere with the hrtick preemption
*/
if (!sched_feat(DOUBLE_TICK) &&
hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
return;
#endif
}
/**************************************************
* CFS bandwidth control machinery
*/
#ifdef CONFIG_CFS_BANDWIDTH
#ifdef CONFIG_JUMP_LABEL
static struct static_key __cfs_bandwidth_used;
static inline bool cfs_bandwidth_used(void)
{
return static_key_false(&__cfs_bandwidth_used);
}
void cfs_bandwidth_usage_inc(void)
{
static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
}
void cfs_bandwidth_usage_dec(void)
{
static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
}
#else /* CONFIG_JUMP_LABEL */
static bool cfs_bandwidth_used(void)
{
return true;
}
void cfs_bandwidth_usage_inc(void) {}
void cfs_bandwidth_usage_dec(void) {}
#endif /* CONFIG_JUMP_LABEL */
/*
* default period for cfs group bandwidth.
* default: 0.1s, units: nanoseconds
*/
static inline u64 default_cfs_period(void)
{
return 100000000ULL;
}
static inline u64 sched_cfs_bandwidth_slice(void)
{
return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
}
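/*
 * Illustrative configuration (made-up numbers): cfs_period_us = 100000
 * and cfs_quota_us = 50000 allow a group 50ms of CPU time per 100ms
 * period. With the default bandwidth slice of 5000us, each cfs_rq pulls
 * runtime from the global pool in 5ms chunks, so the quota supports up
 * to ten such grabs per period before throttling kicks in.
 */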
/*
* Replenish runtime according to assigned quota. We use sched_clock_cpu
* directly instead of rq->clock to avoid adding additional synchronization
* around rq->lock.
*
* requires cfs_b->lock
*/
void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
{
s64 runtime;
if (unlikely(cfs_b->quota == RUNTIME_INF))
return;
cfs_b->runtime += cfs_b->quota;
runtime = cfs_b->runtime_snap - cfs_b->runtime;
if (runtime > 0) {
cfs_b->burst_time += runtime;
cfs_b->nr_burst++;
}
cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
cfs_b->runtime_snap = cfs_b->runtime;
}
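/*
 * Worked refill example (made-up numbers): quota = 50ms, burst = 20ms,
 * runtime_snap = 70ms and 65ms consumed last period, so runtime = 5ms:
 *
 *   runtime += quota              ->  55ms
 *   runtime_snap - runtime = 15ms ->  burst_time += 15ms
 *                                     (consumption beyond one quota)
 *   runtime = min(55, 50 + 20)    ->  55ms, runtime_snap = 55ms
 *
 * Burst time is therefore recorded only when more than one period's
 * quota was consumed since the previous refill.
 */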
static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
return &tg->cfs_bandwidth;
}
/* returns 0 on failure to allocate runtime */
static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
struct cfs_rq *cfs_rq, u64 target_runtime)
{
u64 min_amount, amount = 0;
lockdep_assert_held(&cfs_b->lock);
/* note: this is a positive sum as runtime_remaining <= 0 */
min_amount = target_runtime - cfs_rq->runtime_remaining;
if (cfs_b->quota == RUNTIME_INF)
amount = min_amount;
else {
start_cfs_bandwidth(cfs_b);
if (cfs_b->runtime > 0) {
amount = min(cfs_b->runtime, min_amount);
cfs_b->runtime -= amount;
cfs_b->idle = 0;
}
}
cfs_rq->runtime_remaining += amount;
return cfs_rq->runtime_remaining > 0;
}
/* returns 0 on failure to allocate runtime */
static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
int ret;
raw_spin_lock(&cfs_b->lock);
ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
raw_spin_unlock(&cfs_b->lock);
return ret;
}
static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
{
/* dock delta_exec before expiring quota (as it could span periods) */
cfs_rq->runtime_remaining -= delta_exec;
if (likely(cfs_rq->runtime_remaining > 0))
return;
if (cfs_rq->throttled)
return;
/*
* if we're unable to extend our runtime we resched so that the active
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
resched_curr(rq_of(cfs_rq));
}
static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
{
if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
return;
__account_cfs_rq_runtime(cfs_rq, delta_exec);
}
static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
return cfs_bandwidth_used() && cfs_rq->throttled;
}
/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
return cfs_bandwidth_used() && cfs_rq->throttle_count;
}
/*
* Ensure that neither of the group entities corresponding to src_cpu or
* dest_cpu are members of a throttled hierarchy when performing group
* load-balance operations.
*/
static inline int throttled_lb_pair(struct task_group *tg,
int src_cpu, int dest_cpu)
{
struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
src_cfs_rq = tg->cfs_rq[src_cpu];
dest_cfs_rq = tg->cfs_rq[dest_cpu];
return throttled_hierarchy(src_cfs_rq) ||
throttled_hierarchy(dest_cfs_rq);
}
static int tg_unthrottle_up(struct task_group *tg, void *data)
{
struct rq *rq = data;
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
cfs_rq->throttle_count--;
if (!cfs_rq->throttle_count) {
cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
cfs_rq->throttled_clock_pelt;
/* Add cfs_rq with load or one or more already running entities to the list */
if (!cfs_rq_is_decayed(cfs_rq))
list_add_leaf_cfs_rq(cfs_rq);
if (cfs_rq->throttled_clock_self) {
u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;
cfs_rq->throttled_clock_self = 0;
if (SCHED_WARN_ON((s64)delta < 0))
delta = 0;
cfs_rq->throttled_clock_self_time += delta;
}
}
return 0;
}
static int tg_throttle_down(struct task_group *tg, void *data)
{
struct rq *rq = data;
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
/* group is entering throttled state, stop time */
if (!cfs_rq->throttle_count) {
cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
list_del_leaf_cfs_rq(cfs_rq);
SCHED_WARN_ON(cfs_rq->throttled_clock_self);
if (cfs_rq->nr_running)
cfs_rq->throttled_clock_self = rq_clock(rq);
}
cfs_rq->throttle_count++;
return 0;
}
static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
long task_delta, idle_task_delta, dequeue = 1;
raw_spin_lock(&cfs_b->lock);
/* This will start the period timer if necessary */
if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
/*
* We have raced with bandwidth becoming available, and if we
* actually throttled the timer might not unthrottle us for an
* entire period. We additionally needed to make sure that any
* subsequent check_cfs_rq_runtime calls agree not to throttle
* us, as we may commit to do cfs put_prev+pick_next, so we ask
* for 1ns of runtime rather than just check cfs_b.
*/
dequeue = 0;
} else {
list_add_tail_rcu(&cfs_rq->throttled_list,
&cfs_b->throttled_cfs_rq);
}
raw_spin_unlock(&cfs_b->lock);
if (!dequeue)
return false; /* Throttle no longer required. */
se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
/* freeze hierarchy runnable averages while throttled */
rcu_read_lock();
walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
rcu_read_unlock();
task_delta = cfs_rq->h_nr_running;
idle_task_delta = cfs_rq->idle_h_nr_running;
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
/* throttled entity or throttle-on-deactivate */
if (!se->on_rq)
goto done;
dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
if (cfs_rq_is_idle(group_cfs_rq(se)))
idle_task_delta = cfs_rq->h_nr_running;
qcfs_rq->h_nr_running -= task_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
if (qcfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
break;
}
}
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
/* throttled entity or throttle-on-deactivate */
if (!se->on_rq)
goto done;
update_load_avg(qcfs_rq, se, 0);
se_update_runnable(se);
if (cfs_rq_is_idle(group_cfs_rq(se)))
idle_task_delta = cfs_rq->h_nr_running;
qcfs_rq->h_nr_running -= task_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
}
/* At this point se is NULL and we are at root level */
sub_nr_running(rq, task_delta);
done:
/*
* Note: distribution will already see us throttled via the
* throttled-list. rq->lock protects completion.
*/
cfs_rq->throttled = 1;
SCHED_WARN_ON(cfs_rq->throttled_clock);
if (cfs_rq->nr_running)
cfs_rq->throttled_clock = rq_clock(rq);
return true;
}
void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
long task_delta, idle_task_delta;
se = cfs_rq->tg->se[cpu_of(rq)];
cfs_rq->throttled = 0;
update_rq_clock(rq);
raw_spin_lock(&cfs_b->lock);
if (cfs_rq->throttled_clock) {
cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
cfs_rq->throttled_clock = 0;
}
list_del_rcu(&cfs_rq->throttled_list);
raw_spin_unlock(&cfs_b->lock);
/* update hierarchical throttle state */
walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
if (!cfs_rq->load.weight) {
if (!cfs_rq->on_list)
return;
/*
* Nothing to run but something to decay (on_list)?
* Complete the branch.
*/
for_each_sched_entity(se) {
if (list_add_leaf_cfs_rq(cfs_rq_of(se)))
break;
}
goto unthrottle_throttle;
}
task_delta = cfs_rq->h_nr_running;
idle_task_delta = cfs_rq->idle_h_nr_running;
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
if (se->on_rq)
break;
enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
if (cfs_rq_is_idle(group_cfs_rq(se)))
idle_task_delta = cfs_rq->h_nr_running;
qcfs_rq->h_nr_running += task_delta;
qcfs_rq->idle_h_nr_running += idle_task_delta;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(qcfs_rq))
goto unthrottle_throttle;
}
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
update_load_avg(qcfs_rq, se, UPDATE_TG);
se_update_runnable(se);
if (cfs_rq_is_idle(group_cfs_rq(se)))
idle_task_delta = cfs_rq->h_nr_running;
qcfs_rq->h_nr_running += task_delta;
qcfs_rq->idle_h_nr_running += idle_task_delta;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(qcfs_rq))
goto unthrottle_throttle;
}
/* At this point se is NULL and we are at root level */
add_nr_running(rq, task_delta);
unthrottle_throttle:
assert_list_leaf_cfs_rq(rq);
/* Determine whether we need to wake up potentially idle CPU: */
if (rq->curr == rq->idle && rq->cfs.nr_running)
resched_curr(rq);
}
#ifdef CONFIG_SMP
static void __cfsb_csd_unthrottle(void *arg)
{
struct cfs_rq *cursor, *tmp;
struct rq *rq = arg;
struct rq_flags rf;
rq_lock(rq, &rf);
/*
* Iterating over the list can trigger several calls to
* update_rq_clock() in unthrottle_cfs_rq().
* Do it once and skip the potential next ones.
*/
update_rq_clock(rq);
rq_clock_start_loop_update(rq);
/*
* Since we hold rq lock we're safe from concurrent manipulation of
* the CSD list. However, this RCU critical section annotates the
* fact that we pair with sched_free_group_rcu(), so that we cannot
* race with group being freed in the window between removing it
* from the list and advancing to the next entry in the list.
*/
rcu_read_lock();
list_for_each_entry_safe(cursor, tmp, &rq->cfsb_csd_list,
throttled_csd_list) {
list_del_init(&cursor->throttled_csd_list);
if (cfs_rq_throttled(cursor))
unthrottle_cfs_rq(cursor);
}
rcu_read_unlock();
rq_clock_stop_loop_update(rq);
rq_unlock(rq, &rf);
}
static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
bool first;
if (rq == this_rq()) {
unthrottle_cfs_rq(cfs_rq);
return;
}
/* Already enqueued */
if (SCHED_WARN_ON(!list_empty(&cfs_rq->throttled_csd_list)))
return;
first = list_empty(&rq->cfsb_csd_list);
list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list);
if (first)
smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd);
}
#else
static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
{
unthrottle_cfs_rq(cfs_rq);
}
#endif
static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
{
lockdep_assert_rq_held(rq_of(cfs_rq));
if (SCHED_WARN_ON(!cfs_rq_throttled(cfs_rq) ||
cfs_rq->runtime_remaining <= 0))
return;
__unthrottle_cfs_rq_async(cfs_rq);
}
static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
{
struct cfs_rq *local_unthrottle = NULL;
int this_cpu = smp_processor_id();
u64 runtime, remaining = 1;
bool throttled = false;
struct cfs_rq *cfs_rq;
struct rq_flags rf;
struct rq *rq;
rcu_read_lock();
list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
throttled_list) {
rq = rq_of(cfs_rq);
if (!remaining) {
throttled = true;
break;
}
rq_lock_irqsave(rq, &rf);
if (!cfs_rq_throttled(cfs_rq))
goto next;
#ifdef CONFIG_SMP
/* Already queued for async unthrottle */
if (!list_empty(&cfs_rq->throttled_csd_list))
goto next;
#endif
/* By the above checks, this should never be true */
SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
raw_spin_lock(&cfs_b->lock);
runtime = -cfs_rq->runtime_remaining + 1;
if (runtime > cfs_b->runtime)
runtime = cfs_b->runtime;
cfs_b->runtime -= runtime;
remaining = cfs_b->runtime;
raw_spin_unlock(&cfs_b->lock);
cfs_rq->runtime_remaining += runtime;
/* we check whether we're throttled above */
if (cfs_rq->runtime_remaining > 0) {
if (cpu_of(rq) != this_cpu ||
SCHED_WARN_ON(local_unthrottle))
unthrottle_cfs_rq_async(cfs_rq);
else
local_unthrottle = cfs_rq;
} else {
throttled = true;
}
next:
rq_unlock_irqrestore(rq, &rf);
}
rcu_read_unlock();
if (local_unthrottle) {
rq = cpu_rq(this_cpu);
rq_lock_irqsave(rq, &rf);
if (cfs_rq_throttled(local_unthrottle))
unthrottle_cfs_rq(local_unthrottle);
rq_unlock_irqrestore(rq, &rf);
}
return throttled;
}
/*
* Responsible for refilling a task_group's bandwidth and unthrottling its
* cfs_rqs as appropriate. If there has been no activity within the last
* period the timer is deactivated until scheduling resumes; cfs_b->idle is
* used to track this state.
*/
static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
{
int throttled;
/* no need to continue the timer with no bandwidth constraint */
if (cfs_b->quota == RUNTIME_INF)
goto out_deactivate;
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
cfs_b->nr_periods += overrun;
/* Refill extra burst quota even if cfs_b->idle */
__refill_cfs_bandwidth_runtime(cfs_b);
/*
* idle depends on !throttled (for the case of a large deficit), and if
* we're going inactive then everything else can be deferred
*/
if (cfs_b->idle && !throttled)
goto out_deactivate;
if (!throttled) {
/* mark as potentially idle for the upcoming period */
cfs_b->idle = 1;
return 0;
}
/* account preceding periods in which throttling occurred */
cfs_b->nr_throttled += overrun;
/*
* This check is repeated as we release cfs_b->lock while we unthrottle.
*/
while (throttled && cfs_b->runtime > 0) {
raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
/* we can't nest cfs_b->lock while distributing bandwidth */
throttled = distribute_cfs_runtime(cfs_b);
raw_spin_lock_irqsave(&cfs_b->lock, flags);
}
/*
* While we are ensured activity in the period following an
* unthrottle, this also covers the case in which the new bandwidth is
* insufficient to cover the existing bandwidth deficit. (Forcing the
* timer to remain active while there are any throttled entities.)
*/
cfs_b->idle = 0;
return 0;
out_deactivate:
return 1;
}
/* a cfs_rq won't donate quota below this amount */
static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
/* minimum remaining period time to redistribute slack quota */
static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
/* how long we wait to gather additional slack before distributing */
static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
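/*
 * Illustrative timeline (made-up numbers): a cfs_rq dequeues its last
 * task with 3ms of local runtime left. 3ms - min_cfs_rq_runtime leaves
 * 2ms of slack to return to the global pool; the slack timer then fires
 * after cfs_bandwidth_slack_period (5ms) to redistribute it, unless a
 * quota refresh is already due within slack period plus
 * min_bandwidth_expiration, in which case the refresh handles it.
 */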
/*
* Are we near the end of the current quota period?
*
* Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
* hrtimer base being cleared by hrtimer_start. In the case of
* migrate_hrtimers, base is never cleared, so we are fine.
*/
static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
{
struct hrtimer *refresh_timer = &cfs_b->period_timer;
s64 remaining;
/* if the callback is running, a quota refresh is already occurring */
if (hrtimer_callback_running(refresh_timer))
return 1;
/* is a quota refresh about to occur? */
remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
if (remaining < (s64)min_expire)
return 1;
return 0;
}
static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
{
u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
/* if there's a quota refresh soon don't bother with slack */
if (runtime_refresh_within(cfs_b, min_left))
return;
/* don't push forwards an existing deferred unthrottle */
if (cfs_b->slack_started)
return;
cfs_b->slack_started = true;
hrtimer_start(&cfs_b->slack_timer,
ns_to_ktime(cfs_bandwidth_slack_period),
HRTIMER_MODE_REL);
}
/* we know any runtime found here is valid as update_curr() precedes return */
static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
if (slack_runtime <= 0)
return;
raw_spin_lock(&cfs_b->lock);
if (cfs_b->quota != RUNTIME_INF) {
cfs_b->runtime += slack_runtime;
/* we are under rq->lock, defer unthrottling using a timer */
if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
!list_empty(&cfs_b->throttled_cfs_rq))
start_cfs_slack_bandwidth(cfs_b);
}
raw_spin_unlock(&cfs_b->lock);
/* even if it's not valid for return we don't want to try again */
cfs_rq->runtime_remaining -= slack_runtime;
}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
if (!cfs_bandwidth_used())
return;
if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
return;
__return_cfs_rq_runtime(cfs_rq);
}
/*
* This is done with a timer (instead of inline with bandwidth return) since
* it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
*/
static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
{
u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
unsigned long flags;
/* confirm we're still not at a refresh boundary */
raw_spin_lock_irqsave(&cfs_b->lock, flags);
cfs_b->slack_started = false;
if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
return;
}
if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
runtime = cfs_b->runtime;
raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
if (!runtime)
return;
distribute_cfs_runtime(cfs_b);
}
/*
* When a group wakes up we want to make sure that its quota is not already
* expired/exceeded, otherwise it may be allowed to steal additional ticks of
* runtime as update_curr() throttling cannot trigger until it's on-rq.
*/
static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
{
if (!cfs_bandwidth_used())
return;
/* an active group must be handled by the update_curr()->put() path */
if (!cfs_rq->runtime_enabled || cfs_rq->curr)
return;
/* ensure the group is not already throttled */
if (cfs_rq_throttled(cfs_rq))
return;
/* update runtime allocation */
account_cfs_rq_runtime(cfs_rq, 0);
if (cfs_rq->runtime_remaining <= 0)
throttle_cfs_rq(cfs_rq);
}
static void sync_throttle(struct task_group *tg, int cpu)
{
struct cfs_rq *pcfs_rq, *cfs_rq;
if (!cfs_bandwidth_used())
return;
if (!tg->parent)
return;
cfs_rq = tg->cfs_rq[cpu];
pcfs_rq = tg->parent->cfs_rq[cpu];
cfs_rq->throttle_count = pcfs_rq->throttle_count;
cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
}
/* conditionally throttle active cfs_rq's from put_prev_entity() */
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
if (!cfs_bandwidth_used())
return false;
if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
return false;
/*
* it's possible for a throttled entity to be forced into a running
* state (e.g. set_curr_task), in this case we're finished.
*/
if (cfs_rq_throttled(cfs_rq))
return true;
return throttle_cfs_rq(cfs_rq);
}
static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
{
struct cfs_bandwidth *cfs_b =
container_of(timer, struct cfs_bandwidth, slack_timer);
do_sched_cfs_slack_timer(cfs_b);
return HRTIMER_NORESTART;
}
extern const u64 max_cfs_quota_period;
static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
{
struct cfs_bandwidth *cfs_b =
container_of(timer, struct cfs_bandwidth, period_timer);
unsigned long flags;
int overrun;
int idle = 0;
int count = 0;
raw_spin_lock_irqsave(&cfs_b->lock, flags);
for (;;) {
overrun = hrtimer_forward_now(timer, cfs_b->period);
if (!overrun)
break;
idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
if (++count > 3) {
u64 new, old = ktime_to_ns(cfs_b->period);
/*
* Grow period by a factor of 2 to avoid losing precision.
* Precision loss in the quota/period ratio can cause __cfs_schedulable
* to fail.
*/
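/*
 * Illustrative scaling (made-up numbers): a 100ms period with 25ms
 * quota becomes a 200ms period with 50ms quota; the 25% ratio is
 * preserved while each timer activation now covers twice the wall time.
 */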
new = old * 2;
if (new < max_cfs_quota_period) {
cfs_b->period = ns_to_ktime(new);
cfs_b->quota *= 2;
cfs_b->burst *= 2;
pr_warn_ratelimited(
"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
smp_processor_id(),
div_u64(new, NSEC_PER_USEC),
div_u64(cfs_b->quota, NSEC_PER_USEC));
} else {
pr_warn_ratelimited(
"cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
smp_processor_id(),
div_u64(old, NSEC_PER_USEC),
div_u64(cfs_b->quota, NSEC_PER_USEC));
}
/* reset count so we don't come right back in here */
count = 0;
}
}
if (idle)
cfs_b->period_active = 0;
raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent)
{
raw_spin_lock_init(&cfs_b->lock);
cfs_b->runtime = 0;
cfs_b->quota = RUNTIME_INF;
cfs_b->period = ns_to_ktime(default_cfs_period());
cfs_b->burst = 0;
cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF;
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
cfs_b->period_timer.function = sched_cfs_period_timer;
/* Add a random offset so that timers interleave */
hrtimer_set_expires(&cfs_b->period_timer,
get_random_u32_below(cfs_b->period));
hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cfs_b->slack_timer.function = sched_cfs_slack_timer;
cfs_b->slack_started = false;
}
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
cfs_rq->runtime_enabled = 0;
INIT_LIST_HEAD(&cfs_rq->throttled_list);
#ifdef CONFIG_SMP
INIT_LIST_HEAD(&cfs_rq->throttled_csd_list);
#endif
}
void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
lockdep_assert_held(&cfs_b->lock);
if (cfs_b->period_active)
return;
cfs_b->period_active = 1;
hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
}
static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
int __maybe_unused i;
/* init_cfs_bandwidth() was not called */
if (!cfs_b->throttled_cfs_rq.next)
return;
hrtimer_cancel(&cfs_b->period_timer);
hrtimer_cancel(&cfs_b->slack_timer);
/*
* It is possible that we still have some cfs_rq's pending on a CSD
* list, though this race is very rare. In order for this to occur, we
* must have raced with the last task leaving the group while there
* exist throttled cfs_rq(s), and the period_timer must have queued the
* CSD item but the remote cpu has not yet processed it. To handle this,
* we can simply flush all pending CSD work inline here. We're
* guaranteed at this point that no additional cfs_rq of this group can
* join a CSD list.
*/
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
unsigned long flags;
if (list_empty(&rq->cfsb_csd_list))
continue;
local_irq_save(flags);
__cfsb_csd_unthrottle(rq);
local_irq_restore(flags);
}
#endif
}
/*
* Both these CPU hotplug callbacks race against unregister_fair_sched_group()
*
* The race is harmless, since modifying bandwidth settings of unhooked group
* bits doesn't do much.
*/
/* cpu online callback */
static void __maybe_unused update_runtime_enabled(struct rq *rq)
{
struct task_group *tg;
lockdep_assert_rq_held(rq);
rcu_read_lock();
list_for_each_entry_rcu(tg, &task_groups, list) {
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
raw_spin_lock(&cfs_b->lock);
cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
raw_spin_unlock(&cfs_b->lock);
}
rcu_read_unlock();
}
/* cpu offline callback */
static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
{
struct task_group *tg;
lockdep_assert_rq_held(rq);
/*
 * The rq clock has already been updated in
 * set_rq_offline(), so we should skip updating
 * the rq clock again in unthrottle_cfs_rq().
 */
rq_clock_start_loop_update(rq);
rcu_read_lock();
list_for_each_entry_rcu(tg, &task_groups, list) {
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
if (!cfs_rq->runtime_enabled)
continue;
/*
* clock_task is not advancing so we just need to make sure
* there's some valid quota amount
*/
cfs_rq->runtime_remaining = 1;
/*
* Offline rq is schedulable till CPU is completely disabled
* in take_cpu_down(), so we prevent new cfs throttling here.
*/
cfs_rq->runtime_enabled = 0;
if (cfs_rq_throttled(cfs_rq))
unthrottle_cfs_rq(cfs_rq);
}
rcu_read_unlock();
rq_clock_stop_loop_update(rq);
}
bool cfs_task_bw_constrained(struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
if (!cfs_bandwidth_used())
return false;
if (cfs_rq->runtime_enabled ||
tg_cfs_bandwidth(cfs_rq->tg)->hierarchical_quota != RUNTIME_INF)
return true;
return false;
}
#ifdef CONFIG_NO_HZ_FULL
/* called from pick_next_task_fair() */
static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p)
{
int cpu = cpu_of(rq);
if (!sched_feat(HZ_BW) || !cfs_bandwidth_used())
return;
if (!tick_nohz_full_cpu(cpu))
return;
if (rq->nr_running != 1)
return;
/*
* We know there is only one task runnable and we've just picked it. The
* normal enqueue path will have cleared TICK_DEP_BIT_SCHED if we will
* be otherwise able to stop the tick. Just need to check if we are using
* bandwidth control.
*/
if (cfs_task_bw_constrained(p))
tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#endif
#else /* CONFIG_CFS_BANDWIDTH */
static inline bool cfs_bandwidth_used(void)
{
return false;
}
static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
static inline void sync_throttle(struct task_group *tg, int cpu) {}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
return 0;
}
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
return 0;
}
static inline int throttled_lb_pair(struct task_group *tg,
int src_cpu, int dest_cpu)
{
return 0;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent) {}
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
#endif
static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
return NULL;
}
static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
static inline void update_runtime_enabled(struct rq *rq) {}
static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
#ifdef CONFIG_CGROUP_SCHED
bool cfs_task_bw_constrained(struct task_struct *p)
{
return false;
}
#endif
#endif /* CONFIG_CFS_BANDWIDTH */
#if !defined(CONFIG_CFS_BANDWIDTH) || !defined(CONFIG_NO_HZ_FULL)
static inline void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) {}
#endif
/**************************************************
* CFS operations on tasks:
*/
#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
struct sched_entity *se = &p->se;
SCHED_WARN_ON(task_rq(p) != rq);
if (rq->cfs.h_nr_running > 1) {
u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
u64 slice = se->slice;
s64 delta = slice - ran;
if (delta < 0) {
if (task_current(rq, p))
resched_curr(rq);
return;
}
hrtick_start(rq, delta);
}
}
/*
* called from enqueue/dequeue and updates the hrtick when the
* current task is from our class and nr_running is low enough
* to matter.
*/
static void hrtick_update(struct rq *rq)
{
struct task_struct *curr = rq->curr;
if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
return;
hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}
static inline void hrtick_update(struct rq *rq)
{
}
#endif
#ifdef CONFIG_SMP
static inline bool cpu_overutilized(int cpu)
{
unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
/* Return true only if the utilization doesn't fit the CPU's capacity */
return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
}
static inline void update_overutilized_status(struct rq *rq)
{
if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
}
}
#else
static inline void update_overutilized_status(struct rq *rq) { }
#endif
/* Runqueue only has SCHED_IDLE tasks enqueued */
static int sched_idle_rq(struct rq *rq)
{
return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
rq->nr_running);
}
#ifdef CONFIG_SMP
static int sched_idle_cpu(int cpu)
{
return sched_idle_rq(cpu_rq(cpu));
}
#endif
/*
* The enqueue_task method is called before nr_running is
* increased. Here we update the fair scheduling stats and
* then put the task into the rbtree:
*/
static void
enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
int idle_h_nr_running = task_has_idle_policy(p);
int task_new = !(flags & ENQUEUE_WAKEUP);
/*
* The code below (indirectly) updates schedutil which looks at
* the cfs_rq utilization to select a frequency.
* Let's add the task's estimated utilization to the cfs_rq's
* estimated utilization, before we update schedutil.
*/
util_est_enqueue(&rq->cfs, p);
/*
* If in_iowait is set, the code below may not trigger any cpufreq
* utilization updates, so do it here explicitly with the IOWAIT flag
* passed.
*/
if (p->in_iowait)
cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
for_each_sched_entity(se) {
if (se->on_rq)
break;
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, flags);
cfs_rq->h_nr_running++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = 1;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
goto enqueue_throttle;
flags = ENQUEUE_WAKEUP;
}
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
update_load_avg(cfs_rq, se, UPDATE_TG);
se_update_runnable(se);
update_cfs_group(se);
cfs_rq->h_nr_running++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = 1;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
goto enqueue_throttle;
}
/* At this point se is NULL and we are at root level */
add_nr_running(rq, 1);
/*
* Since new tasks are assigned an initial util_avg equal to
* half of the spare capacity of their CPU, tiny tasks have the
* ability to cross the overutilized threshold, which will
* result in the load balancer ruining all the task placement
* done by EAS. As a way to mitigate that effect, do not account
* for the first enqueue operation of new tasks during the
* overutilized flag detection.
*
* A better way of solving this problem would be to wait for
* the PELT signals of tasks to converge before taking them
* into account, but that is not straightforward to implement,
* and the following generally works well enough in practice.
*/
if (!task_new)
update_overutilized_status(rq);
enqueue_throttle:
assert_list_leaf_cfs_rq(rq);
hrtick_update(rq);
}
static void set_next_buddy(struct sched_entity *se);
/*
* The dequeue_task method is called before nr_running is
* decreased. We remove the task from the rbtree and
* update the fair scheduling stats:
*/
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
int task_sleep = flags & DEQUEUE_SLEEP;
int idle_h_nr_running = task_has_idle_policy(p);
bool was_sched_idle = sched_idle_rq(rq);
util_est_dequeue(&rq->cfs, p);
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, flags);
cfs_rq->h_nr_running--;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = 1;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
goto dequeue_throttle;
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
/*
* Bias pick_next to pick a task from this cfs_rq, as
* p is sleeping when it is within its sched_slice.
*/
if (task_sleep && se && !throttled_hierarchy(cfs_rq))
set_next_buddy(se);
break;
}
flags |= DEQUEUE_SLEEP;
}
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
update_load_avg(cfs_rq, se, UPDATE_TG);
se_update_runnable(se);
update_cfs_group(se);
cfs_rq->h_nr_running--;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
if (cfs_rq_is_idle(cfs_rq))
idle_h_nr_running = 1;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
goto dequeue_throttle;
}
/* At this point se is NULL and we are at root level */
sub_nr_running(rq, 1);
/* balance early to pull high priority tasks */
if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
rq->next_balance = jiffies;
dequeue_throttle:
util_est_update(&rq->cfs, p, task_sleep);
hrtick_update(rq);
}
#ifdef CONFIG_SMP
/* Working cpumask for: load_balance, load_balance_newidle. */
static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);
static DEFINE_PER_CPU(cpumask_var_t, should_we_balance_tmpmask);
#ifdef CONFIG_NO_HZ_COMMON
static struct {
cpumask_var_t idle_cpus_mask;
atomic_t nr_cpus;
int has_blocked; /* Idle CPUs have blocked load */
int needs_update; /* Newly idle CPUs need their next_balance collated */
unsigned long next_balance; /* in jiffy units */
unsigned long next_blocked; /* Next update of blocked load in jiffies */
} nohz ____cacheline_aligned;
#endif /* CONFIG_NO_HZ_COMMON */
static unsigned long cpu_load(struct rq *rq)
{
return cfs_rq_load_avg(&rq->cfs);
}
/*
* cpu_load_without - compute CPU load without any contributions from *p
* @cpu: the CPU which load is requested
* @p: the task which load should be discounted
*
* The load of a CPU is defined by the load of tasks currently enqueued on that
* CPU as well as tasks which are currently sleeping after an execution on that
* CPU.
*
* This method returns the load of the specified CPU by discounting the load of
* the specified task, whenever the task is currently contributing to the CPU
* load.
*/
static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
{
struct cfs_rq *cfs_rq;
unsigned int load;
/* Task has no contribution or is new */
if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
return cpu_load(rq);
cfs_rq = &rq->cfs;
load = READ_ONCE(cfs_rq->avg.load_avg);
/* Discount task's load from CPU's load */
lsub_positive(&load, task_h_load(p));
return load;
}
static unsigned long cpu_runnable(struct rq *rq)
{
return cfs_rq_runnable_avg(&rq->cfs);
}
static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
{
struct cfs_rq *cfs_rq;
unsigned int runnable;
/* Task has no contribution or is new */
if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
return cpu_runnable(rq);
cfs_rq = &rq->cfs;
runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
/* Discount task's runnable from CPU's runnable */
lsub_positive(&runnable, p->se.avg.runnable_avg);
return runnable;
}
static unsigned long capacity_of(int cpu)
{
return cpu_rq(cpu)->cpu_capacity;
}
static void record_wakee(struct task_struct *p)
{
/*
* Only decay a single time; tasks that have less than 1 wakeup per
* jiffy will not have built up many flips.
*/
if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
current->wakee_flips >>= 1;
current->wakee_flip_decay_ts = jiffies;
}
if (current->last_wakee != p) {
current->last_wakee = p;
current->wakee_flips++;
}
}
/*
* Detect M:N waker/wakee relationships via a switching-frequency heuristic.
*
* A waker of many should wake a different task than the one last awakened
* at a frequency roughly N times higher than one of its wakees.
*
* In order to determine whether we should let the load spread vs consolidate
* to shared cache, we look for a minimum 'flip' frequency of llc_size in one
* partner, and a factor of llc_size higher frequency in the other.
*
* With both conditions met, we can be relatively sure that the relationship is
* non-monogamous, with partner count exceeding socket size.
*
* Waker/wakee being client/server, worker/dispatcher, interrupt source or
* whatever is irrelevant; the spread criterion is that the apparent partner
* count exceeds the socket size.
*/
static int wake_wide(struct task_struct *p)
{
unsigned int master = current->wakee_flips;
unsigned int slave = p->wakee_flips;
int factor = __this_cpu_read(sd_llc_size);
if (master < slave)
swap(master, slave);
if (slave < factor || master < slave * factor)
return 0;
return 1;
}
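/*
 * Illustrative numbers: with an LLC of factor = 8, a server with
 * master = 100 wakee flips waking a client with slave = 10 gives:
 *
 *   slave (10)   >= factor (8)          -> enough distinct partners
 *   master (100) >= slave * factor (80) -> waker flips ~N times faster
 *
 * so wake_wide() returns 1 and the wakeup is allowed to spread rather
 * than being pulled onto the waker's cache domain.
 */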
/*
* The purpose of wake_affine() is to quickly determine on which CPU we can run
* soonest. For the purpose of speed we only consider the waking and previous
* CPU.
*
* wake_affine_idle() - only considers 'now', it checks if the waking CPU is
* cache-affine and is (or will be) idle.
*
* wake_affine_weight() - considers the weight to reflect the average
* scheduling latency of the CPUs. This seems to work
* for the overloaded case.
*/
static int
wake_affine_idle(int this_cpu, int prev_cpu, int sync)
{
/*
* If this_cpu is idle, it implies the wakeup is from interrupt
* context. Only allow the move if cache is shared. Otherwise an
* interrupt intensive workload could force all tasks onto one
* node depending on the IO topology or IRQ affinity settings.
*
* If the prev_cpu is idle and cache affine then avoid a migration.
* There is no guarantee that the cache hot data from an interrupt
* is more important than cache hot data on the prev_cpu and from
* a cpufreq perspective, it's better to have higher utilisation
* on one CPU.
*/
if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
if (sync && cpu_rq(this_cpu)->nr_running == 1)
return this_cpu;
if (available_idle_cpu(prev_cpu))
return prev_cpu;
return nr_cpumask_bits;
}
static int
wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int prev_cpu, int sync)
{
s64 this_eff_load, prev_eff_load;
unsigned long task_load;
this_eff_load = cpu_load(cpu_rq(this_cpu));
if (sync) {
unsigned long current_load = task_h_load(current);
if (current_load > this_eff_load)
return this_cpu;
this_eff_load -= current_load;
}
task_load = task_h_load(p);
this_eff_load += task_load;
if (sched_feat(WA_BIAS))
this_eff_load *= 100;
this_eff_load *= capacity_of(prev_cpu);
prev_eff_load = cpu_load(cpu_rq(prev_cpu));
prev_eff_load -= task_load;
if (sched_feat(WA_BIAS))
prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
prev_eff_load *= capacity_of(this_cpu);
/*
* If sync, adjust the weight of prev_eff_load such that if
* prev_eff == this_eff that select_idle_sibling() will consider
* stacking the wakee on top of the waker if no other CPU is
* idle.
*/
if (sync)
prev_eff_load += 1;
return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
}
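/*
 * A worked instance of the comparison above, with made-up numbers and
 * equal CPU capacities of 1024, sync = 0: task_h_load(p) = 100,
 * cpu_load(this) = 300, cpu_load(prev) = 200, sd->imbalance_pct = 117
 * and WA_BIAS set. Then:
 *
 *	this_eff_load = (300 + 100) * 100 * 1024            = 40960000
 *	prev_eff_load = (200 - 100) * (100 + 17/2) * 1024   = 11059200
 *
 * this_eff_load is not smaller, so wake_affine_weight() returns
 * nr_cpumask_bits and the task stays near prev_cpu.
 */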
static int wake_affine(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int prev_cpu, int sync)
{
int target = nr_cpumask_bits;
if (sched_feat(WA_IDLE))
target = wake_affine_idle(this_cpu, prev_cpu, sync);
if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
schedstat_inc(p->stats.nr_wakeups_affine_attempts);
if (target != this_cpu)
return prev_cpu;
schedstat_inc(sd->ttwu_move_affine);
schedstat_inc(p->stats.nr_wakeups_affine);
return target;
}
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
/*
* find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
*/
static int
find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
unsigned long load, min_load = ULONG_MAX;
unsigned int min_exit_latency = UINT_MAX;
u64 latest_idle_timestamp = 0;
int least_loaded_cpu = this_cpu;
int shallowest_idle_cpu = -1;
int i;
/* Check if we have any choice: */
if (group->group_weight == 1)
return cpumask_first(sched_group_span(group));
/* Traverse only the allowed CPUs */
for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
struct rq *rq = cpu_rq(i);
if (!sched_core_cookie_match(rq, p))
continue;
if (sched_idle_cpu(i))
return i;
if (available_idle_cpu(i)) {
struct cpuidle_state *idle = idle_get_state(rq);
if (idle && idle->exit_latency < min_exit_latency) {
/*
* We give priority to a CPU whose idle state
* has the smallest exit latency irrespective
* of any idle timestamp.
*/
min_exit_latency = idle->exit_latency;
latest_idle_timestamp = rq->idle_stamp;
shallowest_idle_cpu = i;
} else if ((!idle || idle->exit_latency == min_exit_latency) &&
rq->idle_stamp > latest_idle_timestamp) {
/*
* If equal or no active idle state, then
* the most recently idled CPU might have
* a warmer cache.
*/
latest_idle_timestamp = rq->idle_stamp;
shallowest_idle_cpu = i;
}
} else if (shallowest_idle_cpu == -1) {
load = cpu_load(cpu_rq(i));
if (load < min_load) {
min_load = load;
least_loaded_cpu = i;
}
}
}
return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
}
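/*
 * Summary of the selection priority implemented above: a SCHED_IDLE CPU
 * is taken immediately; otherwise an idle CPU in the shallowest idle
 * state wins, with the most recently idled CPU breaking ties (warmer
 * cache); if no CPU is idle, the least loaded allowed CPU is used.
 */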
static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
int cpu, int prev_cpu, int sd_flag)
{
int new_cpu = cpu;
if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
return prev_cpu;
/*
* We need task's util for cpu_util_without, sync it up to
* prev_cpu's last_update_time.
*/
if (!(sd_flag & SD_BALANCE_FORK))
sync_entity_load_avg(&p->se);
while (sd) {
struct sched_group *group;
struct sched_domain *tmp;
int weight;
if (!(sd->flags & sd_flag)) {
sd = sd->child;
continue;
}
group = find_idlest_group(sd, p, cpu);
if (!group) {
sd = sd->child;
continue;
}
new_cpu = find_idlest_group_cpu(group, p, cpu);
if (new_cpu == cpu) {
/* Now try balancing at a lower domain level of 'cpu': */
sd = sd->child;
continue;
}
/* Now try balancing at a lower domain level of 'new_cpu': */
cpu = new_cpu;
weight = sd->span_weight;
sd = NULL;
for_each_domain(cpu, tmp) {
if (weight <= tmp->span_weight)
break;
if (tmp->flags & sd_flag)
sd = tmp;
}
}
return new_cpu;
}
static inline int __select_idle_cpu(int cpu, struct task_struct *p)
{
if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) &&
sched_cpu_cookie_match(cpu_rq(cpu), p))
return cpu;
return -1;
}
#ifdef CONFIG_SCHED_SMT
DEFINE_STATIC_KEY_FALSE(sched_smt_present);
EXPORT_SYMBOL_GPL(sched_smt_present);
static inline void set_idle_cores(int cpu, int val)
{
struct sched_domain_shared *sds;
sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
if (sds)
WRITE_ONCE(sds->has_idle_cores, val);
}
static inline bool test_idle_cores(int cpu)
{
struct sched_domain_shared *sds;
sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
if (sds)
return READ_ONCE(sds->has_idle_cores);
return false;
}
/*
* Scans the local SMT mask to see if the entire core is idle, and records this
* information in sd_llc_shared->has_idle_cores.
*
* Since SMT siblings share all cache levels, inspecting this limited remote
* state should be fairly cheap.
*/
void __update_idle_core(struct rq *rq)
{
int core = cpu_of(rq);
int cpu;
rcu_read_lock();
if (test_idle_cores(core))
goto unlock;
for_each_cpu(cpu, cpu_smt_mask(core)) {
if (cpu == core)
continue;
if (!available_idle_cpu(cpu))
goto unlock;
}
set_idle_cores(core, 1);
unlock:
rcu_read_unlock();
}
/*
* Scan the entire LLC domain for idle cores; this dynamically switches off if
* there are no idle cores left in the system; tracked through
* sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
*/
static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
{
bool idle = true;
int cpu;
for_each_cpu(cpu, cpu_smt_mask(core)) {
if (!available_idle_cpu(cpu)) {
idle = false;
if (*idle_cpu == -1) {
if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
*idle_cpu = cpu;
break;
}
continue;
}
break;
}
if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr))
*idle_cpu = cpu;
}
if (idle)
return core;
cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
return -1;
}
/*
* Scan the local SMT mask for idle CPUs.
*/
static int select_idle_smt(struct task_struct *p, int target)
{
int cpu;
for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
if (cpu == target)
continue;
if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
return cpu;
}
return -1;
}
#else /* CONFIG_SCHED_SMT */
static inline void set_idle_cores(int cpu, int val)
{
}
static inline bool test_idle_cores(int cpu)
{
return false;
}
static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
{
return __select_idle_cpu(core, p);
}
static inline int select_idle_smt(struct task_struct *p, int target)
{
return -1;
}
#endif /* CONFIG_SCHED_SMT */
/*
* Scan the LLC domain for idle CPUs; this is dynamically regulated by
* comparing the average scan cost (tracked in sd->avg_scan_cost) against the
* average idle time for this rq (as found in rq->avg_idle).
*/
static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
{
struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
int i, cpu, idle_cpu = -1, nr = INT_MAX;
struct sched_domain_shared *sd_share;
struct rq *this_rq = this_rq();
int this = smp_processor_id();
struct sched_domain *this_sd = NULL;
u64 time = 0;
cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
if (sched_feat(SIS_PROP) && !has_idle_core) {
u64 avg_cost, avg_idle, span_avg;
unsigned long now = jiffies;
this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
if (!this_sd)
return -1;
/*
* If we're busy, the assumption that the last idle period
* predicts the future is flawed; age away the remaining
* predicted idle time.
*/
if (unlikely(this_rq->wake_stamp < now)) {
while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
this_rq->wake_stamp++;
this_rq->wake_avg_idle >>= 1;
}
}
avg_idle = this_rq->wake_avg_idle;
avg_cost = this_sd->avg_scan_cost + 1;
span_avg = sd->span_weight * avg_idle;
if (span_avg > 4*avg_cost)
nr = div_u64(span_avg, avg_cost);
else
nr = 4;
time = cpu_clock(this);
}
if (sched_feat(SIS_UTIL)) {
sd_share = rcu_dereference(per_cpu(sd_llc_shared, target));
if (sd_share) {
/* because !--nr is the condition to stop scan */
nr = READ_ONCE(sd_share->nr_idle_scan) + 1;
/* overloaded LLC is unlikely to have idle cpu/core */
if (nr == 1)
return -1;
}
}
for_each_cpu_wrap(cpu, cpus, target + 1) {
if (has_idle_core) {
i = select_idle_core(p, cpu, cpus, &idle_cpu);
if ((unsigned int)i < nr_cpumask_bits)
return i;
} else {
if (!--nr)
return -1;
idle_cpu = __select_idle_cpu(cpu, p);
if ((unsigned int)idle_cpu < nr_cpumask_bits)
break;
}
}
if (has_idle_core)
set_idle_cores(target, false);
if (sched_feat(SIS_PROP) && this_sd && !has_idle_core) {
time = cpu_clock(this) - time;
/*
* Account for the scan cost of wakeups against the average
* idle time.
*/
this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time);
update_avg(&this_sd->avg_scan_cost, time);
}
return idle_cpu;
}
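/*
 * Worked SIS_PROP example, with hypothetical numbers: for an LLC with
 * sd->span_weight = 16, this_rq->wake_avg_idle = 1000 and
 * this_sd->avg_scan_cost + 1 = 500 (both in ns),
 * span_avg = 16 * 1000 = 16000 exceeds 4 * 500, so up to
 * nr = 16000 / 500 = 32 CPUs may be scanned. Had wake_avg_idle been
 * only 100ns, span_avg = 1600 < 2000 and the scan would be clipped to
 * the minimum of 4 CPUs.
 */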
/*
* Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
* the task fits. If no CPU is big enough, but there are idle ones, try to
* maximize capacity.
*/
static int
select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
{
unsigned long task_util, util_min, util_max, best_cap = 0;
int fits, best_fits = 0;
int cpu, best_cpu = -1;
struct cpumask *cpus;
cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
task_util = task_util_est(p);
util_min = uclamp_eff_value(p, UCLAMP_MIN);
util_max = uclamp_eff_value(p, UCLAMP_MAX);
for_each_cpu_wrap(cpu, cpus, target) {
unsigned long cpu_cap = capacity_of(cpu);
if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
continue;
fits = util_fits_cpu(task_util, util_min, util_max, cpu);
/* This CPU fits with all requirements */
if (fits > 0)
return cpu;
/*
* Only the min performance hint (i.e. uclamp_min) doesn't fit.
* Look for the CPU with best capacity.
*/
else if (fits < 0)
cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu));
/*
* First, select CPU which fits better (-1 being better than 0).
* Then, select the one with best capacity at same level.
*/
if ((fits < best_fits) ||
((fits == best_fits) && (cpu_cap > best_cap))) {
best_cap = cpu_cap;
best_cpu = cpu;
best_fits = fits;
}
}
return best_cpu;
}
static inline bool asym_fits_cpu(unsigned long util,
unsigned long util_min,
unsigned long util_max,
int cpu)
{
if (sched_asym_cpucap_active())
/*
* Return true only if the cpu fully fits the task requirements
* which include the utilization and the performance hints.
*/
return (util_fits_cpu(util, util_min, util_max, cpu) > 0);
return true;
}
/*
* Try and locate an idle core/thread in the LLC cache domain.
*/
static int select_idle_sibling(struct task_struct *p, int prev, int target)
{
bool has_idle_core = false;
struct sched_domain *sd;
unsigned long task_util, util_min, util_max;
int i, recent_used_cpu;
/*
* On an asymmetric system, update task utilization because we will check
* that the task fits the CPU's capacity.
*/
if (sched_asym_cpucap_active()) {
sync_entity_load_avg(&p->se);
task_util = task_util_est(p);
util_min = uclamp_eff_value(p, UCLAMP_MIN);
util_max = uclamp_eff_value(p, UCLAMP_MAX);
}
/*
* per-cpu select_rq_mask usage
*/
lockdep_assert_irqs_disabled();
if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
asym_fits_cpu(task_util, util_min, util_max, target))
return target;
/*
* If the previous CPU is cache affine and idle, don't be stupid:
*/
if (prev != target && cpus_share_cache(prev, target) &&
(available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
asym_fits_cpu(task_util, util_min, util_max, prev))
return prev;
/*
* Allow a per-cpu kthread to stack with the wakee if the
* kworker thread and the task's previous CPUs are the same.
* The assumption is that the wakee queued work for the
* per-cpu kthread that is now complete and the wakeup is
* essentially a sync wakeup. An obvious example of this
* pattern is IO completions.
*/
if (is_per_cpu_kthread(current) &&
in_task() &&
prev == smp_processor_id() &&
this_rq()->nr_running <= 1 &&
asym_fits_cpu(task_util, util_min, util_max, prev)) {
return prev;
}
/* Check a recently used CPU as a potential idle candidate: */
recent_used_cpu = p->recent_used_cpu;
p->recent_used_cpu = prev;
if (recent_used_cpu != prev &&
recent_used_cpu != target &&
cpus_share_cache(recent_used_cpu, target) &&
(available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) &&
asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
return recent_used_cpu;
}
/*
* For asymmetric CPU capacity systems, our domain of interest is
* sd_asym_cpucapacity rather than sd_llc.
*/
if (sched_asym_cpucap_active()) {
sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
/*
* On an asymmetric CPU capacity system where an exclusive
* cpuset defines a symmetric island (i.e. one unique
* capacity_orig value through the cpuset), the key will be set
* but the CPUs within that cpuset will not have a domain with
* SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
* capacity path.
*/
if (sd) {
i = select_idle_capacity(p, sd, target);
return ((unsigned)i < nr_cpumask_bits) ? i : target;
}
}
sd = rcu_dereference(per_cpu(sd_llc, target));
if (!sd)
return target;
if (sched_smt_active()) {
has_idle_core = test_idle_cores(target);
if (!has_idle_core && cpus_share_cache(prev, target)) {
i = select_idle_smt(p, prev);
if ((unsigned int)i < nr_cpumask_bits)
return i;
}
}
i = select_idle_cpu(p, sd, has_idle_core, target);
if ((unsigned)i < nr_cpumask_bits)
return i;
return target;
}
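/*
 * The fast path above therefore tries, in order: @target itself, a
 * cache-affine idle @prev, stacking on a per-cpu kthread waker, the
 * recently used CPU, the asymmetric-capacity scan, an idle SMT sibling
 * of @prev, and finally the full LLC scan, falling back to @target.
 */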
/**
* cpu_util() - Estimates the amount of CPU capacity used by CFS tasks.
* @cpu: the CPU to get the utilization for
* @p: task for which the CPU utilization should be predicted or NULL
* @dst_cpu: CPU @p migrates to, -1 if @p moves from @cpu or @p == NULL
* @boost: 1 to enable boosting, otherwise 0
*
* The unit of the return value must be the same as the one of CPU capacity
* so that CPU utilization can be compared with CPU capacity.
*
* CPU utilization is the sum of running time of runnable tasks plus the
* recent utilization of currently non-runnable tasks on that CPU.
* It represents the amount of CPU capacity currently used by CFS tasks in
* the range [0..max CPU capacity] with max CPU capacity being the CPU
* capacity at f_max.
*
* The estimated CPU utilization is defined as the maximum between CPU
* utilization and sum of the estimated utilization of the currently
* runnable tasks on that CPU. It preserves a utilization "snapshot" of
* previously-executed tasks, which helps better deduce how busy a CPU will
* be when a long-sleeping task wakes up. The contribution to CPU utilization
* of such a task would be significantly decayed at this point of time.
*
* Boosted CPU utilization is defined as max(CPU runnable, CPU utilization).
* CPU contention for CFS tasks can be detected by CPU runnable > CPU
* utilization. Boosting is implemented in cpu_util() so that internal
* users (e.g. EAS) can use it next to external users (e.g. schedutil),
* the latter via cpu_util_cfs_boost().
*
* CPU utilization can be higher than the current CPU capacity
* (f_curr/f_max * max CPU capacity) or even the max CPU capacity because
* of rounding errors as well as task migrations or wakeups of new tasks.
* CPU utilization has to be capped to fit into the [0..max CPU capacity]
* range. Otherwise a group of CPUs (CPU0 util = 121% + CPU1 util = 80%)
* could be seen as over-utilized even though CPU1 has 20% of spare CPU
* capacity. CPU utilization is allowed to overshoot current CPU capacity
* though since this is useful for predicting the CPU capacity required
* after task migrations (scheduler-driven DVFS).
*
* Return: (Boosted) (estimated) utilization for the specified CPU.
*/
static unsigned long
cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
{
struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
unsigned long runnable;
if (boost) {
runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
util = max(util, runnable);
}
/*
* If @dst_cpu is -1 or @p migrates from @cpu to @dst_cpu remove its
* contribution. If @p migrates from another CPU to @cpu add its
* contribution. In all the other cases @cpu is not impacted by the
* migration so its util_avg is already correct.
*/
if (p && task_cpu(p) == cpu && dst_cpu != cpu)
lsub_positive(&util, task_util(p));
else if (p && task_cpu(p) != cpu && dst_cpu == cpu)
util += task_util(p);
if (sched_feat(UTIL_EST)) {
unsigned long util_est;
util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
/*
* During wake-up @p isn't enqueued yet and doesn't contribute
* to any cpu_rq(cpu)->cfs.avg.util_est.enqueued.
* If @dst_cpu == @cpu add it to "simulate" cpu_util after @p
* has been enqueued.
*
* During exec (@dst_cpu = -1) @p is enqueued and does
* contribute to cpu_rq(cpu)->cfs.util_est.enqueued.
* Remove it to "simulate" cpu_util without @p's contribution.
*
* Despite the task_on_rq_queued(@p) check there is still a
* small window for a possible race when an exec
* select_task_rq_fair() races with LB's detach_task().
*
*	detach_task()
*	  deactivate_task()
*	    p->on_rq = TASK_ON_RQ_MIGRATING;
*	-------------------------------- A
*	  dequeue_task()                  \
*	    dequeue_task_fair()            + Race Time
*	      util_est_dequeue()          /
*	-------------------------------- B
*
* The additional check "current == p" is required to further
* reduce the race window.
*/
if (dst_cpu == cpu)
util_est += _task_util_est(p);
else if (p && unlikely(task_on_rq_queued(p) || current == p))
lsub_positive(&util_est, _task_util_est(p));
util = max(util, util_est);
}
return min(util, capacity_orig_of(cpu));
}
unsigned long cpu_util_cfs(int cpu)
{
return cpu_util(cpu, NULL, -1, 0);
}
unsigned long cpu_util_cfs_boost(int cpu)
{
return cpu_util(cpu, NULL, -1, 1);
}
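/*
 * Illustration of the migration semantics of cpu_util(), with made-up
 * numbers: if @p has task_util(p) = 100 and runs on CPU2 whose
 * cfs_rq->avg.util_avg = 350, then cpu_util(2, p, 1, 0) evaluates the
 * "after migration" view of CPU2 and discounts @p, yielding 250, while
 * cpu_util(1, p, 1, 0) adds @p's 100 on top of CPU1's own util_avg
 * (UTIL_EST and the capacity clamp then apply on top in both cases).
 */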
/*
* cpu_util_without: compute cpu utilization without any contributions from *p
* @cpu: the CPU which utilization is requested
* @p: the task which utilization should be discounted
*
* The utilization of a CPU is defined by the utilization of tasks currently
* enqueued on that CPU as well as tasks which are currently sleeping after an
* execution on that CPU.
*
* This method returns the utilization of the specified CPU by discounting the
* utilization of the specified task, whenever the task is currently
* contributing to the CPU utilization.
*/
static unsigned long cpu_util_without(int cpu, struct task_struct *p)
{
/* Task has no contribution or is new */
if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
p = NULL;
return cpu_util(cpu, p, -1, 0);
}
/*
* energy_env - Utilization landscape for energy estimation.
* @task_busy_time: Utilization contribution by the task for which we test the
* placement. Given by eenv_task_busy_time().
* @pd_busy_time: Utilization of the whole perf domain without the task
* contribution. Given by eenv_pd_busy_time().
* @cpu_cap: Maximum CPU capacity for the perf domain.
* @pd_cap: Entire perf domain capacity. (pd->nr_cpus * cpu_cap).
*/
struct energy_env {
unsigned long task_busy_time;
unsigned long pd_busy_time;
unsigned long cpu_cap;
unsigned long pd_cap;
};
/*
* Compute the task busy time for compute_energy(). This time cannot be
* injected directly into effective_cpu_util() because of the IRQ scaling.
* The latter only makes sense with the most recent CPUs where the task has
* run.
*/
static inline void eenv_task_busy_time(struct energy_env *eenv,
struct task_struct *p, int prev_cpu)
{
unsigned long busy_time, max_cap = arch_scale_cpu_capacity(prev_cpu);
unsigned long irq = cpu_util_irq(cpu_rq(prev_cpu));
if (unlikely(irq >= max_cap))
busy_time = max_cap;
else
busy_time = scale_irq_capacity(task_util_est(p), irq, max_cap);
eenv->task_busy_time = busy_time;
}
/*
* Compute the perf_domain (PD) busy time for compute_energy(), based on the
* utilization of each CPU in @pd_cpus. It does not, however, take clamping
* into account, since the ratio (utilization / cpu_capacity) is already
* enough to scale the EM-reported power consumption at the (eventually
* clamped) cpu_capacity.
*
* The contribution of the task @p for which we want to estimate the
* energy cost is removed (by cpu_util()) and must be calculated
* separately (see eenv_task_busy_time). This ensures:
*
* - A stable PD utilization, no matter which CPU of that PD we want to place
* the task on.
*
* - A fair comparison between CPUs as the task contribution (task_util())
* will always be the same no matter which CPU utilization we rely on
* (util_avg or util_est).
*
* Set @eenv busy time for the PD that spans @pd_cpus. This busy time can't
* exceed @eenv->pd_cap.
*/
static inline void eenv_pd_busy_time(struct energy_env *eenv,
struct cpumask *pd_cpus,
struct task_struct *p)
{
unsigned long busy_time = 0;
int cpu;
for_each_cpu(cpu, pd_cpus) {
unsigned long util = cpu_util(cpu, p, -1, 0);
busy_time += effective_cpu_util(cpu, util, ENERGY_UTIL, NULL);
}
eenv->pd_busy_time = min(eenv->pd_cap, busy_time);
}
/*
* Compute the maximum utilization for compute_energy() when the task @p
* is placed on the cpu @dst_cpu.
*
* Returns the maximum utilization among @eenv->cpus. This utilization can't
* exceed @eenv->cpu_cap.
*/
static inline unsigned long
eenv_pd_max_util(struct energy_env *eenv, struct cpumask *pd_cpus,
struct task_struct *p, int dst_cpu)
{
unsigned long max_util = 0;
int cpu;
for_each_cpu(cpu, pd_cpus) {
struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL;
unsigned long util = cpu_util(cpu, p, dst_cpu, 1);
unsigned long eff_util;
/*
* Performance domain frequency: utilization clamping
* must be considered since it affects the selection
* of the performance domain frequency.
* NOTE: in case RT tasks are running, by default the
* FREQUENCY_UTIL's utilization can be max OPP.
*/
eff_util = effective_cpu_util(cpu, util, FREQUENCY_UTIL, tsk);
max_util = max(max_util, eff_util);
}
return min(max_util, eenv->cpu_cap);
}
/*
* compute_energy(): Use the Energy Model to estimate the energy that @pd would
* consume for a given utilization landscape @eenv. When @dst_cpu < 0, the task
* contribution is ignored.
*/
static inline unsigned long
compute_energy(struct energy_env *eenv, struct perf_domain *pd,
struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu)
{
unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu);
unsigned long busy_time = eenv->pd_busy_time;
if (dst_cpu >= 0)
busy_time = min(eenv->pd_cap, busy_time + eenv->task_busy_time);
return em_cpu_energy(pd->em_pd, max_util, busy_time, eenv->cpu_cap);
}
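/*
 * Typical usage, as in find_energy_efficient_cpu() below: the energy
 * cost of a candidate placement is evaluated as a delta against the
 * PD's base energy computed without the task:
 *
 *	base_energy = compute_energy(&eenv, pd, cpus, p, -1);
 *	cur_delta   = compute_energy(&eenv, pd, cpus, p, max_spare_cap_cpu)
 *		      - base_energy;
 */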
/*
* find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
* waking task. find_energy_efficient_cpu() looks for the CPU with maximum
* spare capacity in each performance domain and uses it as a potential
* candidate to execute the task. Then, it uses the Energy Model to figure
* out which of the CPU candidates is the most energy-efficient.
*
* The rationale for this heuristic is as follows. In a performance domain,
* all the most energy efficient CPU candidates (according to the Energy
* Model) are those for which we'll request a low frequency. When there are
* several CPUs for which the frequency request will be the same, we don't
* have enough data to break the tie between them, because the Energy Model
* only includes active power costs. With this model, if we assume that
* frequency requests follow utilization (e.g. using schedutil), the CPU with
* the maximum spare capacity in a performance domain is guaranteed to be among
* the best candidates of the performance domain.
*
* In practice, it could be preferable from an energy standpoint to pack
* small tasks on a CPU in order to let other CPUs go in deeper idle states,
* but that could also hurt our chances to go cluster idle, and we have no
* way to tell with the current Energy Model if this is actually a good
* idea or not. So, find_energy_efficient_cpu() basically favors
* cluster-packing, and spreading inside a cluster. That should at least be
* a good thing for latency, and this is consistent with the idea that most
* of the energy savings of EAS come from the asymmetry of the system, and
* not so much from breaking the tie between identical CPUs. That's also the
* reason why EAS is enabled in the topology code only for systems where
* SD_ASYM_CPUCAPACITY is set.
*
* NOTE: Forkees are not accepted in the energy-aware wake-up path because
* they don't have any useful utilization data yet and it's not possible to
* forecast their impact on energy consumption. Consequently, they will be
* placed by find_idlest_cpu() on the least loaded CPU, which might turn out
* to be energy-inefficient in some use-cases. The alternative would be to
* bias new tasks towards specific types of CPUs first, or to try to infer
* their util_avg from the parent task, but those heuristics could hurt
* other use-cases too. So, until someone finds a better way to solve this,
* let's keep things simple by re-using the existing slow path.
*/
static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
{
struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0;
unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
struct root_domain *rd = this_rq()->rd;
int cpu, best_energy_cpu, target = -1;
int prev_fits = -1, best_fits = -1;
unsigned long best_thermal_cap = 0;
unsigned long prev_thermal_cap = 0;
struct sched_domain *sd;
struct perf_domain *pd;
struct energy_env eenv;
rcu_read_lock();
pd = rcu_dereference(rd->pd);
if (!pd || READ_ONCE(rd->overutilized))
goto unlock;
/*
* Energy-aware wake-up happens on the lowest sched_domain starting
* from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
*/
sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
sd = sd->parent;
if (!sd)
goto unlock;
target = prev_cpu;
sync_entity_load_avg(&p->se);
if (!uclamp_task_util(p, p_util_min, p_util_max))
goto unlock;
eenv_task_busy_time(&eenv, p, prev_cpu);
for (; pd; pd = pd->next) {
unsigned long util_min = p_util_min, util_max = p_util_max;
unsigned long cpu_cap, cpu_thermal_cap, util;
unsigned long cur_delta, max_spare_cap = 0;
unsigned long rq_util_min, rq_util_max;
unsigned long prev_spare_cap = 0;
int max_spare_cap_cpu = -1;
unsigned long base_energy;
int fits, max_fits = -1;
cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
if (cpumask_empty(cpus))
continue;
/* Account thermal pressure for the energy estimation */
cpu = cpumask_first(cpus);
cpu_thermal_cap = arch_scale_cpu_capacity(cpu);
cpu_thermal_cap -= arch_scale_thermal_pressure(cpu);
eenv.cpu_cap = cpu_thermal_cap;
eenv.pd_cap = 0;
for_each_cpu(cpu, cpus) {
struct rq *rq = cpu_rq(cpu);
eenv.pd_cap += cpu_thermal_cap;
if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
continue;
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
continue;
util = cpu_util(cpu, p, cpu, 0);
cpu_cap = capacity_of(cpu);
/*
* Skip CPUs that cannot satisfy the capacity request.
* IOW, placing the task there would make the CPU
* overutilized. Take uclamp into account to see how
* much capacity we can get out of the CPU; this is
* aligned with sched_cpu_util().
*/
if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
/*
* Open code uclamp_rq_util_with() except for
* the clamp() part, i.e. apply max aggregation
* only. The util_fits_cpu() logic requires
* operating on the non-clamped util but must use
* the max-aggregated uclamp_{min, max}.
*/
rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
util_min = max(rq_util_min, p_util_min);
util_max = max(rq_util_max, p_util_max);
}
fits = util_fits_cpu(util, util_min, util_max, cpu);
if (!fits)
continue;
lsub_positive(&cpu_cap, util);
if (cpu == prev_cpu) {
/* Always use prev_cpu as a candidate. */
prev_spare_cap = cpu_cap;
prev_fits = fits;
} else if ((fits > max_fits) ||
((fits == max_fits) && (cpu_cap > max_spare_cap))) {
/*
* Find the CPU with the maximum spare capacity
* among the remaining CPUs in the performance
* domain.
*/
max_spare_cap = cpu_cap;
max_spare_cap_cpu = cpu;
max_fits = fits;
}
}
if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
continue;
eenv_pd_busy_time(&eenv, cpus, p);
/* Compute the 'base' energy of the pd, without @p */
base_energy = compute_energy(&eenv, pd, cpus, p, -1);
/* Evaluate the energy impact of using prev_cpu. */
if (prev_spare_cap > 0) {
prev_delta = compute_energy(&eenv, pd, cpus, p,
prev_cpu);
/* CPU utilization has changed */
if (prev_delta < base_energy)
goto unlock;
prev_delta -= base_energy;
prev_thermal_cap = cpu_thermal_cap;
best_delta = min(best_delta, prev_delta);
}
/* Evaluate the energy impact of using max_spare_cap_cpu. */
if (max_spare_cap_cpu >= 0 && max_spare_cap > prev_spare_cap) {
/* Current best energy cpu fits better */
if (max_fits < best_fits)
continue;
/*
* Both don't fit performance hint (i.e. uclamp_min)
* but best energy cpu has better capacity.
*/
if ((max_fits < 0) &&
(cpu_thermal_cap <= best_thermal_cap))
continue;
cur_delta = compute_energy(&eenv, pd, cpus, p,
max_spare_cap_cpu);
/* CPU utilization has changed */
if (cur_delta < base_energy)
goto unlock;
cur_delta -= base_energy;
/*
* Both fit for the task but best energy cpu has lower
* energy impact.
*/
if ((max_fits > 0) && (best_fits > 0) &&
(cur_delta >= best_delta))
continue;
best_delta = cur_delta;
best_energy_cpu = max_spare_cap_cpu;
best_fits = max_fits;
best_thermal_cap = cpu_thermal_cap;
}
}
rcu_read_unlock();
if ((best_fits > prev_fits) ||
((best_fits > 0) && (best_delta < prev_delta)) ||
((best_fits < 0) && (best_thermal_cap > prev_thermal_cap)))
target = best_energy_cpu;
return target;
unlock:
rcu_read_unlock();
return target;
}
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
* that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
* SD_BALANCE_FORK, or SD_BALANCE_EXEC.
*
* Balances load by selecting the idlest CPU in the idlest group, or under
* certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
*
* Returns the target CPU number.
*/
static int
select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
{
int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
struct sched_domain *tmp, *sd = NULL;
int cpu = smp_processor_id();
int new_cpu = prev_cpu;
int want_affine = 0;
/* SD_flags and WF_flags share the first nibble */
int sd_flag = wake_flags & 0xF;
/*
* required for stable ->cpus_allowed
*/
lockdep_assert_held(&p->pi_lock);
if (wake_flags & WF_TTWU) {
record_wakee(p);
if ((wake_flags & WF_CURRENT_CPU) &&
cpumask_test_cpu(cpu, p->cpus_ptr))
return cpu;
if (sched_energy_enabled()) {
new_cpu = find_energy_efficient_cpu(p, prev_cpu);
if (new_cpu >= 0)
return new_cpu;
new_cpu = prev_cpu;
}
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
}
rcu_read_lock();
for_each_domain(cpu, tmp) {
/*
* If both 'cpu' and 'prev_cpu' are part of this domain,
* cpu is a valid SD_WAKE_AFFINE target.
*/
if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
if (cpu != prev_cpu)
new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
sd = NULL; /* Prefer wake_affine over balance flags */
break;
}
/*
* Usually only true for WF_EXEC and WF_FORK, as sched_domains
* usually do not have SD_BALANCE_WAKE set. That means wakeup
* will usually go to the fast path.
*/
if (tmp->flags & sd_flag)
sd = tmp;
else if (!want_affine)
break;
}
if (unlikely(sd)) {
/* Slow path */
new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
} else if (wake_flags & WF_TTWU) { /* XXX always ? */
/* Fast path */
new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
}
rcu_read_unlock();
return new_cpu;
}
/*
* Called immediately before a task is migrated to a new CPU; task_cpu(p) and
* cfs_rq_of(p) references at time of call are still valid and identify the
* previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
*/
static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
struct sched_entity *se = &p->se;
if (!task_on_rq_migrating(p)) {
remove_entity_load_avg(se);
/*
* Here, the task's PELT values have been updated according to
* the current rq's clock. But if that clock hasn't been
* updated in a while, a substantial idle time will be missed,
* leading to an inflation after wake-up on the new rq.
*
* Estimate the missing time from the cfs_rq last_update_time
* and update sched_avg to improve the PELT continuity after
* migration.
*/
migrate_se_pelt_lag(se);
}
/* Tell new CPU we are migrated */
se->avg.last_update_time = 0;
update_scan_period(p, new_cpu);
}
static void task_dead_fair(struct task_struct *p)
{
remove_entity_load_avg(&p->se);
}
static int
balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
if (rq->nr_running)
return 1;
return newidle_balance(rq, rf) != 0;
}
#endif /* CONFIG_SMP */
static void set_next_buddy(struct sched_entity *se)
{
for_each_sched_entity(se) {
if (SCHED_WARN_ON(!se->on_rq))
return;
if (se_is_idle(se))
return;
cfs_rq_of(se)->next = se;
}
}
/*
* Preempt the current task with a newly woken task if needed:
*/
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
int next_buddy_marked = 0;
int cse_is_idle, pse_is_idle;
if (unlikely(se == pse))
return;
/*
* This is possible from callers such as attach_tasks(), in which we
* unconditionally check_preempt_curr() after an enqueue (which may have
* led to a throttle). This both saves work and prevents false
* next-buddy nomination below.
*/
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);
next_buddy_marked = 1;
}
/*
* We can come here with TIF_NEED_RESCHED already set from new task
* wake up path.
*
* Note: this also catches the edge-case of curr being in a throttled
* group (e.g. via set_curr_task), since update_curr() (in the
* enqueue of curr) will have resulted in resched being set. This
* prevents us from potentially nominating it as a false LAST_BUDDY
* below.
*/
if (test_tsk_need_resched(curr))
return;
/* Idle tasks are by definition preempted by non-idle tasks. */
if (unlikely(task_has_idle_policy(curr)) &&
likely(!task_has_idle_policy(p)))
goto preempt;
/*
* Batch and idle tasks do not preempt non-idle tasks (their preemption
* is driven by the tick):
*/
if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
return;
find_matching_se(&se, &pse);
WARN_ON_ONCE(!pse);
cse_is_idle = se_is_idle(se);
pse_is_idle = se_is_idle(pse);
/*
* Preempt an idle group in favor of a non-idle group (and don't preempt
* in the inverse case).
*/
if (cse_is_idle && !pse_is_idle)
goto preempt;
if (cse_is_idle != pse_is_idle)
return;
cfs_rq = cfs_rq_of(se);
update_curr(cfs_rq);
/*
* XXX pick_eevdf(cfs_rq) != se ?
*/
if (pick_eevdf(cfs_rq) == pse)
goto preempt;
return;
preempt:
resched_curr(rq);
}
#ifdef CONFIG_SMP
static struct task_struct *pick_task_fair(struct rq *rq)
{
struct sched_entity *se;
struct cfs_rq *cfs_rq;
again:
cfs_rq = &rq->cfs;
if (!cfs_rq->nr_running)
return NULL;
do {
struct sched_entity *curr = cfs_rq->curr;
/* When we pick for a remote RQ, we'll not have done put_prev_entity() */
if (curr) {
if (curr->on_rq)
update_curr(cfs_rq);
else
curr = NULL;
if (unlikely(check_cfs_rq_runtime(cfs_rq)))
goto again;
}
se = pick_next_entity(cfs_rq, curr);
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
return task_of(se);
}
#endif
struct task_struct *
pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
struct cfs_rq *cfs_rq = &rq->cfs;
struct sched_entity *se;
struct task_struct *p;
int new_tasks;
again:
if (!sched_fair_runnable(rq))
goto idle;
#ifdef CONFIG_FAIR_GROUP_SCHED
if (!prev || prev->sched_class != &fair_sched_class)
goto simple;
/*
* Because of the set_next_buddy() in dequeue_task_fair() it is rather
* likely that a next task is from the same cgroup as the current.
*
* Therefore attempt to avoid putting and setting the entire cgroup
* hierarchy, only change the part that actually changes.
*/
do {
struct sched_entity *curr = cfs_rq->curr;
/*
* Since we got here without doing put_prev_entity() we also
* have to consider cfs_rq->curr. If it is still a runnable
* entity, update_curr() will update its vruntime, otherwise
* forget we've ever seen it.
*/
if (curr) {
if (curr->on_rq)
update_curr(cfs_rq);
else
curr = NULL;
/*
* This call to check_cfs_rq_runtime() will do the
* throttle and dequeue its entity in the parent(s).
* Therefore the nr_running test will indeed
* be correct.
*/
if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
cfs_rq = &rq->cfs;
if (!cfs_rq->nr_running)
goto idle;
goto simple;
}
}
se = pick_next_entity(cfs_rq, curr);
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
p = task_of(se);
/*
* Since we haven't yet done put_prev_entity and if the selected task
* is a different task than we started out with, try and touch the
* least amount of cfs_rqs.
*/
if (prev != p) {
struct sched_entity *pse = &prev->se;
while (!(cfs_rq = is_same_group(se, pse))) {
int se_depth = se->depth;
int pse_depth = pse->depth;
if (se_depth <= pse_depth) {
put_prev_entity(cfs_rq_of(pse), pse);
pse = parent_entity(pse);
}
if (se_depth >= pse_depth) {
set_next_entity(cfs_rq_of(se), se);
se = parent_entity(se);
}
}
put_prev_entity(cfs_rq, pse);
set_next_entity(cfs_rq, se);
}
goto done;
simple:
#endif
if (prev)
put_prev_task(rq, prev);
do {
se = pick_next_entity(cfs_rq, NULL);
set_next_entity(cfs_rq, se);
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
p = task_of(se);
done: __maybe_unused;
#ifdef CONFIG_SMP
/*
* Move the next running task to the front of
* the list, so our cfs_tasks list becomes an
* MRU-ordered one.
*/
list_move(&p->se.group_node, &rq->cfs_tasks);
#endif
if (hrtick_enabled_fair(rq))
hrtick_start_fair(rq, p);
update_misfit_status(p, rq);
sched_fair_update_stop_tick(rq, p);
return p;
idle:
if (!rf)
return NULL;
new_tasks = newidle_balance(rq, rf);
/*
* Because newidle_balance() releases (and re-acquires) rq->lock, it is
* possible for any higher priority task to appear. In that case we
* must re-start the pick_next_entity() loop.
*/
if (new_tasks < 0)
return RETRY_TASK;
if (new_tasks > 0)
goto again;
/*
* rq is about to be idle, check if we need to update the
* lost_idle_time of clock_pelt
*/
update_idle_rq_clock_pelt(rq);
return NULL;
}
static struct task_struct *__pick_next_task_fair(struct rq *rq)
{
return pick_next_task_fair(rq, NULL, NULL);
}
/*
* Account for a descheduled task:
*/
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
struct sched_entity *se = &prev->se;
struct cfs_rq *cfs_rq;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
put_prev_entity(cfs_rq, se);
}
}
/*
* sched_yield() is very simple
*/
static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
struct sched_entity *se = &curr->se;
/*
* Are we the only task in the tree?
*/
if (unlikely(rq->nr_running == 1))
return;
clear_buddies(cfs_rq, se);
update_rq_clock(rq);
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
* and double the fastpath cost.
*/
rq_clock_skip_update(rq);
se->deadline += calc_delta_fair(se->slice, se);
}
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
{
struct sched_entity *se = &p->se;
/* throttled hierarchies are not runnable */
if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
return false;
/* Tell the scheduler that we'd really like pse to run next. */
set_next_buddy(se);
yield_task_fair(rq);
return true;
}
#ifdef CONFIG_SMP
/**************************************************
* Fair scheduling class load-balancing methods.
*
* BASICS
*
* The purpose of load-balancing is to achieve the same basic fairness the
* per-CPU scheduler provides, namely provide a proportional amount of compute
* time to each task. This is expressed in the following equation:
*
* W_i,n/P_i == W_j,n/P_j for all i,j (1)
*
* Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
* W_i,0 is defined as:
*
* W_i,0 = \Sum_j w_i,j (2)
*
* Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
* is derived from the nice value as per sched_prio_to_weight[].
*
* The weight average is an exponential decay average of the instantaneous
* weight:
*
* W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
*
* C_i is the compute capacity of CPU i, typically it is the
* fraction of 'recent' time available for SCHED_OTHER task execution. But it
* can also include other factors [XXX].
*
* To achieve this balance we define a measure of imbalance which follows
* directly from (1):
*
* imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
*
* We then move tasks around to minimize the imbalance. In the continuous
* function space it is obvious this converges, in the discrete case we get
* a few fun cases generally called infeasible weight scenarios.
*
* [XXX expand on:
* - infeasible weights;
* - local vs global optima in the discrete case. ]
*
*
* SCHED DOMAINS
*
* In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
* for all i,j solution, we create a tree of CPUs that follows the hardware
* topology where each level pairs two lower groups (or better). This results
* in O(log n) layers. Furthermore we reduce the number of CPUs going up the
* tree to only the first of the previous level and we decrease the frequency
* of load-balance at each level inv. proportional to the number of CPUs in
* the groups.
*
* This yields:
*
*   log_2(n)
*    \Sum   (1/2^i) * (n/2^i) * 2^i  =  O(n)                    (5)
*    i = 0
*
* where, per level i: 1/2^i is the load-balance frequency, n/2^i is the
* number of CPUs doing load-balance, 2^i is the size of each group, and
* the sum runs over all levels.
*
* Coupled with a limit on how many tasks we can migrate every balance pass,
* this makes (5) the runtime complexity of the balancer.
*
* An important property here is that each CPU is still (indirectly) connected
* to every other CPU in at most O(log n) steps:
*
* The adjacency matrix of the resulting graph is given by:
*
* A_i,j = \Union_{k = 0}^{log_2 n} (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)   (6)
*
* And you'll find that:
*
* A^(log_2 n)_i,j != 0 for all i,j (7)
*
* Showing there's indeed a path between every CPU in at most O(log n) steps.
* The task movement gives a factor of O(m), giving a convergence complexity
* of:
*
* O(nm log n), n := nr_cpus, m := nr_tasks (8)
*
*
* WORK CONSERVING
*
* In order to avoid CPUs going idle while there's still work to do, new idle
* balancing is more aggressive and has the newly idle CPU iterate up the domain
* tree itself instead of relying on other CPUs to bring it work.
*
* This adds some complexity to both (5) and (8) but it reduces the total idle
* time.
*
* [XXX more?]
*
*
* CGROUPS
*
* Cgroups make a horror show out of (2), instead of a simple sum we get:
*
* W_i,0 = \Sum_j \Prod_k w_k * (s_k,i / S_k)                    (9)
*
* Where
*
* s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
*
* w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
*
* The big problem is S_k, it's a global sum needed to compute a local (W_i)
* property.
*
* [XXX write more on how we solve this.. _after_ merging pjt's patches that
* rewrite all of this once again.]
*/
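/*
 * A worked instance of (3), with hypothetical numbers: for n = 2 the
 * decay weights are 3/4 and 1/4, so a CPU whose weight average is
 * W_i,n = 2048 while its instantaneous weight is W_i,0 = 1024 moves to
 * W'_i,n = 3/4 * 2048 + 1/4 * 1024 = 1792.
 */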
static unsigned long __read_mostly max_load_balance_interval = HZ/10;
enum fbq_type { regular, remote, all };
/*
* 'group_type' describes the group of CPUs at the moment of load balancing.
*
* The enum is ordered by pulling priority, with the group with lowest priority
* first so the group_type can simply be compared when selecting the busiest
* group. See update_sd_pick_busiest().
*/
enum group_type {
/* The group has spare capacity that can be used to run more tasks. */
group_has_spare = 0,
/*
* The group is fully used and the tasks don't compete for more CPU
* cycles. Nevertheless, some tasks might wait before running.
*/
group_fully_busy,
/*
* One task doesn't fit the CPU's capacity and must be migrated to a
* more powerful CPU.
*/
group_misfit_task,
/*
* Balance an SMT group that's fully busy. Can benefit from migrating
* a task on an SMT CPU with a busy sibling to another CPU on an idle core.
*/
group_smt_balance,
/*
* SD_ASYM_PACKING only: One local CPU with higher capacity is available,
* and the task should be migrated to it instead of running on the
* current CPU.
*/
group_asym_packing,
/*
* The tasks' affinity constraints previously prevented the scheduler
* from balancing the load across the system.
*/
group_imbalanced,
/*
* The CPU is overloaded and can't provide expected CPU cycles to all
* tasks.
*/
group_overloaded
};
enum migration_type {
migrate_load = 0,
migrate_util,
migrate_task,
migrate_misfit
};
#define LBF_ALL_PINNED 0x01
#define LBF_NEED_BREAK 0x02
#define LBF_DST_PINNED 0x04
#define LBF_SOME_PINNED 0x08
#define LBF_ACTIVE_LB 0x10
struct lb_env {
struct sched_domain *sd;
struct rq *src_rq;
int src_cpu;
int dst_cpu;
struct rq *dst_rq;
struct cpumask *dst_grpmask;
int new_dst_cpu;
enum cpu_idle_type idle;
long imbalance;
/* The set of CPUs under consideration for load-balancing */
struct cpumask *cpus;
unsigned int flags;
unsigned int loop;
unsigned int loop_break;
unsigned int loop_max;
enum fbq_type fbq_type;
enum migration_type migration_type;
struct list_head tasks;
};
/*
* Is this task likely cache-hot:
*/
static int task_hot(struct task_struct *p, struct lb_env *env)
{
s64 delta;
lockdep_assert_rq_held(env->src_rq);
if (p->sched_class != &fair_sched_class)
return 0;
if (unlikely(task_has_idle_policy(p)))
return 0;
/* SMT siblings share cache */
if (env->sd->flags & SD_SHARE_CPUCAPACITY)
return 0;
/*
* Buddy candidates are cache hot:
*/
if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
(&p->se == cfs_rq_of(&p->se)->next))
return 1;
if (sysctl_sched_migration_cost == -1)
return 1;
/*
* Don't migrate the task if its cookie does not match
* the destination CPU's core cookie.
*/
if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
return 1;
if (sysctl_sched_migration_cost == 0)
return 0;
delta = rq_clock_task(env->src_rq) - p->se.exec_start;
return delta < (s64)sysctl_sched_migration_cost;
}
#ifdef CONFIG_NUMA_BALANCING
/*
* Returns 1, if task migration degrades locality
* Returns 0, if task migration improves locality, i.e. migration is preferred.
* Returns -1, if task migration is not affected by locality.
*/
static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
{
struct numa_group *numa_group = rcu_dereference(p->numa_group);
unsigned long src_weight, dst_weight;
int src_nid, dst_nid, dist;
if (!static_branch_likely(&sched_numa_balancing))
return -1;
if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
return -1;
src_nid = cpu_to_node(env->src_cpu);
dst_nid = cpu_to_node(env->dst_cpu);
if (src_nid == dst_nid)
return -1;
/* Migrating away from the preferred node is always bad. */
if (src_nid == p->numa_preferred_nid) {
if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
return 1;
else
return -1;
}
/* Encourage migration to the preferred node. */
if (dst_nid == p->numa_preferred_nid)
return 0;
/* Leaving a core idle is often worse than degrading locality. */
if (env->idle == CPU_IDLE)
return -1;
dist = node_distance(src_nid, dst_nid);
if (numa_group) {
src_weight = group_weight(p, src_nid, dist);
dst_weight = group_weight(p, dst_nid, dist);
} else {
src_weight = task_weight(p, src_nid, dist);
dst_weight = task_weight(p, dst_nid, dist);
}
return dst_weight < src_weight;
}
#else
static inline int migrate_degrades_locality(struct task_struct *p,
struct lb_env *env)
{
return -1;
}
#endif
/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
*/
static
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
int tsk_cache_hot;
lockdep_assert_rq_held(env->src_rq);
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
* 2) cannot be migrated to this CPU due to cpus_ptr, or
* 3) running (obviously), or
* 4) are cache-hot on their current CPU.
*/
if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
return 0;
/* Disregard pcpu kthreads; they are where they need to be. */
if (kthread_is_per_cpu(p))
return 0;
if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
int cpu;
schedstat_inc(p->stats.nr_failed_migrations_affine);
env->flags |= LBF_SOME_PINNED;
/*
* Remember if this task can be migrated to any other CPU in
* our sched_group. We may want to revisit it if we couldn't
* meet load balance goals by pulling other tasks on src_cpu.
*
* Avoid computing new_dst_cpu
* - for NEWLY_IDLE
* - if we have already computed one in current iteration
* - if it's an active balance
*/
if (env->idle == CPU_NEWLY_IDLE ||
env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB))
return 0;
/* Prevent re-selecting dst_cpu via env's CPUs: */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
}
}
return 0;
}
/* Record that we found at least one task that could run on dst_cpu */
env->flags &= ~LBF_ALL_PINNED;
if (task_on_cpu(env->src_rq, p)) {
schedstat_inc(p->stats.nr_failed_migrations_running);
return 0;
}
/*
* Aggressive migration if:
* 1) active balance
* 2) destination numa is preferred
* 3) task is cache cold, or
* 4) too many balance attempts have failed.
*/
if (env->flags & LBF_ACTIVE_LB)
return 1;
tsk_cache_hot = migrate_degrades_locality(p, env);
if (tsk_cache_hot == -1)
tsk_cache_hot = task_hot(p, env);
if (tsk_cache_hot <= 0 ||
env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
if (tsk_cache_hot == 1) {
schedstat_inc(env->sd->lb_hot_gained[env->idle]);
schedstat_inc(p->stats.nr_forced_migrations);
}
return 1;
}
schedstat_inc(p->stats.nr_failed_migrations_hot);
return 0;
}
/*
* detach_task() -- detach the task for the migration specified in env
*/
static void detach_task(struct task_struct *p, struct lb_env *env)
{
lockdep_assert_rq_held(env->src_rq);
deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
set_task_cpu(p, env->dst_cpu);
}
/*
* detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
* part of active balancing operations within "domain".
*
* Returns a task if successful and NULL otherwise.
*/
static struct task_struct *detach_one_task(struct lb_env *env)
{
struct task_struct *p;
lockdep_assert_rq_held(env->src_rq);
list_for_each_entry_reverse(p,
&env->src_rq->cfs_tasks, se.group_node) {
if (!can_migrate_task(p, env))
continue;
detach_task(p, env);
/*
* Right now, this is only the second place where
* lb_gained[env->idle] is updated (other is detach_tasks)
* so we can safely collect stats here rather than
* inside detach_tasks().
*/
schedstat_inc(env->sd->lb_gained[env->idle]);
return p;
}
return NULL;
}
/*
* detach_tasks() -- tries to detach up to imbalance load/util/tasks from
* busiest_rq, as part of a balancing operation within domain "sd".
*
* Returns number of detached tasks if successful and 0 otherwise.
*/
static int detach_tasks(struct lb_env *env)
{
struct list_head *tasks = &env->src_rq->cfs_tasks;
unsigned long util, load;
struct task_struct *p;
int detached = 0;
lockdep_assert_rq_held(env->src_rq);
/*
* Source run queue has been emptied by another CPU, clear
* LBF_ALL_PINNED flag as we will not test any task.
*/
if (env->src_rq->nr_running <= 1) {
env->flags &= ~LBF_ALL_PINNED;
return 0;
}
if (env->imbalance <= 0)
return 0;
while (!list_empty(tasks)) {
/*
* We don't want to steal all, otherwise we may be treated likewise,
* which could at worst lead to a livelock crash.
*/
if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
break;
env->loop++;
/*
* We've more or less seen every task there is, call it quits
* unless we haven't found any movable task yet.
*/
if (env->loop > env->loop_max &&
!(env->flags & LBF_ALL_PINNED))
break;
/* take a breather every nr_migrate tasks */
if (env->loop > env->loop_break) {
env->loop_break += SCHED_NR_MIGRATE_BREAK;
env->flags |= LBF_NEED_BREAK;
break;
}
p = list_last_entry(tasks, struct task_struct, se.group_node);
if (!can_migrate_task(p, env))
goto next;
switch (env->migration_type) {
case migrate_load:
/*
* Depending on the number of CPUs and tasks and the
* cgroup hierarchy, task_h_load() can return a null
* value. Make sure that env->imbalance decreases
* otherwise detach_tasks() will stop only after
* detaching up to loop_max tasks.
*/
load = max_t(unsigned long, task_h_load(p), 1);
if (sched_feat(LB_MIN) &&
load < 16 && !env->sd->nr_balance_failed)
goto next;
/*
* Make sure that we don't migrate too much load.
* Nevertheless, let's relax the constraint if the
* scheduler fails to find a good waiting task to
* migrate.
*/
if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
goto next;
env->imbalance -= load;
break;
case migrate_util:
util = task_util_est(p);
if (util > env->imbalance)
goto next;
env->imbalance -= util;
break;
case migrate_task:
env->imbalance--;
break;
case migrate_misfit:
/* This is not a misfit task */
if (task_fits_cpu(p, env->src_cpu))
goto next;
env->imbalance = 0;
break;
}
detach_task(p, env);
list_add(&p->se.group_node, &env->tasks);
detached++;
#ifdef CONFIG_PREEMPTION
/*
* NEWIDLE balancing is a source of latency, so preemptible
* kernels will stop after the first task is detached to minimize
* the critical section.
*/
if (env->idle == CPU_NEWLY_IDLE)
break;
#endif
/*
* We only want to steal up to the prescribed amount of
* load/util/tasks.
*/
if (env->imbalance <= 0)
break;
continue;
next:
list_move(&p->se.group_node, tasks);
}
/*
* Right now, this is one of only two places we collect this stat
* so we can safely collect detach_one_task() stats here rather
* than inside detach_one_task().
*/
schedstat_add(env->sd->lb_gained[env->idle], detached);
return detached;
}
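/*
 * Example of the migrate_load relaxation above, with made-up numbers:
 * for a task with task_h_load() = 300 against env->imbalance = 200,
 * shr_bound(300, 0) = 300 > 200 and the task is skipped on the first
 * attempt; after one failed balance round (nr_balance_failed = 1),
 * shr_bound(300, 1) = 150 <= 200, so the task is detached and
 * env->imbalance drops by the full 300.
 */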
/*
* attach_task() -- attach the task detached by detach_task() to its new rq.
*/
static void attach_task(struct rq *rq, struct task_struct *p)
{
lockdep_assert_rq_held(rq);
WARN_ON_ONCE(task_rq(p) != rq);
activate_task(rq, p, ENQUEUE_NOCLOCK);
check_preempt_curr(rq, p, 0);
}
/*
* attach_one_task() -- attaches the task returned from detach_one_task() to
* its new rq.
*/
static void attach_one_task(struct rq *rq, struct task_struct *p)
{
struct rq_flags rf;
rq_lock(rq, &rf);
update_rq_clock(rq);
attach_task(rq, p);
rq_unlock(rq, &rf);
}
/*
* attach_tasks() -- attaches all tasks detached by detach_tasks() to their
* new rq.
*/
static void attach_tasks(struct lb_env *env)
{
struct list_head *tasks = &env->tasks;
struct task_struct *p;
struct rq_flags rf;
rq_lock(env->dst_rq, &rf);
update_rq_clock(env->dst_rq);
while (!list_empty(tasks)) {
p = list_first_entry(tasks, struct task_struct, se.group_node);
list_del_init(&p->se.group_node);
attach_task(env->dst_rq, p);
}
rq_unlock(env->dst_rq, &rf);
}
#ifdef CONFIG_NO_HZ_COMMON
static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
{
if (cfs_rq->avg.load_avg)
return true;
if (cfs_rq->avg.util_avg)
return true;
return false;
}
static inline bool others_have_blocked(struct rq *rq)
{
if (READ_ONCE(rq->avg_rt.util_avg))
return true;
if (READ_ONCE(rq->avg_dl.util_avg))
return true;
if (thermal_load_avg(rq))
return true;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
if (READ_ONCE(rq->avg_irq.util_avg))
return true;
#endif
return false;
}
static inline void update_blocked_load_tick(struct rq *rq)
{
WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
}
static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
{
if (!has_blocked)
rq->has_blocked_load = 0;
}
#else
static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
static inline bool others_have_blocked(struct rq *rq) { return false; }
static inline void update_blocked_load_tick(struct rq *rq) {}
static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
#endif
static bool __update_blocked_others(struct rq *rq, bool *done)
{
const struct sched_class *curr_class;
u64 now = rq_clock_pelt(rq);
unsigned long thermal_pressure;
bool decayed;
/*
* update_load_avg() can call cpufreq_update_util(). Make sure that RT,
* DL and IRQ signals have been updated before updating CFS.
*/
curr_class = rq->curr->sched_class;
thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) |
update_irq_load_avg(rq, 0);
if (others_have_blocked(rq))
*done = false;
return decayed;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static bool __update_blocked_fair(struct rq *rq, bool *done)
{
struct cfs_rq *cfs_rq, *pos;
bool decayed = false;
int cpu = cpu_of(rq);
/*
* Iterates the task_group tree in a bottom up fashion, see
* list_add_leaf_cfs_rq() for details.
*/
for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
struct sched_entity *se;
if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
update_tg_load_avg(cfs_rq);
if (cfs_rq->nr_running == 0)
update_idle_cfs_rq_clock_pelt(cfs_rq);
if (cfs_rq == &rq->cfs)
decayed = true;
}
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se))
update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
/*
* There can be a lot of idle CPU cgroups. Don't let fully
* decayed cfs_rqs linger on the list.
*/
if (cfs_rq_is_decayed(cfs_rq))
list_del_leaf_cfs_rq(cfs_rq);
/* Don't need periodic decay once load/util_avg are null */
if (cfs_rq_has_blocked(cfs_rq))
*done = false;
}
return decayed;
}
/*
* Compute the hierarchical load factor for cfs_rq and all its ascendants.
* This needs to be done in a top-down fashion because the load of a child
* group is a fraction of its parents load.
*/
static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
unsigned long now = jiffies;
unsigned long load;
if (cfs_rq->last_h_load_update == now)
return;
WRITE_ONCE(cfs_rq->h_load_next, NULL);
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
WRITE_ONCE(cfs_rq->h_load_next, se);
if (cfs_rq->last_h_load_update == now)
break;
}
if (!se) {
cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
cfs_rq->last_h_load_update = now;
}
while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
load = cfs_rq->h_load;
load = div64_ul(load * se->avg.load_avg,
cfs_rq_load_avg(cfs_rq) + 1);
cfs_rq = group_cfs_rq(se);
cfs_rq->h_load = load;
cfs_rq->last_h_load_update = now;
}
}
static unsigned long task_h_load(struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
update_cfs_rq_h_load(cfs_rq);
return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
cfs_rq_load_avg(cfs_rq) + 1);
}
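/*
 * Worked example (illustrative numbers, not from the source): consider a
 * root cfs_rq with load_avg = 2048 holding one group entity whose
 * se->avg.load_avg = 512. The group's h_load becomes
 * 2048 * 512 / (2048 + 1) ~= 511. A task with load_avg = 256 queued on
 * that group's cfs_rq (load_avg = 512) then gets
 * task_h_load = 256 * 511 / (512 + 1) ~= 255, i.e. about
 * 1/2 * 1/4 = 1/8 of the root load: the task's share of its group times
 * the group's share of the root.
 */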
#else
static bool __update_blocked_fair(struct rq *rq, bool *done)
{
struct cfs_rq *cfs_rq = &rq->cfs;
bool decayed;
decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
if (cfs_rq_has_blocked(cfs_rq))
*done = false;
return decayed;
}
static unsigned long task_h_load(struct task_struct *p)
{
return p->se.avg.load_avg;
}
#endif
static void update_blocked_averages(int cpu)
{
bool decayed = false, done = true;
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
rq_lock_irqsave(rq, &rf);
update_blocked_load_tick(rq);
update_rq_clock(rq);
decayed |= __update_blocked_others(rq, &done);
decayed |= __update_blocked_fair(rq, &done);
update_blocked_load_status(rq, !done);
if (decayed)
cpufreq_update_util(rq, 0);
rq_unlock_irqrestore(rq, &rf);
}
/********** Helpers for find_busiest_group ************************/
/*
* sg_lb_stats - stats of a sched_group required for load_balancing
*/
struct sg_lb_stats {
unsigned long avg_load; /* Avg load across the CPUs of the group */
unsigned long group_load; /* Total load over the CPUs of the group */
unsigned long group_capacity;
unsigned long group_util; /* Total utilization over the CPUs of the group */
unsigned long group_runnable; /* Total runnable time over the CPUs of the group */
unsigned int sum_nr_running; /* Nr of tasks running in the group */
unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
unsigned int idle_cpus;
unsigned int group_weight;
enum group_type group_type;
unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
unsigned int group_smt_balance; /* Task on busy SMT be moved */
unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
unsigned int nr_preferred_running;
#endif
};
/*
* sd_lb_stats - Structure to store the statistics of a sched_domain
* during load balancing.
*/
struct sd_lb_stats {
struct sched_group *busiest; /* Busiest group in this sd */
struct sched_group *local; /* Local group in this sd */
unsigned long total_load; /* Total load of all groups in sd */
unsigned long total_capacity; /* Total capacity of all groups in sd */
unsigned long avg_load; /* Average load across all groups in sd */
unsigned int prefer_sibling; /* tasks should go to sibling first */
struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
struct sg_lb_stats local_stat; /* Statistics of the local group */
};
static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
{
/*
* Skimp on the clearing to avoid duplicate work. We can avoid clearing
* local_stat because update_sg_lb_stats() does a full clear/assignment.
* We must however set busiest_stat::group_type and
* busiest_stat::idle_cpus to the worst busiest group because
* update_sd_pick_busiest() reads these before assignment.
*/
*sds = (struct sd_lb_stats){
.busiest = NULL,
.local = NULL,
.total_load = 0UL,
.total_capacity = 0UL,
.busiest_stat = {
.idle_cpus = UINT_MAX,
.group_type = group_has_spare,
},
};
}
static unsigned long scale_rt_capacity(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long max = arch_scale_cpu_capacity(cpu);
unsigned long used, free;
unsigned long irq;
irq = cpu_util_irq(rq);
if (unlikely(irq >= max))
return 1;
/*
* avg_rt.util_avg and avg_dl.util_avg track binary signals
* (running and not running) with weights 0 and 1024 respectively.
* avg_thermal.load_avg tracks thermal pressure and the weighted
* average uses the actual delta of the max capacity (load).
*/
used = READ_ONCE(rq->avg_rt.util_avg);
used += READ_ONCE(rq->avg_dl.util_avg);
used += thermal_load_avg(rq);
if (unlikely(used >= max))
return 1;
free = max - used;
return scale_irq_capacity(free, irq, max);
}
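/*
 * Worked example (illustrative, assuming scale_irq_capacity() scales by
 * the non-IRQ fraction, free * (max - irq) / max): with
 * arch_scale_cpu_capacity() = 1024, avg_rt.util_avg = 128,
 * avg_dl.util_avg = 0, thermal_load_avg() = 64 and cpu_util_irq() = 102,
 * used = 192 and free = 832, leaving 832 * 922 / 1024 ~= 749 of the 1024
 * capacity units for CFS tasks.
 */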
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
unsigned long capacity = scale_rt_capacity(cpu);
struct sched_group *sdg = sd->groups;
cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
if (!capacity)
capacity = 1;
cpu_rq(cpu)->cpu_capacity = capacity;
trace_sched_cpu_capacity_tp(cpu_rq(cpu));
sdg->sgc->capacity = capacity;
sdg->sgc->min_capacity = capacity;
sdg->sgc->max_capacity = capacity;
}
void update_group_capacity(struct sched_domain *sd, int cpu)
{
struct sched_domain *child = sd->child;
struct sched_group *group, *sdg = sd->groups;
unsigned long capacity, min_capacity, max_capacity;
unsigned long interval;
interval = msecs_to_jiffies(sd->balance_interval);
interval = clamp(interval, 1UL, max_load_balance_interval);
sdg->sgc->next_update = jiffies + interval;
if (!child) {
update_cpu_capacity(sd, cpu);
return;
}
capacity = 0;
min_capacity = ULONG_MAX;
max_capacity = 0;
if (child->flags & SD_OVERLAP) {
/*
* SD_OVERLAP domains cannot assume that child groups
* span the current group.
*/
for_each_cpu(cpu, sched_group_span(sdg)) {
unsigned long cpu_cap = capacity_of(cpu);
capacity += cpu_cap;
min_capacity = min(cpu_cap, min_capacity);
max_capacity = max(cpu_cap, max_capacity);
}
} else {
/*
* !SD_OVERLAP domains can assume that child groups
* span the current group.
*/
group = child->groups;
do {
struct sched_group_capacity *sgc = group->sgc;
capacity += sgc->capacity;
min_capacity = min(sgc->min_capacity, min_capacity);
max_capacity = max(sgc->max_capacity, max_capacity);
group = group->next;
} while (group != child->groups);
}
sdg->sgc->capacity = capacity;
sdg->sgc->min_capacity = min_capacity;
sdg->sgc->max_capacity = max_capacity;
}
/*
* Check whether the capacity of the rq has been noticeably reduced by side
* activity. The imbalance_pct is used for the threshold.
* Return true if the capacity is reduced.
*/
static inline int
check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
{
return ((rq->cpu_capacity * sd->imbalance_pct) <
(rq->cpu_capacity_orig * 100));
}
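/*
 * Example (illustrative): with imbalance_pct = 117 and
 * cpu_capacity_orig = 1024, this returns true once cpu_capacity drops
 * below 1024 * 100 / 117 ~= 875, i.e. when more than about 15% of the
 * original capacity is eaten by RT/DL/IRQ/thermal pressure.
 */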
/*
* Check whether a rq has a misfit task and if it looks like we can actually
* help that task: we can migrate the task to a CPU of higher capacity, or
* the task's current CPU is heavily pressured.
*/
static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
{
return rq->misfit_task_load &&
(rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
check_cpu_capacity(rq, sd));
}
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
* groups is inadequate due to ->cpus_ptr constraints.
*
* Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
* cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
* Something like:
*
* { 0 1 2 3 } { 4 5 6 7 }
* * * * *
*
* If we were to balance group-wise we'd place two tasks in the first group and
* two tasks in the second group. Clearly this is undesired as it will overload
* cpu 3 and leave one of the CPUs in the second group unused.
*
* The current solution to this issue is detecting the skew in the first group
* by noticing the lower domain failed to reach balance and had difficulty
* moving tasks due to affinity constraints.
*
* When this is so detected; this group becomes a candidate for busiest; see
* update_sd_pick_busiest(). And calculate_imbalance() and
* find_busiest_group() avoid some of the usual balance conditions to allow it
* to create an effective group imbalance.
*
* This is a somewhat tricky proposition since the next run might not find the
* group imbalance and decide the groups need to be balanced again. A most
* subtle and fragile situation.
*/
static inline int sg_imbalanced(struct sched_group *group)
{
return group->sgc->imbalance;
}
/*
* group_has_capacity returns true if the group has spare capacity that could
* be used by some tasks.
* We consider that a group has spare capacity if the number of tasks is
* smaller than the number of CPUs or if the utilization is lower than the
* available capacity for CFS tasks.
* For the latter, we use a threshold to stabilize the state, to take into
* account the variance of the tasks' load and to return true only if the
* available capacity is meaningful for the load balancer.
* As an example, an available capacity of 1% can appear but it doesn't
* bring any benefit to the load balance.
*/
static inline bool
group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
{
if (sgs->sum_nr_running < sgs->group_weight)
return true;
if ((sgs->group_capacity * imbalance_pct) <
(sgs->group_runnable * 100))
return false;
if ((sgs->group_capacity * 100) >
(sgs->group_util * imbalance_pct))
return true;
return false;
}
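/*
 * Worked example (illustrative): with imbalance_pct = 117 and
 * group_capacity = 1024, a fully populated group still reports spare
 * capacity as long as group_runnable stays below 1024 * 117 / 100 ~= 1198
 * and group_util stays below 1024 * 100 / 117 ~= 875, i.e. roughly 85%
 * of the group's capacity.
 */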
/*
* group_is_overloaded returns true if the group has more tasks than it can
* handle.
* group_is_overloaded is not equal to !group_has_capacity because a group
* with exactly the right number of tasks has no spare capacity left but is
* not overloaded, so both group_has_capacity and group_is_overloaded
* return false.
*/
static inline bool
group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
{
if (sgs->sum_nr_running <= sgs->group_weight)
return false;
if ((sgs->group_capacity * 100) <
(sgs->group_util * imbalance_pct))
return true;
if ((sgs->group_capacity * imbalance_pct) <
(sgs->group_runnable * 100))
return true;
return false;
}
static inline enum
group_type group_classify(unsigned int imbalance_pct,
struct sched_group *group,
struct sg_lb_stats *sgs)
{
if (group_is_overloaded(imbalance_pct, sgs))
return group_overloaded;
if (sg_imbalanced(group))
return group_imbalanced;
if (sgs->group_asym_packing)
return group_asym_packing;
if (sgs->group_smt_balance)
return group_smt_balance;
if (sgs->group_misfit_task_load)
return group_misfit_task;
if (!group_has_capacity(imbalance_pct, sgs))
return group_fully_busy;
return group_has_spare;
}
/**
* sched_use_asym_prio - Check whether asym_packing priority must be used
* @sd: The scheduling domain of the load balancing
* @cpu: A CPU
*
* Always use CPU priority when balancing load between SMT siblings. When
* balancing load between cores, it is not sufficient that @cpu is idle. Only
* use CPU priority if the whole core is idle.
*
* Returns: True if the priority of @cpu must be followed. False otherwise.
*/
static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
{
if (!sched_smt_active())
return true;
return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
}
/**
* sched_asym - Check if the destination CPU can do asym_packing load balance
* @env: The load balancing environment
* @sds: Load-balancing data with statistics of the local group
* @sgs: Load-balancing statistics of the candidate busiest group
* @group: The candidate busiest group
*
* @env::dst_cpu can do asym_packing if it has higher priority than the
* preferred CPU of @group.
*
* SMT is a special case. If we are balancing load between cores, @env::dst_cpu
* can do asym_packing balance only if all its SMT siblings are idle. Also, it
* can only do it if @group is an SMT group and has exactly one busy CPU. Larger
* imbalances in the number of CPUs are dealt with in find_busiest_group().
*
* If we are balancing load within an SMT core, or at DIE domain level, always
* proceed.
*
* Return: true if @env::dst_cpu can do asym_packing load balance. False
* otherwise.
*/
static inline bool
sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs,
struct sched_group *group)
{
/* Ensure that the whole local core is idle, if applicable. */
if (!sched_use_asym_prio(env->sd, env->dst_cpu))
return false;
/*
* CPU priorities do not make sense for SMT cores with more than one
* busy sibling.
*/
if (group->flags & SD_SHARE_CPUCAPACITY) {
if (sgs->group_weight - sgs->idle_cpus != 1)
return false;
}
return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
}
/* One group has more than one SMT CPU while the other group does not */
static inline bool smt_vs_nonsmt_groups(struct sched_group *sg1,
struct sched_group *sg2)
{
if (!sg1 || !sg2)
return false;
return (sg1->flags & SD_SHARE_CPUCAPACITY) !=
(sg2->flags & SD_SHARE_CPUCAPACITY);
}
static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
struct sched_group *group)
{
if (env->idle == CPU_NOT_IDLE)
return false;
/*
* For SMT source group, it is better to move a task
* to a CPU that doesn't have multiple tasks sharing its CPU capacity.
* Note that if a group has a single SMT, SD_SHARE_CPUCAPACITY
* will not be on.
*/
if (group->flags & SD_SHARE_CPUCAPACITY &&
sgs->sum_h_nr_running > 1)
return true;
return false;
}
static inline long sibling_imbalance(struct lb_env *env,
struct sd_lb_stats *sds,
struct sg_lb_stats *busiest,
struct sg_lb_stats *local)
{
int ncores_busiest, ncores_local;
long imbalance;
if (env->idle == CPU_NOT_IDLE || !busiest->sum_nr_running)
return 0;
ncores_busiest = sds->busiest->cores;
ncores_local = sds->local->cores;
if (ncores_busiest == ncores_local) {
imbalance = busiest->sum_nr_running;
lsub_positive(&imbalance, local->sum_nr_running);
return imbalance;
}
/* Balance such that nr_running/ncores ratio are same on both groups */
imbalance = ncores_local * busiest->sum_nr_running;
lsub_positive(&imbalance, ncores_busiest * local->sum_nr_running);
/* Normalize imbalance and do rounding on normalization */
imbalance = 2 * imbalance + ncores_local + ncores_busiest;
imbalance /= ncores_local + ncores_busiest;
/* Take advantage of resource in an empty sched group */
if (imbalance <= 1 && local->sum_nr_running == 0 &&
busiest->sum_nr_running > 1)
imbalance = 2;
return imbalance;
}
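/*
 * Worked example (illustrative): the local group has 4 cores and 2 tasks,
 * the busiest group has 2 cores and 6 tasks. Then
 * imbalance = 4 * 6 - 2 * 2 = 20 and the normalization yields
 * (2 * 20 + 6) / 6 = 7. The caller later halves task-based imbalances
 * (env->imbalance >>= 1), giving 3 tasks to move; the exact solution of
 * (6 - x) / 2 = (2 + x) / 4 is x = 20 / 6 ~= 3.33, so the doubling here
 * plus the caller's shift implements round-to-nearest.
 */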
static inline bool
sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
{
/*
* When there is more than 1 task, the group_overloaded case already
* takes care of CPUs with reduced capacity.
*/
if (rq->cfs.h_nr_running != 1)
return false;
return check_cpu_capacity(rq, sd);
}
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @env: The load balancing environment.
* @sds: Load-balancing data with statistics of the local group.
* @group: sched_group whose statistics are to be updated.
* @sgs: variable to hold the statistics for this group.
* @sg_status: Holds flag indicating the status of the sched_group
*/
static inline void update_sg_lb_stats(struct lb_env *env,
struct sd_lb_stats *sds,
struct sched_group *group,
struct sg_lb_stats *sgs,
int *sg_status)
{
int i, nr_running, local_group;
memset(sgs, 0, sizeof(*sgs));
local_group = group == sds->local;
for_each_cpu_and(i, sched_group_span(group), env->cpus) {
struct rq *rq = cpu_rq(i);
unsigned long load = cpu_load(rq);
sgs->group_load += load;
sgs->group_util += cpu_util_cfs(i);
sgs->group_runnable += cpu_runnable(rq);
sgs->sum_h_nr_running += rq->cfs.h_nr_running;
nr_running = rq->nr_running;
sgs->sum_nr_running += nr_running;
if (nr_running > 1)
*sg_status |= SG_OVERLOAD;
if (cpu_overutilized(i))
*sg_status |= SG_OVERUTILIZED;
#ifdef CONFIG_NUMA_BALANCING
sgs->nr_numa_running += rq->nr_numa_running;
sgs->nr_preferred_running += rq->nr_preferred_running;
#endif
/*
* No need to call idle_cpu() if nr_running is not 0
*/
if (!nr_running && idle_cpu(i)) {
sgs->idle_cpus++;
/* Idle cpu can't have misfit task */
continue;
}
if (local_group)
continue;
if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
/* Check for a misfit task on the cpu */
if (sgs->group_misfit_task_load < rq->misfit_task_load) {
sgs->group_misfit_task_load = rq->misfit_task_load;
*sg_status |= SG_OVERLOAD;
}
} else if ((env->idle != CPU_NOT_IDLE) &&
sched_reduced_capacity(rq, env->sd)) {
/* Check for a task running on a CPU with reduced capacity */
if (sgs->group_misfit_task_load < load)
sgs->group_misfit_task_load = load;
}
}
sgs->group_capacity = group->sgc->capacity;
sgs->group_weight = group->group_weight;
/* Check if dst CPU is idle and preferred to this group */
if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
sched_asym(env, sds, sgs, group)) {
sgs->group_asym_packing = 1;
}
/* Check for loaded SMT group to be balanced to dst CPU */
if (!local_group && smt_balance(env, sgs, group))
sgs->group_smt_balance = 1;
sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
/* Computing avg_load makes sense only when group is overloaded */
if (sgs->group_type == group_overloaded)
sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
sgs->group_capacity;
}
/**
* update_sd_pick_busiest - return 1 on busiest group
* @env: The load balancing environment.
* @sds: sched_domain statistics
* @sg: sched_group candidate to be checked for being the busiest
* @sgs: sched_group statistics
*
* Determine if @sg is a busier group than the previously selected
* busiest group.
*
* Return: %true if @sg is a busier group than the previously selected
* busiest group. %false otherwise.
*/
static bool update_sd_pick_busiest(struct lb_env *env,
struct sd_lb_stats *sds,
struct sched_group *sg,
struct sg_lb_stats *sgs)
{
struct sg_lb_stats *busiest = &sds->busiest_stat;
/* Make sure that there is at least one task to pull */
if (!sgs->sum_h_nr_running)
return false;
/*
* Don't try to pull misfit tasks we can't help.
* We can use max_capacity here as reduction in capacity on some
* CPUs in the group should either be possible to resolve
* internally or be covered by avg_load imbalance (eventually).
*/
if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
(sgs->group_type == group_misfit_task) &&
(!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
sds->local_stat.group_type != group_has_spare))
return false;
if (sgs->group_type > busiest->group_type)
return true;
if (sgs->group_type < busiest->group_type)
return false;
/*
* The candidate and the current busiest group are the same type of
* group. Let's check which one is the busiest according to the type.
*/
switch (sgs->group_type) {
case group_overloaded:
/* Select the overloaded group with highest avg_load. */
if (sgs->avg_load <= busiest->avg_load)
return false;
break;
case group_imbalanced:
/*
* Select the 1st imbalanced group as we don't have any way to
* choose one more than another.
*/
return false;
case group_asym_packing:
/* Prefer to move from lowest priority CPU's work */
if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
return false;
break;
case group_misfit_task:
/*
* If we have more than one misfit sg go with the biggest
* misfit.
*/
if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
return false;
break;
case group_smt_balance:
/*
* Check if we have spare CPUs on either SMT group to
* choose between has-spare and fully-busy handling.
*/
if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
goto has_spare;
fallthrough;
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
* theory, there is no need to pull tasks from such a group
* because tasks have all the compute capacity they need,
* but we can still improve the overall throughput by reducing
* contention when accessing shared HW resources.
*
* XXX for now avg_load is not computed and always 0 so we
* select the 1st one, except if @sg is composed of SMT
* siblings.
*/
if (sgs->avg_load < busiest->avg_load)
return false;
if (sgs->avg_load == busiest->avg_load) {
/*
* SMT sched groups need more help than non-SMT groups.
* If @sg happens to also be SMT, either choice is good.
*/
if (sds->busiest->flags & SD_SHARE_CPUCAPACITY)
return false;
}
break;
case group_has_spare:
/*
* Do not pick an sg with SMT CPUs over an sg with pure CPUs,
* as we do not want to pull a task off an SMT core with one
* task and make the core idle.
*/
if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
return false;
else
return true;
}
has_spare:
/*
* Select the non-overloaded group with the lowest number of
* idle CPUs and the highest number of running tasks. We could
* also compare the spare capacity, which is more stable, but
* a group can end up with less spare capacity yet more idle
* CPUs, which means less opportunity to pull tasks.
*/
if (sgs->idle_cpus > busiest->idle_cpus)
return false;
else if ((sgs->idle_cpus == busiest->idle_cpus) &&
(sgs->sum_nr_running <= busiest->sum_nr_running))
return false;
break;
}
/*
* Candidate sg has no more than one task per CPU and has higher
* per-CPU capacity. Migrating tasks to less capable CPUs may harm
* throughput. Maximize throughput, power/energy consequences are not
* considered.
*/
if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
(sgs->group_type <= group_fully_busy) &&
(capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
return false;
return true;
}
#ifdef CONFIG_NUMA_BALANCING
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
if (sgs->sum_h_nr_running > sgs->nr_numa_running)
return regular;
if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
return remote;
return all;
}
static inline enum fbq_type fbq_classify_rq(struct rq *rq)
{
if (rq->nr_running > rq->nr_numa_running)
return regular;
if (rq->nr_running > rq->nr_preferred_running)
return remote;
return all;
}
#else
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
return all;
}
static inline enum fbq_type fbq_classify_rq(struct rq *rq)
{
return regular;
}
#endif /* CONFIG_NUMA_BALANCING */
struct sg_lb_stats;
/*
* task_running_on_cpu - return 1 if @p is running on @cpu.
*/
static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
{
/* Task has no contribution or is new */
if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
return 0;
if (task_on_rq_queued(p))
return 1;
return 0;
}
/**
* idle_cpu_without - would a given CPU be idle without @p?
* @cpu: the processor on which idleness is tested.
* @p: task which should be ignored.
*
* Return: 1 if the CPU would be idle. 0 otherwise.
*/
static int idle_cpu_without(int cpu, struct task_struct *p)
{
struct rq *rq = cpu_rq(cpu);
if (rq->curr != rq->idle && rq->curr != p)
return 0;
/*
* rq->nr_running can't be used but an updated version without the
* impact of @p on @cpu must be used instead. The updated nr_running
* must be computed and tested before calling idle_cpu_without().
*/
#ifdef CONFIG_SMP
if (rq->ttwu_pending)
return 0;
#endif
return 1;
}
/*
* update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
* @sd: The sched_domain level to look for idlest group.
* @group: sched_group whose statistics are to be updated.
* @sgs: variable to hold the statistics for this group.
* @p: The task for which we look for the idlest group/CPU.
*/
static inline void update_sg_wakeup_stats(struct sched_domain *sd,
struct sched_group *group,
struct sg_lb_stats *sgs,
struct task_struct *p)
{
int i, nr_running;
memset(sgs, 0, sizeof(*sgs));
/* Assume that the task can't fit on any CPU of the group */
if (sd->flags & SD_ASYM_CPUCAPACITY)
sgs->group_misfit_task_load = 1;
for_each_cpu(i, sched_group_span(group)) {
struct rq *rq = cpu_rq(i);
unsigned int local;
sgs->group_load += cpu_load_without(rq, p);
sgs->group_util += cpu_util_without(i, p);
sgs->group_runnable += cpu_runnable_without(rq, p);
local = task_running_on_cpu(i, p);
sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
nr_running = rq->nr_running - local;
sgs->sum_nr_running += nr_running;
/*
* No need to call idle_cpu_without() if nr_running is not 0
*/
if (!nr_running && idle_cpu_without(i, p))
sgs->idle_cpus++;
/* Check if task fits in the CPU */
if (sd->flags & SD_ASYM_CPUCAPACITY &&
sgs->group_misfit_task_load &&
task_fits_cpu(p, i))
sgs->group_misfit_task_load = 0;
}
sgs->group_capacity = group->sgc->capacity;
sgs->group_weight = group->group_weight;
sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
/*
* Computing avg_load makes sense only when group is fully busy or
* overloaded
*/
if (sgs->group_type == group_fully_busy ||
sgs->group_type == group_overloaded)
sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
sgs->group_capacity;
}
static bool update_pick_idlest(struct sched_group *idlest,
struct sg_lb_stats *idlest_sgs,
struct sched_group *group,
struct sg_lb_stats *sgs)
{
if (sgs->group_type < idlest_sgs->group_type)
return true;
if (sgs->group_type > idlest_sgs->group_type)
return false;
/*
* The candidate and the current idlest group are the same type of
* group. Let's check which one is the idlest according to the type.
*/
switch (sgs->group_type) {
case group_overloaded:
case group_fully_busy:
/* Select the group with lowest avg_load. */
if (idlest_sgs->avg_load <= sgs->avg_load)
return false;
break;
case group_imbalanced:
case group_asym_packing:
case group_smt_balance:
/* Those types are not used in the slow wakeup path */
return false;
case group_misfit_task:
/* Select group with the highest max capacity */
if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
return false;
break;
case group_has_spare:
/* Select group with most idle CPUs */
if (idlest_sgs->idle_cpus > sgs->idle_cpus)
return false;
/* Select group with lowest group_util */
if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
idlest_sgs->group_util <= sgs->group_util)
return false;
break;
}
return true;
}
/*
* find_idlest_group() finds and returns the least busy CPU group within the
* domain.
*
* Assumes p is allowed on at least one CPU in sd.
*/
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
{
struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
struct sg_lb_stats local_sgs, tmp_sgs;
struct sg_lb_stats *sgs;
unsigned long imbalance;
struct sg_lb_stats idlest_sgs = {
.avg_load = UINT_MAX,
.group_type = group_overloaded,
};
do {
int local_group;
/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_span(group),
p->cpus_ptr))
continue;
/* Skip over this group if no cookie matched */
if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group))
continue;
local_group = cpumask_test_cpu(this_cpu,
sched_group_span(group));
if (local_group) {
sgs = &local_sgs;
local = group;
} else {
sgs = &tmp_sgs;
}
update_sg_wakeup_stats(sd, group, sgs, p);
if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
idlest = group;
idlest_sgs = *sgs;
}
} while (group = group->next, group != sd->groups);
/* There is no idlest group to push tasks to */
if (!idlest)
return NULL;
/* The local group has been skipped because of CPU affinity */
if (!local)
return idlest;
/*
* If the local group is idler than the selected idlest group
* don't try and push the task.
*/
if (local_sgs.group_type < idlest_sgs.group_type)
return NULL;
/*
* If the local group is busier than the selected idlest group
* try and push the task.
*/
if (local_sgs.group_type > idlest_sgs.group_type)
return idlest;
switch (local_sgs.group_type) {
case group_overloaded:
case group_fully_busy:
/* Calculate allowed imbalance based on load */
imbalance = scale_load_down(NICE_0_LOAD) *
(sd->imbalance_pct-100) / 100;
/*
* When comparing groups across NUMA domains, it's possible for
* the local domain to be very lightly loaded relative to the
* remote domains but "imbalance" skews the comparison making
* remote CPUs look much more favourable. When considering
* cross-domain, add imbalance to the load on the remote node
* and consider staying local.
*/
if ((sd->flags & SD_NUMA) &&
((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load))
return NULL;
/*
* If the local group is less loaded than the selected
* idlest group don't try and push any tasks.
*/
if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
return NULL;
if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
return NULL;
break;
case group_imbalanced:
case group_asym_packing:
case group_smt_balance:
/* Those types are not used in the slow wakeup path */
return NULL;
case group_misfit_task:
/* Select group with the highest max capacity */
if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
return NULL;
break;
case group_has_spare:
#ifdef CONFIG_NUMA
if (sd->flags & SD_NUMA) {
int imb_numa_nr = sd->imb_numa_nr;
#ifdef CONFIG_NUMA_BALANCING
int idlest_cpu;
/*
* If there is spare capacity at NUMA, try to select
* the preferred node
*/
if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
return NULL;
idlest_cpu = cpumask_first(sched_group_span(idlest));
if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
return idlest;
#endif /* CONFIG_NUMA_BALANCING */
/*
* Otherwise, keep the task close to the wakeup source
* and improve locality if the number of running tasks
* would remain below threshold where an imbalance is
* allowed while accounting for the possibility the
* task is pinned to a subset of CPUs. If there is a
* real need of migration, periodic load balance will
* take care of it.
*/
if (p->nr_cpus_allowed != NR_CPUS) {
struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
cpumask_and(cpus, sched_group_span(local), p->cpus_ptr);
imb_numa_nr = min(cpumask_weight(cpus), sd->imb_numa_nr);
}
imbalance = abs(local_sgs.idle_cpus - idlest_sgs.idle_cpus);
if (!adjust_numa_imbalance(imbalance,
local_sgs.sum_nr_running + 1,
imb_numa_nr)) {
return NULL;
}
}
#endif /* CONFIG_NUMA */
/*
* Select the group with the highest number of idle CPUs. We could
* also compare the utilization, which is more stable, but a group
* can end up with less spare capacity yet more idle CPUs, which
* means more opportunity to run tasks.
*/
if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus)
return NULL;
break;
}
return idlest;
}
static void update_idle_cpu_scan(struct lb_env *env,
unsigned long sum_util)
{
struct sched_domain_shared *sd_share;
int llc_weight, pct;
u64 x, y, tmp;
/*
* Update the number of CPUs to scan in LLC domain, which could
* be used as a hint in select_idle_cpu(). The update of sd_share
* could be expensive because it is within a shared cache line.
* So the write of this hint only occurs during periodic load
* balancing, rather than CPU_NEWLY_IDLE, because the latter
* can fire way more frequently than the former.
*/
if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
return;
llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
if (env->sd->span_weight != llc_weight)
return;
sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
if (!sd_share)
return;
/*
* The number of CPUs to search drops as sum_util increases, when
* sum_util hits 85% or above, the scan stops.
* The reason to choose 85% as the threshold is because this is the
* imbalance_pct(117) when a LLC sched group is overloaded.
*
* let y = SCHED_CAPACITY_SCALE - p * x^2 [1]
* and y'= y / SCHED_CAPACITY_SCALE
*
* x is the ratio of sum_util compared to the CPU capacity:
* x = sum_util / (llc_weight * SCHED_CAPACITY_SCALE)
* y' is the ratio of CPUs to be scanned in the LLC domain,
* and the number of CPUs to scan is calculated by:
*
* nr_scan = llc_weight * y' [2]
*
* When x hits the threshold of overloaded, AKA, when
* x = 100 / pct, y drops to 0. According to [1],
* p should be SCHED_CAPACITY_SCALE * pct^2 / 10000
*
* Scale x by SCHED_CAPACITY_SCALE:
* x' = sum_util / llc_weight; [3]
*
* and finally [1] becomes:
* y = SCHED_CAPACITY_SCALE -
* x'^2 * pct^2 / (10000 * SCHED_CAPACITY_SCALE) [4]
*
*/
/* equation [3] */
x = sum_util;
do_div(x, llc_weight);
/* equation [4] */
pct = env->sd->imbalance_pct;
tmp = x * x * pct * pct;
do_div(tmp, 10000 * SCHED_CAPACITY_SCALE);
tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE);
y = SCHED_CAPACITY_SCALE - tmp;
/* equation [2] */
y *= llc_weight;
do_div(y, SCHED_CAPACITY_SCALE);
if ((int)y != sd_share->nr_idle_scan)
WRITE_ONCE(sd_share->nr_idle_scan, (int)y);
}
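/*
 * Worked example (illustrative): with llc_weight = 16, pct = 117 and a
 * half-utilized LLC, so x' = sum_util / llc_weight = 512:
 * tmp = 512^2 * 117^2 / (10000 * 1024) ~= 350, y = 1024 - 350 = 674 and
 * nr_scan = 674 * 16 / 1024 ~= 10, i.e. scan about 10 of the 16 CPUs.
 * Around x' ~= 875 (85% utilization) y reaches ~0 and the scan stops,
 * matching the threshold described above.
 */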
/**
* update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @env: The load balancing environment.
* @sds: variable to hold the statistics for this sched_domain.
*/
static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
{
struct sched_group *sg = env->sd->groups;
struct sg_lb_stats *local = &sds->local_stat;
struct sg_lb_stats tmp_sgs;
unsigned long sum_util = 0;
int sg_status = 0;
do {
struct sg_lb_stats *sgs = &tmp_sgs;
int local_group;
local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
if (local_group) {
sds->local = sg;
sgs = local;
if (env->idle != CPU_NEWLY_IDLE ||
time_after_eq(jiffies, sg->sgc->next_update))
update_group_capacity(env->sd, env->dst_cpu);
}
update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
if (local_group)
goto next_group;
if (update_sd_pick_busiest(env, sds, sg, sgs)) {
sds->busiest = sg;
sds->busiest_stat = *sgs;
}
next_group:
/* Now, start updating sd_lb_stats */
sds->total_load += sgs->group_load;
sds->total_capacity += sgs->group_capacity;
sum_util += sgs->group_util;
sg = sg->next;
} while (sg != env->sd->groups);
/*
* Indicate that the child domain of the busiest group prefers tasks
* go to a child's sibling domains first. NB the flags of a sched group
* are those of the child domain.
*/
if (sds->busiest)
sds->prefer_sibling = !!(sds->busiest->flags & SD_PREFER_SIBLING);
if (env->sd->flags & SD_NUMA)
env->fbq_type = fbq_classify_group(&sds->busiest_stat);
if (!env->sd->parent) {
struct root_domain *rd = env->dst_rq->rd;
/* update overload indicator if we are at root domain */
WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
/* Update over-utilization (tipping point, U >= 0) indicator */
WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
} else if (sg_status & SG_OVERUTILIZED) {
struct root_domain *rd = env->dst_rq->rd;
WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
}
update_idle_cpu_scan(env, sum_util);
}
/**
* calculate_imbalance - Calculate the amount of imbalance present within the
* groups of a given sched_domain during load balance.
* @env: load balance environment
* @sds: statistics of the sched_domain whose imbalance is to be calculated.
*/
static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
{
struct sg_lb_stats *local, *busiest;
local = &sds->local_stat;
busiest = &sds->busiest_stat;
if (busiest->group_type == group_misfit_task) {
if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
/* Set imbalance to allow misfit tasks to be balanced. */
env->migration_type = migrate_misfit;
env->imbalance = 1;
} else {
/*
* Set load imbalance to allow moving task from cpu
* with reduced capacity.
*/
env->migration_type = migrate_load;
env->imbalance = busiest->group_misfit_task_load;
}
return;
}
if (busiest->group_type == group_asym_packing) {
/*
* In case of asym capacity, we will try to migrate all load to
* the preferred CPU.
*/
env->migration_type = migrate_task;
env->imbalance = busiest->sum_h_nr_running;
return;
}
if (busiest->group_type == group_smt_balance) {
/* Reduce number of tasks sharing CPU capacity */
env->migration_type = migrate_task;
env->imbalance = 1;
return;
}
if (busiest->group_type == group_imbalanced) {
/*
* In the group_imb case we cannot rely on group-wide averages
* to ensure CPU-load equilibrium, try to move any task to fix
* the imbalance. The next load balance will take care of
* balancing back the system.
*/
env->migration_type = migrate_task;
env->imbalance = 1;
return;
}
/*
* Try to use spare capacity of local group without overloading it or
* emptying busiest.
*/
if (local->group_type == group_has_spare) {
if ((busiest->group_type > group_fully_busy) &&
!(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
/*
* If busiest is overloaded, try to fill spare
* capacity. This might end up creating spare capacity
* in busiest or busiest still being overloaded but
* there is no simple way to directly compute the
* amount of load to migrate in order to balance the
* system.
*/
env->migration_type = migrate_util;
env->imbalance = max(local->group_capacity, local->group_util) -
local->group_util;
/*
* In some cases, the group's utilization is max or even
* higher than capacity because of migrations but the
* local CPU is (newly) idle. There is at least one
* waiting task in this overloaded busiest group. Let's
* try to pull it.
*/
if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
env->migration_type = migrate_task;
env->imbalance = 1;
}
return;
}
if (busiest->group_weight == 1 || sds->prefer_sibling) {
/*
* When prefer_sibling is set, evenly spread running tasks
* across the groups.
*/
env->migration_type = migrate_task;
env->imbalance = sibling_imbalance(env, sds, busiest, local);
} else {
/*
* If there is no overload, we just want to even the number of
* idle cpus.
*/
env->migration_type = migrate_task;
env->imbalance = max_t(long, 0,
(local->idle_cpus - busiest->idle_cpus));
}
#ifdef CONFIG_NUMA
/* Consider allowing a small imbalance between NUMA groups */
if (env->sd->flags & SD_NUMA) {
env->imbalance = adjust_numa_imbalance(env->imbalance,
local->sum_nr_running + 1,
env->sd->imb_numa_nr);
}
#endif
/* Number of tasks to move to restore balance */
env->imbalance >>= 1;
return;
}
/*
* Local is fully busy but has to take more load to relieve the
* busiest group
*/
if (local->group_type < group_overloaded) {
/*
* Local will become overloaded so the avg_load metrics are
* finally needed.
*/
local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
local->group_capacity;
/*
* If the local group is more loaded than the selected
* busiest group don't try to pull any tasks.
*/
if (local->avg_load >= busiest->avg_load) {
env->imbalance = 0;
return;
}
sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
sds->total_capacity;
/*
* If the local group is more loaded than the average system
* load, don't try to pull any tasks.
*/
if (local->avg_load >= sds->avg_load) {
env->imbalance = 0;
return;
}
}
/*
* Both groups are or will become overloaded and we're trying to get all
* the CPUs to the average_load, so we don't want to push ourselves
* above the average load, nor do we wish to reduce the max loaded CPU
* below the average load. At the same time, we also don't want to
* reduce the group load below the group capacity. Thus we look for
* the minimum possible imbalance.
*/
env->migration_type = migrate_load;
env->imbalance = min(
(busiest->avg_load - sds->avg_load) * busiest->group_capacity,
(sds->avg_load - local->avg_load) * local->group_capacity
) / SCHED_CAPACITY_SCALE;
}
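/*
 * Worked example (illustrative): with SCHED_CAPACITY_SCALE = 1024,
 * busiest->avg_load = 1400, local->avg_load = 600, sds->avg_load = 1000
 * and both group capacities at 1024, the two candidates are
 * (1400 - 1000) * 1024 / 1024 = 400 and (1000 - 600) * 1024 / 1024 = 400,
 * so 400 units of load move and both groups land on the domain average.
 * Were the local capacity only 512, the second term would cap the move
 * at 200 so the local group is not pushed above the average.
 */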
/******* find_busiest_group() helpers end here *********************/
/*
* Decision matrix according to the local and busiest group type:
*
* busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
* has_spare nr_idle balanced N/A N/A balanced balanced
* fully_busy nr_idle nr_idle N/A N/A balanced balanced
* misfit_task force N/A N/A N/A N/A N/A
* asym_packing force force N/A N/A force force
* imbalanced force force N/A N/A force force
* overloaded force force N/A N/A force avg_load
*
* N/A : Not Applicable because already filtered while updating
* statistics.
* balanced : The system is balanced for these 2 groups.
* force : Calculate the imbalance as load migration is probably needed.
* avg_load : Only if imbalance is significant enough.
* nr_idle : dst_cpu is not busy and the number of idle CPUs differs
* significantly between the groups.
*/
/**
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
* @env: The load balancing environment.
*
* Also calculates the amount of runnable load which should be moved
* to restore balance.
*
* Return: - The busiest group if imbalance exists.
*/
static struct sched_group *find_busiest_group(struct lb_env *env)
{
struct sg_lb_stats *local, *busiest;
struct sd_lb_stats sds;
init_sd_lb_stats(&sds);
/*
* Compute the various statistics relevant for load balancing at
* this level.
*/
update_sd_lb_stats(env, &sds);
/* There is no busy sibling group to pull tasks from */
if (!sds.busiest)
goto out_balanced;
busiest = &sds.busiest_stat;
/* Misfit tasks should be dealt with regardless of the avg load */
if (busiest->group_type == group_misfit_task)
goto force_balance;
if (sched_energy_enabled()) {
struct root_domain *rd = env->dst_rq->rd;
if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
goto out_balanced;
}
/* ASYM feature bypasses nice load balance check */
if (busiest->group_type == group_asym_packing)
goto force_balance;
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
* isn't true due to cpus_ptr constraints and the like.
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
local = &sds.local_stat;
/*
* If the local group is busier than the selected busiest group
* don't try and pull any tasks.
*/
if (local->group_type > busiest->group_type)
goto out_balanced;
/*
* When groups are overloaded, use the avg_load to ensure fairness
* between tasks.
*/
if (local->group_type == group_overloaded) {
/*
* If the local group is more loaded than the selected
* busiest group don't try to pull any tasks.
*/
if (local->avg_load >= busiest->avg_load)
goto out_balanced;
/* XXX broken for overlapping NUMA groups */
sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
sds.total_capacity;
/*
* Don't pull any tasks if this group is already above the
* domain average load.
*/
if (local->avg_load >= sds.avg_load)
goto out_balanced;
/*
* If the busiest group is more loaded, use imbalance_pct to be
* conservative.
*/
if (100 * busiest->avg_load <=
env->sd->imbalance_pct * local->avg_load)
goto out_balanced;
}
/*
* Try to move all excess tasks to a sibling domain of the busiest
* group's child domain.
*/
if (sds.prefer_sibling && local->group_type == group_has_spare &&
sibling_imbalance(env, &sds, busiest, local) > 1)
goto force_balance;
if (busiest->group_type != group_overloaded) {
if (env->idle == CPU_NOT_IDLE) {
/*
* If the busiest group is not overloaded (and as a
* result the local one too) but this CPU is already
* busy, let another idle CPU try to pull tasks.
*/
goto out_balanced;
}
if (busiest->group_type == group_smt_balance &&
smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
/* Let non SMT CPU pull from SMT CPU sharing with sibling */
goto force_balance;
}
if (busiest->group_weight > 1 &&
local->idle_cpus <= (busiest->idle_cpus + 1)) {
/*
* If the busiest group is not overloaded
* and there is no imbalance between this and busiest
* group wrt idle CPUs, it is balanced. The imbalance
* becomes significant if the diff is greater than 1
* otherwise we might end up just moving the imbalance
* to another group. Of course this applies only if
* there is more than 1 CPU per group.
*/
goto out_balanced;
}
if (busiest->sum_h_nr_running == 1) {
/*
* busiest doesn't have any tasks waiting to run
*/
goto out_balanced;
}
}
force_balance:
/* Looks like there is an imbalance. Compute it */
calculate_imbalance(env, &sds);
return env->imbalance ? sds.busiest : NULL;
out_balanced:
env->imbalance = 0;
return NULL;
}
/*
* find_busiest_queue - find the busiest runqueue among the CPUs in the group.
*/
static struct rq *find_busiest_queue(struct lb_env *env,
struct sched_group *group)
{
struct rq *busiest = NULL, *rq;
unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
unsigned int busiest_nr = 0;
int i;
for_each_cpu_and(i, sched_group_span(group), env->cpus) {
unsigned long capacity, load, util;
unsigned int nr_running;
enum fbq_type rt;
rq = cpu_rq(i);
rt = fbq_classify_rq(rq);
/*
* We classify groups/runqueues into three groups:
* - regular: there are !numa tasks
* - remote: there are numa tasks that run on the 'wrong' node
* - all: there is no distinction
*
* In order to avoid migrating ideally placed numa tasks,
* ignore those when there are better options.
*
* If we ignore the actual busiest queue to migrate another
* task, the next balance pass can still reduce the busiest
* queue by moving tasks around inside the node.
*
* If we cannot move enough load due to this classification
* the next pass will adjust the group classification and
* allow migration of more tasks.
*
* Both cases only affect the total convergence complexity.
*/
if (rt > env->fbq_type)
continue;
nr_running = rq->cfs.h_nr_running;
if (!nr_running)
continue;
capacity = capacity_of(i);
/*
* For ASYM_CPUCAPACITY domains, don't pick a CPU that could
* eventually lead to active_balancing high->low capacity.
* Higher per-CPU capacity is considered better than balancing
* average load.
*/
if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
!capacity_greater(capacity_of(env->dst_cpu), capacity) &&
nr_running == 1)
continue;
/*
* Make sure we only pull tasks from a CPU of lower priority
* when balancing between SMT siblings.
*
* If balancing between cores, let lower priority CPUs help
* SMT cores with more than one busy sibling.
*/
if ((env->sd->flags & SD_ASYM_PACKING) &&
sched_use_asym_prio(env->sd, i) &&
sched_asym_prefer(i, env->dst_cpu) &&
nr_running == 1)
continue;
switch (env->migration_type) {
case migrate_load:
/*
* When comparing with load imbalance, use cpu_load()
* which is not scaled with the CPU capacity.
*/
load = cpu_load(rq);
if (nr_running == 1 && load > env->imbalance &&
!check_cpu_capacity(rq, env->sd))
break;
/*
* For the load comparisons with the other CPUs,
* consider the cpu_load() scaled with the CPU
* capacity, so that the load can be moved away
* from the CPU that is potentially running at a
* lower capacity.
*
* Thus we're looking for max(load_i / capacity_i),
* crosswise multiplication to rid ourselves of the
* division works out to:
* load_i * capacity_j > load_j * capacity_i;
* where j is our previous maximum.
*/
if (load * busiest_capacity > busiest_load * capacity) {
busiest_load = load;
busiest_capacity = capacity;
busiest = rq;
}
break;
case migrate_util:
util = cpu_util_cfs_boost(i);
/*
* Don't try to pull utilization from a CPU with one
* running task. Whatever its utilization, we will fail
* to detach the task.
*/
if (nr_running <= 1)
continue;
if (busiest_util < util) {
busiest_util = util;
busiest = rq;
}
break;
case migrate_task:
if (busiest_nr < nr_running) {
busiest_nr = nr_running;
busiest = rq;
}
break;
case migrate_misfit:
/*
* For ASYM_CPUCAPACITY domains with misfit tasks we
* simply seek the "biggest" misfit task.
*/
if (rq->misfit_task_load > busiest_load) {
busiest_load = rq->misfit_task_load;
busiest = rq;
}
break;
}
}
return busiest;
}
/*
* Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
* so long as it is large enough.
*/
#define MAX_PINNED_INTERVAL 512
static inline bool
asym_active_balance(struct lb_env *env)
{
/*
* ASYM_PACKING needs to force migrate tasks from busy but lower
* priority CPUs in order to pack all tasks in the highest priority
* CPUs. When done between cores, do it only if the whole core is idle.
*
* If @env::src_cpu is an SMT core with busy siblings, let
* the lower priority @env::dst_cpu help it. Do not follow
* CPU priority.
*/
return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
sched_use_asym_prio(env->sd, env->dst_cpu) &&
(sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
!sched_use_asym_prio(env->sd, env->src_cpu));
}
static inline bool
imbalanced_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;
/*
* The imbalanced case covers the case of pinned tasks preventing a fair
* distribution of the load across the system, but also the even
* distribution of threads on a system with spare capacity.
*/
if ((env->migration_type == migrate_task) &&
(sd->nr_balance_failed > sd->cache_nice_tries+2))
return 1;
return 0;
}
static int need_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;
if (asym_active_balance(env))
return 1;
if (imbalanced_active_balance(env))
return 1;
/*
* The dst_cpu is idle and the src_cpu has only 1 CFS task.
* It's worth migrating the task if the src_cpu's capacity is reduced
* because of other sched_class tasks or IRQs while more capacity stays
* available on dst_cpu.
*/
if ((env->idle != CPU_NOT_IDLE) &&
(env->src_rq->cfs.h_nr_running == 1)) {
if ((check_cpu_capacity(env->src_rq, sd)) &&
(capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
return 1;
}
if (env->migration_type == migrate_misfit)
return 1;
return 0;
}
static int active_load_balance_cpu_stop(void *data);
static int should_we_balance(struct lb_env *env)
{
struct cpumask *swb_cpus = this_cpu_cpumask_var_ptr(should_we_balance_tmpmask);
struct sched_group *sg = env->sd->groups;
int cpu, idle_smt = -1;
/*
* Ensure the balancing environment is consistent; inconsistency
* can happen when the softirq triggers 'during' hotplug.
*/
if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
return 0;
/*
* In the newly idle case, we will allow all the CPUs
* to do the newly idle load balance.
*
* However, we bail out if we already have tasks or a wakeup pending,
* to optimize wakeup latency.
*/
if (env->idle == CPU_NEWLY_IDLE) {
if (env->dst_rq->nr_running > 0 || env->dst_rq->ttwu_pending)
return 0;
return 1;
}
cpumask_copy(swb_cpus, group_balance_mask(sg));
/* Try to find first idle CPU */
for_each_cpu_and(cpu, swb_cpus, env->cpus) {
if (!idle_cpu(cpu))
continue;
/*
* Don't balance to an idle SMT CPU in a busy core right away
* when balancing cores, but remember the first idle SMT CPU
* for later consideration. Find a CPU on an idle core first.
*/
if (!(env->sd->flags & SD_SHARE_CPUCAPACITY) && !is_core_idle(cpu)) {
if (idle_smt == -1)
idle_smt = cpu;
/*
* If the core is not idle, and the first idle SMT sibling
* has been found, there is no need to check the other SMT
* siblings for idleness:
*/
#ifdef CONFIG_SCHED_SMT
cpumask_andnot(swb_cpus, swb_cpus, cpu_smt_mask(cpu));
#endif
continue;
}
/* Are we the first idle CPU? */
return cpu == env->dst_cpu;
}
if (idle_smt == env->dst_cpu)
return true;
/* Are we the first CPU of this group ? */
return group_balance_cpu(sg) == env->dst_cpu;
}
/*
* Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance.
*/
static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum cpu_idle_type idle,
int *continue_balancing)
{
int ld_moved, cur_ld_moved, active_balance = 0;
struct sched_domain *sd_parent = sd->parent;
struct sched_group *group;
struct rq *busiest;
struct rq_flags rf;
struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
struct lb_env env = {
.sd = sd,
.dst_cpu = this_cpu,
.dst_rq = this_rq,
.dst_grpmask = group_balance_mask(sd->groups),
.idle = idle,
.loop_break = SCHED_NR_MIGRATE_BREAK,
.cpus = cpus,
.fbq_type = all,
.tasks = LIST_HEAD_INIT(env.tasks),
};
cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
schedstat_inc(sd->lb_count[idle]);
redo:
if (!should_we_balance(&env)) {
*continue_balancing = 0;
goto out_balanced;
}
group = find_busiest_group(&env);
if (!group) {
schedstat_inc(sd->lb_nobusyg[idle]);
goto out_balanced;
}
busiest = find_busiest_queue(&env, group);
if (!busiest) {
schedstat_inc(sd->lb_nobusyq[idle]);
goto out_balanced;
}
WARN_ON_ONCE(busiest == env.dst_rq);
schedstat_add(sd->lb_imbalance[idle], env.imbalance);
env.src_cpu = busiest->cpu;
env.src_rq = busiest;
ld_moved = 0;
/* Clear this flag as soon as we find a pullable task */
env.flags |= LBF_ALL_PINNED;
if (busiest->nr_running > 1) {
/*
* Attempt to move tasks. If find_busiest_group has found
* an imbalance but busiest->nr_running <= 1, the group is
* still unbalanced. ld_moved simply stays zero, so it is
* correctly treated as an imbalance.
*/
env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
more_balance:
rq_lock_irqsave(busiest, &rf);
update_rq_clock(busiest);
/*
* cur_ld_moved - load moved in current iteration
* ld_moved - cumulative load moved across iterations
*/
cur_ld_moved = detach_tasks(&env);
/*
* We've detached some tasks from busiest_rq. Every
* task is marked "TASK_ON_RQ_MIGRATING", so we can safely
* unlock busiest->lock and be sure that nobody can
* manipulate the tasks in parallel.
* See task_rq_lock() family for the details.
*/
rq_unlock(busiest, &rf);
if (cur_ld_moved) {
attach_tasks(&env);
ld_moved += cur_ld_moved;
}
local_irq_restore(rf.flags);
if (env.flags & LBF_NEED_BREAK) {
env.flags &= ~LBF_NEED_BREAK;
/* Stop if we tried all running tasks */
if (env.loop < busiest->nr_running)
goto more_balance;
}
/*
* Revisit (affine) tasks on src_cpu that couldn't be moved to
* us and move them to an alternate dst_cpu in our sched_group
* where they can run. The upper limit on how many times we
* iterate on the same src_cpu depends on the number of CPUs in
* our sched_group.
*
* This changes load balance semantics a bit on who can move
* load to a given_cpu. In addition to the given_cpu itself
* (or an ilb_cpu acting on its behalf where given_cpu is
* nohz-idle), we now have balance_cpu in a position to move
* load to given_cpu. In rare situations, this may cause
* conflicts (balance_cpu and given_cpu/ilb_cpu deciding
* _independently_ and at _same_ time to move some load to
* given_cpu) causing excess load to be moved to given_cpu.
* This, however, should not happen often in practice, and
* moreover subsequent load balance cycles should correct any
* excess load moved.
*/
if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
/* Prevent re-selecting dst_cpu via env's CPUs */
__cpumask_clear_cpu(env.dst_cpu, env.cpus);
env.dst_rq = cpu_rq(env.new_dst_cpu);
env.dst_cpu = env.new_dst_cpu;
env.flags &= ~LBF_DST_PINNED;
env.loop = 0;
env.loop_break = SCHED_NR_MIGRATE_BREAK;
/*
* Go back to "more_balance" rather than "redo" since we
* need to continue with same src_cpu.
*/
goto more_balance;
}
/*
* We failed to reach balance because of affinity.
*/
if (sd_parent) {
int *group_imbalance = &sd_parent->groups->sgc->imbalance;
if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
*group_imbalance = 1;
}
/* All tasks on this runqueue were pinned by CPU affinity */
if (unlikely(env.flags & LBF_ALL_PINNED)) {
__cpumask_clear_cpu(cpu_of(busiest), cpus);
/*
* Attempting to continue load balancing at the current
* sched_domain level only makes sense if there are
* active CPUs remaining as possible busiest CPUs to
* pull load from which are not contained within the
* destination group that is receiving any migrated
* load.
*/
if (!cpumask_subset(cpus, env.dst_grpmask)) {
env.loop = 0;
env.loop_break = SCHED_NR_MIGRATE_BREAK;
goto redo;
}
goto out_all_pinned;
}
}
if (!ld_moved) {
schedstat_inc(sd->lb_failed[idle]);
/*
* Increment the failure counter only on periodic balance.
* We do not want newidle balance, which can be very
* frequent, to pollute the failure counter, causing
* excessive cache_hot migrations and active balances.
*/
if (idle != CPU_NEWLY_IDLE)
sd->nr_balance_failed++;
if (need_active_balance(&env)) {
unsigned long flags;
raw_spin_rq_lock_irqsave(busiest, flags);
/*
* Don't kick the active_load_balance_cpu_stop,
* if the curr task on busiest CPU can't be
* moved to this_cpu:
*/
if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
raw_spin_rq_unlock_irqrestore(busiest, flags);
goto out_one_pinned;
}
/* Record that we found at least one task that could run on this_cpu */
env.flags &= ~LBF_ALL_PINNED;
/*
* ->active_balance synchronizes accesses to
* ->active_balance_work. Once set, it's cleared
* only after active load balance is finished.
*/
if (!busiest->active_balance) {
busiest->active_balance = 1;
busiest->push_cpu = this_cpu;
active_balance = 1;
}
raw_spin_rq_unlock_irqrestore(busiest, flags);
if (active_balance) {
stop_one_cpu_nowait(cpu_of(busiest),
active_load_balance_cpu_stop, busiest,
&busiest->active_balance_work);
}
}
} else {
sd->nr_balance_failed = 0;
}
if (likely(!active_balance) || need_active_balance(&env)) {
/* We were unbalanced, so reset the balancing interval */
sd->balance_interval = sd->min_interval;
}
goto out;
out_balanced:
/*
* We reach balance although we may have faced some affinity
* constraints. Clear the imbalance flag only if other tasks got
* a chance to move and fix the imbalance.
*/
if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
int *group_imbalance = &sd_parent->groups->sgc->imbalance;
if (*group_imbalance)
*group_imbalance = 0;
}
out_all_pinned:
/*
* We reached balance because all tasks are pinned at this level, so
* we can't migrate them. Leave the imbalance flag set so the parent
* level can try to migrate them.
*/
schedstat_inc(sd->lb_balanced[idle]);
sd->nr_balance_failed = 0;
out_one_pinned:
ld_moved = 0;
/*
* newidle_balance() disregards balance intervals, so we could
* repeatedly reach this code, which would lead to balance_interval
* skyrocketing in a short amount of time. Skip the balance_interval
* increase logic to avoid that.
*/
if (env.idle == CPU_NEWLY_IDLE)
goto out;
/* tune up the balancing interval */
if ((env.flags & LBF_ALL_PINNED &&
sd->balance_interval < MAX_PINNED_INTERVAL) ||
sd->balance_interval < sd->max_interval)
sd->balance_interval *= 2;
out:
return ld_moved;
}
static inline unsigned long
get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
{
unsigned long interval = sd->balance_interval;
if (cpu_busy)
interval *= sd->busy_factor;
/* scale ms to jiffies */
interval = msecs_to_jiffies(interval);
/*
* Reduce likelihood of busy balancing at higher domains racing with
* balancing at lower domains by preventing their balancing periods
* from being multiples of each other.
*/
if (cpu_busy)
interval -= 1;
interval = clamp(interval, 1UL, max_load_balance_interval);
return interval;
}
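/*
* Worked example (illustrative values, not from any particular config):
* with sd->balance_interval = 8 ms and sd->busy_factor = 16, a busy CPU
* gets 8 * 16 = 128 ms, converted to jiffies, minus one jiffy so busy
* periods don't become exact multiples of the idle ones, then clamped
* to [1, max_load_balance_interval].
*/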
static inline void
update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
{
unsigned long interval, next;
/* used by idle balance, so cpu_busy = 0 */
interval = get_sd_balance_interval(sd, 0);
next = sd->last_balance + interval;
if (time_after(*next_balance, next))
*next_balance = next;
}
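/*
* Note that *next_balance only ever moves earlier here: e.g. with
* sd->last_balance = 1000 jiffies and an idle interval of 16, next is
* 1016, and the caller's next_balance is pulled back to 1016 only if
* it currently lies after that point.
*/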
/*
* active_load_balance_cpu_stop is run by the CPU stopper. It pushes
* running tasks off the busiest CPU onto idle CPUs. It requires at
* least 1 task to be running on each physical CPU where possible, and
* avoids physical / logical imbalances.
*/
static int active_load_balance_cpu_stop(void *data)
{
struct rq *busiest_rq = data;
int busiest_cpu = cpu_of(busiest_rq);
int target_cpu = busiest_rq->push_cpu;
struct rq *target_rq = cpu_rq(target_cpu);
struct sched_domain *sd;
struct task_struct *p = NULL;
struct rq_flags rf;
rq_lock_irq(busiest_rq, &rf);
/*
* Between queueing the stop-work and running it there is a window in
* which CPUs can become inactive. We should not move tasks from or to
* inactive CPUs.
*/
if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
goto out_unlock;
/* Make sure the requested CPU hasn't gone down in the meantime: */
if (unlikely(busiest_cpu != smp_processor_id() ||
!busiest_rq->active_balance))
goto out_unlock;
/* Is there any task to move? */
if (busiest_rq->nr_running <= 1)
goto out_unlock;
/*
* This condition is "impossible"; if it occurs,
* we need to fix it. Originally reported by
* Bjorn Helgaas on a 128-CPU setup.
*/
WARN_ON_ONCE(busiest_rq == target_rq);
/* Search for an sd spanning us and the target CPU. */
rcu_read_lock();
for_each_domain(target_cpu, sd) {
if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
break;
}
if (likely(sd)) {
struct lb_env env = {
.sd = sd,
.dst_cpu = target_cpu,
.dst_rq = target_rq,
.src_cpu = busiest_rq->cpu,
.src_rq = busiest_rq,
.idle = CPU_IDLE,
.flags = LBF_ACTIVE_LB,
};
schedstat_inc(sd->alb_count);
update_rq_clock(busiest_rq);
p = detach_one_task(&env);
if (p) {
schedstat_inc(sd->alb_pushed);
/* Active balancing done, reset the failure counter. */
sd->nr_balance_failed = 0;
} else {
schedstat_inc(sd->alb_failed);
}
}
rcu_read_unlock();
out_unlock:
busiest_rq->active_balance = 0;
rq_unlock(busiest_rq, &rf);
if (p)
attach_one_task(target_rq, p);
local_irq_enable();
return 0;
}
static DEFINE_SPINLOCK(balancing);
/*
* Scale the max load_balance interval with the number of CPUs in the system.
* This trades load-balance latency on larger machines for less cross talk.
*/
void update_max_interval(void)
{
max_load_balance_interval = HZ*num_online_cpus()/10;
}
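/*
* Worked example (assuming HZ = 1000): with 64 CPUs online this yields
* 1000 * 64 / 10 = 6400 jiffies, i.e. balance intervals may stretch up
* to ~6.4 seconds on such a machine.
*/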
static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
{
if (cost > sd->max_newidle_lb_cost) {
/*
* Track max cost of a domain to make sure to not delay the
* next wakeup on the CPU.
*/
sd->max_newidle_lb_cost = cost;
sd->last_decay_max_lb_cost = jiffies;
} else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) {
/*
* Decay the newidle max times by ~1% per second to ensure that
* it is not outdated and the current max cost is actually
* shorter.
*/
sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256;
sd->last_decay_max_lb_cost = jiffies;
return true;
}
return false;
}
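/*
* The 253/256 factor above decays the tracked max by ~1.2% per elapsed
* second; at that rate the recorded cost roughly halves in about a
* minute unless a newer, larger sample refreshes it first.
*/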
/*
* It checks each scheduling domain to see if it is due to be balanced,
* and initiates a balancing operation if so.
*
* Balancing parameters are set up in init_sched_domains.
*/
static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
{
int continue_balancing = 1;
int cpu = rq->cpu;
int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
unsigned long interval;
struct sched_domain *sd;
/* Earliest time when we have to do rebalance again */
unsigned long next_balance = jiffies + 60*HZ;
int update_next_balance = 0;
int need_serialize, need_decay = 0;
u64 max_cost = 0;
rcu_read_lock();
for_each_domain(cpu, sd) {
/*
* Decay the newidle max times here because this is a regular
* visit to all the domains.
*/
need_decay = update_newidle_cost(sd, 0);
max_cost += sd->max_newidle_lb_cost;
/*
* Stop the load balance at this level. There is another
* CPU in our sched group which is doing load balancing more
* actively.
*/
if (!continue_balancing) {
if (need_decay)
continue;
break;
}
interval = get_sd_balance_interval(sd, busy);
need_serialize = sd->flags & SD_SERIALIZE;
if (need_serialize) {
if (!spin_trylock(&balancing))
goto out;
}
if (time_after_eq(jiffies, sd->last_balance + interval)) {
if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
/*
* The LBF_DST_PINNED logic could have changed
* env->dst_cpu, so we can't know our idle
* state even if we migrated tasks. Update it.
*/
idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
}
sd->last_balance = jiffies;
interval = get_sd_balance_interval(sd, busy);
}
if (need_serialize)
spin_unlock(&balancing);
out:
if (time_after(next_balance, sd->last_balance + interval)) {
next_balance = sd->last_balance + interval;
update_next_balance = 1;
}
}
if (need_decay) {
/*
* Ensure the rq-wide value also decays but keep it at a
* reasonable floor to avoid funnies with rq->avg_idle.
*/
rq->max_idle_balance_cost =
max((u64)sysctl_sched_migration_cost, max_cost);
}
rcu_read_unlock();
/*
* next_balance will be updated only when there is a need.
* When the CPU is attached to the NULL domain, for example, it will
* not be updated.
*/
if (likely(update_next_balance))
rq->next_balance = next_balance;
}
static inline int on_null_domain(struct rq *rq)
{
return unlikely(!rcu_dereference_sched(rq->sd));
}
#ifdef CONFIG_NO_HZ_COMMON
/*
* Idle load balancing details:
* - When one of the busy CPUs notices that idle rebalancing may be
* needed, it kicks the idle load balancer, which then does idle
* load balancing for all the idle CPUs.
* - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED is
* not set anywhere yet.
*/
static inline int find_new_ilb(void)
{
int ilb;
const struct cpumask *hk_mask;
hk_mask = housekeeping_cpumask(HK_TYPE_MISC);
for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) {
if (ilb == smp_processor_id())
continue;
if (idle_cpu(ilb))
return ilb;
}
return nr_cpu_ids;
}
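/*
* nr_cpu_ids acts as a "no ILB found" sentinel here: kick_ilb() below
* bails out when find_new_ilb() returns a value >= nr_cpu_ids, i.e.
* when no idle housekeeping CPU is available.
*/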
/*
* Kick a CPU to do the nohz balancing, if it is time for it. We pick any
* idle CPU in the HK_TYPE_MISC housekeeping set (if there is one).
*/
static void kick_ilb(unsigned int flags)
{
int ilb_cpu;
/*
* Advance nohz.next_balance only if a full ilb is triggered, but
* not if we only update stats.
*/
if (flags & NOHZ_BALANCE_KICK)
nohz.next_balance = jiffies+1;
ilb_cpu = find_new_ilb();
if (ilb_cpu >= nr_cpu_ids)
return;
/*
* Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
* the first flag owns it; cleared by nohz_csd_func().
*/
flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
if (flags & NOHZ_KICK_MASK)
return;
/*
* This way we generate an IPI on the target CPU which
* is idle. And the softirq performing nohz idle load balance
* will be run before returning from the IPI.
*/
smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
}
/*
* Current decision point for kicking the idle load balancer in the presence
* of idle CPUs in the system.
*/
static void nohz_balancer_kick(struct rq *rq)
{
unsigned long now = jiffies;
struct sched_domain_shared *sds;
struct sched_domain *sd;
int nr_busy, i, cpu = rq->cpu;
unsigned int flags = 0;
if (unlikely(rq->idle_balance))
return;
/*
* We may have recently been in ticked or tickless idle mode. At the
* first busy tick after returning from idle, we update the busy stats.
*/
nohz_balance_exit_idle(rq);
/*
* None are in tickless mode and hence no need for NOHZ idle load
* balancing.
*/
if (likely(!atomic_read(&nohz.nr_cpus)))
return;
if (READ_ONCE(nohz.has_blocked) &&
time_after(now, READ_ONCE(nohz.next_blocked)))
flags = NOHZ_STATS_KICK;
if (time_before(now, nohz.next_balance))
goto out;
if (rq->nr_running >= 2) {
flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
goto out;
}
rcu_read_lock();
sd = rcu_dereference(rq->sd);
if (sd) {
/*
* If there's a CFS task and the current CPU has reduced
* capacity; kick the ILB to see if there's a better CPU to run
* on.
*/
if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
goto unlock;
}
}
sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
if (sd) {
/*
* When ASYM_PACKING; see if there's a more preferred CPU
* currently idle; in which case, kick the ILB to move tasks
* around.
*
* When balancing between cores, all the SMT siblings of the
* preferred CPU must be idle.
*/
for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
if (sched_use_asym_prio(sd, i) &&
sched_asym_prefer(i, cpu)) {
flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
goto unlock;
}
}
}
sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
if (sd) {
/*
* When ASYM_CPUCAPACITY; see if there's a higher capacity CPU
* to run the misfit task on.
*/
if (check_misfit_status(rq, sd)) {
flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
goto unlock;
}
/*
* For asymmetric systems, we do not want to nicely balance
* cache use, instead we want to embrace asymmetry and only
* ensure tasks have enough CPU capacity.
*
* Skip the LLC logic because it's not relevant in that case.
*/
goto unlock;
}
sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
if (sds) {
/*
* If there is an imbalance between LLC domains (IOW we could
* increase the overall cache use), we need some less-loaded LLC
* domain to pull some load. Likewise, we may need to spread
* load within the current LLC domain (e.g. packed SMT cores but
* other CPUs are idle). We can't really know from here how busy
* the others are - so just get a nohz balance going if it looks
* like this LLC domain has tasks we could move.
*/
nr_busy = atomic_read(&sds->nr_busy_cpus);
if (nr_busy > 1) {
flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
goto unlock;
}
}
unlock:
rcu_read_unlock();
out:
if (READ_ONCE(nohz.needs_update))
flags |= NOHZ_NEXT_KICK;
if (flags)
kick_ilb(flags);
}
static void set_cpu_sd_state_busy(int cpu)
{
struct sched_domain *sd;
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_llc, cpu));
if (!sd || !sd->nohz_idle)
goto unlock;
sd->nohz_idle = 0;
atomic_inc(&sd->shared->nr_busy_cpus);
unlock:
rcu_read_unlock();
}
void nohz_balance_exit_idle(struct rq *rq)
{
SCHED_WARN_ON(rq != this_rq());
if (likely(!rq->nohz_tick_stopped))
return;
rq->nohz_tick_stopped = 0;
cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
atomic_dec(&nohz.nr_cpus);
set_cpu_sd_state_busy(rq->cpu);
}
static void set_cpu_sd_state_idle(int cpu)
{
struct sched_domain *sd;
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_llc, cpu));
if (!sd || sd->nohz_idle)
goto unlock;
sd->nohz_idle = 1;
atomic_dec(&sd->shared->nr_busy_cpus);
unlock:
rcu_read_unlock();
}
/*
* This routine will record that the CPU is going idle with tick stopped.
* This info will be used in performing idle load balancing in the future.
*/
void nohz_balance_enter_idle(int cpu)
{
struct rq *rq = cpu_rq(cpu);
SCHED_WARN_ON(cpu != smp_processor_id());
/* If this CPU is going down, then nothing needs to be done: */
if (!cpu_active(cpu))
return;
/* Spare idle load balancing on CPUs that don't want to be disturbed: */
if (!housekeeping_cpu(cpu, HK_TYPE_SCHED))
return;
/*
* Can be set safely without rq->lock held.
* If a clear happens, it will have evaluated the last additions,
* because rq->lock is held during both the check and the clear.
*/
rq->has_blocked_load = 1;
/*
* The tick is still stopped, but load could have been added in the
* meantime. We set the nohz.has_blocked flag to trigger a check of
* the *_avg. The CPU is already part of nohz.idle_cpus_mask, so the
* clear of nohz.has_blocked can only happen after checking the new
* load.
*/
if (rq->nohz_tick_stopped)
goto out;
/* If we're a completely isolated CPU, we don't play: */
if (on_null_domain(rq))
return;
rq->nohz_tick_stopped = 1;
cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
atomic_inc(&nohz.nr_cpus);
/*
* Ensures that if nohz_idle_balance() fails to observe our
* @idle_cpus_mask store, it must observe the @has_blocked
* and @needs_update stores.
*/
smp_mb__after_atomic();
set_cpu_sd_state_idle(cpu);
WRITE_ONCE(nohz.needs_update, 1);
out:
/*
* Each time a CPU enters idle, we assume that it has blocked load and
* enable the periodic update of the load of idle CPUs.
*/
WRITE_ONCE(nohz.has_blocked, 1);
}
static bool update_nohz_stats(struct rq *rq)
{
unsigned int cpu = rq->cpu;
if (!rq->has_blocked_load)
return false;
if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
return false;
if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
return true;
update_blocked_averages(cpu);
return rq->has_blocked_load;
}
/*
* Internal function that runs load balance for all idle CPUs. The load
* balance can be a simple update of the blocked load or a complete load
* balance with task movement, depending on the flags.
*/
static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
{
/* Earliest time when we have to do rebalance again */
unsigned long now = jiffies;
unsigned long next_balance = now + 60*HZ;
bool has_blocked_load = false;
int update_next_balance = 0;
int this_cpu = this_rq->cpu;
int balance_cpu;
struct rq *rq;
SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
/*
* We assume there will be no idle load after this update and clear
* the has_blocked flag. If a CPU enters idle in the meantime, it will
* set the has_blocked flag and trigger another update of the idle load.
* Because a CPU that becomes idle is added to idle_cpus_mask before
* setting the flag, we are sure not to clear the state while missing
* the load of a newly idle CPU.
*
* Same applies to idle_cpus_mask vs needs_update.
*/
if (flags & NOHZ_STATS_KICK)
WRITE_ONCE(nohz.has_blocked, 0);
if (flags & NOHZ_NEXT_KICK)
WRITE_ONCE(nohz.needs_update, 0);
/*
* Ensures that if we miss the CPU, we must see the has_blocked
* store from nohz_balance_enter_idle().
*/
smp_mb();
/*
* Start with the CPU after this_cpu so that we end with this_cpu,
* giving other idle CPUs a chance to pull load first.
*/
for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) {
if (!idle_cpu(balance_cpu))
continue;
/*
* If this CPU gets work to do, stop the load balancing
* work being done for other CPUs. Next load
* balancing owner will pick it up.
*/
if (need_resched()) {
if (flags & NOHZ_STATS_KICK)
has_blocked_load = true;
if (flags & NOHZ_NEXT_KICK)
WRITE_ONCE(nohz.needs_update, 1);
goto abort;
}
rq = cpu_rq(balance_cpu);
if (flags & NOHZ_STATS_KICK)
has_blocked_load |= update_nohz_stats(rq);
/*
* If time for next balance is due,
* do the balance.
*/
if (time_after_eq(jiffies, rq->next_balance)) {
struct rq_flags rf;
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
rq_unlock_irqrestore(rq, &rf);
if (flags & NOHZ_BALANCE_KICK)
rebalance_domains(rq, CPU_IDLE);
}
if (time_after(next_balance, rq->next_balance)) {
next_balance = rq->next_balance;
update_next_balance = 1;
}
}
/*
* next_balance will be updated only when there is a need.
* When the CPU is attached to the NULL domain, for example, it will
* not be updated.
*/
if (likely(update_next_balance))
nohz.next_balance = next_balance;
if (flags & NOHZ_STATS_KICK)
WRITE_ONCE(nohz.next_blocked,
now + msecs_to_jiffies(LOAD_AVG_PERIOD));
abort:
/* There is still blocked load, enable periodic update */
if (has_blocked_load)
WRITE_ONCE(nohz.has_blocked, 1);
}
/*
* In the CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
* rebalancing for all the CPUs whose scheduler ticks are stopped.
*/
static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
{
unsigned int flags = this_rq->nohz_idle_balance;
if (!flags)
return false;
this_rq->nohz_idle_balance = 0;
if (idle != CPU_IDLE)
return false;
_nohz_idle_balance(this_rq, flags);
return true;
}
/*
* Check if we need to run the ILB for updating blocked load before entering
* idle state.
*/
void nohz_run_idle_balance(int cpu)
{
unsigned int flags;
flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));
/*
* Update the blocked load only if no SCHED_SOFTIRQ is about to happen
* (i.e. NOHZ_STATS_KICK is not also set), as that softirq would do the
* same update anyway.
*/
if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
_nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK);
}
static void nohz_newidle_balance(struct rq *this_rq)
{
int this_cpu = this_rq->cpu;
/*
* This CPU doesn't want to be disturbed by scheduler
* housekeeping
*/
if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED))
return;
/* Will wake up very soon. No time for doing anything else. */
if (this_rq->avg_idle < sysctl_sched_migration_cost)
return;
/* Don't need to update blocked load of idle CPUs */
if (!READ_ONCE(nohz.has_blocked) ||
time_before(jiffies, READ_ONCE(nohz.next_blocked)))
return;
/*
* Set the need to trigger ILB in order to update blocked load
* before entering idle state.
*/
atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
}
#else /* !CONFIG_NO_HZ_COMMON */
static inline void nohz_balancer_kick(struct rq *rq) { }
static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
{
return false;
}
static inline void nohz_newidle_balance(struct rq *this_rq) { }
#endif /* CONFIG_NO_HZ_COMMON */
/*
* newidle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs.
*
* Returns:
* < 0 - we released the lock and there are !fair tasks present
* 0 - failed, no new tasks
* > 0 - success, new (fair) tasks present
*/
static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
{
unsigned long next_balance = jiffies + HZ;
int this_cpu = this_rq->cpu;
u64 t0, t1, curr_cost = 0;
struct sched_domain *sd;
int pulled_task = 0;
update_misfit_status(NULL, this_rq);
/*
* There is a task waiting to run. No need to search for one.
* Return 0; the task will be enqueued when switching to idle.
*/
if (this_rq->ttwu_pending)
return 0;
/*
* We must set idle_stamp _before_ calling idle_balance(), such that we
* measure the duration of idle_balance() as idle time.
*/
this_rq->idle_stamp = rq_clock(this_rq);
/*
* Do not pull tasks towards !active CPUs...
*/
if (!cpu_active(this_cpu))
return 0;
/*
* This is OK, because current is on_cpu, which avoids it being picked
* for load balancing; preemption/IRQs are still disabled, avoiding
* further scheduler activity on it, and we're being very careful to
* re-start the picking loop.
*/
rq_unpin_lock(this_rq, rf);
rcu_read_lock();
sd = rcu_dereference_check_sched_domain(this_rq->sd);
if (!READ_ONCE(this_rq->rd->overload) ||
(sd && this_rq->avg_idle < sd->max_newidle_lb_cost)) {
if (sd)
update_next_balance(sd, &next_balance);
rcu_read_unlock();
goto out;
}
rcu_read_unlock();
raw_spin_rq_unlock(this_rq);
t0 = sched_clock_cpu(this_cpu);
update_blocked_averages(this_cpu);
rcu_read_lock();
for_each_domain(this_cpu, sd) {
int continue_balancing = 1;
u64 domain_cost;
update_next_balance(sd, &next_balance);
if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
break;
if (sd->flags & SD_BALANCE_NEWIDLE) {
pulled_task = load_balance(this_cpu, this_rq,
sd, CPU_NEWLY_IDLE,
&continue_balancing);
t1 = sched_clock_cpu(this_cpu);
domain_cost = t1 - t0;
update_newidle_cost(sd, domain_cost);
curr_cost += domain_cost;
t0 = t1;
}
/*
* Stop searching for tasks to pull if there are
* now runnable tasks on this rq.
*/
if (pulled_task || this_rq->nr_running > 0 ||
this_rq->ttwu_pending)
break;
}
rcu_read_unlock();
raw_spin_rq_lock(this_rq);
if (curr_cost > this_rq->max_idle_balance_cost)
this_rq->max_idle_balance_cost = curr_cost;
/*
* While browsing the domains we released the rq lock, so a task could
* have been enqueued in the meantime. Since we're not going idle,
* pretend we pulled a task.
*/
if (this_rq->cfs.h_nr_running && !pulled_task)
pulled_task = 1;
/* Is there a task of a high priority class? */
if (this_rq->nr_running != this_rq->cfs.h_nr_running)
pulled_task = -1;
out:
/* Move the next balance forward */
if (time_after(this_rq->next_balance, next_balance))
this_rq->next_balance = next_balance;
if (pulled_task)
this_rq->idle_stamp = 0;
else
nohz_newidle_balance(this_rq);
rq_repin_lock(this_rq, rf);
return pulled_task;
}
/*
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
*/
static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
{
struct rq *this_rq = this_rq();
enum cpu_idle_type idle = this_rq->idle_balance ?
CPU_IDLE : CPU_NOT_IDLE;
/*
* If this CPU has a pending nohz_balance_kick, then do the
* balancing on behalf of the other idle CPUs whose ticks are
* stopped. Do nohz_idle_balance *before* rebalance_domains to
* give the idle CPUs a chance to load balance. Else we may
* load balance only within the local sched_domain hierarchy
* and abort nohz_idle_balance altogether if we pull some load.
*/
if (nohz_idle_balance(this_rq, idle))
return;
/* normal load balance */
update_blocked_averages(this_rq->cpu);
rebalance_domains(this_rq, idle);
}
/*
* Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
*/
void trigger_load_balance(struct rq *rq)
{
/*
* No need to rebalance while attached to the NULL domain or while
* the runqueue's CPU is not active.
*/
if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
return;
if (time_after_eq(jiffies, rq->next_balance))
raise_softirq(SCHED_SOFTIRQ);
nohz_balancer_kick(rq);
}
static void rq_online_fair(struct rq *rq)
{
update_sysctl();
update_runtime_enabled(rq);
}
static void rq_offline_fair(struct rq *rq)
{
update_sysctl();
/* Ensure any throttled groups are reachable by pick_next_task */
unthrottle_offline_cfs_rqs(rq);
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_CORE
static inline bool
__entity_slice_used(struct sched_entity *se, int min_nr_tasks)
{
u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
u64 slice = se->slice;
return (rtime * min_nr_tasks > slice);
}
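/*
* The multiplication above is just a division-free way of writing
* rtime > slice / min_nr_tasks: with min_nr_tasks = 2 the check fires
* once the entity has consumed more than half of its slice.
*/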
#define MIN_NR_TASKS_DURING_FORCEIDLE 2
static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
{
if (!sched_core_enabled(rq))
return;
/*
* If the runqueue has only one task, which has used up its slice,
* and the sibling is forced idle, then trigger a reschedule to
* give the forced-idle task a chance.
*
* sched_slice() considers only this active rq and it gets the
* whole slice. But during force idle, we have siblings acting
* like a single runqueue and hence we need to consider runnable
* tasks on this CPU and the forced idle CPU. Ideally, we should
* go through the forced idle rq, but that would be a perf hit.
* We can assume that the forced idle CPU has at least
* MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
* if we need to give up the CPU.
*/
if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
__entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
resched_curr(rq);
}
/*
* se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
*/
static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
bool forceidle)
{
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
if (forceidle) {
if (cfs_rq->forceidle_seq == fi_seq)
break;
cfs_rq->forceidle_seq = fi_seq;
}
cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
}
}
void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
{
struct sched_entity *se = &p->se;
if (p->sched_class != &fair_sched_class)
return;
se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
}
bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
bool in_fi)
{
struct rq *rq = task_rq(a);
const struct sched_entity *sea = &a->se;
const struct sched_entity *seb = &b->se;
struct cfs_rq *cfs_rqa;
struct cfs_rq *cfs_rqb;
s64 delta;
SCHED_WARN_ON(task_rq(b)->core != rq->core);
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* Find an se in the hierarchy for tasks a and b, such that the se's
* are immediate siblings.
*/
while (sea->cfs_rq->tg != seb->cfs_rq->tg) {
int sea_depth = sea->depth;
int seb_depth = seb->depth;
if (sea_depth >= seb_depth)
sea = parent_entity(sea);
if (sea_depth <= seb_depth)
seb = parent_entity(seb);
}
se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);
cfs_rqa = sea->cfs_rq;
cfs_rqb = seb->cfs_rq;
#else
cfs_rqa = &task_rq(a)->cfs;
cfs_rqb = &task_rq(b)->cfs;
#endif
/*
* Find delta after normalizing se's vruntime with its cfs_rq's
* min_vruntime_fi, which would have been updated in prior calls
* to se_fi_update().
*/
delta = (s64)(sea->vruntime - seb->vruntime) +
(s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi);
return delta > 0;
}
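/*
* Illustrative numbers: with normalized vruntimes of 100 for a and 90
* for b (equal min_vruntime_fi baselines), delta = 10 > 0, so a has
* received more virtual runtime than b and loses the priority
* comparison here.
*/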
static int task_is_throttled_fair(struct task_struct *p, int cpu)
{
struct cfs_rq *cfs_rq;
#ifdef CONFIG_FAIR_GROUP_SCHED
cfs_rq = task_group(p)->cfs_rq[cpu];
#else
cfs_rq = &cpu_rq(cpu)->cfs;
#endif
return throttled_hierarchy(cfs_rq);
}
#else
static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
#endif
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &curr->se;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
entity_tick(cfs_rq, se, queued);
}
if (static_branch_unlikely(&sched_numa_balancing))
task_tick_numa(rq, curr);
update_misfit_status(curr, rq);
update_overutilized_status(task_rq(curr));
task_tick_core(rq, curr);
}
/*
* called on fork with the child task as argument from the parent's context
* - child not yet on the tasklist
* - preemption disabled
*/
static void task_fork_fair(struct task_struct *p)
{
struct sched_entity *se = &p->se, *curr;
struct cfs_rq *cfs_rq;
struct rq *rq = this_rq();
struct rq_flags rf;
rq_lock(rq, &rf);
update_rq_clock(rq);
cfs_rq = task_cfs_rq(current);
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
place_entity(cfs_rq, se, ENQUEUE_INITIAL);
rq_unlock(rq, &rf);
}
/*
* Priority of the task has changed. Check to see if we preempt
* the current task.
*/
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
if (!task_on_rq_queued(p))
return;
if (rq->cfs.nr_running == 1)
return;
/*
* Reschedule if we are currently running on this runqueue and
* our priority decreased, or if we are not currently running on
* this runqueue and our priority is higher than the current's
*/
if (task_current(rq, p)) {
if (p->prio > oldprio)
resched_curr(rq);
} else
check_preempt_curr(rq, p, 0);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* Propagate the changes of the sched_entity across the tg tree to make it
* visible to the root
*/
static void propagate_entity_cfs_rq(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
if (cfs_rq_throttled(cfs_rq))
return;
if (!throttled_hierarchy(cfs_rq))
list_add_leaf_cfs_rq(cfs_rq);
/* Start to propagate at parent */
se = se->parent;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
update_load_avg(cfs_rq, se, UPDATE_TG);
if (cfs_rq_throttled(cfs_rq))
break;
if (!throttled_hierarchy(cfs_rq))
list_add_leaf_cfs_rq(cfs_rq);
}
}
#else
static void propagate_entity_cfs_rq(struct sched_entity *se) { }
#endif
static void detach_entity_cfs_rq(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
#ifdef CONFIG_SMP
/*
* In case the task sched_avg hasn't been attached:
* - A forked task which hasn't been woken up by wake_up_new_task().
* - A task which has been woken up by try_to_wake_up() but is
* waiting for actually being woken up by sched_ttwu_pending().
*/
if (!se->avg.last_update_time)
return;
#endif
/* Catch up with the cfs_rq and remove our load when we leave */
update_load_avg(cfs_rq, se, 0);
detach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq);
propagate_entity_cfs_rq(se);
}
static void attach_entity_cfs_rq(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
/* Synchronize entity with its cfs_rq */
update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq);
propagate_entity_cfs_rq(se);
}
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
detach_entity_cfs_rq(se);
}
static void attach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
attach_entity_cfs_rq(se);
}
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
detach_task_cfs_rq(p);
}
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
attach_task_cfs_rq(p);
if (task_on_rq_queued(p)) {
/*
* We were most likely switched from sched_rt, so
* kick off the schedule if running, otherwise just see
* if we can still preempt the current task.
*/
if (task_current(rq, p))
resched_curr(rq);
else
check_preempt_curr(rq, p, 0);
}
}
/* Account for a task changing its policy or group.
*
* This routine is mostly called to set cfs_rq->curr field when a task
* migrates between groups/classes.
*/
static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
{
struct sched_entity *se = &p->se;
#ifdef CONFIG_SMP
if (task_on_rq_queued(p)) {
/*
* Move the next running task to the front of the list, so our
* cfs_tasks list becomes an MRU one.
*/
list_move(&se->group_node, &rq->cfs_tasks);
}
#endif
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
set_next_entity(cfs_rq, se);
/* ensure bandwidth has been allocated on our new cfs_rq */
account_cfs_rq_runtime(cfs_rq, 0);
}
}
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
#ifdef CONFIG_SMP
raw_spin_lock_init(&cfs_rq->removed.lock);
#endif
}
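/*
* Note: min_vruntime starts at -(1LL << 20) cast to u64, i.e. just
* below the wrap-around point, presumably so that vruntime overflow is
* exercised early and signed-comparison bugs surface quickly rather
* than only after long uptimes.
*/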
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_change_group_fair(struct task_struct *p)
{
/*
* We can't detach or attach a forked task that hasn't
* yet been woken up by wake_up_new_task().
*/
if (READ_ONCE(p->__state) == TASK_NEW)
return;
detach_task_cfs_rq(p);
#ifdef CONFIG_SMP
/* Mark that the se's cfs_rq has changed -- migrated */
p->se.avg.last_update_time = 0;
#endif
set_task_rq(p, task_cpu(p));
attach_task_cfs_rq(p);
}
void free_fair_sched_group(struct task_group *tg)
{
int i;
for_each_possible_cpu(i) {
if (tg->cfs_rq)
kfree(tg->cfs_rq[i]);
if (tg->se)
kfree(tg->se[i]);
}
kfree(tg->cfs_rq);
kfree(tg->se);
}
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
struct sched_entity *se;
struct cfs_rq *cfs_rq;
int i;
tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
if (!tg->cfs_rq)
goto err;
tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
if (!tg->se)
goto err;
tg->shares = NICE_0_LOAD;
init_cfs_bandwidth(tg_cfs_bandwidth(tg), tg_cfs_bandwidth(parent));
for_each_possible_cpu(i) {
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)
goto err;
se = kzalloc_node(sizeof(struct sched_entity_stats),
GFP_KERNEL, cpu_to_node(i));
if (!se)
goto err_free_rq;
init_cfs_rq(cfs_rq);
init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
init_entity_runnable_average(se);
}
return 1;
err_free_rq:
kfree(cfs_rq);
err:
return 0;
}
void online_fair_sched_group(struct task_group *tg)
{
struct sched_entity *se;
struct rq_flags rf;
struct rq *rq;
int i;
for_each_possible_cpu(i) {
rq = cpu_rq(i);
se = tg->se[i];
rq_lock_irq(rq, &rf);
update_rq_clock(rq);
attach_entity_cfs_rq(se);
sync_throttle(tg, i);
rq_unlock_irq(rq, &rf);
}
}
void unregister_fair_sched_group(struct task_group *tg)
{
unsigned long flags;
struct rq *rq;
int cpu;
destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
for_each_possible_cpu(cpu) {
if (tg->se[cpu])
remove_entity_load_avg(tg->se[cpu]);
/*
* Only empty task groups can be destroyed, so we can speculatively
* check on_list without danger of it being re-added.
*/
if (!tg->cfs_rq[cpu]->on_list)
continue;
rq = cpu_rq(cpu);
raw_spin_rq_lock_irqsave(rq, flags);
list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
raw_spin_rq_unlock_irqrestore(rq, flags);
}
}
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu,
struct sched_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
cfs_rq->tg = tg;
cfs_rq->rq = rq;
init_cfs_rq_runtime(cfs_rq);
tg->cfs_rq[cpu] = cfs_rq;
tg->se[cpu] = se;
/* se could be NULL for root_task_group */
if (!se)
return;
if (!parent) {
se->cfs_rq = &rq->cfs;
se->depth = 0;
} else {
se->cfs_rq = parent->my_q;
se->depth = parent->depth + 1;
}
se->my_q = cfs_rq;
/* guarantee group entities always have weight */
update_load_set(&se->load, NICE_0_LOAD);
se->parent = parent;
}
static DEFINE_MUTEX(shares_mutex);
static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
int i;
lockdep_assert_held(&shares_mutex);
/*
* We can't change the weight of the root cgroup.
*/
if (!tg->se[0])
return -EINVAL;
shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
if (tg->shares == shares)
return 0;
tg->shares = shares;
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
struct sched_entity *se = tg->se[i];
struct rq_flags rf;
/* Propagate contribution to hierarchy */
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
for_each_sched_entity(se) {
update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
update_cfs_group(se);
}
rq_unlock_irqrestore(rq, &rf);
}
return 0;
}
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
int ret;
mutex_lock(&shares_mutex);
if (tg_is_idle(tg))
ret = -EINVAL;
else
ret = __sched_group_set_shares(tg, shares);
mutex_unlock(&shares_mutex);
return ret;
}
int sched_group_set_idle(struct task_group *tg, long idle)
{
int i;
if (tg == &root_task_group)
return -EINVAL;
if (idle < 0 || idle > 1)
return -EINVAL;
mutex_lock(&shares_mutex);
if (tg->idle == idle) {
mutex_unlock(&shares_mutex);
return 0;
}
tg->idle = idle;
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
struct sched_entity *se = tg->se[i];
struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
long idle_task_delta;
struct rq_flags rf;
rq_lock_irqsave(rq, &rf);
grp_cfs_rq->idle = idle;
if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
goto next_cpu;
if (se->on_rq) {
parent_cfs_rq = cfs_rq_of(se);
if (cfs_rq_is_idle(grp_cfs_rq))
parent_cfs_rq->idle_nr_running++;
else
parent_cfs_rq->idle_nr_running--;
}
idle_task_delta = grp_cfs_rq->h_nr_running -
grp_cfs_rq->idle_h_nr_running;
if (!cfs_rq_is_idle(grp_cfs_rq))
idle_task_delta *= -1;
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
if (!se->on_rq)
break;
cfs_rq->idle_h_nr_running += idle_task_delta;
/* Already accounted at parent level and above. */
if (cfs_rq_is_idle(cfs_rq))
break;
}
next_cpu:
rq_unlock_irqrestore(rq, &rf);
}
/* Idle groups have minimum weight. */
if (tg_is_idle(tg))
__sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO));
else
__sched_group_set_shares(tg, NICE_0_LOAD);
mutex_unlock(&shares_mutex);
return 0;
}
#else /* CONFIG_FAIR_GROUP_SCHED */
void free_fair_sched_group(struct task_group *tg) { }
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
void online_fair_sched_group(struct task_group *tg) { }
void unregister_fair_sched_group(struct task_group *tg) { }
#endif /* CONFIG_FAIR_GROUP_SCHED */
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
struct sched_entity *se = &task->se;
unsigned int rr_interval = 0;
/*
* Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
* idle runqueue:
*/
if (rq->cfs.load.weight)
rr_interval = NS_TO_JIFFIES(se->slice);
return rr_interval;
}
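/*
* Worked example (assuming HZ = 1000): a 3 ms slice in se->slice maps
* to NS_TO_JIFFIES(3000000) = 3 jiffies, which is what ends up being
* reported to user space via sched_rr_get_interval() for a
* SCHED_OTHER task.
*/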
/*
* All the scheduling class methods:
*/
DEFINE_SCHED_CLASS(fair) = {
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
.yield_to_task = yield_to_task_fair,
.check_preempt_curr = check_preempt_wakeup,
.pick_next_task = __pick_next_task_fair,
.put_prev_task = put_prev_task_fair,
.set_next_task = set_next_task_fair,
#ifdef CONFIG_SMP
.balance = balance_fair,
.pick_task = pick_task_fair,
.select_task_rq = select_task_rq_fair,
.migrate_task_rq = migrate_task_rq_fair,
.rq_online = rq_online_fair,
.rq_offline = rq_offline_fair,
.task_dead = task_dead_fair,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
.task_tick = task_tick_fair,
.task_fork = task_fork_fair,
.prio_changed = prio_changed_fair,
.switched_from = switched_from_fair,
.switched_to = switched_to_fair,
.get_rr_interval = get_rr_interval_fair,
.update_curr = update_curr_fair,
#ifdef CONFIG_FAIR_GROUP_SCHED
.task_change_group = task_change_group_fair,
#endif
#ifdef CONFIG_SCHED_CORE
.task_is_throttled = task_is_throttled_fair,
#endif
#ifdef CONFIG_UCLAMP_TASK
.uclamp_enabled = 1,
#endif
};
#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
struct cfs_rq *cfs_rq, *pos;
rcu_read_lock();
for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
print_cfs_rq(m, cpu, cfs_rq);
rcu_read_unlock();
}
#ifdef CONFIG_NUMA_BALANCING
void show_numa_stats(struct task_struct *p, struct seq_file *m)
{
int node;
unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
struct numa_group *ng;
rcu_read_lock();
ng = rcu_dereference(p->numa_group);
for_each_online_node(node) {
if (p->numa_faults) {
tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
}
if (ng) {
gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
}
print_numa_stats(m, node, tsf, tpf, gsf, gpf);
}
rcu_read_unlock();
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
int i;
for_each_possible_cpu(i) {
zalloc_cpumask_var_node(&per_cpu(load_balance_mask, i), GFP_KERNEL, cpu_to_node(i));
zalloc_cpumask_var_node(&per_cpu(select_rq_mask, i), GFP_KERNEL, cpu_to_node(i));
zalloc_cpumask_var_node(&per_cpu(should_we_balance_tmpmask, i),
GFP_KERNEL, cpu_to_node(i));
#ifdef CONFIG_CFS_BANDWIDTH
INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i));
INIT_LIST_HEAD(&cpu_rq(i)->cfsb_csd_list);
#endif
}
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
#ifdef CONFIG_NO_HZ_COMMON
nohz.next_balance = jiffies;
nohz.next_blocked = jiffies;
zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
#endif
#endif /* SMP */
}
| linux-master | kernel/sched/fair.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic entry points for the idle threads and
* implementation of the idle task scheduling class.
*
* (NOTE: these are not related to SCHED_IDLE batch-scheduled
* tasks, which are handled in sched/fair.c)
*/
/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];
/**
* sched_idle_set_state - Record idle state for the current CPU.
* @idle_state: State to record.
*/
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
idle_set_state(this_rq(), idle_state);
}
static int __read_mostly cpu_idle_force_poll;
void cpu_idle_poll_ctrl(bool enable)
{
if (enable) {
cpu_idle_force_poll++;
} else {
cpu_idle_force_poll--;
WARN_ON_ONCE(cpu_idle_force_poll < 0);
}
}
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
cpu_idle_force_poll = 1;
return 1;
}
__setup("nohlt", cpu_idle_poll_setup);
static int __init cpu_idle_nopoll_setup(char *__unused)
{
cpu_idle_force_poll = 0;
return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
static noinline int __cpuidle cpu_idle_poll(void)
{
instrumentation_begin();
trace_cpu_idle(0, smp_processor_id());
stop_critical_timings();
ct_cpuidle_enter();
raw_local_irq_enable();
while (!tif_need_resched() &&
(cpu_idle_force_poll || tick_check_broadcast_expired()))
cpu_relax();
raw_local_irq_disable();
ct_cpuidle_exit();
start_critical_timings();
trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
local_irq_enable();
instrumentation_end();
return 1;
}
/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak __noreturn arch_cpu_idle_dead(void) { while (1); }
void __weak arch_cpu_idle(void)
{
cpu_idle_force_poll = 1;
}
/**
* default_idle_call - Default CPU idle routine.
*
* To use when the cpuidle framework cannot be used.
*/
void __cpuidle default_idle_call(void)
{
instrumentation_begin();
if (!current_clr_polling_and_test()) {
trace_cpu_idle(1, smp_processor_id());
stop_critical_timings();
ct_cpuidle_enter();
arch_cpu_idle();
ct_cpuidle_exit();
start_critical_timings();
trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
}
local_irq_enable();
instrumentation_end();
}
static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
if (current_clr_polling_and_test())
return -EBUSY;
return cpuidle_enter_s2idle(drv, dev);
}
static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int next_state)
{
/*
* The idle task must be scheduled; it is pointless to go idle, so just
* record zero idle residency and return.
*/
if (current_clr_polling_and_test()) {
dev->last_residency_ns = 0;
local_irq_enable();
return -EBUSY;
}
/*
* Enter the idle state previously returned by the governor decision.
* This function will block until an interrupt occurs and will take
* care of re-enabling the local interrupts.
*/
return cpuidle_enter(drv, dev, next_state);
}
/**
* cpuidle_idle_call - the main idle function
*
* NOTE: no locks or semaphores should be used here
*
* On architectures that support TIF_POLLING_NRFLAG, this function is
* called with polling set, and it returns with polling set. If it ever
* stops polling, it
* must clear the polling bit.
*/
static void cpuidle_idle_call(void)
{
struct cpuidle_device *dev = cpuidle_get_device();
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int next_state, entered_state;
/*
* Check if the idle task must be rescheduled. If so, exit the
* function after re-enabling local IRQs.
*/
if (need_resched()) {
local_irq_enable();
return;
}
/*
* The RCU framework needs to be told that we are entering an idle
* section, so no more RCU read-side critical sections, and one more
* step towards the grace period.
*/
if (cpuidle_not_available(drv, dev)) {
tick_nohz_idle_stop_tick();
default_idle_call();
goto exit_idle;
}
/*
* Suspend-to-idle ("s2idle") is a system state in which all user space
* has been frozen, all I/O devices have been suspended and the only
* activity happens here and in interrupts (if any). In that case bypass
* the cpuidle governor and go straight for the deepest idle state
* available. Possibly also suspend the local tick and the entire
* timekeeping to prevent timer interrupts from kicking us out of idle
* until a proper wakeup interrupt happens.
*/
if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
u64 max_latency_ns;
if (idle_should_enter_s2idle()) {
entered_state = call_cpuidle_s2idle(drv, dev);
if (entered_state > 0)
goto exit_idle;
max_latency_ns = U64_MAX;
} else {
max_latency_ns = dev->forced_idle_latency_limit_ns;
}
tick_nohz_idle_stop_tick();
next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
call_cpuidle(drv, dev, next_state);
} else {
bool stop_tick = true;
/*
* Ask the cpuidle framework to choose a convenient idle state.
*/
next_state = cpuidle_select(drv, dev, &stop_tick);
if (stop_tick || tick_nohz_tick_stopped())
tick_nohz_idle_stop_tick();
else
tick_nohz_idle_retain_tick();
entered_state = call_cpuidle(drv, dev, next_state);
/*
* Give the governor an opportunity to reflect on the outcome
*/
cpuidle_reflect(dev, entered_state);
}
exit_idle:
__current_set_polling();
/*
* It is up to the idle functions to re-enable local interrupts.
*/
if (WARN_ON_ONCE(irqs_disabled()))
local_irq_enable();
}
/*
* Generic idle loop implementation
*
* Called with polling cleared.
*/
static void do_idle(void)
{
int cpu = smp_processor_id();
/*
* Check if we need to update blocked load
*/
nohz_run_idle_balance(cpu);
/*
* If the arch has a polling bit, we maintain an invariant:
*
* Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
* rq->idle). This means that, if rq->idle has the polling bit set,
* then setting need_resched is guaranteed to cause the CPU to
* reschedule.
*/
__current_set_polling();
tick_nohz_idle_enter();
while (!need_resched()) {
rmb();
local_irq_disable();
if (cpu_is_offline(cpu)) {
tick_nohz_idle_stop_tick();
cpuhp_report_idle_dead();
arch_cpu_idle_dead();
}
arch_cpu_idle_enter();
rcu_nocb_flush_deferred_wakeup();
/*
* In poll mode we re-enable interrupts and spin. Also, if we
* detected in the wakeup-from-idle path that the tick
* broadcast device expired for us, we don't want to go deep
* idle as we know that the IPI is going to arrive right away.
*/
if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
tick_nohz_idle_restart_tick();
cpu_idle_poll();
} else {
cpuidle_idle_call();
}
arch_cpu_idle_exit();
}
/*
* Since we fell out of the loop above, we know TIF_NEED_RESCHED must
* be set, propagate it into PREEMPT_NEED_RESCHED.
*
* This is required because for polling idle loops we will not have had
* an IPI to fold the state for us.
*/
preempt_set_need_resched();
tick_nohz_idle_exit();
__current_clr_polling();
/*
* We promise to call sched_ttwu_pending() and reschedule if
* need_resched() is set while polling is set. That means that clearing
* polling needs to be visible before doing these things.
*/
smp_mb__after_atomic();
/*
* RCU relies on this call to be done outside of an RCU read-side
* critical section.
*/
flush_smp_call_function_queue();
schedule_idle();
if (unlikely(klp_patch_pending(current)))
klp_update_patch_state(current);
}
bool cpu_in_idle(unsigned long pc)
{
return pc >= (unsigned long)__cpuidle_text_start &&
pc < (unsigned long)__cpuidle_text_end;
}
struct idle_timer {
struct hrtimer timer;
int done;
};
static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
struct idle_timer *it = container_of(timer, struct idle_timer, timer);
WRITE_ONCE(it->done, 1);
set_tsk_need_resched(current);
return HRTIMER_NORESTART;
}
void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
struct idle_timer it;
/*
* Only FIFO tasks can disable the tick since they don't need the forced
* preemption.
*/
WARN_ON_ONCE(current->policy != SCHED_FIFO);
WARN_ON_ONCE(current->nr_cpus_allowed != 1);
WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
WARN_ON_ONCE(!duration_ns);
WARN_ON_ONCE(current->mm);
rcu_sleep_check();
preempt_disable();
current->flags |= PF_IDLE;
cpuidle_use_deepest_state(latency_ns);
it.done = 0;
hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
it.timer.function = idle_inject_timer_fn;
hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
HRTIMER_MODE_REL_PINNED_HARD);
while (!READ_ONCE(it.done))
do_idle();
cpuidle_use_deepest_state(0);
current->flags &= ~PF_IDLE;
preempt_fold_need_resched();
preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);
void cpu_startup_entry(enum cpuhp_state state)
{
current->flags |= PF_IDLE;
arch_cpu_idle_prepare();
cpuhp_online_idle(state);
while (1)
do_idle();
}
/*
* idle-task scheduling class.
*/
#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
return task_cpu(p); /* IDLE tasks are never migrated */
}
static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
return WARN_ON_ONCE(1);
}
#endif
/*
* Idle tasks are unconditionally rescheduled:
*/
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
resched_curr(rq);
}
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
update_idle_core(rq);
schedstat_inc(rq->sched_goidle);
}
#ifdef CONFIG_SMP
static struct task_struct *pick_task_idle(struct rq *rq)
{
return rq->idle;
}
#endif
struct task_struct *pick_next_task_idle(struct rq *rq)
{
struct task_struct *next = rq->idle;
set_next_task_idle(rq, next, true);
return next;
}
/*
* It is not legal to sleep in the idle task - print a warning
* message if some code attempts to do it:
*/
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
raw_spin_rq_unlock_irq(rq);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
dump_stack();
raw_spin_rq_lock_irq(rq);
}
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
BUG();
}
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
BUG();
}
static void update_curr_idle(struct rq *rq)
{
}
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
DEFINE_SCHED_CLASS(idle) = {
/* no enqueue/yield_task for idle tasks */
/* dequeue is not valid, we print a debug message there: */
.dequeue_task = dequeue_task_idle,
.check_preempt_curr = check_preempt_curr_idle,
.pick_next_task = pick_next_task_idle,
.put_prev_task = put_prev_task_idle,
.set_next_task = set_next_task_idle,
#ifdef CONFIG_SMP
.balance = balance_idle,
.pick_task = pick_task_idle,
.select_task_rq = select_task_rq_idle,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
.task_tick = task_tick_idle,
.prio_changed = prio_changed_idle,
.switched_to = switched_to_idle,
.update_curr = update_curr_idle,
};
| linux-master | kernel/sched/idle.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* These are various utility functions of the scheduler,
* built in a single compilation unit for build efficiency reasons.
*
* ( Incidentally, the size of the compilation unit is roughly
* comparable to core.c, fair.c, smp.c and policy.c, the other
* big compilation units. This helps balance build time, while
* coalescing source files to amortize header inclusion
* cost. )
*/
#include <linux/sched/clock.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/nohz.h>
#include <linux/sched/mm.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/task_stack.h>
#include <linux/cpufreq.h>
#include <linux/cpumask_api.h>
#include <linux/cpuset.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/energy_model.h>
#include <linux/hashtable_api.h>
#include <linux/irq.h>
#include <linux/kobject_api.h>
#include <linux/membarrier.h>
#include <linux/mempolicy.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/proc_fs.h>
#include <linux/psi.h>
#include <linux/ptrace_api.h>
#include <linux/sched_clock.h>
#include <linux/security.h>
#include <linux/spinlock_api.h>
#include <linux/swait_api.h>
#include <linux/timex.h>
#include <linux/utsname.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>
#include <uapi/linux/prctl.h>
#include <uapi/linux/sched/types.h>
#include <asm/switch_to.h>
#include "sched.h"
#include "sched-pelt.h"
#include "stats.h"
#include "autogroup.h"
#include "clock.c"
#ifdef CONFIG_CGROUP_CPUACCT
# include "cpuacct.c"
#endif
#ifdef CONFIG_CPU_FREQ
# include "cpufreq.c"
#endif
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
# include "cpufreq_schedutil.c"
#endif
#ifdef CONFIG_SCHED_DEBUG
# include "debug.c"
#endif
#ifdef CONFIG_SCHEDSTATS
# include "stats.c"
#endif
#include "loadavg.c"
#include "completion.c"
#include "swait.c"
#include "wait_bit.c"
#include "wait.c"
#ifdef CONFIG_SMP
# include "cpupri.c"
# include "stop_task.c"
# include "topology.c"
#endif
#ifdef CONFIG_SCHED_CORE
# include "core_sched.c"
#endif
#ifdef CONFIG_PSI
# include "psi.c"
#endif
#ifdef CONFIG_MEMBARRIER
# include "membarrier.c"
#endif
#ifdef CONFIG_CPU_ISOLATION
# include "isolation.c"
#endif
#ifdef CONFIG_SCHED_AUTOGROUP
# include "autogroup.c"
#endif
| linux-master | kernel/sched/build_utility.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CPUFreq governor based on scheduler-provided CPU utilization data.
*
* Copyright (C) 2016, Intel Corporation
* Author: Rafael J. Wysocki <[email protected]>
*/
#define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int rate_limit_us;
};
struct sugov_policy {
struct cpufreq_policy *policy;
struct sugov_tunables *tunables;
struct list_head tunables_hook;
raw_spinlock_t update_lock;
u64 last_freq_update_time;
s64 freq_update_delay_ns;
unsigned int next_freq;
unsigned int cached_raw_freq;
/* The next fields are only needed if fast switch cannot be used: */
struct irq_work irq_work;
struct kthread_work work;
struct mutex work_lock;
struct kthread_worker worker;
struct task_struct *thread;
bool work_in_progress;
bool limits_changed;
bool need_freq_update;
};
struct sugov_cpu {
struct update_util_data update_util;
struct sugov_policy *sg_policy;
unsigned int cpu;
bool iowait_boost_pending;
unsigned int iowait_boost;
u64 last_update;
unsigned long util;
unsigned long bw_dl;
/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
unsigned long saved_idle_calls;
#endif
};
static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
/************************ Governor internals ***********************/
static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
s64 delta_ns;
/*
* Since cpufreq_update_util() is called with rq->lock held for
* the @target_cpu, our per-CPU data is fully serialized.
*
* However, drivers cannot in general deal with cross-CPU
* requests, so while get_next_freq() will work, our
* sugov_update_commit() call may not for the fast switching platforms.
*
* Hence stop here for remote requests if they aren't supported
* by the hardware, as calculating the frequency is pointless if
* we cannot in fact act on it.
*
* This is needed on the slow switching platforms too to prevent CPUs
* going offline from leaving stale IRQ work items behind.
*/
if (!cpufreq_this_cpu_can_update(sg_policy->policy))
return false;
if (unlikely(sg_policy->limits_changed)) {
sg_policy->limits_changed = false;
sg_policy->need_freq_update = true;
return true;
}
delta_ns = time - sg_policy->last_freq_update_time;
return delta_ns >= sg_policy->freq_update_delay_ns;
}
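/*
* Illustrative rate limiting: with rate_limit_us = 2000, the policy's
* freq_update_delay_ns is expected to be 2,000,000, so utilization
* updates arriving less than 2 ms after the previous frequency update
* are ignored unless the limits changed in the meantime.
*/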
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
unsigned int next_freq)
{
if (sg_policy->need_freq_update)
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
else if (sg_policy->next_freq == next_freq)
return false;
sg_policy->next_freq = next_freq;
sg_policy->last_freq_update_time = time;
return true;
}
static void sugov_deferred_update(struct sugov_policy *sg_policy)
{
if (!sg_policy->work_in_progress) {
sg_policy->work_in_progress = true;
irq_work_queue(&sg_policy->irq_work);
}
}
/**
* get_next_freq - Compute a new frequency for a given cpufreq policy.
* @sg_policy: schedutil policy object to compute the new frequency for.
* @util: Current CPU utilization.
* @max: CPU capacity.
*
* If the utilization is frequency-invariant, choose the new frequency to be
* proportional to it, that is
*
* next_freq = C * max_freq * util / max
*
* Otherwise, approximate the would-be frequency-invariant utilization by
* util_raw * (curr_freq / max_freq) which leads to
*
* next_freq = C * curr_freq * util_raw / max
*
* Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
*
* The lowest driver-supported frequency which is equal or greater than the raw
* next_freq (as calculated above) is returned, subject to policy min/max and
* cpufreq driver limitations.
*/
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
unsigned long util, unsigned long max)
{
struct cpufreq_policy *policy = sg_policy->policy;
unsigned int freq = arch_scale_freq_invariant() ?
policy->cpuinfo.max_freq : policy->cur;
util = map_util_perf(util);
freq = map_util_freq(util, freq, max);
if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
return sg_policy->next_freq;
sg_policy->cached_raw_freq = freq;
return cpufreq_driver_resolve_freq(policy, freq);
}
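/*
 * A worked example of the mapping above (numbers illustrative only):
 * on a frequency-invariant platform with max_freq = 2000000 kHz and
 * max = 1024, a CPU at util = 512 yields
 *
 *	next_freq = 1.25 * 2000000 * 512 / 1024 = 1250000 kHz
 *
 * where the 1.25 margin comes from map_util_perf() and the linear
 * scaling from map_util_freq(); cpufreq_driver_resolve_freq() then
 * rounds this up to the nearest frequency the driver supports.
 */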
static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
unsigned long util = cpu_util_cfs_boost(sg_cpu->cpu);
struct rq *rq = cpu_rq(sg_cpu->cpu);
sg_cpu->bw_dl = cpu_bw_dl(rq);
sg_cpu->util = effective_cpu_util(sg_cpu->cpu, util,
FREQUENCY_UTIL, NULL);
}
/**
* sugov_iowait_reset() - Reset the IO boost status of a CPU.
* @sg_cpu: the sugov data for the CPU to boost
* @time: the update time from the caller
* @set_iowait_boost: true if an IO boost has been requested
*
* The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
* we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
* efficiency by ignoring sporadic wakeups from IO.
*/
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
bool set_iowait_boost)
{
s64 delta_ns = time - sg_cpu->last_update;
/* Reset boost only if a tick has elapsed since last request */
if (delta_ns <= TICK_NSEC)
return false;
sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
sg_cpu->iowait_boost_pending = set_iowait_boost;
return true;
}
/**
* sugov_iowait_boost() - Updates the IO boost status of a CPU.
* @sg_cpu: the sugov data for the CPU to boost
* @time: the update time from the caller
* @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
*
* Each time a task wakes up after an IO operation, the CPU utilization can be
* boosted to a certain utilization which doubles at each "frequent and
* successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
* of the maximum OPP.
*
* To keep doubling, an IO boost has to be requested at least once per tick,
* otherwise we restart from the utilization of the minimum OPP.
*/
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
unsigned int flags)
{
bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;
/* Reset boost if the CPU appears to have been idle enough */
if (sg_cpu->iowait_boost &&
sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
return;
/* Boost only tasks waking up after IO */
if (!set_iowait_boost)
return;
/* Ensure boost doubles only one time at each request */
if (sg_cpu->iowait_boost_pending)
return;
sg_cpu->iowait_boost_pending = true;
/* Double the boost at each request */
if (sg_cpu->iowait_boost) {
sg_cpu->iowait_boost =
min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
return;
}
/* First wakeup after IO: start with minimum boost */
sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
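/*
 * With the default SCHED_CAPACITY_SCALE of 1024 (so IOWAIT_BOOST_MIN is
 * 128), frequent IO wakeups walk the boost through 128 -> 256 -> 512 ->
 * 1024, saturating at the full capacity scale; missing a tick resets
 * the walk via sugov_iowait_reset(). Numbers shown for illustration.
 */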
/**
* sugov_iowait_apply() - Apply the IO boost to a CPU.
* @sg_cpu: the sugov data for the cpu to boost
* @time: the update time from the caller
* @max_cap: the max CPU capacity
*
 * A CPU running a task that has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick has its
 * IO boost utilization reset as well.
 *
 * This mechanism is designed to boost tasks that wait on IO frequently,
 * while being more conservative on tasks that do only sporadic IO
 * operations.
*/
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
unsigned long max_cap)
{
unsigned long boost;
/* No boost currently required */
if (!sg_cpu->iowait_boost)
return;
/* Reset boost if the CPU appears to have been idle enough */
if (sugov_iowait_reset(sg_cpu, time, false))
return;
if (!sg_cpu->iowait_boost_pending) {
/*
* No boost pending; reduce the boost value.
*/
sg_cpu->iowait_boost >>= 1;
if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
sg_cpu->iowait_boost = 0;
return;
}
}
sg_cpu->iowait_boost_pending = false;
/*
* sg_cpu->util is already in capacity scale; convert iowait_boost
* into the same scale so we can compare.
*/
boost = (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
if (sg_cpu->util < boost)
sg_cpu->util = boost;
}
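/*
 * As an illustration of the scale conversion above: iowait_boost = 512
 * on a CPU with max_cap = 446 contributes (512 * 446) >> 10 = 223 in
 * that CPU's capacity scale, which only raises sg_cpu->util if the CPU
 * was otherwise less utilized than that (values illustrative).
 */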
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
bool ret = idle_calls == sg_cpu->saved_idle_calls;
sg_cpu->saved_idle_calls = idle_calls;
return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
/*
* Make sugov_should_update_freq() ignore the rate limit when DL
* has increased the utilization.
*/
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
sg_cpu->sg_policy->limits_changed = true;
}
static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
u64 time, unsigned long max_cap,
unsigned int flags)
{
sugov_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
ignore_dl_rate_limit(sg_cpu);
if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
return false;
sugov_get_util(sg_cpu);
sugov_iowait_apply(sg_cpu, time, max_cap);
return true;
}
static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
unsigned int flags)
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned int cached_freq = sg_policy->cached_raw_freq;
unsigned long max_cap;
unsigned int next_f;
max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
return;
next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);
/*
* Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then.
*
* Except when the rq is capped by uclamp_max.
*/
if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
next_f = sg_policy->next_freq;
/* Restore cached freq as next_freq has changed */
sg_policy->cached_raw_freq = cached_freq;
}
if (!sugov_update_next_freq(sg_policy, time, next_f))
return;
/*
* This code runs under rq->lock for the target CPU, so it won't run
* concurrently on two different CPUs for the same target and it is not
* necessary to acquire the lock in the fast switch case.
*/
if (sg_policy->policy->fast_switch_enabled) {
cpufreq_driver_fast_switch(sg_policy->policy, next_f);
} else {
raw_spin_lock(&sg_policy->update_lock);
sugov_deferred_update(sg_policy);
raw_spin_unlock(&sg_policy->update_lock);
}
}
static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
unsigned int flags)
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
unsigned long prev_util = sg_cpu->util;
unsigned long max_cap;
/*
* Fall back to the "frequency" path if frequency invariance is not
* supported, because the direct mapping between the utilization and
* the performance levels depends on the frequency invariance.
*/
if (!arch_scale_freq_invariant()) {
sugov_update_single_freq(hook, time, flags);
return;
}
max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
return;
/*
* Do not reduce the target performance level if the CPU has not been
* idle recently, as the reduction is likely to be premature then.
*
* Except when the rq is capped by uclamp_max.
*/
if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
sg_cpu->util = prev_util;
cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
map_util_perf(sg_cpu->util), max_cap);
sg_cpu->sg_policy->last_freq_update_time = time;
}
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
unsigned long util = 0, max_cap;
unsigned int j;
max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
for_each_cpu(j, policy->cpus) {
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
sugov_get_util(j_sg_cpu);
sugov_iowait_apply(j_sg_cpu, time, max_cap);
util = max(j_sg_cpu->util, util);
}
return get_next_freq(sg_policy, util, max_cap);
}
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned int next_f;
raw_spin_lock(&sg_policy->update_lock);
sugov_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
ignore_dl_rate_limit(sg_cpu);
if (sugov_should_update_freq(sg_policy, time)) {
next_f = sugov_next_freq_shared(sg_cpu, time);
if (!sugov_update_next_freq(sg_policy, time, next_f))
goto unlock;
if (sg_policy->policy->fast_switch_enabled)
cpufreq_driver_fast_switch(sg_policy->policy, next_f);
else
sugov_deferred_update(sg_policy);
}
unlock:
raw_spin_unlock(&sg_policy->update_lock);
}
static void sugov_work(struct kthread_work *work)
{
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
unsigned int freq;
unsigned long flags;
/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * here; without the lock we could miss queueing the new update.
	 *
	 * Note: If a work item was queued after the update_lock was released,
	 * sugov_work() will just be called again by the kthread_work code; the
	 * request will be processed before the sugov thread sleeps.
*/
raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
freq = sg_policy->next_freq;
sg_policy->work_in_progress = false;
raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
mutex_lock(&sg_policy->work_lock);
__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
mutex_unlock(&sg_policy->work_lock);
}
static void sugov_irq_work(struct irq_work *irq_work)
{
struct sugov_policy *sg_policy;
sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
/************************** sysfs interface ************************/
static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);
static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
return container_of(attr_set, struct sugov_tunables, attr_set);
}
static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
return sprintf(buf, "%u\n", tunables->rate_limit_us);
}
static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
struct sugov_policy *sg_policy;
unsigned int rate_limit_us;
if (kstrtouint(buf, 10, &rate_limit_us))
return -EINVAL;
tunables->rate_limit_us = rate_limit_us;
list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
return count;
}
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
static struct attribute *sugov_attrs[] = {
&rate_limit_us.attr,
NULL
};
ATTRIBUTE_GROUPS(sugov);
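/*
 * The tunable defined above is exposed through the cpufreq sysfs tree,
 * e.g. (path shown for illustration; it depends on whether tunables are
 * per-policy or global):
 *
 *	# cat /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *	2000
 *	# echo 5000 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *
 * The store handler above converts the new value to nanoseconds and
 * propagates it to freq_update_delay_ns of every attached policy.
 */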
static void sugov_tunables_free(struct kobject *kobj)
{
struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
kfree(to_sugov_tunables(attr_set));
}
static const struct kobj_type sugov_tunables_ktype = {
.default_groups = sugov_groups,
.sysfs_ops = &governor_sysfs_ops,
.release = &sugov_tunables_free,
};
/********************** cpufreq governor interface *********************/
struct cpufreq_governor schedutil_gov;
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy;
sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
if (!sg_policy)
return NULL;
sg_policy->policy = policy;
raw_spin_lock_init(&sg_policy->update_lock);
return sg_policy;
}
static void sugov_policy_free(struct sugov_policy *sg_policy)
{
kfree(sg_policy);
}
static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
struct task_struct *thread;
struct sched_attr attr = {
.size = sizeof(struct sched_attr),
.sched_policy = SCHED_DEADLINE,
.sched_flags = SCHED_FLAG_SUGOV,
.sched_nice = 0,
.sched_priority = 0,
/*
* Fake (unused) bandwidth; workaround to "fix"
* priority inheritance.
*/
.sched_runtime = 1000000,
.sched_deadline = 10000000,
.sched_period = 10000000,
};
struct cpufreq_policy *policy = sg_policy->policy;
int ret;
/* kthread only required for slow path */
if (policy->fast_switch_enabled)
return 0;
kthread_init_work(&sg_policy->work, sugov_work);
kthread_init_worker(&sg_policy->worker);
thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
"sugov:%d",
cpumask_first(policy->related_cpus));
if (IS_ERR(thread)) {
pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
return PTR_ERR(thread);
}
ret = sched_setattr_nocheck(thread, &attr);
if (ret) {
kthread_stop(thread);
pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
return ret;
}
sg_policy->thread = thread;
kthread_bind_mask(thread, policy->related_cpus);
init_irq_work(&sg_policy->irq_work, sugov_irq_work);
mutex_init(&sg_policy->work_lock);
wake_up_process(thread);
return 0;
}
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
/* kthread only required for slow path */
if (sg_policy->policy->fast_switch_enabled)
return;
kthread_flush_worker(&sg_policy->worker);
kthread_stop(sg_policy->thread);
mutex_destroy(&sg_policy->work_lock);
}
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
struct sugov_tunables *tunables;
tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
if (tunables) {
gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
if (!have_governor_per_policy())
global_tunables = tunables;
}
return tunables;
}
static void sugov_clear_global_tunables(void)
{
if (!have_governor_per_policy())
global_tunables = NULL;
}
static int sugov_init(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy;
struct sugov_tunables *tunables;
int ret = 0;
/* State should be equivalent to EXIT */
if (policy->governor_data)
return -EBUSY;
cpufreq_enable_fast_switch(policy);
sg_policy = sugov_policy_alloc(policy);
if (!sg_policy) {
ret = -ENOMEM;
goto disable_fast_switch;
}
ret = sugov_kthread_create(sg_policy);
if (ret)
goto free_sg_policy;
mutex_lock(&global_tunables_lock);
if (global_tunables) {
if (WARN_ON(have_governor_per_policy())) {
ret = -EINVAL;
goto stop_kthread;
}
policy->governor_data = sg_policy;
sg_policy->tunables = global_tunables;
gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
goto out;
}
tunables = sugov_tunables_alloc(sg_policy);
if (!tunables) {
ret = -ENOMEM;
goto stop_kthread;
}
tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);
policy->governor_data = sg_policy;
sg_policy->tunables = tunables;
ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
get_governor_parent_kobj(policy), "%s",
schedutil_gov.name);
if (ret)
goto fail;
out:
mutex_unlock(&global_tunables_lock);
return 0;
fail:
kobject_put(&tunables->attr_set.kobj);
policy->governor_data = NULL;
sugov_clear_global_tunables();
stop_kthread:
sugov_kthread_stop(sg_policy);
mutex_unlock(&global_tunables_lock);
free_sg_policy:
sugov_policy_free(sg_policy);
disable_fast_switch:
cpufreq_disable_fast_switch(policy);
pr_err("initialization failed (error %d)\n", ret);
return ret;
}
static void sugov_exit(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy = policy->governor_data;
struct sugov_tunables *tunables = sg_policy->tunables;
unsigned int count;
mutex_lock(&global_tunables_lock);
count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
policy->governor_data = NULL;
if (!count)
sugov_clear_global_tunables();
mutex_unlock(&global_tunables_lock);
sugov_kthread_stop(sg_policy);
sugov_policy_free(sg_policy);
cpufreq_disable_fast_switch(policy);
}
static int sugov_start(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy = policy->governor_data;
void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
unsigned int cpu;
sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
sg_policy->last_freq_update_time = 0;
sg_policy->next_freq = 0;
sg_policy->work_in_progress = false;
sg_policy->limits_changed = false;
sg_policy->cached_raw_freq = 0;
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->cpu = cpu;
sg_cpu->sg_policy = sg_policy;
}
if (policy_is_shared(policy))
uu = sugov_update_shared;
else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
uu = sugov_update_single_perf;
else
uu = sugov_update_single_freq;
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
}
return 0;
}
static void sugov_stop(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy = policy->governor_data;
unsigned int cpu;
for_each_cpu(cpu, policy->cpus)
cpufreq_remove_update_util_hook(cpu);
synchronize_rcu();
if (!policy->fast_switch_enabled) {
irq_work_sync(&sg_policy->irq_work);
kthread_cancel_work_sync(&sg_policy->work);
}
}
static void sugov_limits(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy = policy->governor_data;
if (!policy->fast_switch_enabled) {
mutex_lock(&sg_policy->work_lock);
cpufreq_policy_apply_limits(policy);
mutex_unlock(&sg_policy->work_lock);
}
sg_policy->limits_changed = true;
}
struct cpufreq_governor schedutil_gov = {
.name = "schedutil",
.owner = THIS_MODULE,
.flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
.init = sugov_init,
.exit = sugov_exit,
.start = sugov_start,
.stop = sugov_stop,
.limits = sugov_limits,
};
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
return &schedutil_gov;
}
#endif
cpufreq_governor_init(schedutil_gov);
#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
/*
* EAS shouldn't be attempted without sugov, so rebuild the sched_domains
* on governor changes to make sure the scheduler knows about it.
*/
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
struct cpufreq_governor *old_gov)
{
if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
/*
* When called from the cpufreq_register_driver() path, the
* cpu_hotplug_lock is already held, so use a work item to
* avoid nested locking in rebuild_sched_domains().
*/
schedule_work(&rebuild_sd_work);
}
}
#endif
| linux-master | kernel/sched/cpufreq_schedutil.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Pressure stall information for CPU, memory and IO
*
* Copyright (c) 2018 Facebook, Inc.
* Author: Johannes Weiner <[email protected]>
*
* Polling support by Suren Baghdasaryan <[email protected]>
* Copyright (c) 2018 Google, Inc.
*
* When CPU, memory and IO are contended, tasks experience delays that
* reduce throughput and introduce latencies into the workload. Memory
* and IO contention, in addition, can cause a full loss of forward
* progress in which the CPU goes idle.
*
* This code aggregates individual task delays into resource pressure
* metrics that indicate problems with both workload health and
* resource utilization.
*
* Model
*
* The time in which a task can execute on a CPU is our baseline for
* productivity. Pressure expresses the amount of time in which this
* potential cannot be realized due to resource contention.
*
* This concept of productivity has two components: the workload and
* the CPU. To measure the impact of pressure on both, we define two
* contention states for a resource: SOME and FULL.
*
* In the SOME state of a given resource, one or more tasks are
* delayed on that resource. This affects the workload's ability to
* perform work, but the CPU may still be executing other tasks.
*
* In the FULL state of a given resource, all non-idle tasks are
* delayed on that resource such that nobody is advancing and the CPU
* goes idle. This leaves both workload and CPU unproductive.
*
* SOME = nr_delayed_tasks != 0
* FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
*
* What it means for a task to be productive is defined differently
* for each resource. For IO, productive means a running task. For
* memory, productive means a running task that isn't a reclaimer. For
* CPU, productive means an oncpu task.
*
* Naturally, the FULL state doesn't exist for the CPU resource at the
 * system level, but it does exist at the cgroup level. At the cgroup level,
* FULL means all non-idle tasks in the cgroup are delayed on the CPU
* resource which is being used by others outside of the cgroup or
* throttled by the cgroup cpu.max configuration.
*
* The percentage of wallclock time spent in those compound stall
* states gives pressure numbers between 0 and 100 for each resource,
* where the SOME percentage indicates workload slowdowns and the FULL
* percentage indicates reduced CPU utilization:
*
* %SOME = time(SOME) / period
* %FULL = time(FULL) / period
*
* Multiple CPUs
*
* The more tasks and available CPUs there are, the more work can be
* performed concurrently. This means that the potential that can go
* unrealized due to resource contention *also* scales with non-idle
* tasks and CPUs.
*
* Consider a scenario where 257 number crunching tasks are trying to
* run concurrently on 256 CPUs. If we simply aggregated the task
* states, we would have to conclude a CPU SOME pressure number of
* 100%, since *somebody* is waiting on a runqueue at all
* times. However, that is clearly not the amount of contention the
* workload is experiencing: only one out of 256 possible execution
* threads will be contended at any given time, or about 0.4%.
*
* Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
* given time *one* of the tasks is delayed due to a lack of memory.
* Again, looking purely at the task state would yield a memory FULL
* pressure number of 0%, since *somebody* is always making forward
* progress. But again this wouldn't capture the amount of execution
* potential lost, which is 1 out of 4 CPUs, or 25%.
*
* To calculate wasted potential (pressure) with multiple processors,
* we have to base our calculation on the number of non-idle tasks in
* conjunction with the number of available CPUs, which is the number
* of potential execution threads. SOME becomes then the proportion of
* delayed tasks to possible threads, and FULL is the share of possible
* threads that are unproductive due to delays:
*
* threads = min(nr_nonidle_tasks, nr_cpus)
* SOME = min(nr_delayed_tasks / threads, 1)
* FULL = (threads - min(nr_productive_tasks, threads)) / threads
*
* For the 257 number crunchers on 256 CPUs, this yields:
*
* threads = min(257, 256)
* SOME = min(1 / 256, 1) = 0.4%
* FULL = (256 - min(256, 256)) / 256 = 0%
*
* For the 1 out of 4 memory-delayed tasks, this yields:
*
* threads = min(4, 4)
* SOME = min(1 / 4, 1) = 25%
* FULL = (4 - min(3, 4)) / 4 = 25%
*
* [ Substitute nr_cpus with 1, and you can see that it's a natural
* extension of the single-CPU model. ]
*
* Implementation
*
* To assess the precise time spent in each such state, we would have
* to freeze the system on task changes and start/stop the state
* clocks accordingly. Obviously that doesn't scale in practice.
*
* Because the scheduler aims to distribute the compute load evenly
* among the available CPUs, we can track task state locally to each
* CPU and, at much lower frequency, extrapolate the global state for
* the cumulative stall times and the running averages.
*
* For each runqueue, we track:
*
* tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
* tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
* tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
*
* and then periodically aggregate:
*
* tNONIDLE = sum(tNONIDLE[i])
*
* tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
* tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
*
* %SOME = tSOME / period
* %FULL = tFULL / period
*
* This gives us an approximation of pressure that is practical
* cost-wise, yet way more sensitive and accurate than periodic
* sampling of the aggregate task states would be.
*/
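/*
 * A minimal sketch of the SOME computation above (plain C, illustrative
 * only, not part of the kernel): given instantaneous task counts on an
 * SMP machine,
 *
 *	static double psi_some_share(unsigned int nr_delayed,
 *				     unsigned int nr_nonidle,
 *				     unsigned int nr_cpus)
 *	{
 *		double threads = nr_nonidle < nr_cpus ? nr_nonidle : nr_cpus;
 *		double some;
 *
 *		if (threads == 0)
 *			return 0.0;
 *		some = nr_delayed / threads;
 *		return some < 1.0 ? some : 1.0;
 *	}
 *
 * For the 257-crunchers example: nr_delayed = 1 at any instant, so
 * threads = min(257, 256) = 256 and SOME = 1/256 ~= 0.4%.
 */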
static int psi_bug __read_mostly;
DEFINE_STATIC_KEY_FALSE(psi_disabled);
static DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);
/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
#define EXP_10s 1677 /* 1/exp(2s/10s) as fixed-point */
#define EXP_60s 1981 /* 1/exp(2s/60s) */
#define EXP_300s 2034 /* 1/exp(2s/300s) */
/* PSI trigger definitions */
#define WINDOW_MAX_US 10000000 /* Max window size is 10s */
#define UPDATES_PER_WINDOW 10 /* 10 updates per window */
/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;
/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
struct psi_group psi_system = {
.pcpu = &system_group_pcpu,
};
static void psi_avgs_work(struct work_struct *work);
static void poll_timer_fn(struct timer_list *t);
static void group_init(struct psi_group *group)
{
int cpu;
group->enabled = true;
for_each_possible_cpu(cpu)
seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
group->avg_last_update = sched_clock();
group->avg_next_update = group->avg_last_update + psi_period;
mutex_init(&group->avgs_lock);
/* Init avg trigger-related members */
INIT_LIST_HEAD(&group->avg_triggers);
memset(group->avg_nr_triggers, 0, sizeof(group->avg_nr_triggers));
INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
/* Init rtpoll trigger-related members */
atomic_set(&group->rtpoll_scheduled, 0);
mutex_init(&group->rtpoll_trigger_lock);
INIT_LIST_HEAD(&group->rtpoll_triggers);
group->rtpoll_min_period = U32_MAX;
group->rtpoll_next_update = ULLONG_MAX;
init_waitqueue_head(&group->rtpoll_wait);
timer_setup(&group->rtpoll_timer, poll_timer_fn, 0);
rcu_assign_pointer(group->rtpoll_task, NULL);
}
void __init psi_init(void)
{
if (!psi_enable) {
static_branch_enable(&psi_disabled);
static_branch_disable(&psi_cgroups_enabled);
return;
}
if (!cgroup_psi_enabled())
static_branch_disable(&psi_cgroups_enabled);
psi_period = jiffies_to_nsecs(PSI_FREQ);
group_init(&psi_system);
}
static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)
{
switch (state) {
case PSI_IO_SOME:
return unlikely(tasks[NR_IOWAIT]);
case PSI_IO_FULL:
return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
case PSI_MEM_SOME:
return unlikely(tasks[NR_MEMSTALL]);
case PSI_MEM_FULL:
return unlikely(tasks[NR_MEMSTALL] &&
tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
case PSI_CPU_SOME:
return unlikely(tasks[NR_RUNNING] > oncpu);
case PSI_CPU_FULL:
return unlikely(tasks[NR_RUNNING] && !oncpu);
case PSI_NONIDLE:
return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
tasks[NR_RUNNING];
default:
return false;
}
}
static void get_recent_times(struct psi_group *group, int cpu,
enum psi_aggregators aggregator, u32 *times,
u32 *pchanged_states)
{
struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
int current_cpu = raw_smp_processor_id();
unsigned int tasks[NR_PSI_TASK_COUNTS];
u64 now, state_start;
enum psi_states s;
unsigned int seq;
u32 state_mask;
*pchanged_states = 0;
/* Snapshot a coherent view of the CPU state */
do {
seq = read_seqcount_begin(&groupc->seq);
now = cpu_clock(cpu);
memcpy(times, groupc->times, sizeof(groupc->times));
state_mask = groupc->state_mask;
state_start = groupc->state_start;
if (cpu == current_cpu)
memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
} while (read_seqcount_retry(&groupc->seq, seq));
/* Calculate state time deltas against the previous snapshot */
for (s = 0; s < NR_PSI_STATES; s++) {
u32 delta;
/*
* In addition to already concluded states, we also
* incorporate currently active states on the CPU,
* since states may last for many sampling periods.
*
* This way we keep our delta sampling buckets small
* (u32) and our reported pressure close to what's
* actually happening.
*/
if (state_mask & (1 << s))
times[s] += now - state_start;
delta = times[s] - groupc->times_prev[aggregator][s];
groupc->times_prev[aggregator][s] = times[s];
times[s] = delta;
if (delta)
*pchanged_states |= (1 << s);
}
/*
	 * When collect_percpu_times() is called from the avgs_work, we don't
	 * want to re-arm avgs_work when all CPUs are IDLE. But the current CPU
	 * running this avgs_work is never IDLE, because avgs_work can't be
	 * shut off. So for the current CPU, we need to re-arm avgs_work only
	 * when (NR_RUNNING > 1 || NR_IOWAIT > 0 || NR_MEMSTALL > 0); for other
	 * CPUs we can just check the PSI_NONIDLE delta.
*/
if (current_work() == &group->avgs_work.work) {
bool reschedule;
if (cpu == current_cpu)
reschedule = tasks[NR_RUNNING] +
tasks[NR_IOWAIT] +
tasks[NR_MEMSTALL] > 1;
else
reschedule = *pchanged_states & (1 << PSI_NONIDLE);
if (reschedule)
*pchanged_states |= PSI_STATE_RESCHEDULE;
}
}
static void calc_avgs(unsigned long avg[3], int missed_periods,
u64 time, u64 period)
{
unsigned long pct;
/* Fill in zeroes for periods of no activity */
if (missed_periods) {
avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
}
/* Sample the most recent active period */
pct = div_u64(time * 100, period);
pct *= FIXED_1;
avg[0] = calc_load(avg[0], EXP_10s, pct);
avg[1] = calc_load(avg[1], EXP_60s, pct);
avg[2] = calc_load(avg[2], EXP_300s, pct);
}
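/*
 * A worked example of the decay above, assuming the kernel's FIXED_1 =
 * 2048 fixed-point scale and calc_load() from loadavg.c (numbers
 * illustrative): starting from avg[0] = 0, one 2s period that was 100%
 * stalled gives
 *
 *	pct    = 100 * FIXED_1 = 204800
 *	avg[0] = (0 * EXP_10s + 204800 * (2048 - 1677)) / 2048 ~= 37100
 *
 * which decodes to about 18.1%, i.e. 1 - 1/exp(2s/10s) of the new
 * sample, as expected for a 10s exponential average fed every 2s.
 */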
static void collect_percpu_times(struct psi_group *group,
enum psi_aggregators aggregator,
u32 *pchanged_states)
{
u64 deltas[NR_PSI_STATES - 1] = { 0, };
unsigned long nonidle_total = 0;
u32 changed_states = 0;
int cpu;
int s;
/*
* Collect the per-cpu time buckets and average them into a
* single time sample that is normalized to wallclock time.
*
* For averaging, each CPU is weighted by its non-idle time in
* the sampling period. This eliminates artifacts from uneven
* loading, or even entirely idle CPUs.
*/
for_each_possible_cpu(cpu) {
u32 times[NR_PSI_STATES];
u32 nonidle;
u32 cpu_changed_states;
get_recent_times(group, cpu, aggregator, times,
&cpu_changed_states);
changed_states |= cpu_changed_states;
nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
nonidle_total += nonidle;
for (s = 0; s < PSI_NONIDLE; s++)
deltas[s] += (u64)times[s] * nonidle;
}
/*
* Integrate the sample into the running statistics that are
* reported to userspace: the cumulative stall times and the
* decaying averages.
*
* Pressure percentages are sampled at PSI_FREQ. We might be
* called more often when the user polls more frequently than
* that; we might be called less often when there is no task
* activity, thus no data, and clock ticks are sporadic. The
* below handles both.
*/
/* total= */
for (s = 0; s < NR_PSI_STATES - 1; s++)
group->total[aggregator][s] +=
div_u64(deltas[s], max(nonidle_total, 1UL));
if (pchanged_states)
*pchanged_states = changed_states;
}
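/*
 * For example (numbers illustrative): CPU0 with 800ms of non-idle time
 * and 100ms of SOME stall, and CPU1 with 200ms non-idle and 200ms of
 * SOME stall, aggregate to
 *
 *	(100ms * 800 + 200ms * 200) / (800 + 200) = 120ms
 *
 * of wallclock SOME time; the non-idle weighting keeps a mostly idle
 * CPU from skewing the aggregate.
 */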
/* Trigger tracking window manipulations */
static void window_reset(struct psi_window *win, u64 now, u64 value,
u64 prev_growth)
{
win->start_time = now;
win->start_value = value;
win->prev_growth = prev_growth;
}
/*
* PSI growth tracking window update and growth calculation routine.
*
* This approximates a sliding tracking window by interpolating
* partially elapsed windows using historical growth data from the
* previous intervals. This minimizes memory requirements (by not storing
* all the intermediate values in the previous window) and simplifies
 * the calculations. It works well because the PSI signal changes only in
 * the positive direction and, over relatively small window sizes, the
 * growth is close to linear.
*/
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
u64 elapsed;
u64 growth;
elapsed = now - win->start_time;
growth = value - win->start_value;
/*
* After each tracking window passes win->start_value and
* win->start_time get reset and win->prev_growth stores
* the average per-window growth of the previous window.
* win->prev_growth is then used to interpolate additional
* growth from the previous window assuming it was linear.
*/
if (elapsed > win->size)
window_reset(win, now, value, growth);
else {
u32 remaining;
remaining = win->size - elapsed;
growth += div64_u64(win->prev_growth * remaining, win->size);
}
return growth;
}
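/*
 * For example (numbers illustrative): with a 1s window that is 250ms
 * old, prev_growth = 400ms of stall from the previous window, and 50ms
 * of growth observed so far, the reported growth is
 *
 *	50ms + 400ms * 750 / 1000 = 350ms
 *
 * i.e. the partially elapsed window is padded with a linear share of
 * the previous window's growth.
 */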
static u64 update_triggers(struct psi_group *group, u64 now, bool *update_total,
enum psi_aggregators aggregator)
{
struct psi_trigger *t;
u64 *total = group->total[aggregator];
struct list_head *triggers;
u64 *aggregator_total;
*update_total = false;
if (aggregator == PSI_AVGS) {
triggers = &group->avg_triggers;
aggregator_total = group->avg_total;
} else {
triggers = &group->rtpoll_triggers;
aggregator_total = group->rtpoll_total;
}
/*
* On subsequent updates, calculate growth deltas and let
* watchers know when their specified thresholds are exceeded.
*/
list_for_each_entry(t, triggers, node) {
u64 growth;
bool new_stall;
new_stall = aggregator_total[t->state] != total[t->state];
/* Check for stall activity or a previous threshold breach */
if (!new_stall && !t->pending_event)
continue;
/*
* Check for new stall activity, as well as deferred
* events that occurred in the last window after the
* trigger had already fired (we want to ratelimit
* events without dropping any).
*/
if (new_stall) {
/*
			 * Multiple triggers might be looking at the same state,
			 * remember to update the aggregator's total[] (i.e.
			 * group->rtpoll_total[] or group->avg_total[]) once
			 * we've been through all of them. Also remember to
			 * extend the polling time if we see new stall activity.
*/
*update_total = true;
/* Calculate growth since last update */
growth = window_update(&t->win, now, total[t->state]);
if (!t->pending_event) {
if (growth < t->threshold)
continue;
t->pending_event = true;
}
}
/* Limit event signaling to once per window */
if (now < t->last_event_time + t->win.size)
continue;
/* Generate an event */
if (cmpxchg(&t->event, 0, 1) == 0) {
if (t->of)
kernfs_notify(t->of->kn);
else
wake_up_interruptible(&t->event_wait);
}
t->last_event_time = now;
/* Reset threshold breach flag once event got generated */
t->pending_event = false;
}
return now + group->rtpoll_min_period;
}
static u64 update_averages(struct psi_group *group, u64 now)
{
unsigned long missed_periods = 0;
u64 expires, period;
u64 avg_next_update;
int s;
/* avgX= */
expires = group->avg_next_update;
if (now - expires >= psi_period)
missed_periods = div_u64(now - expires, psi_period);
/*
* The periodic clock tick can get delayed for various
* reasons, especially on loaded systems. To avoid clock
* drift, we schedule the clock in fixed psi_period intervals.
* But the deltas we sample out of the per-cpu buckets above
* are based on the actual time elapsing between clock ticks.
*/
avg_next_update = expires + ((1 + missed_periods) * psi_period);
period = now - (group->avg_last_update + (missed_periods * psi_period));
group->avg_last_update = now;
for (s = 0; s < NR_PSI_STATES - 1; s++) {
u32 sample;
sample = group->total[PSI_AVGS][s] - group->avg_total[s];
/*
* Due to the lockless sampling of the time buckets,
* recorded time deltas can slip into the next period,
* which under full pressure can result in samples in
* excess of the period length.
*
* We don't want to report non-sensical pressures in
* excess of 100%, nor do we want to drop such events
* on the floor. Instead we punt any overage into the
* future until pressure subsides. By doing this we
* don't underreport the occurring pressure curve, we
* just report it delayed by one period length.
*
* The error isn't cumulative. As soon as another
* delta slips from a period P to P+1, by definition
* it frees up its time T in P.
*/
if (sample > period)
sample = period;
group->avg_total[s] += sample;
calc_avgs(group->avg[s], missed_periods, sample, period);
}
return avg_next_update;
}
static void psi_avgs_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct psi_group *group;
u32 changed_states;
bool update_total;
u64 now;
dwork = to_delayed_work(work);
group = container_of(dwork, struct psi_group, avgs_work);
mutex_lock(&group->avgs_lock);
now = sched_clock();
collect_percpu_times(group, PSI_AVGS, &changed_states);
/*
* If there is task activity, periodically fold the per-cpu
* times and feed samples into the running averages. If things
* are idle and there is no data to process, stop the clock.
* Once restarted, we'll catch up the running averages in one
* go - see calc_avgs() and missed_periods.
*/
if (now >= group->avg_next_update) {
update_triggers(group, now, &update_total, PSI_AVGS);
group->avg_next_update = update_averages(group, now);
}
if (changed_states & PSI_STATE_RESCHEDULE) {
schedule_delayed_work(dwork, nsecs_to_jiffies(
group->avg_next_update - now) + 1);
}
mutex_unlock(&group->avgs_lock);
}
static void init_rtpoll_triggers(struct psi_group *group, u64 now)
{
struct psi_trigger *t;
list_for_each_entry(t, &group->rtpoll_triggers, node)
window_reset(&t->win, now,
group->total[PSI_POLL][t->state], 0);
memcpy(group->rtpoll_total, group->total[PSI_POLL],
sizeof(group->rtpoll_total));
group->rtpoll_next_update = now + group->rtpoll_min_period;
}
/* Schedule polling if it's not already scheduled or forced. */
static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long delay,
bool force)
{
struct task_struct *task;
/*
* atomic_xchg should be called even when !force to provide a
* full memory barrier (see the comment inside psi_rtpoll_work).
*/
if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force)
return;
rcu_read_lock();
task = rcu_dereference(group->rtpoll_task);
/*
	 * The kworker might be NULL if psi_trigger_destroy() races with
	 * psi_task_change() (hotpath), which can't use locks.
*/
if (likely(task))
mod_timer(&group->rtpoll_timer, jiffies + delay);
else
atomic_set(&group->rtpoll_scheduled, 0);
rcu_read_unlock();
}
static void psi_rtpoll_work(struct psi_group *group)
{
bool force_reschedule = false;
u32 changed_states;
bool update_total;
u64 now;
mutex_lock(&group->rtpoll_trigger_lock);
now = sched_clock();
if (now > group->rtpoll_until) {
/*
* We are either about to start or might stop polling if no
		 * state change was recorded. Resetting rtpoll_scheduled leaves
		 * a small window for psi_group_change to sneak in and schedule
		 * an immediate rtpoll work before we get to rescheduling. One
		 * potential extra wakeup at the end of the polling window
		 * should be negligible and rtpoll_next_update still keeps
* updates correctly on schedule.
*/
atomic_set(&group->rtpoll_scheduled, 0);
/*
* A task change can race with the poll worker that is supposed to
* report on it. To avoid missing events, ensure ordering between
		 * rtpoll_scheduled and the task state accesses, such that if
		 * the poll worker misses the state update, the task change is
		 * guaranteed to reschedule the poll worker:
		 *
		 * poll worker:
		 *   atomic_set(rtpoll_scheduled, 0)
		 *   smp_mb()
		 *   LOAD states
		 *
		 * task change:
		 *   STORE states
		 *   if atomic_xchg(rtpoll_scheduled, 1) == 0:
		 *     schedule poll worker
*
* The atomic_xchg() implies a full barrier.
*/
smp_mb();
} else {
/* Polling window is not over, keep rescheduling */
force_reschedule = true;
}
collect_percpu_times(group, PSI_POLL, &changed_states);
if (changed_states & group->rtpoll_states) {
/* Initialize trigger windows when entering polling mode */
if (now > group->rtpoll_until)
init_rtpoll_triggers(group, now);
/*
* Keep the monitor active for at least the duration of the
* minimum tracking window as long as monitor states are
* changing.
*/
group->rtpoll_until = now +
group->rtpoll_min_period * UPDATES_PER_WINDOW;
}
if (now > group->rtpoll_until) {
group->rtpoll_next_update = ULLONG_MAX;
goto out;
}
if (now >= group->rtpoll_next_update) {
group->rtpoll_next_update = update_triggers(group, now, &update_total, PSI_POLL);
if (update_total)
memcpy(group->rtpoll_total, group->total[PSI_POLL],
sizeof(group->rtpoll_total));
}
psi_schedule_rtpoll_work(group,
nsecs_to_jiffies(group->rtpoll_next_update - now) + 1,
force_reschedule);
out:
mutex_unlock(&group->rtpoll_trigger_lock);
}
static int psi_rtpoll_worker(void *data)
{
struct psi_group *group = (struct psi_group *)data;
sched_set_fifo_low(current);
while (true) {
wait_event_interruptible(group->rtpoll_wait,
atomic_cmpxchg(&group->rtpoll_wakeup, 1, 0) ||
kthread_should_stop());
if (kthread_should_stop())
break;
psi_rtpoll_work(group);
}
return 0;
}
static void poll_timer_fn(struct timer_list *t)
{
struct psi_group *group = from_timer(group, t, rtpoll_timer);
atomic_set(&group->rtpoll_wakeup, 1);
wake_up_interruptible(&group->rtpoll_wait);
}
static void record_times(struct psi_group_cpu *groupc, u64 now)
{
u32 delta;
delta = now - groupc->state_start;
groupc->state_start = now;
if (groupc->state_mask & (1 << PSI_IO_SOME)) {
groupc->times[PSI_IO_SOME] += delta;
if (groupc->state_mask & (1 << PSI_IO_FULL))
groupc->times[PSI_IO_FULL] += delta;
}
if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
groupc->times[PSI_MEM_SOME] += delta;
if (groupc->state_mask & (1 << PSI_MEM_FULL))
groupc->times[PSI_MEM_FULL] += delta;
}
if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
groupc->times[PSI_CPU_SOME] += delta;
if (groupc->state_mask & (1 << PSI_CPU_FULL))
groupc->times[PSI_CPU_FULL] += delta;
}
if (groupc->state_mask & (1 << PSI_NONIDLE))
groupc->times[PSI_NONIDLE] += delta;
}
static void psi_group_change(struct psi_group *group, int cpu,
unsigned int clear, unsigned int set, u64 now,
bool wake_clock)
{
struct psi_group_cpu *groupc;
unsigned int t, m;
enum psi_states s;
u32 state_mask;
groupc = per_cpu_ptr(group->pcpu, cpu);
/*
* First we update the task counts according to the state
* change requested through the @clear and @set bits.
*
	 * Then, if cgroup PSI stats accounting is enabled, we
* assess the aggregate resource states this CPU's tasks
* have been in since the last change, and account any
* SOME and FULL time these may have resulted in.
*/
write_seqcount_begin(&groupc->seq);
/*
* Start with TSK_ONCPU, which doesn't have a corresponding
* task count - it's just a boolean flag directly encoded in
* the state mask. Clear, set, or carry the current state if
* no changes are requested.
*/
if (unlikely(clear & TSK_ONCPU)) {
state_mask = 0;
clear &= ~TSK_ONCPU;
} else if (unlikely(set & TSK_ONCPU)) {
state_mask = PSI_ONCPU;
set &= ~TSK_ONCPU;
} else {
state_mask = groupc->state_mask & PSI_ONCPU;
}
/*
* The rest of the state mask is calculated based on the task
* counts. Update those first, then construct the mask.
*/
for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
if (!(m & (1 << t)))
continue;
if (groupc->tasks[t]) {
groupc->tasks[t]--;
} else if (!psi_bug) {
printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
cpu, t, groupc->tasks[0],
groupc->tasks[1], groupc->tasks[2],
groupc->tasks[3], clear, set);
psi_bug = 1;
}
}
for (t = 0; set; set &= ~(1 << t), t++)
if (set & (1 << t))
groupc->tasks[t]++;
if (!group->enabled) {
/*
* On the first group change after disabling PSI, conclude
* the current state and flush its time. This is unlikely
* to matter to the user, but aggregation (get_recent_times)
* may have already incorporated the live state into times_prev;
* avoid a delta sample underflow when PSI is later re-enabled.
*/
if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
record_times(groupc, now);
groupc->state_mask = state_mask;
write_seqcount_end(&groupc->seq);
return;
}
for (s = 0; s < NR_PSI_STATES; s++) {
if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU))
state_mask |= (1 << s);
}
/*
* Since we care about lost potential, a memstall is FULL
* when there are no other working tasks, but also when
* the CPU is actively reclaiming and nothing productive
* could run even if it were runnable. So when the current
* task in a cgroup is in_memstall, the corresponding groupc
* on that cpu is in PSI_MEM_FULL state.
*/
if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
state_mask |= (1 << PSI_MEM_FULL);
record_times(groupc, now);
groupc->state_mask = state_mask;
write_seqcount_end(&groupc->seq);
if (state_mask & group->rtpoll_states)
psi_schedule_rtpoll_work(group, 1, false);
if (wake_clock && !delayed_work_pending(&group->avgs_work))
schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}
static inline struct psi_group *task_psi_group(struct task_struct *task)
{
#ifdef CONFIG_CGROUPS
if (static_branch_likely(&psi_cgroups_enabled))
return cgroup_psi(task_dfl_cgroup(task));
#endif
return &psi_system;
}
static void psi_flags_change(struct task_struct *task, int clear, int set)
{
if (((task->psi_flags & set) ||
(task->psi_flags & clear) != clear) &&
!psi_bug) {
printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
task->pid, task->comm, task_cpu(task),
task->psi_flags, clear, set);
psi_bug = 1;
}
task->psi_flags &= ~clear;
task->psi_flags |= set;
}
void psi_task_change(struct task_struct *task, int clear, int set)
{
int cpu = task_cpu(task);
struct psi_group *group;
u64 now;
if (!task->pid)
return;
psi_flags_change(task, clear, set);
now = cpu_clock(cpu);
group = task_psi_group(task);
do {
psi_group_change(group, cpu, clear, set, now, true);
} while ((group = group->parent));
}
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
bool sleep)
{
struct psi_group *group, *common = NULL;
int cpu = task_cpu(prev);
u64 now = cpu_clock(cpu);
if (next->pid) {
psi_flags_change(next, 0, TSK_ONCPU);
/*
* Set TSK_ONCPU on @next's cgroups. If @next shares any
* ancestors with @prev, those will already have @prev's
* TSK_ONCPU bit set, and we can stop the iteration there.
*/
group = task_psi_group(next);
do {
if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
PSI_ONCPU) {
common = group;
break;
}
psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
} while ((group = group->parent));
}
if (prev->pid) {
int clear = TSK_ONCPU, set = 0;
bool wake_clock = true;
/*
* When we're going to sleep, psi_dequeue() lets us
* handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
* TSK_IOWAIT here, where we can combine it with
* TSK_ONCPU and save walking common ancestors twice.
*/
if (sleep) {
clear |= TSK_RUNNING;
if (prev->in_memstall)
clear |= TSK_MEMSTALL_RUNNING;
if (prev->in_iowait)
set |= TSK_IOWAIT;
/*
* Periodic aggregation shuts off if there is a period of no
* task changes, so we wake it back up if necessary. However,
* don't do this if the task change is the aggregation worker
* itself going to sleep, or we'll ping-pong forever.
*/
if (unlikely((prev->flags & PF_WQ_WORKER) &&
wq_worker_last_func(prev) == psi_avgs_work))
wake_clock = false;
}
psi_flags_change(prev, clear, set);
group = task_psi_group(prev);
do {
if (group == common)
break;
psi_group_change(group, cpu, clear, set, now, wake_clock);
} while ((group = group->parent));
/*
* TSK_ONCPU is handled up to the common ancestor. If there are
* any other differences between the two tasks (e.g. prev goes
* to sleep, or only one task is memstall), finish propagating
* those differences all the way up to the root.
*/
if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
clear &= ~TSK_ONCPU;
for (; group; group = group->parent)
psi_group_change(group, cpu, clear, set, now, wake_clock);
}
}
}
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
void psi_account_irqtime(struct task_struct *task, u32 delta)
{
int cpu = task_cpu(task);
struct psi_group *group;
struct psi_group_cpu *groupc;
u64 now;
if (!task->pid)
return;
now = cpu_clock(cpu);
group = task_psi_group(task);
do {
if (!group->enabled)
continue;
groupc = per_cpu_ptr(group->pcpu, cpu);
write_seqcount_begin(&groupc->seq);
record_times(groupc, now);
groupc->times[PSI_IRQ_FULL] += delta;
write_seqcount_end(&groupc->seq);
if (group->rtpoll_states & (1 << PSI_IRQ_FULL))
psi_schedule_rtpoll_work(group, 1, false);
} while ((group = group->parent));
}
#endif
/**
* psi_memstall_enter - mark the beginning of a memory stall section
* @flags: flags to handle nested sections
*
* Marks the calling task as being stalled due to a lack of memory,
* such as waiting for a refault or performing reclaim.
*/
void psi_memstall_enter(unsigned long *flags)
{
struct rq_flags rf;
struct rq *rq;
if (static_branch_likely(&psi_disabled))
return;
*flags = current->in_memstall;
if (*flags)
return;
/*
* in_memstall setting & accounting needs to be atomic wrt
* changes to the task's scheduling state, otherwise we can
* race with CPU migration.
*/
rq = this_rq_lock_irq(&rf);
current->in_memstall = 1;
psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
rq_unlock_irq(rq, &rf);
}
EXPORT_SYMBOL_GPL(psi_memstall_enter);
/**
 * psi_memstall_leave - mark the end of a memory stall section
* @flags: flags to handle nested memdelay sections
*
* Marks the calling task as no longer stalled due to lack of memory.
*/
void psi_memstall_leave(unsigned long *flags)
{
struct rq_flags rf;
struct rq *rq;
if (static_branch_likely(&psi_disabled))
return;
if (*flags)
return;
/*
* in_memstall clearing & accounting needs to be atomic wrt
* changes to the task's scheduling state, otherwise we could
* race with CPU migration.
*/
rq = this_rq_lock_irq(&rf);
current->in_memstall = 0;
psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);
rq_unlock_irq(rq, &rf);
}
EXPORT_SYMBOL_GPL(psi_memstall_leave);
#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
if (!static_branch_likely(&psi_cgroups_enabled))
return 0;
cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
if (!cgroup->psi)
return -ENOMEM;
cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
if (!cgroup->psi->pcpu) {
kfree(cgroup->psi);
return -ENOMEM;
}
group_init(cgroup->psi);
cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
return 0;
}
void psi_cgroup_free(struct cgroup *cgroup)
{
if (!static_branch_likely(&psi_cgroups_enabled))
return;
cancel_delayed_work_sync(&cgroup->psi->avgs_work);
free_percpu(cgroup->psi->pcpu);
/* All triggers must be removed by now */
WARN_ONCE(cgroup->psi->rtpoll_states, "psi: trigger leak\n");
kfree(cgroup->psi);
}
/**
* cgroup_move_task - move task to a different cgroup
* @task: the task
* @to: the target css_set
*
* Move task to a new cgroup and safely migrate its associated stall
* state between the different groups.
*
* This function acquires the task's rq lock to lock out concurrent
* changes to the task's scheduling state and - in case the task is
* running - concurrent changes to its stall state.
*/
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
unsigned int task_flags;
struct rq_flags rf;
struct rq *rq;
if (!static_branch_likely(&psi_cgroups_enabled)) {
/*
* Lame to do this here, but the scheduler cannot be locked
* from the outside, so we move cgroups from inside sched/.
*/
rcu_assign_pointer(task->cgroups, to);
return;
}
rq = task_rq_lock(task, &rf);
/*
* We may race with schedule() dropping the rq lock between
* deactivating prev and switching to next. Because the psi
* updates from the deactivation are deferred to the switch
* callback to save cgroup tree updates, the task's scheduling
* state here is not coherent with its psi state:
*
* schedule() cgroup_move_task()
* rq_lock()
* deactivate_task()
* p->on_rq = 0
* psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
* pick_next_task()
* rq_unlock()
* rq_lock()
* psi_task_change() // old cgroup
* task->cgroups = to
* psi_task_change() // new cgroup
* rq_unlock()
* rq_lock()
* psi_sched_switch() // does deferred updates in new cgroup
*
* Don't rely on the scheduling state. Use psi_flags instead.
*/
task_flags = task->psi_flags;
if (task_flags)
psi_task_change(task, task_flags, 0);
/* See comment above */
rcu_assign_pointer(task->cgroups, to);
if (task_flags)
psi_task_change(task, 0, task_flags);
task_rq_unlock(rq, task, &rf);
}
void psi_cgroup_restart(struct psi_group *group)
{
int cpu;
/*
	 * After psi_group->enabled is cleared, we don't actually
	 * stop the percpu task accounting in each psi_group_cpu;
	 * we only stop the test_state() loop, record_times() and
	 * the averaging worker, see psi_group_change() for details.
	 *
	 * When cgroup PSI is disabled, this function has nothing to sync
	 * since the cgroup pressure files are hidden and the percpu
	 * psi_group_cpu would see !psi_group->enabled and only do task
	 * accounting.
	 *
	 * When cgroup PSI is re-enabled, this function uses psi_group_change()
	 * to get the correct state mask from the test_state() loop on tasks[],
	 * and restarts groupc->state_start from now; .clear = .set = 0 are
	 * used here since no task status really changed.
*/
if (!group->enabled)
return;
for_each_possible_cpu(cpu) {
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
u64 now;
rq_lock_irq(rq, &rf);
now = cpu_clock(cpu);
psi_group_change(group, cpu, 0, 0, now, true);
rq_unlock_irq(rq, &rf);
}
}
#endif /* CONFIG_CGROUPS */
int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
bool only_full = false;
int full;
u64 now;
if (static_branch_likely(&psi_disabled))
return -EOPNOTSUPP;
/* Update averages before reporting them */
mutex_lock(&group->avgs_lock);
now = sched_clock();
collect_percpu_times(group, PSI_AVGS, NULL);
if (now >= group->avg_next_update)
group->avg_next_update = update_averages(group, now);
mutex_unlock(&group->avgs_lock);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
only_full = res == PSI_IRQ;
#endif
for (full = 0; full < 2 - only_full; full++) {
unsigned long avg[3] = { 0, };
u64 total = 0;
int w;
/* CPU FULL is undefined at the system level */
if (!(group == &psi_system && res == PSI_CPU && full)) {
for (w = 0; w < 3; w++)
avg[w] = group->avg[res * 2 + full][w];
total = div_u64(group->total[PSI_AVGS][res * 2 + full],
NSEC_PER_USEC);
}
seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
full || only_full ? "full" : "some",
LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
total);
}
return 0;
}
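/*
 * The loop above produces the /proc/pressure/* and cgroup pressure file
 * format, e.g. (values illustrative):
 *
 *	some avg10=0.12 avg60=0.34 avg300=0.10 total=123456
 *	full avg10=0.00 avg60=0.02 avg300=0.00 total=4567
 *
 * For IRQ only the "full" line is emitted, and the system-level CPU
 * "full" line is pinned to all zeroes since CPU FULL is undefined
 * there.
 */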
struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
enum psi_res res, struct file *file,
struct kernfs_open_file *of)
{
struct psi_trigger *t;
enum psi_states state;
u32 threshold_us;
bool privileged;
u32 window_us;
if (static_branch_likely(&psi_disabled))
return ERR_PTR(-EOPNOTSUPP);
/*
* Checking the privilege here on file->f_cred implies that a privileged user
* could open the file and delegate the write to an unprivileged one.
*/
privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE);
if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
state = PSI_IO_SOME + res * 2;
else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
state = PSI_IO_FULL + res * 2;
else
return ERR_PTR(-EINVAL);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
if (res == PSI_IRQ && --state != PSI_IRQ_FULL)
return ERR_PTR(-EINVAL);
#endif
if (state >= PSI_NONIDLE)
return ERR_PTR(-EINVAL);
if (window_us == 0 || window_us > WINDOW_MAX_US)
return ERR_PTR(-EINVAL);
/*
	 * Unprivileged users can only use window sizes that are multiples of
	 * 2s, so that the averages aggregation work is used and no RT threads
	 * need to be spawned.
*/
if (!privileged && window_us % 2000000)
return ERR_PTR(-EINVAL);
/* Check threshold */
if (threshold_us == 0 || threshold_us > window_us)
return ERR_PTR(-EINVAL);
t = kmalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return ERR_PTR(-ENOMEM);
t->group = group;
t->state = state;
t->threshold = threshold_us * NSEC_PER_USEC;
t->win.size = window_us * NSEC_PER_USEC;
window_reset(&t->win, sched_clock(),
group->total[PSI_POLL][t->state], 0);
t->event = 0;
t->last_event_time = 0;
t->of = of;
if (!of)
init_waitqueue_head(&t->event_wait);
t->pending_event = false;
t->aggregator = privileged ? PSI_POLL : PSI_AVGS;
if (privileged) {
mutex_lock(&group->rtpoll_trigger_lock);
if (!rcu_access_pointer(group->rtpoll_task)) {
struct task_struct *task;
task = kthread_create(psi_rtpoll_worker, group, "psimon");
if (IS_ERR(task)) {
kfree(t);
mutex_unlock(&group->rtpoll_trigger_lock);
return ERR_CAST(task);
}
atomic_set(&group->rtpoll_wakeup, 0);
wake_up_process(task);
rcu_assign_pointer(group->rtpoll_task, task);
}
list_add(&t->node, &group->rtpoll_triggers);
group->rtpoll_min_period = min(group->rtpoll_min_period,
div_u64(t->win.size, UPDATES_PER_WINDOW));
group->rtpoll_nr_triggers[t->state]++;
group->rtpoll_states |= (1 << t->state);
mutex_unlock(&group->rtpoll_trigger_lock);
} else {
mutex_lock(&group->avgs_lock);
list_add(&t->node, &group->avg_triggers);
group->avg_nr_triggers[t->state]++;
mutex_unlock(&group->avgs_lock);
}
return t;
}
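/*
 * Worked examples of the trigger format parsed above (editor's
 * addition; thresholds and windows are in microseconds):
 *
 *	"some 150000 1000000" - wake the waiter when "some" stall time
 *		exceeds 150ms within a 1s window; needs CAP_SYS_RESOURCE,
 *		since 1000000 is not a multiple of the 2s averaging period.
 *	"full 500000 2000000" - 500ms of "full" stall in a 2s window; the
 *		2s window makes this usable without privilege.
 *	"some 0 1000000" - rejected with -EINVAL: the threshold must be
 *		non-zero and no larger than the window.
 */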
void psi_trigger_destroy(struct psi_trigger *t)
{
struct psi_group *group;
struct task_struct *task_to_destroy = NULL;
/*
* We do not check psi_disabled since it might have been disabled after
* the trigger got created.
*/
if (!t)
return;
group = t->group;
	/*
	 * Wake up waiters to stop polling and clear the queue to prevent it
	 * from being accessed later. This can happen if the cgroup is deleted
	 * from under a polling process.
	 */
if (t->of)
kernfs_notify(t->of->kn);
else
wake_up_interruptible(&t->event_wait);
if (t->aggregator == PSI_AVGS) {
mutex_lock(&group->avgs_lock);
if (!list_empty(&t->node)) {
list_del(&t->node);
group->avg_nr_triggers[t->state]--;
}
mutex_unlock(&group->avgs_lock);
} else {
mutex_lock(&group->rtpoll_trigger_lock);
if (!list_empty(&t->node)) {
struct psi_trigger *tmp;
u64 period = ULLONG_MAX;
list_del(&t->node);
group->rtpoll_nr_triggers[t->state]--;
if (!group->rtpoll_nr_triggers[t->state])
group->rtpoll_states &= ~(1 << t->state);
			/*
			 * Reset the min update period for the remaining
			 * triggers iff the trigger being destroyed had the
			 * min window size.
			 */
if (group->rtpoll_min_period == div_u64(t->win.size, UPDATES_PER_WINDOW)) {
list_for_each_entry(tmp, &group->rtpoll_triggers, node)
period = min(period, div_u64(tmp->win.size,
UPDATES_PER_WINDOW));
group->rtpoll_min_period = period;
}
/* Destroy rtpoll_task when the last trigger is destroyed */
if (group->rtpoll_states == 0) {
group->rtpoll_until = 0;
task_to_destroy = rcu_dereference_protected(
group->rtpoll_task,
lockdep_is_held(&group->rtpoll_trigger_lock));
rcu_assign_pointer(group->rtpoll_task, NULL);
del_timer(&group->rtpoll_timer);
}
}
mutex_unlock(&group->rtpoll_trigger_lock);
}
/*
* Wait for psi_schedule_rtpoll_work RCU to complete its read-side
* critical section before destroying the trigger and optionally the
* rtpoll_task.
*/
synchronize_rcu();
	/*
	 * Stop the kthread 'psimon' after releasing rtpoll_trigger_lock to
	 * prevent a deadlock while waiting for psi_rtpoll_work to acquire
	 * rtpoll_trigger_lock.
	 */
if (task_to_destroy) {
/*
* After the RCU grace period has expired, the worker
* can no longer be found through group->rtpoll_task.
*/
kthread_stop(task_to_destroy);
atomic_set(&group->rtpoll_scheduled, 0);
}
kfree(t);
}
__poll_t psi_trigger_poll(void **trigger_ptr,
struct file *file, poll_table *wait)
{
__poll_t ret = DEFAULT_POLLMASK;
struct psi_trigger *t;
if (static_branch_likely(&psi_disabled))
return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
t = smp_load_acquire(trigger_ptr);
if (!t)
return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
if (t->of)
kernfs_generic_poll(t->of, wait);
else
poll_wait(file, &t->event_wait, wait);
if (cmpxchg(&t->event, 1, 0) == 1)
ret |= EPOLLPRI;
return ret;
}
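/*
 * Illustrative userspace monitor sketch (editor's addition, not part of
 * the kernel source), assuming the /proc/pressure files registered
 * below; the 2s window keeps it usable without CAP_SYS_RESOURCE:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char trig[] = "some 150000 2000000";
 *		struct pollfd fds;
 *
 *		fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *		if (fds.fd < 0)
 *			return 1;
 *		// psi_write() installs the trigger via psi_trigger_create()
 *		if (write(fds.fd, trig, strlen(trig) + 1) < 0)
 *			return 1;
 *		fds.events = POLLPRI;
 *		for (;;) {
 *			// psi_trigger_poll() reports events as EPOLLPRI
 *			if (poll(&fds, 1, -1) < 0)
 *				return 1;
 *			if (fds.revents & POLLPRI)
 *				printf("memory pressure event\n");
 *		}
 *	}
 */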
#ifdef CONFIG_PROC_FS
static int psi_io_show(struct seq_file *m, void *v)
{
return psi_show(m, &psi_system, PSI_IO);
}
static int psi_memory_show(struct seq_file *m, void *v)
{
return psi_show(m, &psi_system, PSI_MEM);
}
static int psi_cpu_show(struct seq_file *m, void *v)
{
return psi_show(m, &psi_system, PSI_CPU);
}
static int psi_io_open(struct inode *inode, struct file *file)
{
return single_open(file, psi_io_show, NULL);
}
static int psi_memory_open(struct inode *inode, struct file *file)
{
return single_open(file, psi_memory_show, NULL);
}
static int psi_cpu_open(struct inode *inode, struct file *file)
{
return single_open(file, psi_cpu_show, NULL);
}
static ssize_t psi_write(struct file *file, const char __user *user_buf,
size_t nbytes, enum psi_res res)
{
char buf[32];
size_t buf_size;
struct seq_file *seq;
struct psi_trigger *new;
if (static_branch_likely(&psi_disabled))
return -EOPNOTSUPP;
if (!nbytes)
return -EINVAL;
buf_size = min(nbytes, sizeof(buf));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
buf[buf_size - 1] = '\0';
seq = file->private_data;
/* Take seq->lock to protect seq->private from concurrent writes */
mutex_lock(&seq->lock);
/* Allow only one trigger per file descriptor */
if (seq->private) {
mutex_unlock(&seq->lock);
return -EBUSY;
}
new = psi_trigger_create(&psi_system, buf, res, file, NULL);
if (IS_ERR(new)) {
mutex_unlock(&seq->lock);
return PTR_ERR(new);
}
smp_store_release(&seq->private, new);
mutex_unlock(&seq->lock);
return nbytes;
}
static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
size_t nbytes, loff_t *ppos)
{
return psi_write(file, user_buf, nbytes, PSI_IO);
}
static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
size_t nbytes, loff_t *ppos)
{
return psi_write(file, user_buf, nbytes, PSI_MEM);
}
static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
size_t nbytes, loff_t *ppos)
{
return psi_write(file, user_buf, nbytes, PSI_CPU);
}
static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
{
struct seq_file *seq = file->private_data;
return psi_trigger_poll(&seq->private, file, wait);
}
static int psi_fop_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
psi_trigger_destroy(seq->private);
return single_release(inode, file);
}
static const struct proc_ops psi_io_proc_ops = {
.proc_open = psi_io_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_write = psi_io_write,
.proc_poll = psi_fop_poll,
.proc_release = psi_fop_release,
};
static const struct proc_ops psi_memory_proc_ops = {
.proc_open = psi_memory_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_write = psi_memory_write,
.proc_poll = psi_fop_poll,
.proc_release = psi_fop_release,
};
static const struct proc_ops psi_cpu_proc_ops = {
.proc_open = psi_cpu_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_write = psi_cpu_write,
.proc_poll = psi_fop_poll,
.proc_release = psi_fop_release,
};
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
static int psi_irq_show(struct seq_file *m, void *v)
{
return psi_show(m, &psi_system, PSI_IRQ);
}
static int psi_irq_open(struct inode *inode, struct file *file)
{
return single_open(file, psi_irq_show, NULL);
}
static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
size_t nbytes, loff_t *ppos)
{
return psi_write(file, user_buf, nbytes, PSI_IRQ);
}
static const struct proc_ops psi_irq_proc_ops = {
.proc_open = psi_irq_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_write = psi_irq_write,
.proc_poll = psi_fop_poll,
.proc_release = psi_fop_release,
};
#endif
static int __init psi_proc_init(void)
{
if (psi_enable) {
proc_mkdir("pressure", NULL);
proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops);
#endif
}
return 0;
}
module_init(psi_proc_init);
#endif /* CONFIG_PROC_FS */
| linux-master | kernel/sched/psi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* kernel/sched/loadavg.c
*
* This file contains the magic bits required to compute the global loadavg
 * figure. It's a silly number but people think it's important. We go through
* great pains to make it work on big machines and tickless kernels.
*/
/*
* Global load-average calculations
*
* We take a distributed and async approach to calculating the global load-avg
* in order to minimize overhead.
*
* The global load average is an exponentially decaying average of nr_running +
* nr_uninterruptible.
*
* Once every LOAD_FREQ:
*
* nr_active = 0;
* for_each_possible_cpu(cpu)
* nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
*
* avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
*
 * For a number of reasons, the above turns into the mess below:
*
* - for_each_possible_cpu() is prohibitively expensive on machines with
 *   a serious number of CPUs, therefore we need to take a distributed approach
* to calculating nr_active.
*
* \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
* = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
*
 * So assuming nr_active := 0 when we start out -- true by definition, we
* can simply take per-CPU deltas and fold those into a global accumulate
* to obtain the same result. See calc_load_fold_active().
*
* Furthermore, in order to avoid synchronizing all per-CPU delta folding
* across the machine, we assume 10 ticks is sufficient time for every
* CPU to have completed this task.
*
* This places an upper-bound on the IRQ-off latency of the machine. Then
 * again, being late doesn't lose the delta, just wrecks the sample.
*
* - cpu_rq()->nr_uninterruptible isn't accurately tracked per-CPU because
* this would add another cross-CPU cacheline miss and atomic operation
* to the wakeup path. Instead we increment on whatever CPU the task ran
* when it went into uninterruptible state and decrement on whatever CPU
* did the wakeup. This means that only the sum of nr_uninterruptible over
* all CPUs yields the correct result.
*
 * This covers the NO_HZ=n code; for extra headaches, see the comment below.
*/
/* Variables and functions for calc_load */
atomic_long_t calc_load_tasks;
unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun); /* should be removed */
/**
* get_avenrun - get the load average array
* @loads: pointer to dest load array
* @offset: offset to add
* @shift: shift count to shift the result left
*
* These values are estimates at best, so no need for locking.
*/
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
loads[0] = (avenrun[0] + offset) << shift;
loads[1] = (avenrun[1] + offset) << shift;
loads[2] = (avenrun[2] + offset) << shift;
}
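/*
 * Usage sketch (editor's addition): the /proc/loadavg code in fs/proc is
 * believed to consume this roughly as follows, with FIXED_1/200 rounding
 * the fixed-point value to two decimal places:
 *
 *	unsigned long avnrun[3];
 *
 *	get_avenrun(avnrun, FIXED_1/200, 0);
 *	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu ...",
 *		   LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
 *		   LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
 *		   LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
 */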
long calc_load_fold_active(struct rq *this_rq, long adjust)
{
long nr_active, delta = 0;
nr_active = this_rq->nr_running - adjust;
nr_active += (int)this_rq->nr_uninterruptible;
if (nr_active != this_rq->calc_load_active) {
delta = nr_active - this_rq->calc_load_active;
this_rq->calc_load_active = nr_active;
}
return delta;
}
/**
* fixed_power_int - compute: x^n, in O(log n) time
*
* @x: base of the power
* @frac_bits: fractional bits of @x
* @n: power to raise @x to.
*
* By exploiting the relation between the definition of the natural power
* function: x^n := x*x*...*x (x multiplied by itself for n times), and
* the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
* (where: n_i \elem {0, 1}, the binary vector representing n),
* we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
* of course trivially computable in O(log_2 n), the length of our binary
* vector.
*/
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
unsigned long result = 1UL << frac_bits;
if (n) {
for (;;) {
if (n & 1) {
result *= x;
result += 1UL << (frac_bits - 1);
result >>= frac_bits;
}
n >>= 1;
if (!n)
break;
x *= x;
x += 1UL << (frac_bits - 1);
x >>= frac_bits;
}
}
return result;
}
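/*
 * Worked example (editor's addition): for n = 5 (binary 101) the loop
 * above computes x^5 as x^1 * x^4 in three passes:
 *
 *	pass 1: n = 101b, low bit set   -> result = x;         x -> x^2
 *	pass 2: n =  10b, low bit clear -> result unchanged;   x^2 -> x^4
 *	pass 3: n =   1b, low bit set   -> result = x * x^4 = x^5
 *
 * After each multiplication, adding 1UL << (frac_bits - 1) and shifting
 * right by frac_bits rounds the fixed-point product back to frac_bits
 * fractional bits.
 */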
/*
* a1 = a0 * e + a * (1 - e)
*
* a2 = a1 * e + a * (1 - e)
* = (a0 * e + a * (1 - e)) * e + a * (1 - e)
* = a0 * e^2 + a * (1 - e) * (1 + e)
*
* a3 = a2 * e + a * (1 - e)
* = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
* = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
*
* ...
*
 *  an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^(n-1))	[1]
* = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
* = a0 * e^n + a * (1 - e^n)
*
* [1] application of the geometric series:
*
* n 1 - x^(n+1)
* S_n := \Sum x^i = -------------
* i=0 1 - x
*/
unsigned long
calc_load_n(unsigned long load, unsigned long exp,
unsigned long active, unsigned int n)
{
return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}
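/*
 * Minimal standalone sketch (editor's addition) of the fixed-point
 * update a1 = a0 * e + a * (1 - e) used above. FSHIFT = 11 and
 * EXP_1 = 1884 mirror the values defined in <linux/sched/loadavg.h>;
 * treat them (and the rounding-free update) as assumptions here:
 *
 *	#include <stdio.h>
 *
 *	#define FSHIFT	11
 *	#define FIXED_1	(1UL << FSHIFT)
 *	#define EXP_1	1884UL	// 1/exp(5s/1min) in fixed-point
 *
 *	static unsigned long demo_calc_load(unsigned long load,
 *					    unsigned long exp,
 *					    unsigned long active)
 *	{
 *		return (load * exp + active * (FIXED_1 - exp)) >> FSHIFT;
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned long avg = 0, active = 2 * FIXED_1;
 *		int i;
 *
 *		for (i = 0; i < 12; i++)	// 12 ticks of 5s = 1 minute
 *			avg = demo_calc_load(avg, EXP_1, active);
 *		printf("1-min load after 60s at 2 tasks: %lu.%02lu\n",
 *		       avg >> FSHIFT,
 *		       ((avg & (FIXED_1 - 1)) * 100) >> FSHIFT);
 *		return 0;
 *	}
 */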
#ifdef CONFIG_NO_HZ_COMMON
/*
* Handle NO_HZ for the global load-average.
*
* Since the above described distributed algorithm to compute the global
* load-average relies on per-CPU sampling from the tick, it is affected by
* NO_HZ.
*
* The basic idea is to fold the nr_active delta into a global NO_HZ-delta upon
* entering NO_HZ state such that we can include this as an 'extra' CPU delta
* when we read the global state.
*
* Obviously reality has to ruin such a delightfully simple scheme:
*
* - When we go NO_HZ idle during the window, we can negate our sample
* contribution, causing under-accounting.
*
* We avoid this by keeping two NO_HZ-delta counters and flipping them
* when the window starts, thus separating old and new NO_HZ load.
*
* The only trick is the slight shift in index flip for read vs write.
*
* 0s 5s 10s 15s
* +10 +10 +10 +10
* |-|-----------|-|-----------|-|-----------|-|
* r:0 0 1 1 0 0 1 1 0
* w:0 1 1 0 0 1 1 0 0
*
* This ensures we'll fold the old NO_HZ contribution in this window while
* accumulating the new one.
*
* - When we wake up from NO_HZ during the window, we push up our
* contribution, since we effectively move our sample point to a known
* busy state.
*
* This is solved by pushing the window forward, and thus skipping the
* sample, for this CPU (effectively using the NO_HZ-delta for this CPU which
* was in effect at the time the window opened). This also solves the issue
* of having to deal with a CPU having been in NO_HZ for multiple LOAD_FREQ
* intervals.
*
* When making the ILB scale, we should try to pull this in as well.
*/
static atomic_long_t calc_load_nohz[2];
static int calc_load_idx;
static inline int calc_load_write_idx(void)
{
int idx = calc_load_idx;
/*
* See calc_global_nohz(), if we observe the new index, we also
* need to observe the new update time.
*/
smp_rmb();
/*
* If the folding window started, make sure we start writing in the
* next NO_HZ-delta.
*/
if (!time_before(jiffies, READ_ONCE(calc_load_update)))
idx++;
return idx & 1;
}
static inline int calc_load_read_idx(void)
{
return calc_load_idx & 1;
}
static void calc_load_nohz_fold(struct rq *rq)
{
long delta;
delta = calc_load_fold_active(rq, 0);
if (delta) {
int idx = calc_load_write_idx();
atomic_long_add(delta, &calc_load_nohz[idx]);
}
}
void calc_load_nohz_start(void)
{
/*
* We're going into NO_HZ mode, if there's any pending delta, fold it
* into the pending NO_HZ delta.
*/
calc_load_nohz_fold(this_rq());
}
/*
* Keep track of the load for NOHZ_FULL, must be called between
* calc_load_nohz_{start,stop}().
*/
void calc_load_nohz_remote(struct rq *rq)
{
calc_load_nohz_fold(rq);
}
void calc_load_nohz_stop(void)
{
struct rq *this_rq = this_rq();
/*
* If we're still before the pending sample window, we're done.
*/
this_rq->calc_load_update = READ_ONCE(calc_load_update);
if (time_before(jiffies, this_rq->calc_load_update))
return;
/*
* We woke inside or after the sample window, this means we're already
* accounted through the nohz accounting, so skip the entire deal and
* sync up for the next window.
*/
if (time_before(jiffies, this_rq->calc_load_update + 10))
this_rq->calc_load_update += LOAD_FREQ;
}
static long calc_load_nohz_read(void)
{
int idx = calc_load_read_idx();
long delta = 0;
if (atomic_long_read(&calc_load_nohz[idx]))
delta = atomic_long_xchg(&calc_load_nohz[idx], 0);
return delta;
}
/*
* NO_HZ can leave us missing all per-CPU ticks calling
* calc_load_fold_active(), but since a NO_HZ CPU folds its delta into
* calc_load_nohz per calc_load_nohz_start(), all we need to do is fold
* in the pending NO_HZ delta if our NO_HZ period crossed a load cycle boundary.
*
* Once we've updated the global active value, we need to apply the exponential
* weights adjusted to the number of cycles missed.
*/
static void calc_global_nohz(void)
{
unsigned long sample_window;
long delta, active, n;
sample_window = READ_ONCE(calc_load_update);
if (!time_before(jiffies, sample_window + 10)) {
/*
* Catch-up, fold however many we are behind still
*/
delta = jiffies - sample_window - 10;
n = 1 + (delta / LOAD_FREQ);
active = atomic_long_read(&calc_load_tasks);
active = active > 0 ? active * FIXED_1 : 0;
avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);
}
/*
* Flip the NO_HZ index...
*
* Make sure we first write the new time then flip the index, so that
* calc_load_write_idx() will see the new time when it reads the new
* index, this avoids a double flip messing things up.
*/
smp_wmb();
calc_load_idx++;
}
#else /* !CONFIG_NO_HZ_COMMON */
static inline long calc_load_nohz_read(void) { return 0; }
static inline void calc_global_nohz(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
/*
 * calc_global_load - update the avenrun load estimates 10 ticks after the
* CPUs have updated calc_load_tasks.
*
* Called from the global timer code.
*/
void calc_global_load(void)
{
unsigned long sample_window;
long active, delta;
sample_window = READ_ONCE(calc_load_update);
if (time_before(jiffies, sample_window + 10))
return;
/*
* Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs.
*/
delta = calc_load_nohz_read();
if (delta)
atomic_long_add(delta, &calc_load_tasks);
active = atomic_long_read(&calc_load_tasks);
active = active > 0 ? active * FIXED_1 : 0;
avenrun[0] = calc_load(avenrun[0], EXP_1, active);
avenrun[1] = calc_load(avenrun[1], EXP_5, active);
avenrun[2] = calc_load(avenrun[2], EXP_15, active);
WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);
/*
* In case we went to NO_HZ for multiple LOAD_FREQ intervals
* catch up in bulk.
*/
calc_global_nohz();
}
/*
* Called from scheduler_tick() to periodically update this CPU's
* active count.
*/
void calc_global_load_tick(struct rq *this_rq)
{
long delta;
if (time_before(jiffies, this_rq->calc_load_update))
return;
delta = calc_load_fold_active(this_rq, 0);
if (delta)
atomic_long_add(delta, &calc_load_tasks);
this_rq->calc_load_update += LOAD_FREQ;
}
| linux-master | kernel/sched/loadavg.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * <linux/swait.h> (simple wait queues) implementation:
*/
void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
struct lock_class_key *key)
{
raw_spin_lock_init(&q->lock);
lockdep_set_class_and_name(&q->lock, key, name);
INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);
/*
 * About the wake_up_state() return value: we can ignore it.
 *
 * If for some reason it returns 0, that means the previously waiting
 * task is already running, so it will observe the condition true (or
 * already has).
 */
void swake_up_locked(struct swait_queue_head *q, int wake_flags)
{
struct swait_queue *curr;
if (list_empty(&q->task_list))
return;
curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
try_to_wake_up(curr->task, TASK_NORMAL, wake_flags);
list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);
/*
* Wake up all waiters. This is an interface which is solely exposed for
* completions and not for general usage.
*
* It is intentionally different from swake_up_all() to allow usage from
* hard interrupt context and interrupt disabled regions.
*/
void swake_up_all_locked(struct swait_queue_head *q)
{
while (!list_empty(&q->task_list))
swake_up_locked(q, 0);
}
void swake_up_one(struct swait_queue_head *q)
{
unsigned long flags;
raw_spin_lock_irqsave(&q->lock, flags);
swake_up_locked(q, 0);
raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up_one);
/*
 * Must not be used with IRQs disabled, since we must be able to
 * release IRQs to guarantee a bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
struct swait_queue *curr;
LIST_HEAD(tmp);
raw_spin_lock_irq(&q->lock);
list_splice_init(&q->task_list, &tmp);
while (!list_empty(&tmp)) {
curr = list_first_entry(&tmp, typeof(*curr), task_list);
wake_up_state(curr->task, TASK_NORMAL);
list_del_init(&curr->task_list);
if (list_empty(&tmp))
break;
raw_spin_unlock_irq(&q->lock);
raw_spin_lock_irq(&q->lock);
}
raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);
void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
wait->task = current;
if (list_empty(&wait->task_list))
list_add_tail(&wait->task_list, &q->task_list);
}
void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
unsigned long flags;
raw_spin_lock_irqsave(&q->lock, flags);
__prepare_to_swait(q, wait);
set_current_state(state);
raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait_exclusive);
long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
unsigned long flags;
long ret = 0;
raw_spin_lock_irqsave(&q->lock, flags);
if (signal_pending_state(state, current)) {
/*
* See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
* must not see us.
*/
list_del_init(&wait->task_list);
ret = -ERESTARTSYS;
} else {
__prepare_to_swait(q, wait);
set_current_state(state);
}
raw_spin_unlock_irqrestore(&q->lock, flags);
return ret;
}
EXPORT_SYMBOL(prepare_to_swait_event);
void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
__set_current_state(TASK_RUNNING);
if (!list_empty(&wait->task_list))
list_del_init(&wait->task_list);
}
void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
unsigned long flags;
__set_current_state(TASK_RUNNING);
if (!list_empty_careful(&wait->task_list)) {
raw_spin_lock_irqsave(&q->lock, flags);
list_del_init(&wait->task_list);
raw_spin_unlock_irqrestore(&q->lock, flags);
}
}
EXPORT_SYMBOL(finish_swait);
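/*
 * Illustrative usage sketch (editor's addition): the canonical wait loop
 * pairing the prepare/finish helpers above with a wakeup. The queue,
 * flag and the split between contexts are made up for the example:
 *
 *	static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
 *	static bool my_cond;
 *
 *	// waiter side
 *	DECLARE_SWAITQUEUE(wait);
 *
 *	for (;;) {
 *		prepare_to_swait_exclusive(&my_wq, &wait,
 *					   TASK_UNINTERRUPTIBLE);
 *		if (READ_ONCE(my_cond))
 *			break;
 *		schedule();
 *	}
 *	finish_swait(&my_wq, &wait);
 *
 *	// waker side
 *	WRITE_ONCE(my_cond, true);
 *	swake_up_one(&my_wq);
 */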
| linux-master | kernel/sched/swait.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* RDMA resource limiting controller for cgroups.
*
* Used to allow a cgroup hierarchy to stop processes from consuming
* additional RDMA resources after a certain limit is reached.
*
* Copyright (C) 2016 Parav Pandit <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cgroup.h>
#include <linux/parser.h>
#include <linux/cgroup_rdma.h>
#define RDMACG_MAX_STR "max"
/*
* Protects list of resource pools maintained on per cgroup basis
* and rdma device list.
*/
static DEFINE_MUTEX(rdmacg_mutex);
static LIST_HEAD(rdmacg_devices);
enum rdmacg_file_type {
RDMACG_RESOURCE_TYPE_MAX,
RDMACG_RESOURCE_TYPE_STAT,
};
/*
 * Resource table definition as seen by the user. Entries need to be
 * added when more resources are added/defined at the IB verb/core
 * layer.
 */
static char const *rdmacg_resource_names[] = {
[RDMACG_RESOURCE_HCA_HANDLE] = "hca_handle",
[RDMACG_RESOURCE_HCA_OBJECT] = "hca_object",
};
/* resource tracker for each resource of rdma cgroup */
struct rdmacg_resource {
int max;
int usage;
};
/*
* resource pool object which represents per cgroup, per device
* resources. There are multiple instances of this object per cgroup,
 * therefore it cannot be embedded within the rdma_cgroup structure. It
 * is maintained as a list.
*/
struct rdmacg_resource_pool {
struct rdmacg_device *device;
struct rdmacg_resource resources[RDMACG_RESOURCE_MAX];
struct list_head cg_node;
struct list_head dev_node;
/* count active user tasks of this pool */
u64 usage_sum;
/* total number counts which are set to max */
int num_max_cnt;
};
static struct rdma_cgroup *css_rdmacg(struct cgroup_subsys_state *css)
{
return container_of(css, struct rdma_cgroup, css);
}
static struct rdma_cgroup *parent_rdmacg(struct rdma_cgroup *cg)
{
return css_rdmacg(cg->css.parent);
}
static inline struct rdma_cgroup *get_current_rdmacg(void)
{
return css_rdmacg(task_get_css(current, rdma_cgrp_id));
}
static void set_resource_limit(struct rdmacg_resource_pool *rpool,
int index, int new_max)
{
if (new_max == S32_MAX) {
if (rpool->resources[index].max != S32_MAX)
rpool->num_max_cnt++;
} else {
if (rpool->resources[index].max == S32_MAX)
rpool->num_max_cnt--;
}
rpool->resources[index].max = new_max;
}
static void set_all_resource_max_limit(struct rdmacg_resource_pool *rpool)
{
int i;
for (i = 0; i < RDMACG_RESOURCE_MAX; i++)
set_resource_limit(rpool, i, S32_MAX);
}
static void free_cg_rpool_locked(struct rdmacg_resource_pool *rpool)
{
lockdep_assert_held(&rdmacg_mutex);
list_del(&rpool->cg_node);
list_del(&rpool->dev_node);
kfree(rpool);
}
static struct rdmacg_resource_pool *
find_cg_rpool_locked(struct rdma_cgroup *cg,
struct rdmacg_device *device)
{
struct rdmacg_resource_pool *pool;
lockdep_assert_held(&rdmacg_mutex);
list_for_each_entry(pool, &cg->rpools, cg_node)
if (pool->device == device)
return pool;
return NULL;
}
static struct rdmacg_resource_pool *
get_cg_rpool_locked(struct rdma_cgroup *cg, struct rdmacg_device *device)
{
struct rdmacg_resource_pool *rpool;
rpool = find_cg_rpool_locked(cg, device);
if (rpool)
return rpool;
rpool = kzalloc(sizeof(*rpool), GFP_KERNEL);
if (!rpool)
return ERR_PTR(-ENOMEM);
rpool->device = device;
set_all_resource_max_limit(rpool);
INIT_LIST_HEAD(&rpool->cg_node);
INIT_LIST_HEAD(&rpool->dev_node);
list_add_tail(&rpool->cg_node, &cg->rpools);
list_add_tail(&rpool->dev_node, &device->rpools);
return rpool;
}
/**
* uncharge_cg_locked - uncharge resource for rdma cgroup
* @cg: pointer to cg to uncharge and all parents in hierarchy
* @device: pointer to rdmacg device
* @index: index of the resource to uncharge in cg (resource pool)
*
* It also frees the resource pool which was created as part of
* charging operation when there are no resources attached to
* resource pool.
*/
static void
uncharge_cg_locked(struct rdma_cgroup *cg,
struct rdmacg_device *device,
enum rdmacg_resource_type index)
{
struct rdmacg_resource_pool *rpool;
rpool = find_cg_rpool_locked(cg, device);
	/*
	 * rpool cannot be NULL at this stage. Let the kernel keep operating
	 * in case there is a bug in the IB stack or the rdma controller,
	 * instead of crashing the system.
	 */
if (unlikely(!rpool)) {
pr_warn("Invalid device %p or rdma cgroup %p\n", cg, device);
return;
}
rpool->resources[index].usage--;
/*
* A negative count (or overflow) is invalid,
* it indicates a bug in the rdma controller.
*/
WARN_ON_ONCE(rpool->resources[index].usage < 0);
rpool->usage_sum--;
if (rpool->usage_sum == 0 &&
rpool->num_max_cnt == RDMACG_RESOURCE_MAX) {
/*
* No user of the rpool and all entries are set to max, so
* safe to delete this rpool.
*/
free_cg_rpool_locked(rpool);
}
}
/**
* rdmacg_uncharge_hierarchy - hierarchically uncharge rdma resource count
* @cg: pointer to cg to uncharge and all parents in hierarchy
* @device: pointer to rdmacg device
 * @stop_cg: cgroup at which to stop uncharging while traversing up the
 *	     hierarchy
* @index: index of the resource to uncharge in cg in given resource pool
*/
static void rdmacg_uncharge_hierarchy(struct rdma_cgroup *cg,
struct rdmacg_device *device,
struct rdma_cgroup *stop_cg,
enum rdmacg_resource_type index)
{
struct rdma_cgroup *p;
mutex_lock(&rdmacg_mutex);
for (p = cg; p != stop_cg; p = parent_rdmacg(p))
uncharge_cg_locked(p, device, index);
mutex_unlock(&rdmacg_mutex);
css_put(&cg->css);
}
/**
* rdmacg_uncharge - hierarchically uncharge rdma resource count
* @cg: pointer to cg to uncharge and all parents in hierarchy
* @device: pointer to rdmacg device
* @index: index of the resource to uncharge in cgroup in given resource pool
*/
void rdmacg_uncharge(struct rdma_cgroup *cg,
struct rdmacg_device *device,
enum rdmacg_resource_type index)
{
if (index >= RDMACG_RESOURCE_MAX)
return;
rdmacg_uncharge_hierarchy(cg, device, NULL, index);
}
EXPORT_SYMBOL(rdmacg_uncharge);
/**
* rdmacg_try_charge - hierarchically try to charge the rdma resource
* @rdmacg: pointer to rdma cgroup which will own this resource
* @device: pointer to rdmacg device
* @index: index of the resource to charge in cgroup (resource pool)
*
* This function follows charging resource in hierarchical way.
* It will fail if the charge would cause the new value to exceed the
* hierarchical limit.
* Returns 0 if the charge succeeded, otherwise -EAGAIN, -ENOMEM or -EINVAL.
* Returns pointer to rdmacg for this resource when charging is successful.
*
 * The charger needs to account resources on two criteria:
 * (a) per cgroup and (b) per device resource usage.
 * Per-cgroup accounting ensures that the tasks of a cgroup don't cross
 * the configured limits. Per-device accounting provides granular
 * configuration when multiple devices are in use. On the first charge, a
 * resource pool is allocated in the hierarchy for each parent cgroup
 * encountered; afterwards the pools already exist, so subsequent
 * charge/uncharge operations are much faster.
*/
int rdmacg_try_charge(struct rdma_cgroup **rdmacg,
struct rdmacg_device *device,
enum rdmacg_resource_type index)
{
struct rdma_cgroup *cg, *p;
struct rdmacg_resource_pool *rpool;
s64 new;
int ret = 0;
if (index >= RDMACG_RESOURCE_MAX)
return -EINVAL;
/*
* hold on to css, as cgroup can be removed but resource
* accounting happens on css.
*/
cg = get_current_rdmacg();
mutex_lock(&rdmacg_mutex);
for (p = cg; p; p = parent_rdmacg(p)) {
rpool = get_cg_rpool_locked(p, device);
if (IS_ERR(rpool)) {
ret = PTR_ERR(rpool);
goto err;
} else {
new = rpool->resources[index].usage + 1;
if (new > rpool->resources[index].max) {
ret = -EAGAIN;
goto err;
} else {
rpool->resources[index].usage = new;
rpool->usage_sum++;
}
}
}
mutex_unlock(&rdmacg_mutex);
*rdmacg = cg;
return 0;
err:
mutex_unlock(&rdmacg_mutex);
rdmacg_uncharge_hierarchy(cg, device, p, index);
return ret;
}
EXPORT_SYMBOL(rdmacg_try_charge);
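/*
 * Illustrative caller sketch (editor's addition): how an IB driver might
 * pair the charge/uncharge API above around object creation and
 * destruction. The surrounding driver structures are made up:
 *
 *	struct rdma_cgroup *cg;
 *	int ret;
 *
 *	ret = rdmacg_try_charge(&cg, &dev->cg_device,
 *				RDMACG_RESOURCE_HCA_OBJECT);
 *	if (ret)
 *		return ret;	// -EAGAIN when a hierarchy limit is hit
 *	// ... create the object, remembering cg ...
 *
 *	// and on destruction of the object:
 *	rdmacg_uncharge(cg, &dev->cg_device, RDMACG_RESOURCE_HCA_OBJECT);
 */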
/**
* rdmacg_register_device - register rdmacg device to rdma controller.
* @device: pointer to rdmacg device whose resources need to be accounted.
*
 * If the IB stack wishes a device to participate in rdma cgroup resource
 * tracking, it must invoke this API to register with the rdma cgroup
 * controller before any user space application can start using the RDMA
 * resources.
*/
void rdmacg_register_device(struct rdmacg_device *device)
{
INIT_LIST_HEAD(&device->dev_node);
INIT_LIST_HEAD(&device->rpools);
mutex_lock(&rdmacg_mutex);
list_add_tail(&device->dev_node, &rdmacg_devices);
mutex_unlock(&rdmacg_mutex);
}
EXPORT_SYMBOL(rdmacg_register_device);
/**
* rdmacg_unregister_device - unregister rdmacg device from rdma controller.
* @device: pointer to rdmacg device which was previously registered with rdma
* controller using rdmacg_register_device().
*
 * The IB stack must invoke this after all the resources of the IB device
 * have been destroyed, and after ensuring that no more resources will be
 * created while this API runs.
*/
void rdmacg_unregister_device(struct rdmacg_device *device)
{
struct rdmacg_resource_pool *rpool, *tmp;
/*
* Synchronize with any active resource settings,
* usage query happening via configfs.
*/
mutex_lock(&rdmacg_mutex);
list_del_init(&device->dev_node);
/*
	 * Now that this device is off the cgroup list, it's safe to free
* all the rpool resources.
*/
list_for_each_entry_safe(rpool, tmp, &device->rpools, dev_node)
free_cg_rpool_locked(rpool);
mutex_unlock(&rdmacg_mutex);
}
EXPORT_SYMBOL(rdmacg_unregister_device);
static int parse_resource(char *c, int *intval)
{
substring_t argstr;
char *name, *value = c;
size_t len;
int ret, i;
name = strsep(&value, "=");
if (!name || !value)
return -EINVAL;
i = match_string(rdmacg_resource_names, RDMACG_RESOURCE_MAX, name);
if (i < 0)
return i;
len = strlen(value);
argstr.from = value;
argstr.to = value + len;
ret = match_int(&argstr, intval);
if (ret >= 0) {
if (*intval < 0)
return -EINVAL;
return i;
}
if (strncmp(value, RDMACG_MAX_STR, len) == 0) {
*intval = S32_MAX;
return i;
}
return -EINVAL;
}
static int rdmacg_parse_limits(char *options,
int *new_limits, unsigned long *enables)
{
char *c;
int err = -EINVAL;
/* parse resource options */
while ((c = strsep(&options, " ")) != NULL) {
int index, intval;
index = parse_resource(c, &intval);
if (index < 0)
goto err;
new_limits[index] = intval;
*enables |= BIT(index);
}
return 0;
err:
return err;
}
static struct rdmacg_device *rdmacg_get_device_locked(const char *name)
{
struct rdmacg_device *device;
lockdep_assert_held(&rdmacg_mutex);
list_for_each_entry(device, &rdmacg_devices, dev_node)
if (!strcmp(name, device->name))
return device;
return NULL;
}
static ssize_t rdmacg_resource_set_max(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct rdma_cgroup *cg = css_rdmacg(of_css(of));
const char *dev_name;
struct rdmacg_resource_pool *rpool;
struct rdmacg_device *device;
char *options = strstrip(buf);
int *new_limits;
unsigned long enables = 0;
int i = 0, ret = 0;
/* extract the device name first */
dev_name = strsep(&options, " ");
if (!dev_name) {
ret = -EINVAL;
goto err;
}
new_limits = kcalloc(RDMACG_RESOURCE_MAX, sizeof(int), GFP_KERNEL);
if (!new_limits) {
ret = -ENOMEM;
goto err;
}
ret = rdmacg_parse_limits(options, new_limits, &enables);
if (ret)
goto parse_err;
/* acquire lock to synchronize with hot plug devices */
mutex_lock(&rdmacg_mutex);
device = rdmacg_get_device_locked(dev_name);
if (!device) {
ret = -ENODEV;
goto dev_err;
}
rpool = get_cg_rpool_locked(cg, device);
if (IS_ERR(rpool)) {
ret = PTR_ERR(rpool);
goto dev_err;
}
/* now set the new limits of the rpool */
for_each_set_bit(i, &enables, RDMACG_RESOURCE_MAX)
set_resource_limit(rpool, i, new_limits[i]);
if (rpool->usage_sum == 0 &&
rpool->num_max_cnt == RDMACG_RESOURCE_MAX) {
/*
* No user of the rpool and all entries are set to max, so
* safe to delete this rpool.
*/
free_cg_rpool_locked(rpool);
}
dev_err:
mutex_unlock(&rdmacg_mutex);
parse_err:
kfree(new_limits);
err:
return ret ?: nbytes;
}
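/*
 * Worked examples of the rdma.max syntax accepted above (editor's
 * addition; "mlx4_0" is a made-up device name):
 *
 *	"mlx4_0 hca_handle=2 hca_object=2000" - set both limits
 *	"mlx4_0 hca_object=max"               - lift one limit (S32_MAX)
 *	"mlx4_0 hca_handle=-1"                - rejected with -EINVAL by
 *						parse_resource()
 */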
static void print_rpool_values(struct seq_file *sf,
struct rdmacg_resource_pool *rpool)
{
enum rdmacg_file_type sf_type;
int i;
u32 value;
sf_type = seq_cft(sf)->private;
for (i = 0; i < RDMACG_RESOURCE_MAX; i++) {
seq_puts(sf, rdmacg_resource_names[i]);
seq_putc(sf, '=');
if (sf_type == RDMACG_RESOURCE_TYPE_MAX) {
if (rpool)
value = rpool->resources[i].max;
else
value = S32_MAX;
} else {
if (rpool)
value = rpool->resources[i].usage;
else
value = 0;
}
if (value == S32_MAX)
seq_puts(sf, RDMACG_MAX_STR);
else
seq_printf(sf, "%d", value);
seq_putc(sf, ' ');
}
}
static int rdmacg_resource_read(struct seq_file *sf, void *v)
{
struct rdmacg_device *device;
struct rdmacg_resource_pool *rpool;
struct rdma_cgroup *cg = css_rdmacg(seq_css(sf));
mutex_lock(&rdmacg_mutex);
list_for_each_entry(device, &rdmacg_devices, dev_node) {
seq_printf(sf, "%s ", device->name);
rpool = find_cg_rpool_locked(cg, device);
print_rpool_values(sf, rpool);
seq_putc(sf, '\n');
}
mutex_unlock(&rdmacg_mutex);
return 0;
}
static struct cftype rdmacg_files[] = {
{
.name = "max",
.write = rdmacg_resource_set_max,
.seq_show = rdmacg_resource_read,
.private = RDMACG_RESOURCE_TYPE_MAX,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "current",
.seq_show = rdmacg_resource_read,
.private = RDMACG_RESOURCE_TYPE_STAT,
.flags = CFTYPE_NOT_ON_ROOT,
},
{ } /* terminate */
};
static struct cgroup_subsys_state *
rdmacg_css_alloc(struct cgroup_subsys_state *parent)
{
struct rdma_cgroup *cg;
cg = kzalloc(sizeof(*cg), GFP_KERNEL);
if (!cg)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&cg->rpools);
return &cg->css;
}
static void rdmacg_css_free(struct cgroup_subsys_state *css)
{
struct rdma_cgroup *cg = css_rdmacg(css);
kfree(cg);
}
/**
* rdmacg_css_offline - cgroup css_offline callback
* @css: css of interest
*
 * This function is called when @css is about to go away and is
 * responsible for shooting down all rdmacg state associated with @css.
 * As part of that it sets all the resource pool entries to the max
 * value, so that once resources are uncharged, the associated resource
 * pools can be freed as well.
*/
static void rdmacg_css_offline(struct cgroup_subsys_state *css)
{
struct rdma_cgroup *cg = css_rdmacg(css);
struct rdmacg_resource_pool *rpool;
mutex_lock(&rdmacg_mutex);
list_for_each_entry(rpool, &cg->rpools, cg_node)
set_all_resource_max_limit(rpool);
mutex_unlock(&rdmacg_mutex);
}
struct cgroup_subsys rdma_cgrp_subsys = {
.css_alloc = rdmacg_css_alloc,
.css_free = rdmacg_css_free,
.css_offline = rdmacg_css_offline,
.legacy_cftypes = rdmacg_files,
.dfl_cftypes = rdmacg_files,
};
| linux-master | kernel/cgroup/rdma.c |
/*
* kernel/cpuset.c
*
* Processor and Memory placement constraints for sets of tasks.
*
* Copyright (C) 2003 BULL SA.
* Copyright (C) 2004-2007 Silicon Graphics, Inc.
* Copyright (C) 2006 Google, Inc
*
* Portions derived from Patrick Mochel's sysfs code.
* sysfs is Copyright (c) 2001-3 Patrick Mochel
*
* 2003-10-10 Written by Simon Derr.
* 2003-10-22 Updates by Stephen Hemminger.
* 2004 May-July Rework by Paul Jackson.
* 2006 Rework by Paul Menage to use generic cgroups
* 2008 Rework of the scheduler domains and CPU hotplug handling
* by Max Krasnyansky
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/deadline.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
#include <linux/cgroup.h>
#include <linux/wait.h>
DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
/*
 * There can be abnormal cpuset configurations for cpu or memory node
 * binding; this key provides a quick, low-cost check for that
 * situation.
 */
DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
/* See "Frequency meter" comments, below. */
struct fmeter {
int cnt; /* unprocessed events count */
int val; /* most recent output value */
time64_t time; /* clock (secs) when val computed */
spinlock_t lock; /* guards read or write of above */
};
/*
* Invalid partition error code
*/
enum prs_errcode {
PERR_NONE = 0,
PERR_INVCPUS,
PERR_INVPARENT,
PERR_NOTPART,
PERR_NOTEXCL,
PERR_NOCPUS,
PERR_HOTPLUG,
PERR_CPUSEMPTY,
};
static const char * const perr_strings[] = {
[PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus",
[PERR_INVPARENT] = "Parent is an invalid partition root",
[PERR_NOTPART] = "Parent is not a partition root",
[PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
[PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
[PERR_HOTPLUG] = "No cpu available due to hotplug",
[PERR_CPUSEMPTY] = "cpuset.cpus is empty",
};
struct cpuset {
struct cgroup_subsys_state css;
unsigned long flags; /* "unsigned long" so bitops work */
/*
* On default hierarchy:
*
* The user-configured masks can only be changed by writing to
* cpuset.cpus and cpuset.mems, and won't be limited by the
* parent masks.
*
	 * The effective masks are the real masks that apply to the tasks
* in the cpuset. They may be changed if the configured masks are
* changed or hotplug happens.
*
* effective_mask == configured_mask & parent's effective_mask,
* and if it ends up empty, it will inherit the parent's mask.
*
*
* On legacy hierarchy:
*
* The user-configured masks are always the same with effective masks.
*/
	/* user-configured CPUs and Memory Nodes allowed to tasks */
cpumask_var_t cpus_allowed;
nodemask_t mems_allowed;
	/* effective CPUs and Memory Nodes allowed to tasks */
cpumask_var_t effective_cpus;
nodemask_t effective_mems;
/*
* CPUs allocated to child sub-partitions (default hierarchy only)
* - CPUs granted by the parent = effective_cpus U subparts_cpus
* - effective_cpus and subparts_cpus are mutually exclusive.
*
* effective_cpus contains only onlined CPUs, but subparts_cpus
* may have offlined ones.
*/
cpumask_var_t subparts_cpus;
/*
	 * These are the old Memory Nodes tasks took on.
*
* - top_cpuset.old_mems_allowed is initialized to mems_allowed.
* - A new cpuset's old_mems_allowed is initialized when some
* task is moved into it.
* - old_mems_allowed is used in cpuset_migrate_mm() when we change
* cpuset.mems_allowed and have tasks' nodemask updated, and
* then old_mems_allowed is updated to mems_allowed.
*/
nodemask_t old_mems_allowed;
struct fmeter fmeter; /* memory_pressure filter */
/*
* Tasks are being attached to this cpuset. Used to prevent
* zeroing cpus/mems_allowed between ->can_attach() and ->attach().
*/
int attach_in_progress;
/* partition number for rebuild_sched_domains() */
int pn;
/* for custom sched domain */
int relax_domain_level;
/* number of CPUs in subparts_cpus */
int nr_subparts_cpus;
/* partition root state */
int partition_root_state;
/*
* Default hierarchy only:
* use_parent_ecpus - set if using parent's effective_cpus
* child_ecpus_count - # of children with use_parent_ecpus set
*/
int use_parent_ecpus;
int child_ecpus_count;
/*
* number of SCHED_DEADLINE tasks attached to this cpuset, so that we
* know when to rebuild associated root domain bandwidth information.
*/
int nr_deadline_tasks;
int nr_migrate_dl_tasks;
u64 sum_migrate_dl_bw;
/* Invalid partition error code, not lock protected */
enum prs_errcode prs_err;
/* Handle for cpuset.cpus.partition */
struct cgroup_file partition_file;
};
/*
* Partition root states:
*
* 0 - member (not a partition root)
* 1 - partition root
* 2 - partition root without load balancing (isolated)
* -1 - invalid partition root
* -2 - invalid isolated partition root
*/
#define PRS_MEMBER 0
#define PRS_ROOT 1
#define PRS_ISOLATED 2
#define PRS_INVALID_ROOT -1
#define PRS_INVALID_ISOLATED -2
static inline bool is_prs_invalid(int prs_state)
{
return prs_state < 0;
}
/*
* Temporary cpumasks for working with partitions that are passed among
* functions to avoid memory allocation in inner functions.
*/
struct tmpmasks {
cpumask_var_t addmask, delmask; /* For partition root */
cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
};
static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct cpuset, css) : NULL;
}
/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
return css_cs(task_css(task, cpuset_cgrp_id));
}
static inline struct cpuset *parent_cs(struct cpuset *cs)
{
return css_cs(cs->css.parent);
}
void inc_dl_tasks_cs(struct task_struct *p)
{
struct cpuset *cs = task_cs(p);
cs->nr_deadline_tasks++;
}
void dec_dl_tasks_cs(struct task_struct *p)
{
struct cpuset *cs = task_cs(p);
cs->nr_deadline_tasks--;
}
/* bits in struct cpuset flags field */
typedef enum {
CS_ONLINE,
CS_CPU_EXCLUSIVE,
CS_MEM_EXCLUSIVE,
CS_MEM_HARDWALL,
CS_MEMORY_MIGRATE,
CS_SCHED_LOAD_BALANCE,
CS_SPREAD_PAGE,
CS_SPREAD_SLAB,
} cpuset_flagbits_t;
/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}
static inline int is_mem_exclusive(const struct cpuset *cs)
{
return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}
static inline int is_mem_hardwall(const struct cpuset *cs)
{
return test_bit(CS_MEM_HARDWALL, &cs->flags);
}
static inline int is_sched_load_balance(const struct cpuset *cs)
{
return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}
static inline int is_memory_migrate(const struct cpuset *cs)
{
return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}
static inline int is_spread_page(const struct cpuset *cs)
{
return test_bit(CS_SPREAD_PAGE, &cs->flags);
}
static inline int is_spread_slab(const struct cpuset *cs)
{
return test_bit(CS_SPREAD_SLAB, &cs->flags);
}
static inline int is_partition_valid(const struct cpuset *cs)
{
return cs->partition_root_state > 0;
}
static inline int is_partition_invalid(const struct cpuset *cs)
{
return cs->partition_root_state < 0;
}
/*
* Callers should hold callback_lock to modify partition_root_state.
*/
static inline void make_partition_invalid(struct cpuset *cs)
{
if (is_partition_valid(cs))
cs->partition_root_state = -cs->partition_root_state;
}
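/*
 * E.g. PRS_ROOT (1) becomes PRS_INVALID_ROOT (-1), and PRS_ISOLATED (2)
 * becomes PRS_INVALID_ISOLATED (-2); negating preserves which flavor of
 * partition root the cpuset was.
 */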
/*
 * Send a notification event whenever partition_root_state changes.
*/
static inline void notify_partition_change(struct cpuset *cs, int old_prs)
{
if (old_prs == cs->partition_root_state)
return;
cgroup_file_notify(&cs->partition_file);
/* Reset prs_err if not invalid */
if (is_partition_valid(cs))
WRITE_ONCE(cs->prs_err, PERR_NONE);
}
static struct cpuset top_cpuset = {
.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
(1 << CS_MEM_EXCLUSIVE)),
.partition_root_state = PRS_ROOT,
};
/**
* cpuset_for_each_child - traverse online children of a cpuset
* @child_cs: loop cursor pointing to the current child
* @pos_css: used for iteration
* @parent_cs: target cpuset to walk children of
*
* Walk @child_cs through the online children of @parent_cs. Must be used
* with RCU read locked.
*/
#define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
css_for_each_child((pos_css), &(parent_cs)->css) \
if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
/**
* cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
* @des_cs: loop cursor pointing to the current descendant
* @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
*
* Walk @des_cs through the online descendants of @root_cs. Must be used
* with RCU read locked. The caller may modify @pos_css by calling
* css_rightmost_descendant() to skip subtree. @root_cs is included in the
 * iteration and is the first node to be visited.
*/
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
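/*
 * Typical usage sketch (editor's addition), mirroring how these
 * iterators are used later in this file (e.g. update_domain_attr_tree()):
 *
 *	struct cpuset *cp;
 *	struct cgroup_subsys_state *pos_css;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
 *		if (cpumask_empty(cp->cpus_allowed)) {
 *			// prune this whole subtree
 *			pos_css = css_rightmost_descendant(pos_css);
 *			continue;
 *		}
 *		// ... visit cp ...
 *	}
 *	rcu_read_unlock();
 */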
/*
* There are two global locks guarding cpuset structures - cpuset_mutex and
* callback_lock. We also require taking task_lock() when dereferencing a
* task's cpuset pointer. See "The task_lock() exception", at the end of this
* comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems
* can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
* structures. Note that cpuset_mutex needs to be a mutex as it is used in
* paths that rely on priority inheritance (e.g. scheduler - on RT) for
* correctness.
*
* A task must hold both locks to modify cpusets. If a task holds
* cpuset_mutex, it blocks others, ensuring that it is the only task able to
* also acquire callback_lock and be able to modify cpusets. It can perform
* various checks on the cpuset structure first, knowing nothing will change.
* It can also allocate memory while just holding cpuset_mutex. While it is
* performing these checks, various callback routines can briefly acquire
* callback_lock to query cpusets. Once it is ready to make the changes, it
* takes callback_lock, blocking everyone else.
*
* Calls to the kernel memory allocator can not be made while holding
* callback_lock, as that would risk double tripping on callback_lock
* from one of the callbacks into the cpuset code from within
* __alloc_pages().
*
* If a task is only holding callback_lock, then it has read-only
* access to cpusets.
*
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task; we use the alloc_lock in task_struct to protect
 * them.
*
* The cpuset_common_file_read() handlers only hold callback_lock across
* small pieces of code, such as when reading out possibly multi-word
* cpumasks and nodemasks.
*
* Accessing a task's cpuset should be done in accordance with the
* guidelines for accessing subsystem state in kernel/cgroup.c
*/
static DEFINE_MUTEX(cpuset_mutex);
void cpuset_lock(void)
{
mutex_lock(&cpuset_mutex);
}
void cpuset_unlock(void)
{
mutex_unlock(&cpuset_mutex);
}
static DEFINE_SPINLOCK(callback_lock);
static struct workqueue_struct *cpuset_migrate_mm_wq;
/*
* CPU / memory hotplug is handled asynchronously.
*/
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
static inline void check_insane_mems_config(nodemask_t *nodes)
{
if (!cpusets_insane_config() &&
movable_only_nodes(nodes)) {
static_branch_enable(&cpusets_insane_config_key);
pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
"Cpuset allocations might fail even with a lot of memory available.\n",
nodemask_pr_args(nodes));
}
}
/*
* Cgroup v2 behavior is used on the "cpus" and "mems" control files when
* on default hierarchy or when the cpuset_v2_mode flag is set by mounting
* the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
* With v2 behavior, "cpus" and "mems" are always what the users have
* requested and won't be changed by hotplug events. Only the effective
* cpus or mems will be affected.
*/
static inline bool is_in_v2_mode(void)
{
return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
(cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}
/**
* partition_is_populated - check if partition has tasks
* @cs: partition root to be checked
* @excluded_child: a child cpuset to be excluded in task checking
* Return: true if there are tasks, false otherwise
*
* It is assumed that @cs is a valid partition root. @excluded_child should
* be non-NULL when this cpuset is going to become a partition itself.
*/
static inline bool partition_is_populated(struct cpuset *cs,
struct cpuset *excluded_child)
{
struct cgroup_subsys_state *css;
struct cpuset *child;
if (cs->css.cgroup->nr_populated_csets)
return true;
if (!excluded_child && !cs->nr_subparts_cpus)
return cgroup_is_populated(cs->css.cgroup);
rcu_read_lock();
cpuset_for_each_child(child, css, cs) {
if (child == excluded_child)
continue;
if (is_partition_valid(child))
continue;
if (cgroup_is_populated(child->css.cgroup)) {
rcu_read_unlock();
return true;
}
}
rcu_read_unlock();
return false;
}
/*
 * Return in pmask the portion of a task's cpuset's cpus_allowed that
* are online and are capable of running the task. If none are found,
* walk up the cpuset hierarchy until we find one that does have some
* appropriate cpus.
*
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_mask.
*
* Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_online_cpus(struct task_struct *tsk,
struct cpumask *pmask)
{
const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
struct cpuset *cs;
if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
cpumask_copy(pmask, cpu_online_mask);
rcu_read_lock();
cs = task_cs(tsk);
while (!cpumask_intersects(cs->effective_cpus, pmask)) {
cs = parent_cs(cs);
if (unlikely(!cs)) {
/*
* The top cpuset doesn't have any online cpu as a
* consequence of a race between cpuset_hotplug_work
* and cpu hotplug notifier. But we know the top
* cpuset's effective_cpus is on its way to be
* identical to cpu_online_mask.
*/
goto out_unlock;
}
}
cpumask_and(pmask, pmask, cs->effective_cpus);
out_unlock:
rcu_read_unlock();
}
/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
* are online, with memory. If none are online with memory, walk
* up the cpuset hierarchy until we find one that does have some
* online mems. The top cpuset always has some mems online.
*
* One way or another, we guarantee to return some non-empty subset
* of node_states[N_MEMORY].
*
* Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
cs = parent_cs(cs);
nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}
/*
* update task's spread flag if cpuset's page/slab spread flag is set
*
* Call with callback_lock or cpuset_mutex held. The check can be skipped
* if on default hierarchy.
*/
static void cpuset_update_task_spread_flags(struct cpuset *cs,
struct task_struct *tsk)
{
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
return;
if (is_spread_page(cs))
task_set_spread_page(tsk);
else
task_clear_spread_page(tsk);
if (is_spread_slab(cs))
task_set_spread_slab(tsk);
else
task_clear_spread_slab(tsk);
}
/*
* is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
*
* One cpuset is a subset of another if all its allowed CPUs and
* Memory Nodes are a subset of the other, and its exclusive flags
* are only set if the other's are set. Call holding cpuset_mutex.
*/
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
nodes_subset(p->mems_allowed, q->mems_allowed) &&
is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
is_mem_exclusive(p) <= is_mem_exclusive(q);
}
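/*
 * Worked example (editor's addition): with p->cpus_allowed = 0-1,
 * q->cpus_allowed = 0-3, p's mems a subset of q's, and neither cpuset
 * exclusive, is_cpuset_subset(p, q) is true. Setting CS_CPU_EXCLUSIVE
 * on p alone makes it false, since is_cpu_exclusive(p) would then
 * exceed is_cpu_exclusive(q).
 */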
/**
* alloc_cpumasks - allocate three cpumasks for cpuset
 * @cs: the cpuset that has cpumasks to be allocated.
* @tmp: the tmpmasks structure pointer
* Return: 0 if successful, -ENOMEM otherwise.
*
* Only one of the two input arguments should be non-NULL.
*/
static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
cpumask_var_t *pmask1, *pmask2, *pmask3;
if (cs) {
pmask1 = &cs->cpus_allowed;
pmask2 = &cs->effective_cpus;
pmask3 = &cs->subparts_cpus;
} else {
pmask1 = &tmp->new_cpus;
pmask2 = &tmp->addmask;
pmask3 = &tmp->delmask;
}
if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
return -ENOMEM;
if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
goto free_one;
if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
goto free_two;
return 0;
free_two:
free_cpumask_var(*pmask2);
free_one:
free_cpumask_var(*pmask1);
return -ENOMEM;
}
/**
* free_cpumasks - free cpumasks in a tmpmasks structure
 * @cs: the cpuset that has cpumasks to be freed.
* @tmp: the tmpmasks structure pointer
*/
static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
if (cs) {
free_cpumask_var(cs->cpus_allowed);
free_cpumask_var(cs->effective_cpus);
free_cpumask_var(cs->subparts_cpus);
}
if (tmp) {
free_cpumask_var(tmp->new_cpus);
free_cpumask_var(tmp->addmask);
free_cpumask_var(tmp->delmask);
}
}
/**
* alloc_trial_cpuset - allocate a trial cpuset
* @cs: the cpuset that the trial cpuset duplicates
*/
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
struct cpuset *trial;
trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
if (!trial)
return NULL;
if (alloc_cpumasks(trial, NULL)) {
kfree(trial);
return NULL;
}
cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
cpumask_copy(trial->effective_cpus, cs->effective_cpus);
return trial;
}
/**
* free_cpuset - free the cpuset
* @cs: the cpuset to be freed
*/
static inline void free_cpuset(struct cpuset *cs)
{
free_cpumasks(cs, NULL);
kfree(cs);
}
/*
* validate_change_legacy() - Validate conditions specific to legacy (v1)
* behavior.
*/
static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
{
struct cgroup_subsys_state *css;
struct cpuset *c, *par;
int ret;
WARN_ON_ONCE(!rcu_read_lock_held());
/* Each of our child cpusets must be a subset of us */
ret = -EBUSY;
cpuset_for_each_child(c, css, cur)
if (!is_cpuset_subset(c, trial))
goto out;
/* On legacy hierarchy, we must be a subset of our parent cpuset. */
ret = -EACCES;
par = parent_cs(cur);
if (par && !is_cpuset_subset(trial, par))
goto out;
ret = 0;
out:
return ret;
}
/*
* validate_change() - Used to validate that any proposed cpuset change
* follows the structural rules for cpusets.
*
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
* cpuset_mutex held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the
* cpuset in the list must use cur below, not trial.
*
* 'trial' is the address of bulk structure copy of cur, with
* perhaps one or more of the fields cpus_allowed, mems_allowed,
* or flags changed to new, trial values.
*
* Return 0 if valid, -errno if not.
*/
static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
struct cgroup_subsys_state *css;
struct cpuset *c, *par;
int ret = 0;
rcu_read_lock();
if (!is_in_v2_mode())
ret = validate_change_legacy(cur, trial);
if (ret)
goto out;
/* Remaining checks don't apply to root cpuset */
if (cur == &top_cpuset)
goto out;
par = parent_cs(cur);
/*
* Cpusets with tasks - existing or newly being attached - can't
* be changed to have empty cpus_allowed or mems_allowed.
*/
ret = -ENOSPC;
	if (cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress) {
if (!cpumask_empty(cur->cpus_allowed) &&
cpumask_empty(trial->cpus_allowed))
goto out;
if (!nodes_empty(cur->mems_allowed) &&
nodes_empty(trial->mems_allowed))
goto out;
}
/*
* We can't shrink if we won't have enough room for SCHED_DEADLINE
* tasks.
*/
ret = -EBUSY;
if (is_cpu_exclusive(cur) &&
!cpuset_cpumask_can_shrink(cur->cpus_allowed,
trial->cpus_allowed))
goto out;
/*
* If either I or some sibling (!= me) is exclusive, we can't
* overlap
*/
ret = -EINVAL;
cpuset_for_each_child(c, css, par) {
if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
c != cur &&
cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
goto out;
if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
c != cur &&
nodes_intersects(trial->mems_allowed, c->mems_allowed))
goto out;
}
ret = 0;
out:
rcu_read_unlock();
return ret;
}
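/*
 * Example (illustrative sketch only, not part of the build): callers of
 * validate_change() always operate on a trial copy, as update_flag()
 * further below does.  The shape of the pattern is: duplicate, mutate
 * the copy, validate, and only then commit under callback_lock.
 */
#if 0
static int example_set_flag(struct cpuset *cs, cpuset_flagbits_t bit)
{
	struct cpuset *trialcs = alloc_trial_cpuset(cs);
	int err;

	if (!trialcs)
		return -ENOMEM;

	set_bit(bit, &trialcs->flags);		/* mutate the copy only */
	err = validate_change(cs, trialcs);	/* subset/exclusive rules */
	if (!err) {
		spin_lock_irq(&callback_lock);
		cs->flags = trialcs->flags;	/* commit */
		spin_unlock_irq(&callback_lock);
	}
	free_cpuset(trialcs);
	return err;
}
#endif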
#ifdef CONFIG_SMP
/*
* Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective_cpus masks?
*/
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}
static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
}
static void update_domain_attr_tree(struct sched_domain_attr *dattr,
struct cpuset *root_cs)
{
struct cpuset *cp;
struct cgroup_subsys_state *pos_css;
rcu_read_lock();
cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
/* skip the whole subtree if @cp doesn't have any CPU */
if (cpumask_empty(cp->cpus_allowed)) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
if (is_sched_load_balance(cp))
update_domain_attr(dattr, cp);
}
rcu_read_unlock();
}
/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
/* jump label reference count + the top-level cpuset */
return static_key_count(&cpusets_enabled_key.key) + 1;
}
/*
* generate_sched_domains()
*
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
* The output of this function needs to be passed to kernel/sched/core.c
* partition_sched_domains() routine, which will rebuild the scheduler's
* load balancing domains (sched domains) as specified by that partial
* partition.
*
* See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
* for a background explanation of this.
*
* Does not return errors, on the theory that the callers of this
* routine would rather not worry about failures to rebuild sched
* domains when operating in the severe memory shortage situations
* that could cause allocation failures below.
*
* Must be called with cpuset_mutex held.
*
* The three key local variables below are:
* cp - cpuset pointer, used (together with pos_css) to perform a
* top-down scan of all cpusets. For our purposes, rebuilding
 * the scheduler's sched domains, we can ignore !is_sched_load_
* balance cpusets.
* csa - (for CpuSet Array) Array of pointers to all the cpusets
* that need to be load balanced, for convenient iterative
* access by the subsequent code that finds the best partition,
 * i.e. the set of domains (subsets) of CPUs such that the
* cpus_allowed of every cpuset marked is_sched_load_balance
* is a subset of one of these domains, while there are as
* many such domains as possible, each as small as possible.
* doms - Conversion of 'csa' to an array of cpumasks, for passing to
* the kernel/sched/core.c routine partition_sched_domains() in a
* convenient format, that can be easily compared to the prior
* value to determine what partition elements (sched domains)
* were changed (added or removed.)
*
* Finding the best partition (set of domains):
* The triple nested loops below over i, j, k scan over the
* load balanced cpusets (using the array of cpuset pointers in
* csa[]) looking for pairs of cpusets that have overlapping
 * cpus_allowed, but which don't have the same 'pn' partition
 * number, and merges them into the same partition number. It keeps
* looping on the 'restart' label until it can no longer find
* any such pairs.
*
* The union of the cpus_allowed masks from the set of
* all cpusets having the same 'pn' value then form the one
* element of the partition (one sched domain) to be passed to
* partition_sched_domains().
*/
static int generate_sched_domains(cpumask_var_t **domains,
struct sched_domain_attr **attributes)
{
struct cpuset *cp; /* top-down scan of cpusets */
struct cpuset **csa; /* array of all cpuset ptrs */
int csn; /* how many cpuset ptrs in csa so far */
int i, j, k; /* indices for partition finding loops */
cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
struct sched_domain_attr *dattr; /* attributes for custom domains */
int ndoms = 0; /* number of sched domains in result */
int nslot; /* next empty doms[] struct cpumask slot */
struct cgroup_subsys_state *pos_css;
bool root_load_balance = is_sched_load_balance(&top_cpuset);
doms = NULL;
dattr = NULL;
csa = NULL;
/* Special case for the 99% of systems with one, full, sched domain */
if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
ndoms = 1;
doms = alloc_sched_domains(ndoms);
if (!doms)
goto done;
dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
if (dattr) {
*dattr = SD_ATTR_INIT;
update_domain_attr_tree(dattr, &top_cpuset);
}
cpumask_and(doms[0], top_cpuset.effective_cpus,
housekeeping_cpumask(HK_TYPE_DOMAIN));
goto done;
}
csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
if (!csa)
goto done;
csn = 0;
rcu_read_lock();
if (root_load_balance)
csa[csn++] = &top_cpuset;
cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
if (cp == &top_cpuset)
continue;
/*
* Continue traversing beyond @cp iff @cp has some CPUs and
* isn't load balancing. The former is obvious. The
* latter: All child cpusets contain a subset of the
* parent's cpus, so just skip them, and then we call
* update_domain_attr_tree() to calc relax_domain_level of
* the corresponding sched domain.
*
* If root is load-balancing, we can skip @cp if it
* is a subset of the root's effective_cpus.
*/
if (!cpumask_empty(cp->cpus_allowed) &&
!(is_sched_load_balance(cp) &&
cpumask_intersects(cp->cpus_allowed,
housekeeping_cpumask(HK_TYPE_DOMAIN))))
continue;
if (root_load_balance &&
cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
continue;
if (is_sched_load_balance(cp) &&
!cpumask_empty(cp->effective_cpus))
csa[csn++] = cp;
/* skip @cp's subtree if not a partition root */
if (!is_partition_valid(cp))
pos_css = css_rightmost_descendant(pos_css);
}
rcu_read_unlock();
for (i = 0; i < csn; i++)
csa[i]->pn = i;
ndoms = csn;
restart:
/* Find the best partition (set of sched domains) */
for (i = 0; i < csn; i++) {
struct cpuset *a = csa[i];
int apn = a->pn;
for (j = 0; j < csn; j++) {
struct cpuset *b = csa[j];
int bpn = b->pn;
if (apn != bpn && cpusets_overlap(a, b)) {
for (k = 0; k < csn; k++) {
struct cpuset *c = csa[k];
if (c->pn == bpn)
c->pn = apn;
}
ndoms--; /* one less element */
goto restart;
}
}
}
/*
* Now we know how many domains to create.
* Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
*/
doms = alloc_sched_domains(ndoms);
if (!doms)
goto done;
/*
* The rest of the code, including the scheduler, can deal with
* dattr==NULL case. No need to abort if alloc fails.
*/
dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
GFP_KERNEL);
for (nslot = 0, i = 0; i < csn; i++) {
struct cpuset *a = csa[i];
struct cpumask *dp;
int apn = a->pn;
if (apn < 0) {
/* Skip completed partitions */
continue;
}
dp = doms[nslot];
if (nslot == ndoms) {
static int warnings = 10;
if (warnings) {
pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
nslot, ndoms, csn, i, apn);
warnings--;
}
continue;
}
cpumask_clear(dp);
if (dattr)
*(dattr + nslot) = SD_ATTR_INIT;
for (j = i; j < csn; j++) {
struct cpuset *b = csa[j];
if (apn == b->pn) {
cpumask_or(dp, dp, b->effective_cpus);
cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
if (dattr)
update_domain_attr_tree(dattr + nslot, b);
/* Done with this partition */
b->pn = -1;
}
}
nslot++;
}
BUG_ON(nslot != ndoms);
done:
kfree(csa);
/*
* Fallback to the default domain if kmalloc() failed.
* See comments in partition_sched_domains().
*/
if (doms == NULL)
ndoms = 1;
*domains = doms;
*attributes = dattr;
return ndoms;
}
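/*
 * Example (illustrative sketch only, not part of the build): the
 * partition-number merging above, reduced to plain ints.  pn[] is
 * assumed to start as pn[i] == i, and overlap(i, j) stands in for
 * cpusets_overlap(csa[i], csa[j]).  Whenever two overlapping entries
 * carry different pn values, every entry holding the second value is
 * relabelled with the first and the scan restarts.
 */
#if 0
static int example_merge_partitions(int *pn, int n,
				    bool (*overlap)(int, int))
{
	int i, j, k, ndoms = n;

restart:
	for (i = 0; i < n; i++) {
		for (j = 0; j < n; j++) {
			int bpn = pn[j];	/* capture before relabelling */

			if (pn[i] == bpn || !overlap(i, j))
				continue;
			for (k = 0; k < n; k++)
				if (pn[k] == bpn)
					pn[k] = pn[i];
			ndoms--;		/* two domains merged into one */
			goto restart;
		}
	}
	return ndoms;	/* distinct pn values == sched domains */
}
#endif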
static void dl_update_tasks_root_domain(struct cpuset *cs)
{
struct css_task_iter it;
struct task_struct *task;
if (cs->nr_deadline_tasks == 0)
return;
css_task_iter_start(&cs->css, 0, &it);
while ((task = css_task_iter_next(&it)))
dl_add_task_root_domain(task);
css_task_iter_end(&it);
}
static void dl_rebuild_rd_accounting(void)
{
struct cpuset *cs = NULL;
struct cgroup_subsys_state *pos_css;
lockdep_assert_held(&cpuset_mutex);
lockdep_assert_cpus_held();
lockdep_assert_held(&sched_domains_mutex);
rcu_read_lock();
/*
* Clear default root domain DL accounting, it will be computed again
* if a task belongs to it.
*/
dl_clear_root_domain(&def_root_domain);
cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
if (cpumask_empty(cs->effective_cpus)) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
css_get(&cs->css);
rcu_read_unlock();
dl_update_tasks_root_domain(cs);
rcu_read_lock();
css_put(&cs->css);
}
rcu_read_unlock();
}
static void
partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
mutex_lock(&sched_domains_mutex);
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
dl_rebuild_rd_accounting();
mutex_unlock(&sched_domains_mutex);
}
/*
* Rebuild scheduler domains.
*
* If the flag 'sched_load_balance' of any cpuset with non-empty
* 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
* which has that flag enabled, or if any cpuset with a non-empty
* 'cpus' is removed, then call this routine to rebuild the
* scheduler's dynamic sched domains.
*
 * Call with cpuset_mutex held; cpus_read_lock() must already be held.
*/
static void rebuild_sched_domains_locked(void)
{
struct cgroup_subsys_state *pos_css;
struct sched_domain_attr *attr;
cpumask_var_t *doms;
struct cpuset *cs;
int ndoms;
lockdep_assert_cpus_held();
lockdep_assert_held(&cpuset_mutex);
/*
* If we have raced with CPU hotplug, return early to avoid
* passing doms with offlined cpu to partition_sched_domains().
 * Anyway, cpuset_hotplug_workfn() will rebuild the sched domains.
*
* With no CPUs in any subpartitions, top_cpuset's effective CPUs
* should be the same as the active CPUs, so checking only top_cpuset
* is enough to detect racing CPU offlines.
*/
if (!top_cpuset.nr_subparts_cpus &&
!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
return;
/*
* With subpartition CPUs, however, the effective CPUs of a partition
* root should be only a subset of the active CPUs. Since a CPU in any
* partition root could be offlined, all must be checked.
*/
if (top_cpuset.nr_subparts_cpus) {
rcu_read_lock();
cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
if (!is_partition_valid(cs)) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
if (!cpumask_subset(cs->effective_cpus,
cpu_active_mask)) {
rcu_read_unlock();
return;
}
}
rcu_read_unlock();
}
/* Generate domain masks and attrs */
ndoms = generate_sched_domains(&doms, &attr);
/* Have scheduler rebuild the domains */
partition_and_rebuild_sched_domains(ndoms, doms, attr);
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */
void rebuild_sched_domains(void)
{
cpus_read_lock();
mutex_lock(&cpuset_mutex);
rebuild_sched_domains_locked();
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
}
/**
* update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
* @new_cpus: the temp variable for the new effective_cpus mask
*
* Iterate through each task of @cs updating its cpus_allowed to the
* effective cpuset's. As this function is called with cpuset_mutex held,
* cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask()
* is used instead of effective_cpus to make sure all offline CPUs are also
* included as hotplug code won't update cpumasks for tasks in top_cpuset.
*/
static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
{
struct css_task_iter it;
struct task_struct *task;
bool top_cs = cs == &top_cpuset;
css_task_iter_start(&cs->css, 0, &it);
while ((task = css_task_iter_next(&it))) {
const struct cpumask *possible_mask = task_cpu_possible_mask(task);
if (top_cs) {
/*
* Percpu kthreads in top_cpuset are ignored
*/
if (kthread_is_per_cpu(task))
continue;
cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus);
} else {
cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
}
set_cpus_allowed_ptr(task, new_cpus);
}
css_task_iter_end(&it);
}
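/*
 * Example (illustrative sketch only, not part of the build): the bare
 * css_task_iter skeleton that update_tasks_cpumask() above and the other
 * update_tasks_*() helpers below share.  With cpuset_mutex held, cpuset
 * membership cannot change while iterating; visit() is a hypothetical
 * per-task callback.
 */
#if 0
static void example_for_each_task(struct cpuset *cs,
				  void (*visit)(struct task_struct *))
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		visit(task);
	css_task_iter_end(&it);
}
#endif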
/**
* compute_effective_cpumask - Compute the effective cpumask of the cpuset
* @new_cpus: the temp variable for the new effective_cpus mask
 * @cs: the cpuset that needs its effective_cpus mask recomputed
* @parent: the parent cpuset
*
* If the parent has subpartition CPUs, include them in the list of
* allowable CPUs in computing the new effective_cpus mask. Since offlined
* CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
* to mask those out.
*/
static void compute_effective_cpumask(struct cpumask *new_cpus,
struct cpuset *cs, struct cpuset *parent)
{
if (parent->nr_subparts_cpus && is_partition_valid(cs)) {
cpumask_or(new_cpus, parent->effective_cpus,
parent->subparts_cpus);
cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
cpumask_and(new_cpus, new_cpus, cpu_active_mask);
} else {
cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
}
}
/*
* Commands for update_parent_subparts_cpumask
*/
enum subparts_cmd {
partcmd_enable, /* Enable partition root */
partcmd_disable, /* Disable partition root */
partcmd_update, /* Update parent's subparts_cpus */
partcmd_invalidate, /* Make partition invalid */
};
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
int turning_on);
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
struct tmpmasks *tmp);
/*
* Update partition exclusive flag
*
* Return: 0 if successful, an error code otherwise
*/
static int update_partition_exclusive(struct cpuset *cs, int new_prs)
{
bool exclusive = (new_prs > 0);
if (exclusive && !is_cpu_exclusive(cs)) {
if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
return PERR_NOTEXCL;
} else if (!exclusive && is_cpu_exclusive(cs)) {
/* Turning off CS_CPU_EXCLUSIVE will not return error */
update_flag(CS_CPU_EXCLUSIVE, cs, 0);
}
return 0;
}
/*
* Update partition load balance flag and/or rebuild sched domain
*
* Changing load balance flag will automatically call
* rebuild_sched_domains_locked().
*/
static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
{
int new_prs = cs->partition_root_state;
bool new_lb = (new_prs != PRS_ISOLATED);
bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
if (new_lb != !!is_sched_load_balance(cs)) {
rebuild_domains = true;
if (new_lb)
set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
else
clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}
if (rebuild_domains)
rebuild_sched_domains_locked();
}
/**
* update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
* @cs: The cpuset that requests change in partition root state
* @cmd: Partition root state change command
* @newmask: Optional new cpumask for partcmd_update
* @tmp: Temporary addmask and delmask
* Return: 0 or a partition root state error code
*
* For partcmd_enable, the cpuset is being transformed from a non-partition
* root to a partition root. The cpus_allowed mask of the given cpuset will
* be put into parent's subparts_cpus and taken away from parent's
 * effective_cpus. The function returns 0 if all the CPUs listed in
 * cpus_allowed can be granted, or an error code otherwise.
*
* For partcmd_disable, the cpuset is being transformed from a partition
* root back to a non-partition root. Any CPUs in cpus_allowed that are in
* parent's subparts_cpus will be taken away from that cpumask and put back
* into parent's effective_cpus. 0 will always be returned.
*
* For partcmd_update, if the optional newmask is specified, the cpu list is
* to be changed from cpus_allowed to newmask. Otherwise, cpus_allowed is
* assumed to remain the same. The cpuset should either be a valid or invalid
* partition root. The partition root state may change from valid to invalid
* or vice versa. An error code will only be returned if transitioning from
* invalid to valid violates the exclusivity rule.
*
* For partcmd_invalidate, the current partition will be made invalid.
*
* The partcmd_enable and partcmd_disable commands are used by
* update_prstate(). An error code may be returned and the caller will check
* for error.
*
* The partcmd_update command is used by update_cpumasks_hier() with newmask
* NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
* by update_cpumask() with NULL newmask. In both cases, the callers won't
* check for error and so partition_root_state and prs_error will be updated
* directly.
*/
static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
struct cpumask *newmask,
struct tmpmasks *tmp)
{
struct cpuset *parent = parent_cs(cs);
int adding; /* Moving cpus from effective_cpus to subparts_cpus */
int deleting; /* Moving cpus from subparts_cpus to effective_cpus */
int old_prs, new_prs;
int part_error = PERR_NONE; /* Partition error? */
lockdep_assert_held(&cpuset_mutex);
/*
* The parent must be a partition root.
* The new cpumask, if present, or the current cpus_allowed must
* not be empty.
*/
if (!is_partition_valid(parent)) {
return is_partition_invalid(parent)
? PERR_INVPARENT : PERR_NOTPART;
}
if (!newmask && cpumask_empty(cs->cpus_allowed))
return PERR_CPUSEMPTY;
/*
* new_prs will only be changed for the partcmd_update and
* partcmd_invalidate commands.
*/
adding = deleting = false;
old_prs = new_prs = cs->partition_root_state;
if (cmd == partcmd_enable) {
/*
* Enabling partition root is not allowed if cpus_allowed
* doesn't overlap parent's cpus_allowed.
*/
if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed))
return PERR_INVCPUS;
/*
* A parent can be left with no CPU as long as there is no
* task directly associated with the parent partition.
*/
if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) &&
partition_is_populated(parent, cs))
return PERR_NOCPUS;
cpumask_copy(tmp->addmask, cs->cpus_allowed);
adding = true;
} else if (cmd == partcmd_disable) {
/*
* Need to remove cpus from parent's subparts_cpus for valid
* partition root.
*/
deleting = !is_prs_invalid(old_prs) &&
cpumask_and(tmp->delmask, cs->cpus_allowed,
parent->subparts_cpus);
} else if (cmd == partcmd_invalidate) {
if (is_prs_invalid(old_prs))
return 0;
/*
* Make the current partition invalid. It is assumed that
* invalidation is caused by violating cpu exclusivity rule.
*/
deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
parent->subparts_cpus);
if (old_prs > 0) {
new_prs = -old_prs;
part_error = PERR_NOTEXCL;
}
} else if (newmask) {
/*
* partcmd_update with newmask:
*
* Compute add/delete mask to/from subparts_cpus
*
* delmask = cpus_allowed & ~newmask & parent->subparts_cpus
* addmask = newmask & parent->cpus_allowed
* & ~parent->subparts_cpus
*/
cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask);
deleting = cpumask_and(tmp->delmask, tmp->delmask,
parent->subparts_cpus);
cpumask_and(tmp->addmask, newmask, parent->cpus_allowed);
adding = cpumask_andnot(tmp->addmask, tmp->addmask,
parent->subparts_cpus);
/*
* Empty cpumask is not allowed
*/
if (cpumask_empty(newmask)) {
part_error = PERR_CPUSEMPTY;
/*
* Make partition invalid if parent's effective_cpus could
* become empty and there are tasks in the parent.
*/
} else if (adding &&
cpumask_subset(parent->effective_cpus, tmp->addmask) &&
!cpumask_intersects(tmp->delmask, cpu_active_mask) &&
partition_is_populated(parent, cs)) {
part_error = PERR_NOCPUS;
adding = false;
deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
parent->subparts_cpus);
}
} else {
/*
* partcmd_update w/o newmask:
*
* delmask = cpus_allowed & parent->subparts_cpus
* addmask = cpus_allowed & parent->cpus_allowed
* & ~parent->subparts_cpus
*
* This gets invoked either due to a hotplug event or from
* update_cpumasks_hier(). This can cause the state of a
* partition root to transition from valid to invalid or vice
* versa. So we still need to compute the addmask and delmask.
* A partition error happens when:
* 1) Cpuset is valid partition, but parent does not distribute
* out any CPUs.
* 2) Parent has tasks and all its effective CPUs will have
* to be distributed out.
*/
cpumask_and(tmp->addmask, cs->cpus_allowed,
parent->cpus_allowed);
adding = cpumask_andnot(tmp->addmask, tmp->addmask,
parent->subparts_cpus);
if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) ||
(adding &&
cpumask_subset(parent->effective_cpus, tmp->addmask) &&
partition_is_populated(parent, cs))) {
part_error = PERR_NOCPUS;
adding = false;
}
if (part_error && is_partition_valid(cs) &&
parent->nr_subparts_cpus)
deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
parent->subparts_cpus);
}
if (part_error)
WRITE_ONCE(cs->prs_err, part_error);
if (cmd == partcmd_update) {
/*
* Check for possible transition between valid and invalid
* partition root.
*/
switch (cs->partition_root_state) {
case PRS_ROOT:
case PRS_ISOLATED:
if (part_error)
new_prs = -old_prs;
break;
case PRS_INVALID_ROOT:
case PRS_INVALID_ISOLATED:
if (!part_error)
new_prs = -old_prs;
break;
}
}
if (!adding && !deleting && (new_prs == old_prs))
return 0;
/*
* Transitioning between invalid to valid or vice versa may require
* changing CS_CPU_EXCLUSIVE.
*/
if (old_prs != new_prs) {
int err = update_partition_exclusive(cs, new_prs);
if (err)
return err;
}
/*
* Change the parent's subparts_cpus.
* Newly added CPUs will be removed from effective_cpus and
* newly deleted ones will be added back to effective_cpus.
*/
spin_lock_irq(&callback_lock);
if (adding) {
cpumask_or(parent->subparts_cpus,
parent->subparts_cpus, tmp->addmask);
cpumask_andnot(parent->effective_cpus,
parent->effective_cpus, tmp->addmask);
}
if (deleting) {
cpumask_andnot(parent->subparts_cpus,
parent->subparts_cpus, tmp->delmask);
/*
* Some of the CPUs in subparts_cpus might have been offlined.
*/
cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
cpumask_or(parent->effective_cpus,
parent->effective_cpus, tmp->delmask);
}
parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
if (old_prs != new_prs)
cs->partition_root_state = new_prs;
spin_unlock_irq(&callback_lock);
if (adding || deleting) {
update_tasks_cpumask(parent, tmp->addmask);
if (parent->child_ecpus_count)
update_sibling_cpumasks(parent, cs, tmp);
}
/*
* For partcmd_update without newmask, it is being called from
* cpuset_hotplug_workfn() where cpus_read_lock() wasn't taken.
* Update the load balance flag and scheduling domain if
* cpus_read_trylock() is successful.
*/
if ((cmd == partcmd_update) && !newmask && cpus_read_trylock()) {
update_partition_sd_lb(cs, old_prs);
cpus_read_unlock();
}
notify_partition_change(cs, old_prs);
return 0;
}
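/*
 * Example (illustrative sketch only, not part of the build): the
 * partcmd_update mask algebra from the comment above, spelled out on
 * its own.  The names mirror the fields that
 * update_parent_subparts_cpumask() actually uses.
 */
#if 0
static void example_update_masks(struct cpuset *cs, struct cpuset *parent,
				 struct cpumask *newmask,
				 struct tmpmasks *tmp)
{
	/* delmask = cpus_allowed & ~newmask & parent->subparts_cpus */
	cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask);
	cpumask_and(tmp->delmask, tmp->delmask, parent->subparts_cpus);

	/* addmask = newmask & parent->cpus_allowed & ~parent->subparts_cpus */
	cpumask_and(tmp->addmask, newmask, parent->cpus_allowed);
	cpumask_andnot(tmp->addmask, tmp->addmask, parent->subparts_cpus);
}
#endif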
/*
* update_cpumasks_hier() flags
*/
#define HIER_CHECKALL 0x01 /* Check all cpusets with no skipping */
#define HIER_NO_SD_REBUILD 0x02 /* Don't rebuild sched domains */
/*
* update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
* @cs: the cpuset to consider
* @tmp: temp variables for calculating effective_cpus & partition setup
 * @flags: HIER_CHECKALL to check all cpusets with no skipping,
 *	   HIER_NO_SD_REBUILD to suppress the sched-domain rebuild
*
* When configured cpumask is changed, the effective cpumasks of this cpuset
* and all its descendants need to be updated.
*
 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
*
* Called with cpuset_mutex held
*/
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
int flags)
{
struct cpuset *cp;
struct cgroup_subsys_state *pos_css;
bool need_rebuild_sched_domains = false;
int old_prs, new_prs;
rcu_read_lock();
cpuset_for_each_descendant_pre(cp, pos_css, cs) {
struct cpuset *parent = parent_cs(cp);
bool update_parent = false;
compute_effective_cpumask(tmp->new_cpus, cp, parent);
/*
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some CPUs unless
* it is a partition root that has explicitly distributed
* out all its CPUs.
*/
if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
if (is_partition_valid(cp) &&
cpumask_equal(cp->cpus_allowed, cp->subparts_cpus))
goto update_parent_subparts;
cpumask_copy(tmp->new_cpus, parent->effective_cpus);
if (!cp->use_parent_ecpus) {
cp->use_parent_ecpus = true;
parent->child_ecpus_count++;
}
} else if (cp->use_parent_ecpus) {
cp->use_parent_ecpus = false;
WARN_ON_ONCE(!parent->child_ecpus_count);
parent->child_ecpus_count--;
}
/*
* Skip the whole subtree if
* 1) the cpumask remains the same,
* 2) has no partition root state,
* 3) HIER_CHECKALL flag not set, and
* 4) for v2 load balance state same as its parent.
*/
if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
(is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
update_parent_subparts:
/*
* update_parent_subparts_cpumask() should have been called
* for cs already in update_cpumask(). We should also call
* update_tasks_cpumask() again for tasks in the parent
* cpuset if the parent's subparts_cpus changes.
*/
old_prs = new_prs = cp->partition_root_state;
if ((cp != cs) && old_prs) {
switch (parent->partition_root_state) {
case PRS_ROOT:
case PRS_ISOLATED:
update_parent = true;
break;
default:
/*
* When parent is not a partition root or is
* invalid, child partition roots become
* invalid too.
*/
if (is_partition_valid(cp))
new_prs = -cp->partition_root_state;
WRITE_ONCE(cp->prs_err,
is_partition_invalid(parent)
? PERR_INVPARENT : PERR_NOTPART);
break;
}
}
if (!css_tryget_online(&cp->css))
continue;
rcu_read_unlock();
if (update_parent) {
update_parent_subparts_cpumask(cp, partcmd_update, NULL,
tmp);
/*
* The cpuset partition_root_state may become
* invalid. Capture it.
*/
new_prs = cp->partition_root_state;
}
spin_lock_irq(&callback_lock);
if (cp->nr_subparts_cpus && !is_partition_valid(cp)) {
/*
* Put all active subparts_cpus back to effective_cpus.
*/
cpumask_or(tmp->new_cpus, tmp->new_cpus,
cp->subparts_cpus);
cpumask_and(tmp->new_cpus, tmp->new_cpus,
cpu_active_mask);
cp->nr_subparts_cpus = 0;
cpumask_clear(cp->subparts_cpus);
}
cpumask_copy(cp->effective_cpus, tmp->new_cpus);
if (cp->nr_subparts_cpus) {
/*
* Make sure that effective_cpus & subparts_cpus
* are mutually exclusive.
*/
cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
cp->subparts_cpus);
}
cp->partition_root_state = new_prs;
spin_unlock_irq(&callback_lock);
notify_partition_change(cp, old_prs);
WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
update_tasks_cpumask(cp, tmp->new_cpus);
/*
* On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
* from parent if current cpuset isn't a valid partition root
* and their load balance states differ.
*/
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!is_partition_valid(cp) &&
(is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
if (is_sched_load_balance(parent))
set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
else
clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
}
/*
* On legacy hierarchy, if the effective cpumask of any non-
* empty cpuset is changed, we need to rebuild sched domains.
* On default hierarchy, the cpuset needs to be a partition
* root as well.
*/
if (!cpumask_empty(cp->cpus_allowed) &&
is_sched_load_balance(cp) &&
(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
is_partition_valid(cp)))
need_rebuild_sched_domains = true;
rcu_read_lock();
css_put(&cp->css);
}
rcu_read_unlock();
if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD))
rebuild_sched_domains_locked();
}
/**
* update_sibling_cpumasks - Update siblings cpumasks
* @parent: Parent cpuset
* @cs: Current cpuset
* @tmp: Temp variables
*/
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
struct tmpmasks *tmp)
{
struct cpuset *sibling;
struct cgroup_subsys_state *pos_css;
lockdep_assert_held(&cpuset_mutex);
/*
* Check all its siblings and call update_cpumasks_hier()
* if their use_parent_ecpus flag is set in order for them
* to use the right effective_cpus value.
*
* The update_cpumasks_hier() function may sleep. So we have to
	 * release the RCU read lock before calling it. The HIER_NO_SD_REBUILD
	 * flag is used to suppress the rebuild of sched domains as the callers
* will take care of that.
*/
rcu_read_lock();
cpuset_for_each_child(sibling, pos_css, parent) {
if (sibling == cs)
continue;
if (!sibling->use_parent_ecpus)
continue;
if (!css_tryget_online(&sibling->css))
continue;
rcu_read_unlock();
update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD);
rcu_read_lock();
css_put(&sibling->css);
}
rcu_read_unlock();
}
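/*
 * Example (illustrative sketch only, not part of the build): the
 * drop-and-reacquire RCU idiom used above and in update_cpumasks_hier().
 * Sleeping work on a child is only safe after pinning it with
 * css_tryget_online() and leaving the RCU read-side critical section;
 * do_sleepy_work() is a hypothetical placeholder.
 */
#if 0
static void example_walk_children(struct cpuset *parent)
{
	struct cpuset *child;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_child(child, pos_css, parent) {
		if (!css_tryget_online(&child->css))
			continue;	/* racing with offlining, skip it */
		rcu_read_unlock();

		do_sleepy_work(child);	/* hypothetical, may sleep */

		rcu_read_lock();
		css_put(&child->css);
	}
	rcu_read_unlock();
}
#endif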
/**
* update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
* @cs: the cpuset to consider
* @trialcs: trial cpuset
* @buf: buffer of cpu numbers written to this cpuset
*/
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
const char *buf)
{
int retval;
struct tmpmasks tmp;
bool invalidate = false;
int old_prs = cs->partition_root_state;
/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
if (cs == &top_cpuset)
return -EACCES;
/*
* An empty cpus_allowed is ok only if the cpuset has no tasks.
* Since cpulist_parse() fails on an empty mask, we special case
* that parsing. The validate_change() call ensures that cpusets
* with tasks have cpus.
*/
if (!*buf) {
cpumask_clear(trialcs->cpus_allowed);
} else {
retval = cpulist_parse(buf, trialcs->cpus_allowed);
if (retval < 0)
return retval;
if (!cpumask_subset(trialcs->cpus_allowed,
top_cpuset.cpus_allowed))
return -EINVAL;
}
/* Nothing to do if the cpus didn't change */
if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
return 0;
if (alloc_cpumasks(NULL, &tmp))
return -ENOMEM;
retval = validate_change(cs, trialcs);
if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
struct cpuset *cp, *parent;
struct cgroup_subsys_state *css;
/*
		 * The -EINVAL error code indicates that the partition sibling
		 * CPU exclusivity rule has been violated. We still allow
* the cpumask change to proceed while invalidating the
* partition. However, any conflicting sibling partitions
* have to be marked as invalid too.
*/
invalidate = true;
rcu_read_lock();
parent = parent_cs(cs);
cpuset_for_each_child(cp, css, parent)
if (is_partition_valid(cp) &&
cpumask_intersects(trialcs->cpus_allowed, cp->cpus_allowed)) {
rcu_read_unlock();
update_parent_subparts_cpumask(cp, partcmd_invalidate, NULL, &tmp);
rcu_read_lock();
}
rcu_read_unlock();
retval = 0;
}
if (retval < 0)
goto out_free;
if (cs->partition_root_state) {
if (invalidate)
update_parent_subparts_cpumask(cs, partcmd_invalidate,
NULL, &tmp);
else
update_parent_subparts_cpumask(cs, partcmd_update,
trialcs->cpus_allowed, &tmp);
}
compute_effective_cpumask(trialcs->effective_cpus, trialcs,
parent_cs(cs));
spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
/*
* Make sure that subparts_cpus, if not empty, is a subset of
* cpus_allowed. Clear subparts_cpus if partition not valid or
* empty effective cpus with tasks.
*/
if (cs->nr_subparts_cpus) {
if (!is_partition_valid(cs) ||
(cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) &&
partition_is_populated(cs, NULL))) {
cs->nr_subparts_cpus = 0;
cpumask_clear(cs->subparts_cpus);
} else {
cpumask_and(cs->subparts_cpus, cs->subparts_cpus,
cs->cpus_allowed);
cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
}
}
spin_unlock_irq(&callback_lock);
/* effective_cpus will be updated here */
update_cpumasks_hier(cs, &tmp, 0);
if (cs->partition_root_state) {
struct cpuset *parent = parent_cs(cs);
/*
* For partition root, update the cpumasks of sibling
* cpusets if they use parent's effective_cpus.
*/
if (parent->child_ecpus_count)
update_sibling_cpumasks(parent, cs, &tmp);
/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains */
update_partition_sd_lb(cs, old_prs);
}
out_free:
free_cpumasks(NULL, &tmp);
	return retval;
}
/*
* Migrate memory region from one set of nodes to another. This is
* performed asynchronously as it can be called from process migration path
* holding locks involved in process management. All mm migrations are
* performed in the queued order and can be waited for by flushing
* cpuset_migrate_mm_wq.
*/
struct cpuset_migrate_mm_work {
struct work_struct work;
struct mm_struct *mm;
nodemask_t from;
nodemask_t to;
};
static void cpuset_migrate_mm_workfn(struct work_struct *work)
{
struct cpuset_migrate_mm_work *mwork =
container_of(work, struct cpuset_migrate_mm_work, work);
/* on a wq worker, no need to worry about %current's mems_allowed */
do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
mmput(mwork->mm);
kfree(mwork);
}
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to)
{
struct cpuset_migrate_mm_work *mwork;
if (nodes_equal(*from, *to)) {
mmput(mm);
return;
}
mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
if (mwork) {
mwork->mm = mm;
mwork->from = *from;
mwork->to = *to;
INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
queue_work(cpuset_migrate_mm_wq, &mwork->work);
} else {
mmput(mm);
}
}
static void cpuset_post_attach(void)
{
flush_workqueue(cpuset_migrate_mm_wq);
}
/*
* cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
* @tsk: the task to change
* @newmems: new nodes that the task will be set
*
* We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
 * and rebind the task's mempolicy, if any. If the task is allocating in
* parallel, it might temporarily see an empty intersection, which results in
* a seqlock check and retry before OOM or allocation failure.
*/
static void cpuset_change_task_nodemask(struct task_struct *tsk,
nodemask_t *newmems)
{
task_lock(tsk);
local_irq_disable();
write_seqcount_begin(&tsk->mems_allowed_seq);
nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
mpol_rebind_task(tsk, newmems);
tsk->mems_allowed = *newmems;
write_seqcount_end(&tsk->mems_allowed_seq);
local_irq_enable();
task_unlock(tsk);
}
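/*
 * Example (illustrative sketch only, not part of the build): the reader
 * side that pairs with the seqcount write above.  Page allocation paths
 * sample mems_allowed via read_mems_allowed_begin() and
 * read_mems_allowed_retry() (include/linux/cpuset.h) and retry when a
 * nodemask rebind raced with them; try_allocation() is a hypothetical
 * placeholder.
 */
#if 0
static struct page *example_alloc(gfp_t gfp, unsigned int order)
{
	struct page *page;
	unsigned int cookie;

	do {
		cookie = read_mems_allowed_begin();
		page = try_allocation(gfp, order);	/* hypothetical */
	} while (!page && read_mems_allowed_retry(cookie));

	return page;
}
#endif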
static void *cpuset_being_rebound;
/**
* update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its mems_allowed to the
* effective cpuset's. As this function is called with cpuset_mutex held,
* cpuset membership stays stable.
*/
static void update_tasks_nodemask(struct cpuset *cs)
{
static nodemask_t newmems; /* protected by cpuset_mutex */
struct css_task_iter it;
struct task_struct *task;
cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
guarantee_online_mems(cs, &newmems);
/*
* The mpol_rebind_mm() call takes mmap_lock, which we couldn't
* take while holding tasklist_lock. Forks can happen - the
* mpol_dup() cpuset_being_rebound check will catch such forks,
* and rebind their vma mempolicies too. Because we still hold
* the global cpuset_mutex, we know that no other rebind effort
* will be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
*/
css_task_iter_start(&cs->css, 0, &it);
while ((task = css_task_iter_next(&it))) {
struct mm_struct *mm;
bool migrate;
cpuset_change_task_nodemask(task, &newmems);
mm = get_task_mm(task);
if (!mm)
continue;
migrate = is_memory_migrate(cs);
mpol_rebind_mm(mm, &cs->mems_allowed);
if (migrate)
cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
else
mmput(mm);
}
css_task_iter_end(&it);
/*
* All the tasks' nodemasks have been updated, update
* cs->old_mems_allowed.
*/
cs->old_mems_allowed = newmems;
/* We're done rebinding vmas to this cpuset's new mems_allowed. */
cpuset_being_rebound = NULL;
}
/*
* update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
* @cs: the cpuset to consider
* @new_mems: a temp variable for calculating new effective_mems
*
* When configured nodemask is changed, the effective nodemasks of this cpuset
* and all its descendants need to be updated.
*
 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
*
* Called with cpuset_mutex held
*/
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{
struct cpuset *cp;
struct cgroup_subsys_state *pos_css;
rcu_read_lock();
cpuset_for_each_descendant_pre(cp, pos_css, cs) {
struct cpuset *parent = parent_cs(cp);
nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
/*
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some MEMs.
*/
if (is_in_v2_mode() && nodes_empty(*new_mems))
*new_mems = parent->effective_mems;
/* Skip the whole subtree if the nodemask remains the same. */
if (nodes_equal(*new_mems, cp->effective_mems)) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
if (!css_tryget_online(&cp->css))
continue;
rcu_read_unlock();
spin_lock_irq(&callback_lock);
cp->effective_mems = *new_mems;
spin_unlock_irq(&callback_lock);
WARN_ON(!is_in_v2_mode() &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
update_tasks_nodemask(cp);
rcu_read_lock();
css_put(&cp->css);
}
rcu_read_unlock();
}
/*
* Handle user request to change the 'mems' memory placement
* of a cpuset. Needs to validate the request, update the
 * cpuset's mems_allowed, and for each task in the cpuset,
 * update mems_allowed and rebind the task's mempolicy and any vma
 * mempolicies and, if the cpuset is marked 'memory_migrate',
 * migrate the tasks' pages to the new memory.
 *
 * Call with cpuset_mutex held. May take callback_lock during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such task's mm->mmap_lock, scan its vmas and rebind
 * their mempolicies to the cpuset's new mems_allowed.
*/
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
const char *buf)
{
int retval;
/*
	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
* it's read-only
*/
if (cs == &top_cpuset) {
retval = -EACCES;
goto done;
}
/*
* An empty mems_allowed is ok iff there are no tasks in the cpuset.
* Since nodelist_parse() fails on an empty mask, we special case
* that parsing. The validate_change() call ensures that cpusets
* with tasks have memory.
*/
if (!*buf) {
nodes_clear(trialcs->mems_allowed);
} else {
retval = nodelist_parse(buf, trialcs->mems_allowed);
if (retval < 0)
goto done;
if (!nodes_subset(trialcs->mems_allowed,
top_cpuset.mems_allowed)) {
retval = -EINVAL;
goto done;
}
}
if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
retval = 0; /* Too easy - nothing to do */
goto done;
}
retval = validate_change(cs, trialcs);
if (retval < 0)
goto done;
check_insane_mems_config(&trialcs->mems_allowed);
spin_lock_irq(&callback_lock);
cs->mems_allowed = trialcs->mems_allowed;
spin_unlock_irq(&callback_lock);
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
done:
return retval;
}
bool current_cpuset_is_being_rebound(void)
{
bool ret;
rcu_read_lock();
ret = task_cs(current) == cpuset_being_rebound;
rcu_read_unlock();
return ret;
}
static int update_relax_domain_level(struct cpuset *cs, s64 val)
{
#ifdef CONFIG_SMP
if (val < -1 || val >= sched_domain_level_max)
return -EINVAL;
#endif
if (val != cs->relax_domain_level) {
cs->relax_domain_level = val;
if (!cpumask_empty(cs->cpus_allowed) &&
is_sched_load_balance(cs))
rebuild_sched_domains_locked();
}
return 0;
}
/**
* update_tasks_flags - update the spread flags of tasks in the cpuset.
* @cs: the cpuset in which each task's spread flags needs to be changed
*
* Iterate through each task of @cs updating its spread flags. As this
* function is called with cpuset_mutex held, cpuset membership stays
* stable.
*/
static void update_tasks_flags(struct cpuset *cs)
{
struct css_task_iter it;
struct task_struct *task;
css_task_iter_start(&cs->css, 0, &it);
while ((task = css_task_iter_next(&it)))
cpuset_update_task_spread_flags(cs, task);
css_task_iter_end(&it);
}
/*
* update_flag - read a 0 or a 1 in a file and update associated flag
* bit: the bit to update (see cpuset_flagbits_t)
* cs: the cpuset to update
* turning_on: whether the flag is being set or cleared
*
* Call with cpuset_mutex held.
*/
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
int turning_on)
{
struct cpuset *trialcs;
int balance_flag_changed;
int spread_flag_changed;
int err;
trialcs = alloc_trial_cpuset(cs);
if (!trialcs)
return -ENOMEM;
if (turning_on)
set_bit(bit, &trialcs->flags);
else
clear_bit(bit, &trialcs->flags);
err = validate_change(cs, trialcs);
if (err < 0)
goto out;
balance_flag_changed = (is_sched_load_balance(cs) !=
is_sched_load_balance(trialcs));
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
spin_lock_irq(&callback_lock);
cs->flags = trialcs->flags;
spin_unlock_irq(&callback_lock);
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
if (spread_flag_changed)
update_tasks_flags(cs);
out:
free_cpuset(trialcs);
return err;
}
/**
* update_prstate - update partition_root_state
* @cs: the cpuset to update
* @new_prs: new partition root state
* Return: 0 if successful, != 0 if error
*
* Call with cpuset_mutex held.
*/
static int update_prstate(struct cpuset *cs, int new_prs)
{
int err = PERR_NONE, old_prs = cs->partition_root_state;
struct cpuset *parent = parent_cs(cs);
struct tmpmasks tmpmask;
if (old_prs == new_prs)
return 0;
/*
	 * For a previously invalid partition root, leave it as invalid
	 * if new_prs is not "member".
*/
if (new_prs && is_prs_invalid(old_prs)) {
cs->partition_root_state = -new_prs;
return 0;
}
if (alloc_cpumasks(NULL, &tmpmask))
return -ENOMEM;
err = update_partition_exclusive(cs, new_prs);
if (err)
goto out;
if (!old_prs) {
/*
* cpus_allowed cannot be empty.
*/
if (cpumask_empty(cs->cpus_allowed)) {
err = PERR_CPUSEMPTY;
goto out;
}
err = update_parent_subparts_cpumask(cs, partcmd_enable,
NULL, &tmpmask);
} else if (old_prs && new_prs) {
/*
* A change in load balance state only, no change in cpumasks.
*/
;
} else {
/*
* Switching back to member is always allowed even if it
* disables child partitions.
*/
update_parent_subparts_cpumask(cs, partcmd_disable, NULL,
&tmpmask);
/*
* If there are child partitions, they will all become invalid.
*/
if (unlikely(cs->nr_subparts_cpus)) {
spin_lock_irq(&callback_lock);
cs->nr_subparts_cpus = 0;
cpumask_clear(cs->subparts_cpus);
compute_effective_cpumask(cs->effective_cpus, cs, parent);
spin_unlock_irq(&callback_lock);
}
}
out:
/*
* Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
* happens.
*/
if (err) {
new_prs = -new_prs;
update_partition_exclusive(cs, new_prs);
}
spin_lock_irq(&callback_lock);
cs->partition_root_state = new_prs;
WRITE_ONCE(cs->prs_err, err);
spin_unlock_irq(&callback_lock);
/*
* Update child cpusets, if present.
* Force update if switching back to member.
*/
if (!list_empty(&cs->css.children))
update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
/* Update sched domains and load balance flag */
update_partition_sd_lb(cs, old_prs);
notify_partition_change(cs, old_prs);
free_cpumasks(NULL, &tmpmask);
return 0;
}
/*
* Frequency meter - How fast is some event occurring?
*
* These routines manage a digitally filtered, constant time based,
* event frequency meter. There are four routines:
* fmeter_init() - initialize a frequency meter.
* fmeter_markevent() - called each time the event happens.
* fmeter_getrate() - returns the recent rate of such events.
* fmeter_update() - internal routine used to update fmeter.
*
* A common data structure is passed to each of these routines,
* which is used to keep track of the state required to manage the
* frequency meter and its digital filter.
*
* The filter works on the number of events marked per unit time.
* The filter is single-pole low-pass recursive (IIR). The time unit
* is 1 second. Arithmetic is done using 32-bit integers scaled to
* simulate 3 decimal digits of precision (multiplied by 1000).
*
* With an FM_COEF of 933, and a time base of 1 second, the filter
* has a half-life of 10 seconds, meaning that if the events quit
 * happening, then the rate returned from fmeter_getrate()
* will be cut in half each 10 seconds, until it converges to zero.
*
* It is not worth doing a real infinitely recursive filter. If more
* than FM_MAXTICKS ticks have elapsed since the last filter event,
* just compute FM_MAXTICKS ticks worth, by which point the level
* will be stable.
*
* Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
* arithmetic overflow in the fmeter_update() routine.
*
* Given the simple 32 bit integer arithmetic used, this meter works
* best for reporting rates between one per millisecond (msec) and
* one per 32 (approx) seconds. At constant rates faster than one
* per msec it maxes out at values just under 1,000,000. At constant
* rates between one per msec, and one per second it will stabilize
* to a value N*1000, where N is the rate of events per second.
* At constant rates between one per second and one per 32 seconds,
* it will be choppy, moving up on the seconds that have an event,
* and then decaying until the next event. At rates slower than
* about one in 32 seconds, it decays all the way back to zero between
* each event.
*/
#define FM_COEF 933 /* coefficient for half-life of 10 secs */
#define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */
#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
#define FM_SCALE 1000 /* faux fixed point scale */
/* Initialize a frequency meter */
static void fmeter_init(struct fmeter *fmp)
{
fmp->cnt = 0;
fmp->val = 0;
fmp->time = 0;
spin_lock_init(&fmp->lock);
}
/* Internal meter update - process cnt events and update value */
static void fmeter_update(struct fmeter *fmp)
{
time64_t now;
u32 ticks;
now = ktime_get_seconds();
ticks = now - fmp->time;
if (ticks == 0)
return;
ticks = min(FM_MAXTICKS, ticks);
while (ticks-- > 0)
fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
fmp->time = now;
fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
fmp->cnt = 0;
}
/* Process any previous ticks, then bump cnt by one (times scale). */
static void fmeter_markevent(struct fmeter *fmp)
{
spin_lock(&fmp->lock);
fmeter_update(fmp);
fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
spin_unlock(&fmp->lock);
}
/* Process any previous ticks, then return current value. */
static int fmeter_getrate(struct fmeter *fmp)
{
int val;
spin_lock(&fmp->lock);
fmeter_update(fmp);
val = fmp->val;
spin_unlock(&fmp->lock);
return val;
}
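/*
 * Example (illustrative sketch only, not part of the build): typical
 * fmeter usage, as done for the memory_pressure file.  Note the
 * half-life arithmetic from the comment above: (933/1000)^10 ~= 0.50,
 * so an idle meter's value halves roughly every 10 seconds.
 */
#if 0
static void example_fmeter(struct fmeter *fmp)
{
	fmeter_init(fmp);	/* zero cnt/val/time, init the lock */
	fmeter_markevent(fmp);	/* each event adds FM_SCALE to cnt */
	fmeter_markevent(fmp);
	pr_info("rate = %d\n", fmeter_getrate(fmp));	/* events/sec x 1000 */
}
#endif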
static struct cpuset *cpuset_attach_old_cs;
/*
* Check to see if a cpuset can accept a new task
* For v1, cpus_allowed and mems_allowed can't be empty.
* For v2, effective_cpus can't be empty.
* Note that in v1, effective_cpus = cpus_allowed.
*/
static int cpuset_can_attach_check(struct cpuset *cs)
{
if (cpumask_empty(cs->effective_cpus) ||
(!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
return -ENOSPC;
return 0;
}
static void reset_migrate_dl_data(struct cpuset *cs)
{
cs->nr_migrate_dl_tasks = 0;
cs->sum_migrate_dl_bw = 0;
}
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct cpuset *cs, *oldcs;
struct task_struct *task;
bool cpus_updated, mems_updated;
int ret;
/* used later by cpuset_attach() */
cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
oldcs = cpuset_attach_old_cs;
cs = css_cs(css);
mutex_lock(&cpuset_mutex);
/* Check to see if task is allowed in the cpuset */
ret = cpuset_can_attach_check(cs);
if (ret)
goto out_unlock;
cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
cgroup_taskset_for_each(task, css, tset) {
ret = task_can_attach(task);
if (ret)
goto out_unlock;
/*
		 * Skip the rights-over-task check in v2 when nothing changes;
		 * migration permission derives from hierarchy ownership in
		 * cgroup_procs_write_permission().
*/
if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
(cpus_updated || mems_updated)) {
ret = security_task_setscheduler(task);
if (ret)
goto out_unlock;
}
if (dl_task(task)) {
cs->nr_migrate_dl_tasks++;
cs->sum_migrate_dl_bw += task->dl.dl_bw;
}
}
if (!cs->nr_migrate_dl_tasks)
goto out_success;
if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
if (unlikely(cpu >= nr_cpu_ids)) {
reset_migrate_dl_data(cs);
ret = -EINVAL;
goto out_unlock;
}
ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
if (ret) {
reset_migrate_dl_data(cs);
goto out_unlock;
}
}
out_success:
/*
	 * Mark that an attach is in progress. This makes validate_change() fail
* changes which zero cpus/mems_allowed.
*/
cs->attach_in_progress++;
out_unlock:
mutex_unlock(&cpuset_mutex);
return ret;
}
static void cpuset_cancel_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct cpuset *cs;
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
mutex_lock(&cpuset_mutex);
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
if (cs->nr_migrate_dl_tasks) {
int cpu = cpumask_any(cs->effective_cpus);
dl_bw_free(cpu, cs->sum_migrate_dl_bw);
reset_migrate_dl_data(cs);
}
mutex_unlock(&cpuset_mutex);
}
/*
* Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
 * but we can't allocate it dynamically there. Define it globally and
 * allocate it in cpuset_init().
*/
static cpumask_var_t cpus_attach;
static nodemask_t cpuset_attach_nodemask_to;
static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
{
lockdep_assert_held(&cpuset_mutex);
if (cs != &top_cpuset)
guarantee_online_cpus(task, cpus_attach);
else
cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
cs->subparts_cpus);
/*
* can_attach beforehand should guarantee that this doesn't
* fail. TODO: have a better way to handle failure here
*/
WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
cpuset_update_task_spread_flags(cs, task);
}
static void cpuset_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct task_struct *leader;
struct cgroup_subsys_state *css;
struct cpuset *cs;
struct cpuset *oldcs = cpuset_attach_old_cs;
bool cpus_updated, mems_updated;
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
mutex_lock(&cpuset_mutex);
cpus_updated = !cpumask_equal(cs->effective_cpus,
oldcs->effective_cpus);
mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
/*
* In the default hierarchy, enabling cpuset in the child cgroups
* will trigger a number of cpuset_attach() calls with no change
* in effective cpus and mems. In that case, we can optimize out
* by skipping the task iteration and update.
*/
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!cpus_updated && !mems_updated) {
cpuset_attach_nodemask_to = cs->effective_mems;
goto out;
}
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
cgroup_taskset_for_each(task, css, tset)
cpuset_attach_task(cs, task);
/*
* Change mm for all threadgroup leaders. This is expensive and may
* sleep and should be moved outside migration path proper. Skip it
* if there is no change in effective_mems and CS_MEMORY_MIGRATE is
* not set.
*/
cpuset_attach_nodemask_to = cs->effective_mems;
if (!is_memory_migrate(cs) && !mems_updated)
goto out;
cgroup_taskset_for_each_leader(leader, css, tset) {
struct mm_struct *mm = get_task_mm(leader);
if (mm) {
mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
/*
			 * old_mems_allowed is the same as mems_allowed
			 * here, except if this task is being moved
			 * automatically due to hotplug. In that case
			 * @mems_allowed has been updated and is empty, so
			 * @old_mems_allowed is the right nodeset that we
			 * migrate the mm from.
*/
if (is_memory_migrate(cs))
cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
&cpuset_attach_nodemask_to);
else
mmput(mm);
}
}
out:
cs->old_mems_allowed = cpuset_attach_nodemask_to;
if (cs->nr_migrate_dl_tasks) {
cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
reset_migrate_dl_data(cs);
}
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
mutex_unlock(&cpuset_mutex);
}
/* The various types of files and directories in a cpuset file system */
typedef enum {
FILE_MEMORY_MIGRATE,
FILE_CPULIST,
FILE_MEMLIST,
FILE_EFFECTIVE_CPULIST,
FILE_EFFECTIVE_MEMLIST,
FILE_SUBPARTS_CPULIST,
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
FILE_MEM_HARDWALL,
FILE_SCHED_LOAD_BALANCE,
FILE_PARTITION_ROOT,
FILE_SCHED_RELAX_DOMAIN_LEVEL,
FILE_MEMORY_PRESSURE_ENABLED,
FILE_MEMORY_PRESSURE,
FILE_SPREAD_PAGE,
FILE_SPREAD_SLAB,
} cpuset_filetype_t;
static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
u64 val)
{
struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
int retval = 0;
cpus_read_lock();
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs)) {
retval = -ENODEV;
goto out_unlock;
}
switch (type) {
case FILE_CPU_EXCLUSIVE:
retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
break;
case FILE_MEM_EXCLUSIVE:
retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
break;
case FILE_MEM_HARDWALL:
retval = update_flag(CS_MEM_HARDWALL, cs, val);
break;
case FILE_SCHED_LOAD_BALANCE:
retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
break;
case FILE_MEMORY_MIGRATE:
retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
break;
case FILE_MEMORY_PRESSURE_ENABLED:
cpuset_memory_pressure_enabled = !!val;
break;
case FILE_SPREAD_PAGE:
retval = update_flag(CS_SPREAD_PAGE, cs, val);
break;
case FILE_SPREAD_SLAB:
retval = update_flag(CS_SPREAD_SLAB, cs, val);
break;
default:
retval = -EINVAL;
break;
}
out_unlock:
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return retval;
}
static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
s64 val)
{
struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
int retval = -ENODEV;
cpus_read_lock();
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
switch (type) {
case FILE_SCHED_RELAX_DOMAIN_LEVEL:
retval = update_relax_domain_level(cs, val);
break;
default:
retval = -EINVAL;
break;
}
out_unlock:
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return retval;
}
/*
* Common handling for a write to a "cpus" or "mems" file.
*/
static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct cpuset *cs = css_cs(of_css(of));
struct cpuset *trialcs;
int retval = -ENODEV;
buf = strstrip(buf);
/*
* CPU or memory hotunplug may leave @cs w/o any execution
* resources, in which case the hotplug code asynchronously updates
* configuration and transfers all tasks to the nearest ancestor
* which can execute.
*
* As writes to "cpus" or "mems" may restore @cs's execution
* resources, wait for the previously scheduled operations before
	 * proceeding, so that we don't end up repeatedly removing tasks added
* after execution capability is restored.
*
* cpuset_hotplug_work calls back into cgroup core via
* cgroup_transfer_tasks() and waiting for it from a cgroupfs
* operation like this one can lead to a deadlock through kernfs
* active_ref protection. Let's break the protection. Losing the
* protection is okay as we check whether @cs is online after
* grabbing cpuset_mutex anyway. This only happens on the legacy
* hierarchies.
*/
css_get(&cs->css);
kernfs_break_active_protection(of->kn);
flush_work(&cpuset_hotplug_work);
cpus_read_lock();
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
trialcs = alloc_trial_cpuset(cs);
if (!trialcs) {
retval = -ENOMEM;
goto out_unlock;
}
switch (of_cft(of)->private) {
case FILE_CPULIST:
retval = update_cpumask(cs, trialcs, buf);
break;
case FILE_MEMLIST:
retval = update_nodemask(cs, trialcs, buf);
break;
default:
retval = -EINVAL;
break;
}
free_cpuset(trialcs);
out_unlock:
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
kernfs_unbreak_active_protection(of->kn);
css_put(&cs->css);
flush_workqueue(cpuset_migrate_mm_wq);
return retval ?: nbytes;
}
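/*
 * Illustrative usage (a sketch; the mount point and group name are
 * assumptions, not taken from this file):
 *
 *	# echo "0-3,8" > /sys/fs/cgroup/cpuset/grp/cpuset.cpus
 *	# echo "0-1"   > /sys/fs/cgroup/cpuset/grp/cpuset.mems
 *
 * Both writes land here and are parsed as comma-separated range lists
 * by update_cpumask()/update_nodemask().
 */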
/*
* These ascii lists should be read in a single call, by using a user
* buffer large enough to hold the entire map. If read in smaller
 * chunks, there is no guarantee of atomicity. Since the display format
 * used (a list of ranges of sequential numbers) is variable length,
 * and since these maps can change dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
*/
static int cpuset_common_seq_show(struct seq_file *sf, void *v)
{
struct cpuset *cs = css_cs(seq_css(sf));
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
spin_lock_irq(&callback_lock);
switch (type) {
case FILE_CPULIST:
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
break;
case FILE_MEMLIST:
seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
break;
case FILE_EFFECTIVE_CPULIST:
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
break;
case FILE_EFFECTIVE_MEMLIST:
seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
break;
case FILE_SUBPARTS_CPULIST:
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
break;
default:
ret = -EINVAL;
}
spin_unlock_irq(&callback_lock);
return ret;
}
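/*
 * Illustrative output (the values are assumptions): for a cpuset allowed
 * CPUs 0-3 and 8, reading "cpuset.cpus" yields "0-3,8\n" - the range
 * list produced by the "%*pbl" bitmap format used above.
 */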
static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
{
struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
switch (type) {
case FILE_CPU_EXCLUSIVE:
return is_cpu_exclusive(cs);
case FILE_MEM_EXCLUSIVE:
return is_mem_exclusive(cs);
case FILE_MEM_HARDWALL:
return is_mem_hardwall(cs);
case FILE_SCHED_LOAD_BALANCE:
return is_sched_load_balance(cs);
case FILE_MEMORY_MIGRATE:
return is_memory_migrate(cs);
case FILE_MEMORY_PRESSURE_ENABLED:
return cpuset_memory_pressure_enabled;
case FILE_MEMORY_PRESSURE:
return fmeter_getrate(&cs->fmeter);
case FILE_SPREAD_PAGE:
return is_spread_page(cs);
case FILE_SPREAD_SLAB:
return is_spread_slab(cs);
default:
BUG();
}
/* Unreachable but makes gcc happy */
return 0;
}
static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
{
struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
switch (type) {
case FILE_SCHED_RELAX_DOMAIN_LEVEL:
return cs->relax_domain_level;
default:
BUG();
}
/* Unreachable but makes gcc happy */
return 0;
}
static int sched_partition_show(struct seq_file *seq, void *v)
{
struct cpuset *cs = css_cs(seq_css(seq));
const char *err, *type = NULL;
switch (cs->partition_root_state) {
case PRS_ROOT:
seq_puts(seq, "root\n");
break;
case PRS_ISOLATED:
seq_puts(seq, "isolated\n");
break;
case PRS_MEMBER:
seq_puts(seq, "member\n");
break;
case PRS_INVALID_ROOT:
type = "root";
fallthrough;
case PRS_INVALID_ISOLATED:
if (!type)
type = "isolated";
err = perr_strings[READ_ONCE(cs->prs_err)];
if (err)
seq_printf(seq, "%s invalid (%s)\n", type, err);
else
seq_printf(seq, "%s invalid\n", type);
break;
}
return 0;
}
static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct cpuset *cs = css_cs(of_css(of));
int val;
int retval = -ENODEV;
buf = strstrip(buf);
/*
* Convert "root" to ENABLED, and convert "member" to DISABLED.
*/
if (!strcmp(buf, "root"))
val = PRS_ROOT;
else if (!strcmp(buf, "member"))
val = PRS_MEMBER;
else if (!strcmp(buf, "isolated"))
val = PRS_ISOLATED;
else
return -EINVAL;
css_get(&cs->css);
cpus_read_lock();
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
retval = update_prstate(cs, val);
out_unlock:
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
css_put(&cs->css);
return retval ?: nbytes;
}
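/*
 * Illustrative usage (the path is an assumption): on the default
 * hierarchy,
 *
 *	# echo root > /sys/fs/cgroup/grp/cpuset.cpus.partition
 *
 * turns the cgroup into a partition root; "member" reverts it and
 * "isolated" creates a partition without load balancing, matching the
 * string-to-PRS_* conversion above.
 */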
/*
* for the common functions, 'private' gives the type of file
*/
static struct cftype legacy_files[] = {
{
.name = "cpus",
.seq_show = cpuset_common_seq_show,
.write = cpuset_write_resmask,
.max_write_len = (100U + 6 * NR_CPUS),
.private = FILE_CPULIST,
},
{
.name = "mems",
.seq_show = cpuset_common_seq_show,
.write = cpuset_write_resmask,
.max_write_len = (100U + 6 * MAX_NUMNODES),
.private = FILE_MEMLIST,
},
{
.name = "effective_cpus",
.seq_show = cpuset_common_seq_show,
.private = FILE_EFFECTIVE_CPULIST,
},
{
.name = "effective_mems",
.seq_show = cpuset_common_seq_show,
.private = FILE_EFFECTIVE_MEMLIST,
},
{
.name = "cpu_exclusive",
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
.private = FILE_CPU_EXCLUSIVE,
},
{
.name = "mem_exclusive",
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
.private = FILE_MEM_EXCLUSIVE,
},
{
.name = "mem_hardwall",
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
.private = FILE_MEM_HARDWALL,
},
{
.name = "sched_load_balance",
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
.private = FILE_SCHED_LOAD_BALANCE,
},
{
.name = "sched_relax_domain_level",
.read_s64 = cpuset_read_s64,
.write_s64 = cpuset_write_s64,
.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
},
{
.name = "memory_migrate",
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
.private = FILE_MEMORY_MIGRATE,
},
{
.name = "memory_pressure",
.read_u64 = cpuset_read_u64,
.private = FILE_MEMORY_PRESSURE,
},
{
.name = "memory_spread_page",
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
.private = FILE_SPREAD_PAGE,
},
{
.name = "memory_spread_slab",
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
.private = FILE_SPREAD_SLAB,
},
{
.name = "memory_pressure_enabled",
.flags = CFTYPE_ONLY_ON_ROOT,
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
.private = FILE_MEMORY_PRESSURE_ENABLED,
},
{ } /* terminate */
};
/*
* This is currently a minimal set for the default hierarchy. It can be
* expanded later on by migrating more features and control files from v1.
*/
static struct cftype dfl_files[] = {
{
.name = "cpus",
.seq_show = cpuset_common_seq_show,
.write = cpuset_write_resmask,
.max_write_len = (100U + 6 * NR_CPUS),
.private = FILE_CPULIST,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "mems",
.seq_show = cpuset_common_seq_show,
.write = cpuset_write_resmask,
.max_write_len = (100U + 6 * MAX_NUMNODES),
.private = FILE_MEMLIST,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "cpus.effective",
.seq_show = cpuset_common_seq_show,
.private = FILE_EFFECTIVE_CPULIST,
},
{
.name = "mems.effective",
.seq_show = cpuset_common_seq_show,
.private = FILE_EFFECTIVE_MEMLIST,
},
{
.name = "cpus.partition",
.seq_show = sched_partition_show,
.write = sched_partition_write,
.private = FILE_PARTITION_ROOT,
.flags = CFTYPE_NOT_ON_ROOT,
.file_offset = offsetof(struct cpuset, partition_file),
},
{
.name = "cpus.subpartitions",
.seq_show = cpuset_common_seq_show,
.private = FILE_SUBPARTS_CPULIST,
.flags = CFTYPE_DEBUG,
},
{ } /* terminate */
};
/**
* cpuset_css_alloc - Allocate a cpuset css
* @parent_css: Parent css of the control group that the new cpuset will be
* part of
 * Return: cpuset css on success, ERR_PTR(-ENOMEM) on failure.
 *
 * Allocate and initialize a new cpuset css for a non-NULL @parent_css;
 * return the top cpuset css otherwise.
*/
static struct cgroup_subsys_state *
cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cpuset *cs;
if (!parent_css)
return &top_cpuset.css;
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
if (alloc_cpumasks(cs, NULL)) {
kfree(cs);
return ERR_PTR(-ENOMEM);
}
__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
nodes_clear(cs->mems_allowed);
nodes_clear(cs->effective_mems);
fmeter_init(&cs->fmeter);
cs->relax_domain_level = -1;
/* Set CS_MEMORY_MIGRATE for default hierarchy */
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
return &cs->css;
}
static int cpuset_css_online(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
struct cpuset *parent = parent_cs(cs);
struct cpuset *tmp_cs;
struct cgroup_subsys_state *pos_css;
if (!parent)
return 0;
cpus_read_lock();
mutex_lock(&cpuset_mutex);
set_bit(CS_ONLINE, &cs->flags);
if (is_spread_page(parent))
set_bit(CS_SPREAD_PAGE, &cs->flags);
if (is_spread_slab(parent))
set_bit(CS_SPREAD_SLAB, &cs->flags);
cpuset_inc();
spin_lock_irq(&callback_lock);
if (is_in_v2_mode()) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
cs->use_parent_ecpus = true;
parent->child_ecpus_count++;
}
/*
* For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
*/
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!is_sched_load_balance(parent))
clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
spin_unlock_irq(&callback_lock);
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
/*
* Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
* set. This flag handling is implemented in cgroup core for
* historical reasons - the flag may be specified during mount.
*
* Currently, if any sibling cpusets have exclusive cpus or mem, we
 * refuse to clone the configuration - thereby refusing entry to the
 * task, and as a result failing the sys_unshare() or
* clone() which initiated it. If this becomes a problem for some
* users who wish to allow that scenario, then this could be
* changed to grant parent->cpus_allowed-sibling_cpus_exclusive
* (and likewise for mems) to the new cgroup.
*/
rcu_read_lock();
cpuset_for_each_child(tmp_cs, pos_css, parent) {
if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
rcu_read_unlock();
goto out_unlock;
}
}
rcu_read_unlock();
spin_lock_irq(&callback_lock);
cs->mems_allowed = parent->mems_allowed;
cs->effective_mems = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
spin_unlock_irq(&callback_lock);
out_unlock:
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return 0;
}
/*
* If the cpuset being removed has its flag 'sched_load_balance'
* enabled, then simulate turning sched_load_balance off, which
* will call rebuild_sched_domains_locked(). That is not needed
* in the default hierarchy where only changes in partition
* will cause repartitioning.
*
* If the cpuset has the 'sched.partition' flag enabled, simulate
 * turning 'sched.partition' off.
*/
static void cpuset_css_offline(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
cpus_read_lock();
mutex_lock(&cpuset_mutex);
if (is_partition_valid(cs))
update_prstate(cs, 0);
if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
is_sched_load_balance(cs))
update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
if (cs->use_parent_ecpus) {
struct cpuset *parent = parent_cs(cs);
cs->use_parent_ecpus = false;
parent->child_ecpus_count--;
}
cpuset_dec();
clear_bit(CS_ONLINE, &cs->flags);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
}
static void cpuset_css_free(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
free_cpuset(cs);
}
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
spin_lock_irq(&callback_lock);
if (is_in_v2_mode()) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
top_cpuset.mems_allowed = node_possible_map;
} else {
cpumask_copy(top_cpuset.cpus_allowed,
top_cpuset.effective_cpus);
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
spin_unlock_irq(&callback_lock);
mutex_unlock(&cpuset_mutex);
}
/*
* In case the child is cloned into a cpuset different from its parent,
* additional checks are done to see if the move is allowed.
*/
static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
{
struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
bool same_cs;
int ret;
rcu_read_lock();
same_cs = (cs == task_cs(current));
rcu_read_unlock();
if (same_cs)
return 0;
lockdep_assert_held(&cgroup_mutex);
mutex_lock(&cpuset_mutex);
/* Check to see if task is allowed in the cpuset */
ret = cpuset_can_attach_check(cs);
if (ret)
goto out_unlock;
ret = task_can_attach(task);
if (ret)
goto out_unlock;
ret = security_task_setscheduler(task);
if (ret)
goto out_unlock;
/*
	 * Mark that an attach is in progress. This makes validate_change() fail
* changes which zero cpus/mems_allowed.
*/
cs->attach_in_progress++;
out_unlock:
mutex_unlock(&cpuset_mutex);
return ret;
}
static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
{
struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
bool same_cs;
rcu_read_lock();
same_cs = (cs == task_cs(current));
rcu_read_unlock();
if (same_cs)
return;
mutex_lock(&cpuset_mutex);
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
mutex_unlock(&cpuset_mutex);
}
/*
 * Make sure the new task conforms to the current state of its parent,
 * which could have been changed by cpuset just after the task inherits
 * the state from the parent and before it sits on the cgroup's task list.
*/
static void cpuset_fork(struct task_struct *task)
{
struct cpuset *cs;
bool same_cs;
rcu_read_lock();
cs = task_cs(task);
same_cs = (cs == task_cs(current));
rcu_read_unlock();
if (same_cs) {
if (cs == &top_cpuset)
return;
set_cpus_allowed_ptr(task, current->cpus_ptr);
task->mems_allowed = current->mems_allowed;
return;
}
/* CLONE_INTO_CGROUP */
mutex_lock(&cpuset_mutex);
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
cpuset_attach_task(cs, task);
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
mutex_unlock(&cpuset_mutex);
}
struct cgroup_subsys cpuset_cgrp_subsys = {
.css_alloc = cpuset_css_alloc,
.css_online = cpuset_css_online,
.css_offline = cpuset_css_offline,
.css_free = cpuset_css_free,
.can_attach = cpuset_can_attach,
.cancel_attach = cpuset_cancel_attach,
.attach = cpuset_attach,
.post_attach = cpuset_post_attach,
.bind = cpuset_bind,
.can_fork = cpuset_can_fork,
.cancel_fork = cpuset_cancel_fork,
.fork = cpuset_fork,
.legacy_cftypes = legacy_files,
.dfl_cftypes = dfl_files,
.early_init = true,
.threaded = true,
};
/**
* cpuset_init - initialize cpusets at system boot
*
* Description: Initialize top_cpuset
**/
int __init cpuset_init(void)
{
BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
cpumask_setall(top_cpuset.cpus_allowed);
nodes_setall(top_cpuset.mems_allowed);
cpumask_setall(top_cpuset.effective_cpus);
nodes_setall(top_cpuset.effective_mems);
fmeter_init(&top_cpuset.fmeter);
set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
top_cpuset.relax_domain_level = -1;
BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
return 0;
}
/*
* If CPU and/or memory hotplug handlers, below, unplug any CPUs
* or memory nodes, we need to walk over the cpuset hierarchy,
* removing that CPU or node from all cpusets. If this removes the
* last CPU or node from a cpuset, then move the tasks in the empty
* cpuset to its next-highest non-empty parent.
*/
static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
{
struct cpuset *parent;
/*
	 * Find its next-highest non-empty parent (the top cpuset
	 * has online cpus, so it can't be empty).
*/
parent = parent_cs(cs);
while (cpumask_empty(parent->cpus_allowed) ||
nodes_empty(parent->mems_allowed))
parent = parent_cs(parent);
if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
pr_cont_cgroup_name(cs->css.cgroup);
pr_cont("\n");
}
}
static void
hotplug_update_tasks_legacy(struct cpuset *cs,
struct cpumask *new_cpus, nodemask_t *new_mems,
bool cpus_updated, bool mems_updated)
{
bool is_empty;
spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, new_cpus);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->mems_allowed = *new_mems;
cs->effective_mems = *new_mems;
spin_unlock_irq(&callback_lock);
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
* as the tasks will be migrated to an ancestor.
*/
if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
update_tasks_cpumask(cs, new_cpus);
if (mems_updated && !nodes_empty(cs->mems_allowed))
update_tasks_nodemask(cs);
is_empty = cpumask_empty(cs->cpus_allowed) ||
nodes_empty(cs->mems_allowed);
/*
	 * Move tasks to the nearest ancestor with execution resources.
	 * This is a full cgroup operation which will also call back into
	 * cpuset and should be done outside any lock.
*/
if (is_empty) {
mutex_unlock(&cpuset_mutex);
remove_tasks_in_empty_cpuset(cs);
mutex_lock(&cpuset_mutex);
}
}
static void
hotplug_update_tasks(struct cpuset *cs,
struct cpumask *new_cpus, nodemask_t *new_mems,
bool cpus_updated, bool mems_updated)
{
/* A partition root is allowed to have empty effective cpus */
if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
spin_lock_irq(&callback_lock);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->effective_mems = *new_mems;
spin_unlock_irq(&callback_lock);
if (cpus_updated)
update_tasks_cpumask(cs, new_cpus);
if (mems_updated)
update_tasks_nodemask(cs);
}
static bool force_rebuild;
void cpuset_force_rebuild(void)
{
force_rebuild = true;
}
/**
* cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
 * @cs: cpuset of interest
* @tmp: the tmpmasks structure pointer
*
* Compare @cs's cpu and mem masks against top_cpuset and if some have gone
* offline, update @cs accordingly. If @cs ends up with no CPU or memory,
* all its tasks are moved to the nearest ancestor with both resources.
*/
static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
{
static cpumask_t new_cpus;
static nodemask_t new_mems;
bool cpus_updated;
bool mems_updated;
struct cpuset *parent;
retry:
wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
mutex_lock(&cpuset_mutex);
/*
* We have raced with task attaching. We wait until attaching
* is finished, so we won't attach a task to an empty cpuset.
*/
if (cs->attach_in_progress) {
mutex_unlock(&cpuset_mutex);
goto retry;
}
parent = parent_cs(cs);
compute_effective_cpumask(&new_cpus, cs, parent);
nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
if (cs->nr_subparts_cpus)
/*
* Make sure that CPUs allocated to child partitions
* do not show up in effective_cpus.
*/
cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
if (!tmp || !cs->partition_root_state)
goto update_tasks;
/*
* In the unlikely event that a partition root has empty
* effective_cpus with tasks, we will have to invalidate child
* partitions, if present, by setting nr_subparts_cpus to 0 to
* reclaim their cpus.
*/
if (cs->nr_subparts_cpus && is_partition_valid(cs) &&
cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) {
spin_lock_irq(&callback_lock);
cs->nr_subparts_cpus = 0;
cpumask_clear(cs->subparts_cpus);
spin_unlock_irq(&callback_lock);
compute_effective_cpumask(&new_cpus, cs, parent);
}
/*
* Force the partition to become invalid if either one of
* the following conditions hold:
* 1) empty effective cpus but not valid empty partition.
* 2) parent is invalid or doesn't grant any cpus to child
* partitions.
*/
if (is_partition_valid(cs) && (!parent->nr_subparts_cpus ||
(cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) {
int old_prs, parent_prs;
update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp);
if (cs->nr_subparts_cpus) {
spin_lock_irq(&callback_lock);
cs->nr_subparts_cpus = 0;
cpumask_clear(cs->subparts_cpus);
spin_unlock_irq(&callback_lock);
compute_effective_cpumask(&new_cpus, cs, parent);
}
old_prs = cs->partition_root_state;
parent_prs = parent->partition_root_state;
if (is_partition_valid(cs)) {
spin_lock_irq(&callback_lock);
make_partition_invalid(cs);
spin_unlock_irq(&callback_lock);
if (is_prs_invalid(parent_prs))
WRITE_ONCE(cs->prs_err, PERR_INVPARENT);
else if (!parent_prs)
WRITE_ONCE(cs->prs_err, PERR_NOTPART);
else
WRITE_ONCE(cs->prs_err, PERR_HOTPLUG);
notify_partition_change(cs, old_prs);
}
cpuset_force_rebuild();
}
/*
* On the other hand, an invalid partition root may be transitioned
* back to a regular one.
*/
else if (is_partition_valid(parent) && is_partition_invalid(cs)) {
update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp);
if (is_partition_valid(cs))
cpuset_force_rebuild();
}
update_tasks:
cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
mems_updated = !nodes_equal(new_mems, cs->effective_mems);
if (!cpus_updated && !mems_updated)
goto unlock; /* Hotplug doesn't affect this cpuset */
if (mems_updated)
check_insane_mems_config(&new_mems);
if (is_in_v2_mode())
hotplug_update_tasks(cs, &new_cpus, &new_mems,
cpus_updated, mems_updated);
else
hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
cpus_updated, mems_updated);
unlock:
mutex_unlock(&cpuset_mutex);
}
/**
* cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
* @work: unused
*
* This function is called after either CPU or memory configuration has
* changed and updates cpuset accordingly. The top_cpuset is always
* synchronized to cpu_active_mask and N_MEMORY, which is necessary in
 * order to make cpusets transparent (of no effect) on systems that are
* actively using CPU hotplug but making no active use of cpusets.
*
* Non-root cpusets are only affected by offlining. If any CPUs or memory
* nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
* all descendants.
*
* Note that CPU offlining during suspend is ignored. We don't modify
* cpusets across suspend/resume cycles at all.
*/
static void cpuset_hotplug_workfn(struct work_struct *work)
{
static cpumask_t new_cpus;
static nodemask_t new_mems;
bool cpus_updated, mems_updated;
bool on_dfl = is_in_v2_mode();
struct tmpmasks tmp, *ptmp = NULL;
if (on_dfl && !alloc_cpumasks(NULL, &tmp))
ptmp = &tmp;
mutex_lock(&cpuset_mutex);
/* fetch the available cpus/mems and find out which changed how */
cpumask_copy(&new_cpus, cpu_active_mask);
new_mems = node_states[N_MEMORY];
/*
* If subparts_cpus is populated, it is likely that the check below
* will produce a false positive on cpus_updated when the cpu list
	 * hasn't changed. It is extra work, but it is better to be safe.
*/
cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
/*
* In the rare case that hotplug removes all the cpus in subparts_cpus,
	 * we assume that cpus have been updated.
*/
if (!cpus_updated && top_cpuset.nr_subparts_cpus)
cpus_updated = true;
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
spin_lock_irq(&callback_lock);
if (!on_dfl)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
/*
* Make sure that CPUs allocated to child partitions
* do not show up in effective_cpus. If no CPU is left,
* we clear the subparts_cpus & let the child partitions
* fight for the CPUs again.
*/
if (top_cpuset.nr_subparts_cpus) {
if (cpumask_subset(&new_cpus,
top_cpuset.subparts_cpus)) {
top_cpuset.nr_subparts_cpus = 0;
cpumask_clear(top_cpuset.subparts_cpus);
} else {
cpumask_andnot(&new_cpus, &new_cpus,
top_cpuset.subparts_cpus);
}
}
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
spin_unlock_irq(&callback_lock);
/* we don't mess with cpumasks of tasks in top_cpuset */
}
/* synchronize mems_allowed to N_MEMORY */
if (mems_updated) {
spin_lock_irq(&callback_lock);
if (!on_dfl)
top_cpuset.mems_allowed = new_mems;
top_cpuset.effective_mems = new_mems;
spin_unlock_irq(&callback_lock);
update_tasks_nodemask(&top_cpuset);
}
mutex_unlock(&cpuset_mutex);
/* if cpus or mems changed, we need to propagate to descendants */
if (cpus_updated || mems_updated) {
struct cpuset *cs;
struct cgroup_subsys_state *pos_css;
rcu_read_lock();
cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
if (cs == &top_cpuset || !css_tryget_online(&cs->css))
continue;
rcu_read_unlock();
cpuset_hotplug_update_tasks(cs, ptmp);
rcu_read_lock();
css_put(&cs->css);
}
rcu_read_unlock();
}
/* rebuild sched domains if cpus_allowed has changed */
if (cpus_updated || force_rebuild) {
force_rebuild = false;
rebuild_sched_domains();
}
free_cpumasks(NULL, ptmp);
}
void cpuset_update_active_cpus(void)
{
/*
	 * We're inside a cpu hotplug critical region which usually nests
* inside cgroup synchronization. Bounce actual hotplug processing
* to a work item to avoid reverse locking order.
*/
schedule_work(&cpuset_hotplug_work);
}
void cpuset_wait_for_hotplug(void)
{
flush_work(&cpuset_hotplug_work);
}
/*
* Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
* Call this routine anytime after node_states[N_MEMORY] changes.
* See cpuset_update_active_cpus() for CPU hotplug handling.
*/
static int cpuset_track_online_nodes(struct notifier_block *self,
unsigned long action, void *arg)
{
schedule_work(&cpuset_hotplug_work);
return NOTIFY_OK;
}
/**
* cpuset_init_smp - initialize cpus_allowed
*
* Description: Finish top cpuset after cpu, node maps are initialized
*/
void __init cpuset_init_smp(void)
{
/*
	 * cpus_allowed/mems_allowed set to v2 values in the initial
* cpuset_bind() call will be reset to v1 values in another
* cpuset_bind() call when v1 cpuset is mounted.
*/
top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
top_cpuset.effective_mems = node_states[N_MEMORY];
hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
BUG_ON(!cpuset_migrate_mm_wq);
}
/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
* @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
* @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
*
* Description: Returns the cpumask_var_t cpus_allowed of the cpuset
* attached to the specified @tsk. Guaranteed to return some non-empty
* subset of cpu_online_mask, even if this means going outside the
 * task's cpuset, except when the task is in the top cpuset.
**/
void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
unsigned long flags;
struct cpuset *cs;
spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
cs = task_cs(tsk);
if (cs != &top_cpuset)
guarantee_online_cpus(tsk, pmask);
/*
	 * Tasks in the top cpuset won't have their cpumasks updated
	 * when a hotplug online/offline event happens. So we include all
* offline cpus in the allowed cpu list.
*/
if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
/*
* We first exclude cpus allocated to partitions. If there is no
* allowable online cpu left, we fall back to all possible cpus.
*/
cpumask_andnot(pmask, possible_mask, top_cpuset.subparts_cpus);
if (!cpumask_intersects(pmask, cpu_online_mask))
cpumask_copy(pmask, possible_mask);
}
rcu_read_unlock();
spin_unlock_irqrestore(&callback_lock, flags);
}
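/*
 * Illustrative caller pattern (a sketch, not taken from this file):
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpuset_cpus_allowed(tsk, mask);	// mask is now non-empty
 *		...
 *		free_cpumask_var(mask);
 *	}
 */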
/**
* cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
* @tsk: pointer to task_struct with which the scheduler is struggling
*
* Description: In the case that the scheduler cannot find an allowed cpu in
* tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
* mode however, this value is the same as task_cs(tsk)->effective_cpus,
* which will not contain a sane cpumask during cases such as cpu hotplugging.
* This is the absolute last resort for the scheduler and it is only used if
* _every_ other avenue has been traveled.
*
* Returns true if the affinity of @tsk was changed, false otherwise.
**/
bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
const struct cpumask *cs_mask;
bool changed = false;
rcu_read_lock();
cs_mask = task_cs(tsk)->cpus_allowed;
if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
do_set_cpus_allowed(tsk, cs_mask);
changed = true;
}
rcu_read_unlock();
/*
* We own tsk->cpus_allowed, nobody can change it under us.
*
* But we used cs && cs->cpus_allowed lockless and thus can
* race with cgroup_attach_task() or update_cpumask() and get
* the wrong tsk->cpus_allowed. However, both cases imply the
* subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
* which takes task_rq_lock().
*
	 * If we are called after it dropped the lock we must see all
	 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
	 * set any mask even if it is not right from task_cs()'s pov;
	 * the pending set_cpus_allowed_ptr() will fix things.
	 *
	 * select_fallback_rq() will fix things up and set cpu_possible_mask
* if required.
*/
return changed;
}
void __init cpuset_init_current_mems_allowed(void)
{
nodes_setall(current->mems_allowed);
}
/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
* @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
*
* Description: Returns the nodemask_t mems_allowed of the cpuset
* attached to the specified @tsk. Guaranteed to return some non-empty
 * subset of node_states[N_MEMORY], even if this means going outside the
 * task's cpuset.
**/
nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
nodemask_t mask;
unsigned long flags;
spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_mems(task_cs(tsk), &mask);
rcu_read_unlock();
spin_unlock_irqrestore(&callback_lock, flags);
return mask;
}
/**
* cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
* @nodemask: the nodemask to be checked
*
* Are any of the nodes in the nodemask allowed in current->mems_allowed?
*/
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
return nodes_intersects(*nodemask, current->mems_allowed);
}
/*
* nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
* mem_hardwall ancestor to the specified cpuset. Call holding
* callback_lock. If no ancestor is mem_exclusive or mem_hardwall
* (an unusual configuration), then returns the root cpuset.
*/
static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
cs = parent_cs(cs);
return cs;
}
/*
* cpuset_node_allowed - Can we allocate on a memory node?
* @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
* If we're in interrupt, yes, we can always allocate. If @node is set in
* current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
* node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
* yes. If current has access to memory reserves as an oom victim, yes.
* Otherwise, no.
*
* GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
* unless the task has been OOM killed.
* GFP_KERNEL allocations are not so marked, so can escape to the
* nearest enclosing hardwalled ancestor cpuset.
*
* Scanning up parent cpusets requires callback_lock. The
* __alloc_pages() routine only calls here with __GFP_HARDWALL bit
* _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
* the zonelist. So only GFP_KERNEL allocations, if all nodes in the
* cpuset are short of memory, might require taking the callback_lock.
*
* The first call here from mm/page_alloc:get_page_from_freelist()
* has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
* so no allocation on a node outside the cpuset is allowed (unless
* in interrupt, of course).
*
* The second pass through get_page_from_freelist() doesn't even call
* here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
* variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags. That logic and the checks below have the combined
 * effect that:
* in_interrupt - any node ok (current task context irrelevant)
* GFP_ATOMIC - any node ok
* tsk_is_oom_victim - any node ok
* GFP_KERNEL - any node in enclosing hardwalled cpuset ok
 * GFP_USER - only nodes in current task's mems_allowed ok.
*/
bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
struct cpuset *cs; /* current cpuset ancestors */
bool allowed; /* is allocation in zone z allowed? */
unsigned long flags;
if (in_interrupt())
return true;
if (node_isset(node, current->mems_allowed))
return true;
/*
* Allow tasks that have access to memory reserves because they have
* been OOM killed to get memory anywhere.
*/
if (unlikely(tsk_is_oom_victim(current)))
return true;
if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
return false;
if (current->flags & PF_EXITING) /* Let dying task have memory */
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
cs = nearest_hardwall_ancestor(task_cs(current));
allowed = node_isset(node, cs->mems_allowed);
rcu_read_unlock();
spin_unlock_irqrestore(&callback_lock, flags);
return allowed;
}
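/*
 * Illustrative use (a sketch): an allocator path might gate a candidate
 * node with
 *
 *	if (!cpuset_node_allowed(nid, gfp_mask))
 *		continue;	// skip this node, try the next zone
 *
 * mirroring the ALLOC_CPUSET handling in get_page_from_freelist().
 */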
/**
* cpuset_spread_node() - On which node to begin search for a page
* @rotor: round robin rotor
*
* If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
* tasks in a cpuset with is_spread_page or is_spread_slab set),
* and if the memory allocation used cpuset_mem_spread_node()
* to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as those used for file
 * system buffers and inode caches, then instead of starting on the
 * local node to look for a free page, rather spread the starting
 * node around the task's mems_allowed nodes.
*
* We don't have to worry about the returned node being offline
* because "it can't happen", and even if it did, it would be ok.
*
* The routines calling guarantee_online_mems() are careful to
* only set nodes in task->mems_allowed that are online. So it
* should not be possible for the following code to return an
* offline node. But if it did, that would be ok, as this routine
* is not returning the node where the allocation must be, only
* the node where the search should start. The zonelist passed to
* __alloc_pages() will include all nodes. If the slab allocator
* is passed an offline node, it will fall back to the local node.
* See kmem_cache_alloc_node().
*/
static int cpuset_spread_node(int *rotor)
{
return *rotor = next_node_in(*rotor, current->mems_allowed);
}
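/*
 * Illustrative behaviour (the values are assumptions): with
 * mems_allowed = {0,2,3} and *rotor == 2, successive calls return
 * 3, 0, 2, 3, ... - next_node_in() wraps around the allowed set,
 * giving a simple round robin.
 */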
/**
* cpuset_mem_spread_node() - On which node to begin search for a file page
*/
int cpuset_mem_spread_node(void)
{
if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
current->cpuset_mem_spread_rotor =
			node_random(&current->mems_allowed);
	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
}
/**
* cpuset_slab_spread_node() - On which node to begin search for a slab page
*/
int cpuset_slab_spread_node(void)
{
if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
current->cpuset_slab_spread_rotor =
			node_random(&current->mems_allowed);
	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
}
EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
/**
* cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
* @tsk1: pointer to task_struct of some task.
* @tsk2: pointer to task_struct of some other task.
*
* Description: Return true if @tsk1's mems_allowed intersects the
 * mems_allowed of @tsk2. Used by the OOM killer to determine if
 * one task's memory usage might impact the memory available
 * to the other.
**/
int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
const struct task_struct *tsk2)
{
return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}
/**
* cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
*
* Description: Prints current's name, cpuset name, and cached copy of its
* mems_allowed to the kernel log.
*/
void cpuset_print_current_mems_allowed(void)
{
struct cgroup *cgrp;
rcu_read_lock();
cgrp = task_cs(current)->css.cgroup;
pr_cont(",cpuset=");
pr_cont_cgroup_name(cgrp);
pr_cont(",mems_allowed=%*pbl",
		nodemask_pr_args(&current->mems_allowed));
rcu_read_unlock();
}
/*
* Collection of memory_pressure is suppressed unless
* this flag is enabled by writing "1" to the special
* cpuset file 'memory_pressure_enabled' in the root cpuset.
*/
int cpuset_memory_pressure_enabled __read_mostly;
/*
* __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
*
* Keep a running average of the rate of synchronous (direct)
* page reclaim efforts initiated by tasks in each cpuset.
*
* This represents the rate at which some task in the cpuset
* ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
* create more free memory by tossing clean pages or swapping
* or writing dirty pages.
*
* Display to user space in the per-cpuset read-only file
* "memory_pressure". Value displayed is an integer
* representing the recent rate of entry into the synchronous
* (direct) page reclaim by any task attached to the cpuset.
*/
void __cpuset_memory_pressure_bump(void)
{
rcu_read_lock();
fmeter_markevent(&task_cs(current)->fmeter);
rcu_read_unlock();
}
#ifdef CONFIG_PROC_PID_CPUSET
/*
* proc_cpuset_show()
 * - Print task's cpuset path into seq_file.
* - Used for /proc/<pid>/cpuset.
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
* and we take cpuset_mutex, keeping cpuset_attach() from changing it
* anyway.
*/
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk)
{
char *buf;
struct cgroup_subsys_state *css;
int retval;
retval = -ENOMEM;
buf = kmalloc(PATH_MAX, GFP_KERNEL);
if (!buf)
goto out;
css = task_get_css(tsk, cpuset_cgrp_id);
retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
current->nsproxy->cgroup_ns);
css_put(css);
if (retval >= PATH_MAX)
retval = -ENAMETOOLONG;
if (retval < 0)
goto out_free;
seq_puts(m, buf);
seq_putc(m, '\n');
retval = 0;
out_free:
kfree(buf);
out:
return retval;
}
#endif /* CONFIG_PROC_PID_CPUSET */
/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
seq_printf(m, "Mems_allowed:\t%*pb\n",
nodemask_pr_args(&task->mems_allowed));
seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
nodemask_pr_args(&task->mems_allowed));
}
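/*
 * Illustrative output (the node count is an assumption): for a task
 * allowed nodes 0-1, /proc/<pid>/status gains lines like
 *
 *	Mems_allowed:	00000003
 *	Mems_allowed_list:	0-1
 *
 * where the mask width depends on MAX_NUMNODES.
 */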
| linux-master | kernel/cgroup/cpuset.c |
/*
* cgroup_freezer.c - control group freezer subsystem
*
* Copyright IBM Corporation, 2007
*
* Author : Cedric Le Goater <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
/*
* A cgroup is freezing if any FREEZING flags are set. FREEZING_SELF is
* set if "FROZEN" is written to freezer.state cgroupfs file, and cleared
* for "THAWED". FREEZING_PARENT is set if the parent freezer is FREEZING
* for whatever reason. IOW, a cgroup has FREEZING_PARENT set if one of
* its ancestors has FREEZING_SELF set.
*/
enum freezer_state_flags {
CGROUP_FREEZER_ONLINE = (1 << 0), /* freezer is fully online */
CGROUP_FREEZING_SELF = (1 << 1), /* this freezer is freezing */
CGROUP_FREEZING_PARENT = (1 << 2), /* the parent freezer is freezing */
CGROUP_FROZEN = (1 << 3), /* this and its descendants frozen */
/* mask for all FREEZING flags */
CGROUP_FREEZING = CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT,
};
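/*
 * Illustrative flag combinations and the strings reported by
 * freezer_state_strs() below:
 *
 *	CGROUP_FREEZING_SELF			-> "FREEZING"
 *	CGROUP_FREEZING_PARENT			-> "FREEZING"
 *	CGROUP_FREEZING_SELF | CGROUP_FROZEN	-> "FROZEN"
 *	CGROUP_FREEZER_ONLINE only		-> "THAWED"
 */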
struct freezer {
struct cgroup_subsys_state css;
unsigned int state;
};
static DEFINE_MUTEX(freezer_mutex);
static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct freezer, css) : NULL;
}
static inline struct freezer *task_freezer(struct task_struct *task)
{
return css_freezer(task_css(task, freezer_cgrp_id));
}
static struct freezer *parent_freezer(struct freezer *freezer)
{
return css_freezer(freezer->css.parent);
}
bool cgroup_freezing(struct task_struct *task)
{
bool ret;
rcu_read_lock();
ret = task_freezer(task)->state & CGROUP_FREEZING;
rcu_read_unlock();
return ret;
}
static const char *freezer_state_strs(unsigned int state)
{
if (state & CGROUP_FROZEN)
return "FROZEN";
if (state & CGROUP_FREEZING)
return "FREEZING";
return "THAWED";
}
static struct cgroup_subsys_state *
freezer_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct freezer *freezer;
freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
if (!freezer)
return ERR_PTR(-ENOMEM);
return &freezer->css;
}
/**
* freezer_css_online - commit creation of a freezer css
* @css: css being created
*
* We're committing to creation of @css. Mark it online and inherit
 * parent's freezing state while holding freezer_mutex.
*/
static int freezer_css_online(struct cgroup_subsys_state *css)
{
struct freezer *freezer = css_freezer(css);
struct freezer *parent = parent_freezer(freezer);
cpus_read_lock();
mutex_lock(&freezer_mutex);
freezer->state |= CGROUP_FREEZER_ONLINE;
if (parent && (parent->state & CGROUP_FREEZING)) {
freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN;
static_branch_inc_cpuslocked(&freezer_active);
}
mutex_unlock(&freezer_mutex);
cpus_read_unlock();
return 0;
}
/**
* freezer_css_offline - initiate destruction of a freezer css
* @css: css being destroyed
*
 * @css is going away. Mark it dead and drop the freezer_active
 * static branch reference if it was holding one.
*/
static void freezer_css_offline(struct cgroup_subsys_state *css)
{
struct freezer *freezer = css_freezer(css);
cpus_read_lock();
mutex_lock(&freezer_mutex);
if (freezer->state & CGROUP_FREEZING)
static_branch_dec_cpuslocked(&freezer_active);
freezer->state = 0;
mutex_unlock(&freezer_mutex);
cpus_read_unlock();
}
static void freezer_css_free(struct cgroup_subsys_state *css)
{
kfree(css_freezer(css));
}
/*
* Tasks can be migrated into a different freezer anytime regardless of its
* current state. freezer_attach() is responsible for making new tasks
* conform to the current state.
*
* Freezer state changes and task migration are synchronized via
 * freezer_mutex. freezer_attach() makes the new tasks conform to the
* current state and all following state changes can see the new tasks.
*/
static void freezer_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *new_css;
mutex_lock(&freezer_mutex);
/*
* Make the new tasks conform to the current state of @new_css.
* For simplicity, when migrating any task to a FROZEN cgroup, we
* revert it to FREEZING and let update_if_frozen() determine the
* correct state later.
*
* Tasks in @tset are on @new_css but may not conform to its
* current state before executing the following - !frozen tasks may
* be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
*/
cgroup_taskset_for_each(task, new_css, tset) {
struct freezer *freezer = css_freezer(new_css);
if (!(freezer->state & CGROUP_FREEZING)) {
__thaw_task(task);
} else {
freeze_task(task);
/* clear FROZEN and propagate upwards */
while (freezer && (freezer->state & CGROUP_FROZEN)) {
freezer->state &= ~CGROUP_FROZEN;
freezer = parent_freezer(freezer);
}
}
}
mutex_unlock(&freezer_mutex);
}
/**
* freezer_fork - cgroup post fork callback
* @task: a task which has just been forked
*
* @task has just been created and should conform to the current state of
* the cgroup_freezer it belongs to. This function may race against
* freezer_attach(). Losing to freezer_attach() means that we don't have
* to do anything as freezer_attach() will put @task into the appropriate
* state.
*/
static void freezer_fork(struct task_struct *task)
{
struct freezer *freezer;
/*
* The root cgroup is non-freezable, so we can skip locking the
* freezer. This is safe regardless of race with task migration.
* If we didn't race or won, skipping is obviously the right thing
* to do. If we lost and root is the new cgroup, noop is still the
* right thing to do.
*/
if (task_css_is_root(task, freezer_cgrp_id))
return;
mutex_lock(&freezer_mutex);
rcu_read_lock();
freezer = task_freezer(task);
if (freezer->state & CGROUP_FREEZING)
freeze_task(task);
rcu_read_unlock();
mutex_unlock(&freezer_mutex);
}
/**
* update_if_frozen - update whether a cgroup finished freezing
* @css: css of interest
*
* Once FREEZING is initiated, transition to FROZEN is lazily updated by
* calling this function. If the current state is FREEZING but not FROZEN,
* this function checks whether all tasks of this cgroup and the descendant
* cgroups finished freezing and, if so, sets FROZEN.
*
* The caller is responsible for grabbing RCU read lock and calling
* update_if_frozen() on all descendants prior to invoking this function.
*
* Task states and freezer state might disagree while tasks are being
* migrated into or out of @css, so we can't verify task states against
* @freezer state here. See freezer_attach() for details.
*/
static void update_if_frozen(struct cgroup_subsys_state *css)
{
struct freezer *freezer = css_freezer(css);
struct cgroup_subsys_state *pos;
struct css_task_iter it;
struct task_struct *task;
lockdep_assert_held(&freezer_mutex);
if (!(freezer->state & CGROUP_FREEZING) ||
(freezer->state & CGROUP_FROZEN))
return;
/* are all (live) children frozen? */
rcu_read_lock();
css_for_each_child(pos, css) {
struct freezer *child = css_freezer(pos);
if ((child->state & CGROUP_FREEZER_ONLINE) &&
!(child->state & CGROUP_FROZEN)) {
rcu_read_unlock();
return;
}
}
rcu_read_unlock();
/* are all tasks frozen? */
css_task_iter_start(css, 0, &it);
while ((task = css_task_iter_next(&it))) {
if (freezing(task) && !frozen(task))
goto out_iter_end;
}
freezer->state |= CGROUP_FROZEN;
out_iter_end:
css_task_iter_end(&it);
}
static int freezer_read(struct seq_file *m, void *v)
{
struct cgroup_subsys_state *css = seq_css(m), *pos;
mutex_lock(&freezer_mutex);
rcu_read_lock();
/* update states bottom-up */
css_for_each_descendant_post(pos, css) {
if (!css_tryget_online(pos))
continue;
rcu_read_unlock();
update_if_frozen(pos);
rcu_read_lock();
css_put(pos);
}
rcu_read_unlock();
mutex_unlock(&freezer_mutex);
seq_puts(m, freezer_state_strs(css_freezer(css)->state));
seq_putc(m, '\n');
return 0;
}
static void freeze_cgroup(struct freezer *freezer)
{
struct css_task_iter it;
struct task_struct *task;
css_task_iter_start(&freezer->css, 0, &it);
while ((task = css_task_iter_next(&it)))
freeze_task(task);
css_task_iter_end(&it);
}
static void unfreeze_cgroup(struct freezer *freezer)
{
struct css_task_iter it;
struct task_struct *task;
css_task_iter_start(&freezer->css, 0, &it);
while ((task = css_task_iter_next(&it)))
__thaw_task(task);
css_task_iter_end(&it);
}
/**
* freezer_apply_state - apply state change to a single cgroup_freezer
* @freezer: freezer to apply state change to
* @freeze: whether to freeze or unfreeze
* @state: CGROUP_FREEZING_* flag to set or clear
*
 * Set or clear @state on @freezer according to @freeze, and perform
* freezing or thawing as necessary.
*/
static void freezer_apply_state(struct freezer *freezer, bool freeze,
unsigned int state)
{
/* also synchronizes against task migration, see freezer_attach() */
lockdep_assert_held(&freezer_mutex);
if (!(freezer->state & CGROUP_FREEZER_ONLINE))
return;
if (freeze) {
if (!(freezer->state & CGROUP_FREEZING))
static_branch_inc_cpuslocked(&freezer_active);
freezer->state |= state;
freeze_cgroup(freezer);
} else {
bool was_freezing = freezer->state & CGROUP_FREEZING;
freezer->state &= ~state;
if (!(freezer->state & CGROUP_FREEZING)) {
freezer->state &= ~CGROUP_FROZEN;
if (was_freezing)
static_branch_dec_cpuslocked(&freezer_active);
unfreeze_cgroup(freezer);
}
}
}
/**
* freezer_change_state - change the freezing state of a cgroup_freezer
* @freezer: freezer of interest
* @freeze: whether to freeze or thaw
*
* Freeze or thaw @freezer according to @freeze. The operations are
* recursive - all descendants of @freezer will be affected.
*/
static void freezer_change_state(struct freezer *freezer, bool freeze)
{
struct cgroup_subsys_state *pos;
cpus_read_lock();
/*
* Update all its descendants in pre-order traversal. Each
* descendant will try to inherit its parent's FREEZING state as
* CGROUP_FREEZING_PARENT.
*/
mutex_lock(&freezer_mutex);
rcu_read_lock();
css_for_each_descendant_pre(pos, &freezer->css) {
struct freezer *pos_f = css_freezer(pos);
struct freezer *parent = parent_freezer(pos_f);
if (!css_tryget_online(pos))
continue;
rcu_read_unlock();
if (pos_f == freezer)
freezer_apply_state(pos_f, freeze,
CGROUP_FREEZING_SELF);
else
freezer_apply_state(pos_f,
parent->state & CGROUP_FREEZING,
CGROUP_FREEZING_PARENT);
rcu_read_lock();
css_put(pos);
}
rcu_read_unlock();
mutex_unlock(&freezer_mutex);
cpus_read_unlock();
}
static ssize_t freezer_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
bool freeze;
buf = strstrip(buf);
if (strcmp(buf, freezer_state_strs(0)) == 0)
freeze = false;
else if (strcmp(buf, freezer_state_strs(CGROUP_FROZEN)) == 0)
freeze = true;
else
return -EINVAL;
freezer_change_state(css_freezer(of_css(of)), freeze);
return nbytes;
}
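/*
 * Illustrative usage (the mount point is an assumption):
 *
 *	# echo FROZEN > /sys/fs/cgroup/freezer/grp/freezer.state
 *	# cat /sys/fs/cgroup/freezer/grp/freezer.state
 *	FREEZING		(until every task is frozen)
 *
 * Only "FROZEN" and "THAWED" are accepted on write; "FREEZING" is a
 * read-only intermediate state.
 */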
static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct freezer *freezer = css_freezer(css);
return (bool)(freezer->state & CGROUP_FREEZING_SELF);
}
static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct freezer *freezer = css_freezer(css);
return (bool)(freezer->state & CGROUP_FREEZING_PARENT);
}
static struct cftype files[] = {
{
.name = "state",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = freezer_read,
.write = freezer_write,
},
{
.name = "self_freezing",
.flags = CFTYPE_NOT_ON_ROOT,
.read_u64 = freezer_self_freezing_read,
},
{
.name = "parent_freezing",
.flags = CFTYPE_NOT_ON_ROOT,
.read_u64 = freezer_parent_freezing_read,
},
{ } /* terminate */
};
struct cgroup_subsys freezer_cgrp_subsys = {
.css_alloc = freezer_css_alloc,
.css_online = freezer_css_online,
.css_offline = freezer_css_offline,
.css_free = freezer_css_free,
.attach = freezer_attach,
.fork = freezer_fork,
.legacy_cftypes = files,
};
| linux-master | kernel/cgroup/legacy_freezer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Debug controller
*
* WARNING: This controller is for cgroup core debugging only.
 * Its interfaces are unstable and subject to change at any time.
*/
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "cgroup-internal.h"
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
if (!css)
return ERR_PTR(-ENOMEM);
return css;
}
static void debug_css_free(struct cgroup_subsys_state *css)
{
kfree(css);
}
/*
* debug_taskcount_read - return the number of tasks in a cgroup.
 * @css: css of the cgroup in question
*/
static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
return cgroup_task_count(css->cgroup);
}
static int current_css_set_read(struct seq_file *seq, void *v)
{
struct kernfs_open_file *of = seq->private;
struct css_set *cset;
struct cgroup_subsys *ss;
struct cgroup_subsys_state *css;
int i, refcnt;
if (!cgroup_kn_lock_live(of->kn, false))
return -ENODEV;
spin_lock_irq(&css_set_lock);
rcu_read_lock();
cset = task_css_set(current);
refcnt = refcount_read(&cset->refcount);
seq_printf(seq, "css_set %pK %d", cset, refcnt);
if (refcnt > cset->nr_tasks)
seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
seq_puts(seq, "\n");
/*
	 * Print the csses stored in the current css_set.
*/
for_each_subsys(ss, i) {
css = cset->subsys[ss->id];
if (!css)
continue;
seq_printf(seq, "%2d: %-4s\t- %p[%d]\n", ss->id, ss->name,
css, css->id);
}
rcu_read_unlock();
spin_unlock_irq(&css_set_lock);
cgroup_kn_unlock(of->kn);
return 0;
}
static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
u64 count;
rcu_read_lock();
count = refcount_read(&task_css_set(current)->refcount);
rcu_read_unlock();
return count;
}
static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
struct cgrp_cset_link *link;
struct css_set *cset;
char *name_buf;
name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
if (!name_buf)
return -ENOMEM;
spin_lock_irq(&css_set_lock);
rcu_read_lock();
cset = task_css_set(current);
list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
struct cgroup *c = link->cgrp;
cgroup_name(c, name_buf, NAME_MAX + 1);
seq_printf(seq, "Root %d group %s\n",
c->root->hierarchy_id, name_buf);
}
rcu_read_unlock();
spin_unlock_irq(&css_set_lock);
kfree(name_buf);
return 0;
}
#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
struct cgroup_subsys_state *css = seq_css(seq);
struct cgrp_cset_link *link;
int dead_cnt = 0, extra_refs = 0, threaded_csets = 0;
spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
struct css_set *cset = link->cset;
struct task_struct *task;
int count = 0;
int refcnt = refcount_read(&cset->refcount);
/*
* Print out the proc_cset and threaded_cset relationship
		 * and highlight the difference between refcount and task_count.
*/
seq_printf(seq, "css_set %pK", cset);
if (rcu_dereference_protected(cset->dom_cset, 1) != cset) {
threaded_csets++;
seq_printf(seq, "=>%pK", cset->dom_cset);
}
if (!list_empty(&cset->threaded_csets)) {
struct css_set *tcset;
int idx = 0;
list_for_each_entry(tcset, &cset->threaded_csets,
threaded_csets_node) {
seq_puts(seq, idx ? "," : "<=");
seq_printf(seq, "%pK", tcset);
idx++;
}
} else {
seq_printf(seq, " %d", refcnt);
if (refcnt - cset->nr_tasks > 0) {
int extra = refcnt - cset->nr_tasks;
seq_printf(seq, " +%d", extra);
/*
* Take out the one additional reference in
* init_css_set.
*/
if (cset == &init_css_set)
extra--;
extra_refs += extra;
}
}
seq_puts(seq, "\n");
list_for_each_entry(task, &cset->tasks, cg_list) {
if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
seq_printf(seq, " task %d\n",
task_pid_vnr(task));
}
list_for_each_entry(task, &cset->mg_tasks, cg_list) {
if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
seq_printf(seq, " task %d\n",
task_pid_vnr(task));
}
/* show # of overflowed tasks */
if (count > MAX_TASKS_SHOWN_PER_CSS)
seq_printf(seq, " ... (%d)\n",
count - MAX_TASKS_SHOWN_PER_CSS);
if (cset->dead) {
seq_puts(seq, " [dead]\n");
dead_cnt++;
}
WARN_ON(count != cset->nr_tasks);
}
spin_unlock_irq(&css_set_lock);
if (!dead_cnt && !extra_refs && !threaded_csets)
return 0;
seq_puts(seq, "\n");
if (threaded_csets)
seq_printf(seq, "threaded css_sets = %d\n", threaded_csets);
if (extra_refs)
seq_printf(seq, "extra references = %d\n", extra_refs);
if (dead_cnt)
seq_printf(seq, "dead css_sets = %d\n", dead_cnt);
return 0;
}
static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
{
struct kernfs_open_file *of = seq->private;
struct cgroup *cgrp;
struct cgroup_subsys *ss;
struct cgroup_subsys_state *css;
char pbuf[16];
int i;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENODEV;
for_each_subsys(ss, i) {
css = rcu_dereference_check(cgrp->subsys[ss->id], true);
if (!css)
continue;
pbuf[0] = '\0';
		/* Show the parent CSS if applicable */
if (css->parent)
snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
css->parent->id);
seq_printf(seq, "%2d: %-4s\t- %p[%d] %d%s\n", ss->id, ss->name,
css, css->id,
atomic_read(&css->online_cnt), pbuf);
}
cgroup_kn_unlock(of->kn);
return 0;
}
static void cgroup_masks_read_one(struct seq_file *seq, const char *name,
u16 mask)
{
struct cgroup_subsys *ss;
int ssid;
bool first = true;
seq_printf(seq, "%-17s: ", name);
for_each_subsys(ss, ssid) {
if (!(mask & (1 << ssid)))
continue;
if (!first)
seq_puts(seq, ", ");
seq_puts(seq, ss->name);
first = false;
}
seq_putc(seq, '\n');
}
static int cgroup_masks_read(struct seq_file *seq, void *v)
{
struct kernfs_open_file *of = seq->private;
struct cgroup *cgrp;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENODEV;
cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);
cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);
cgroup_kn_unlock(of->kn);
return 0;
}
static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
return (!cgroup_is_populated(css->cgroup) &&
!css_has_online_children(&css->cgroup->self));
}
static struct cftype debug_legacy_files[] = {
{
.name = "taskcount",
.read_u64 = debug_taskcount_read,
},
{
.name = "current_css_set",
.seq_show = current_css_set_read,
.flags = CFTYPE_ONLY_ON_ROOT,
},
{
.name = "current_css_set_refcount",
.read_u64 = current_css_set_refcount_read,
.flags = CFTYPE_ONLY_ON_ROOT,
},
{
.name = "current_css_set_cg_links",
.seq_show = current_css_set_cg_links_read,
.flags = CFTYPE_ONLY_ON_ROOT,
},
{
.name = "cgroup_css_links",
.seq_show = cgroup_css_links_read,
},
{
.name = "cgroup_subsys_states",
.seq_show = cgroup_subsys_states_read,
},
{
.name = "cgroup_masks",
.seq_show = cgroup_masks_read,
},
{
.name = "releasable",
.read_u64 = releasable_read,
},
{ } /* terminate */
};
static struct cftype debug_files[] = {
{
.name = "taskcount",
.read_u64 = debug_taskcount_read,
},
{
.name = "current_css_set",
.seq_show = current_css_set_read,
.flags = CFTYPE_ONLY_ON_ROOT,
},
{
.name = "current_css_set_refcount",
.read_u64 = current_css_set_refcount_read,
.flags = CFTYPE_ONLY_ON_ROOT,
},
{
.name = "current_css_set_cg_links",
.seq_show = current_css_set_cg_links_read,
.flags = CFTYPE_ONLY_ON_ROOT,
},
{
.name = "css_links",
.seq_show = cgroup_css_links_read,
},
{
.name = "csses",
.seq_show = cgroup_subsys_states_read,
},
{
.name = "masks",
.seq_show = cgroup_masks_read,
},
{ } /* terminate */
};
struct cgroup_subsys debug_cgrp_subsys = {
.css_alloc = debug_css_alloc,
.css_free = debug_css_free,
.legacy_cftypes = debug_legacy_files,
};
/*
 * On v2, debug is an implicit controller enabled by the "cgroup_debug"
 * boot parameter.
*/
void __init enable_debug_cgroup(void)
{
debug_cgrp_subsys.dfl_cftypes = debug_files;
debug_cgrp_subsys.implicit_on_dfl = true;
debug_cgrp_subsys.threaded = true;
}
| linux-master | kernel/cgroup/debug.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Process number limiting controller for cgroups.
*
* Used to allow a cgroup hierarchy to stop any new processes from fork()ing
* after a certain limit is reached.
*
* Since it is trivial to hit the task limit without hitting any kmemcg limits
* in place, PIDs are a fundamental resource. As such, PID exhaustion must be
* preventable in the scope of a cgroup hierarchy by allowing resource limiting
* of the number of tasks in a cgroup.
*
* In order to use the `pids` controller, set the maximum number of tasks in
* pids.max (this is not available in the root cgroup for obvious reasons). The
* number of processes currently in the cgroup is given by pids.current.
* Organisational operations are not blocked by cgroup policies, so it is
* possible to have pids.current > pids.max. However, it is not possible to
* violate a cgroup policy through fork(). fork() will return -EAGAIN if forking
* would cause a cgroup policy to be violated.
*
* To set a cgroup to have no limit, set pids.max to "max". This is the default
* for all new cgroups (N.B. that PID limits are hierarchical, so the most
* stringent limit in the hierarchy is followed).
*
* pids.current tracks all child cgroup hierarchies, so parent/pids.current is
* a superset of parent/child/pids.current.
*
* Copyright (C) 2015 Aleksa Sarai <[email protected]>
*/
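/*
 * Illustrative shell usage (not part of the original header; paths assume
 * a cgroup v2 hierarchy mounted at /sys/fs/cgroup):
 *
 *	# echo 42 > /sys/fs/cgroup/mygrp/pids.max
 *	# cat /sys/fs/cgroup/mygrp/pids.current
 *	3
 *	# echo max > /sys/fs/cgroup/mygrp/pids.max	# remove the limit
 */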
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/sched/task.h>
#define PIDS_MAX (PID_MAX_LIMIT + 1ULL)
#define PIDS_MAX_STR "max"
struct pids_cgroup {
struct cgroup_subsys_state css;
/*
* Use 64-bit types so that we can safely represent "max" as
* %PIDS_MAX = (%PID_MAX_LIMIT + 1).
*/
atomic64_t counter;
atomic64_t limit;
int64_t watermark;
/* Handle for "pids.events" */
struct cgroup_file events_file;
/* Number of times fork failed because limit was hit. */
atomic64_t events_limit;
};
static struct pids_cgroup *css_pids(struct cgroup_subsys_state *css)
{
return container_of(css, struct pids_cgroup, css);
}
static struct pids_cgroup *parent_pids(struct pids_cgroup *pids)
{
return css_pids(pids->css.parent);
}
static struct cgroup_subsys_state *
pids_css_alloc(struct cgroup_subsys_state *parent)
{
struct pids_cgroup *pids;
pids = kzalloc(sizeof(struct pids_cgroup), GFP_KERNEL);
if (!pids)
return ERR_PTR(-ENOMEM);
atomic64_set(&pids->counter, 0);
atomic64_set(&pids->limit, PIDS_MAX);
atomic64_set(&pids->events_limit, 0);
return &pids->css;
}
static void pids_css_free(struct cgroup_subsys_state *css)
{
kfree(css_pids(css));
}
static void pids_update_watermark(struct pids_cgroup *p, int64_t nr_pids)
{
/*
* This is racy, but we don't need perfectly accurate tallying of
* the watermark, and this lets us avoid extra atomic overhead.
*/
if (nr_pids > READ_ONCE(p->watermark))
WRITE_ONCE(p->watermark, nr_pids);
}
/**
* pids_cancel - uncharge the local pid count
* @pids: the pid cgroup state
* @num: the number of pids to cancel
*
* This function will WARN if the pid count goes under 0, because such a case is
* a bug in the pids controller proper.
*/
static void pids_cancel(struct pids_cgroup *pids, int num)
{
/*
* A negative count (or overflow for that matter) is invalid,
* and indicates a bug in the `pids` controller proper.
*/
WARN_ON_ONCE(atomic64_add_negative(-num, &pids->counter));
}
/**
* pids_uncharge - hierarchically uncharge the pid count
* @pids: the pid cgroup state
* @num: the number of pids to uncharge
*/
static void pids_uncharge(struct pids_cgroup *pids, int num)
{
struct pids_cgroup *p;
for (p = pids; parent_pids(p); p = parent_pids(p))
pids_cancel(p, num);
}
/**
* pids_charge - hierarchically charge the pid count
* @pids: the pid cgroup state
* @num: the number of pids to charge
*
* This function does *not* follow the pid limit set. It cannot fail and the new
* pid count may exceed the limit. This is only used for reverting failed
* attaches, where there is no other way out than violating the limit.
*/
static void pids_charge(struct pids_cgroup *pids, int num)
{
struct pids_cgroup *p;
for (p = pids; parent_pids(p); p = parent_pids(p)) {
int64_t new = atomic64_add_return(num, &p->counter);
pids_update_watermark(p, new);
}
}
/**
* pids_try_charge - hierarchically try to charge the pid count
* @pids: the pid cgroup state
* @num: the number of pids to charge
*
* This function follows the set limit. It will fail if the charge would cause
* the new value to exceed the hierarchical limit. Returns 0 if the charge
* succeeded, otherwise -EAGAIN.
*/
static int pids_try_charge(struct pids_cgroup *pids, int num)
{
struct pids_cgroup *p, *q;
for (p = pids; parent_pids(p); p = parent_pids(p)) {
int64_t new = atomic64_add_return(num, &p->counter);
int64_t limit = atomic64_read(&p->limit);
/*
* Since new is capped to the maximum number of pid_t, if
* p->limit is %PIDS_MAX then we know that this test will never
* fail.
*/
if (new > limit)
goto revert;
/*
* Not technically accurate if we go over limit somewhere up
* the hierarchy, but that's tolerable for the watermark.
*/
pids_update_watermark(p, new);
}
return 0;
revert:
for (q = pids; q != p; q = parent_pids(q))
pids_cancel(q, num);
pids_cancel(p, num);
return -EAGAIN;
}
static int pids_can_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *dst_css;
cgroup_taskset_for_each(task, dst_css, tset) {
struct pids_cgroup *pids = css_pids(dst_css);
struct cgroup_subsys_state *old_css;
struct pids_cgroup *old_pids;
/*
* No need to pin @old_css between here and cancel_attach()
* because cgroup core protects it from being freed before
* the migration completes or fails.
*/
old_css = task_css(task, pids_cgrp_id);
old_pids = css_pids(old_css);
pids_charge(pids, 1);
pids_uncharge(old_pids, 1);
}
return 0;
}
static void pids_cancel_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *dst_css;
cgroup_taskset_for_each(task, dst_css, tset) {
struct pids_cgroup *pids = css_pids(dst_css);
struct cgroup_subsys_state *old_css;
struct pids_cgroup *old_pids;
old_css = task_css(task, pids_cgrp_id);
old_pids = css_pids(old_css);
pids_charge(old_pids, 1);
pids_uncharge(pids, 1);
}
}
/*
 * The task_css_check(true) calls in pids_can_fork() and pids_cancel_fork()
 * rely on copy_process() holding cgroup_threadgroup_change_begin().
*/
static int pids_can_fork(struct task_struct *task, struct css_set *cset)
{
struct cgroup_subsys_state *css;
struct pids_cgroup *pids;
int err;
if (cset)
css = cset->subsys[pids_cgrp_id];
else
css = task_css_check(current, pids_cgrp_id, true);
pids = css_pids(css);
err = pids_try_charge(pids, 1);
if (err) {
/* Only log the first time events_limit is incremented. */
if (atomic64_inc_return(&pids->events_limit) == 1) {
pr_info("cgroup: fork rejected by pids controller in ");
pr_cont_cgroup_path(css->cgroup);
pr_cont("\n");
}
cgroup_file_notify(&pids->events_file);
}
return err;
}
static void pids_cancel_fork(struct task_struct *task, struct css_set *cset)
{
struct cgroup_subsys_state *css;
struct pids_cgroup *pids;
if (cset)
css = cset->subsys[pids_cgrp_id];
else
css = task_css_check(current, pids_cgrp_id, true);
pids = css_pids(css);
pids_uncharge(pids, 1);
}
static void pids_release(struct task_struct *task)
{
struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
pids_uncharge(pids, 1);
}
static ssize_t pids_max_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct cgroup_subsys_state *css = of_css(of);
struct pids_cgroup *pids = css_pids(css);
int64_t limit;
int err;
buf = strstrip(buf);
if (!strcmp(buf, PIDS_MAX_STR)) {
limit = PIDS_MAX;
goto set_limit;
}
err = kstrtoll(buf, 0, &limit);
if (err)
return err;
if (limit < 0 || limit >= PIDS_MAX)
return -EINVAL;
set_limit:
/*
* Limit updates don't need to be mutex'd, since it isn't
* critical that any racing fork()s follow the new limit.
*/
atomic64_set(&pids->limit, limit);
return nbytes;
}
static int pids_max_show(struct seq_file *sf, void *v)
{
struct cgroup_subsys_state *css = seq_css(sf);
struct pids_cgroup *pids = css_pids(css);
int64_t limit = atomic64_read(&pids->limit);
if (limit >= PIDS_MAX)
seq_printf(sf, "%s\n", PIDS_MAX_STR);
else
seq_printf(sf, "%lld\n", limit);
return 0;
}
static s64 pids_current_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct pids_cgroup *pids = css_pids(css);
return atomic64_read(&pids->counter);
}
static s64 pids_peak_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct pids_cgroup *pids = css_pids(css);
return READ_ONCE(pids->watermark);
}
static int pids_events_show(struct seq_file *sf, void *v)
{
struct pids_cgroup *pids = css_pids(seq_css(sf));
seq_printf(sf, "max %lld\n", (s64)atomic64_read(&pids->events_limit));
return 0;
}
static struct cftype pids_files[] = {
{
.name = "max",
.write = pids_max_write,
.seq_show = pids_max_show,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "current",
.read_s64 = pids_current_read,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "peak",
.flags = CFTYPE_NOT_ON_ROOT,
.read_s64 = pids_peak_read,
},
{
.name = "events",
.seq_show = pids_events_show,
.file_offset = offsetof(struct pids_cgroup, events_file),
.flags = CFTYPE_NOT_ON_ROOT,
},
{ } /* terminate */
};
struct cgroup_subsys pids_cgrp_subsys = {
.css_alloc = pids_css_alloc,
.css_free = pids_css_free,
.can_attach = pids_can_attach,
.cancel_attach = pids_cancel_attach,
.can_fork = pids_can_fork,
.cancel_fork = pids_cancel_fork,
.release = pids_release,
.legacy_cftypes = pids_files,
.dfl_cftypes = pids_files,
.threaded = true,
};
| linux-master | kernel/cgroup/pids.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/cgroup.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>
#include "cgroup-internal.h"
#include <trace/events/cgroup.h>
/*
 * Propagate the cgroup frozen state upwards through the cgroup tree.
*/
static void cgroup_propagate_frozen(struct cgroup *cgrp, bool frozen)
{
int desc = 1;
/*
* If the new state is frozen, some freezing ancestor cgroups may change
	 * their state too, depending on whether all their descendants are frozen.
*
* Otherwise, all ancestor cgroups are forced into the non-frozen state.
*/
while ((cgrp = cgroup_parent(cgrp))) {
if (frozen) {
cgrp->freezer.nr_frozen_descendants += desc;
if (!test_bit(CGRP_FROZEN, &cgrp->flags) &&
test_bit(CGRP_FREEZE, &cgrp->flags) &&
cgrp->freezer.nr_frozen_descendants ==
cgrp->nr_descendants) {
set_bit(CGRP_FROZEN, &cgrp->flags);
cgroup_file_notify(&cgrp->events_file);
TRACE_CGROUP_PATH(notify_frozen, cgrp, 1);
desc++;
}
} else {
cgrp->freezer.nr_frozen_descendants -= desc;
if (test_bit(CGRP_FROZEN, &cgrp->flags)) {
clear_bit(CGRP_FROZEN, &cgrp->flags);
cgroup_file_notify(&cgrp->events_file);
TRACE_CGROUP_PATH(notify_frozen, cgrp, 0);
desc++;
}
}
}
}
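/*
 * Worked example (illustrative, not from the original source): for a
 * hierarchy A -> B -> C where A and B have CGRP_FREEZE set and C just
 * became frozen, the loop above first bumps B's nr_frozen_descendants;
 * if B is now fully frozen it also gets CGRP_FROZEN and @desc grows to
 * 2, so A's counter then accounts for both B and C in a single pass.
 */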
/*
* Revisit the cgroup frozen state.
 * Checks whether the cgroup is really frozen and performs all state
 * transitions.
*/
void cgroup_update_frozen(struct cgroup *cgrp)
{
bool frozen;
lockdep_assert_held(&css_set_lock);
/*
* If the cgroup has to be frozen (CGRP_FREEZE bit set),
* and all tasks are frozen and/or stopped, let's consider
* the cgroup frozen. Otherwise it's not frozen.
*/
frozen = test_bit(CGRP_FREEZE, &cgrp->flags) &&
cgrp->freezer.nr_frozen_tasks == __cgroup_task_count(cgrp);
if (frozen) {
/* Already there? */
if (test_bit(CGRP_FROZEN, &cgrp->flags))
return;
set_bit(CGRP_FROZEN, &cgrp->flags);
} else {
/* Already there? */
if (!test_bit(CGRP_FROZEN, &cgrp->flags))
return;
clear_bit(CGRP_FROZEN, &cgrp->flags);
}
cgroup_file_notify(&cgrp->events_file);
TRACE_CGROUP_PATH(notify_frozen, cgrp, frozen);
/* Update the state of ancestor cgroups. */
cgroup_propagate_frozen(cgrp, frozen);
}
/*
* Increment cgroup's nr_frozen_tasks.
*/
static void cgroup_inc_frozen_cnt(struct cgroup *cgrp)
{
cgrp->freezer.nr_frozen_tasks++;
}
/*
* Decrement cgroup's nr_frozen_tasks.
*/
static void cgroup_dec_frozen_cnt(struct cgroup *cgrp)
{
cgrp->freezer.nr_frozen_tasks--;
WARN_ON_ONCE(cgrp->freezer.nr_frozen_tasks < 0);
}
/*
* Enter frozen/stopped state, if not yet there. Update cgroup's counters,
* and revisit the state of the cgroup, if necessary.
*/
void cgroup_enter_frozen(void)
{
struct cgroup *cgrp;
if (current->frozen)
return;
spin_lock_irq(&css_set_lock);
current->frozen = true;
cgrp = task_dfl_cgroup(current);
cgroup_inc_frozen_cnt(cgrp);
cgroup_update_frozen(cgrp);
spin_unlock_irq(&css_set_lock);
}
/*
* Conditionally leave frozen/stopped state. Update cgroup's counters,
* and revisit the state of the cgroup, if necessary.
*
 * If always_leave is not set and the cgroup is freezing, we're racing
 * with a concurrent freeze operation. In this case, we don't
* drop the frozen counter to avoid a transient switch to
* the unfrozen state.
*/
void cgroup_leave_frozen(bool always_leave)
{
struct cgroup *cgrp;
spin_lock_irq(&css_set_lock);
cgrp = task_dfl_cgroup(current);
if (always_leave || !test_bit(CGRP_FREEZE, &cgrp->flags)) {
cgroup_dec_frozen_cnt(cgrp);
cgroup_update_frozen(cgrp);
WARN_ON_ONCE(!current->frozen);
current->frozen = false;
} else if (!(current->jobctl & JOBCTL_TRAP_FREEZE)) {
spin_lock(¤t->sighand->siglock);
current->jobctl |= JOBCTL_TRAP_FREEZE;
set_thread_flag(TIF_SIGPENDING);
spin_unlock(¤t->sighand->siglock);
}
spin_unlock_irq(&css_set_lock);
}
/*
* Freeze or unfreeze the task by setting or clearing the JOBCTL_TRAP_FREEZE
* jobctl bit.
*/
static void cgroup_freeze_task(struct task_struct *task, bool freeze)
{
unsigned long flags;
/* If the task is about to die, don't bother with freezing it. */
if (!lock_task_sighand(task, &flags))
return;
if (freeze) {
task->jobctl |= JOBCTL_TRAP_FREEZE;
signal_wake_up(task, false);
} else {
task->jobctl &= ~JOBCTL_TRAP_FREEZE;
wake_up_process(task);
}
unlock_task_sighand(task, &flags);
}
/*
* Freeze or unfreeze all tasks in the given cgroup.
*/
static void cgroup_do_freeze(struct cgroup *cgrp, bool freeze)
{
struct css_task_iter it;
struct task_struct *task;
lockdep_assert_held(&cgroup_mutex);
spin_lock_irq(&css_set_lock);
if (freeze)
set_bit(CGRP_FREEZE, &cgrp->flags);
else
clear_bit(CGRP_FREEZE, &cgrp->flags);
spin_unlock_irq(&css_set_lock);
if (freeze)
TRACE_CGROUP_PATH(freeze, cgrp);
else
TRACE_CGROUP_PATH(unfreeze, cgrp);
css_task_iter_start(&cgrp->self, 0, &it);
while ((task = css_task_iter_next(&it))) {
/*
* Ignore kernel threads here. Freezing cgroups containing
* kthreads isn't supported.
*/
if (task->flags & PF_KTHREAD)
continue;
cgroup_freeze_task(task, freeze);
}
css_task_iter_end(&it);
/*
* Cgroup state should be revisited here to cover empty leaf cgroups
	 * and cgroups whose descendants are already in the desired state.
*/
spin_lock_irq(&css_set_lock);
if (cgrp->nr_descendants == cgrp->freezer.nr_frozen_descendants)
cgroup_update_frozen(cgrp);
spin_unlock_irq(&css_set_lock);
}
/*
* Adjust the task state (freeze or unfreeze) and revisit the state of
* source and destination cgroups.
*/
void cgroup_freezer_migrate_task(struct task_struct *task,
struct cgroup *src, struct cgroup *dst)
{
lockdep_assert_held(&css_set_lock);
/*
* Kernel threads are not supposed to be frozen at all.
*/
if (task->flags & PF_KTHREAD)
return;
/*
* It's not necessary to do changes if both of the src and dst cgroups
* are not freezing and task is not frozen.
*/
if (!test_bit(CGRP_FREEZE, &src->flags) &&
!test_bit(CGRP_FREEZE, &dst->flags) &&
!task->frozen)
return;
/*
* Adjust counters of freezing and frozen tasks.
* Note, that if the task is frozen, but the destination cgroup is not
* frozen, we bump both counters to keep them balanced.
*/
if (task->frozen) {
cgroup_inc_frozen_cnt(dst);
cgroup_dec_frozen_cnt(src);
}
cgroup_update_frozen(dst);
cgroup_update_frozen(src);
/*
* Force the task to the desired state.
*/
cgroup_freeze_task(task, test_bit(CGRP_FREEZE, &dst->flags));
}
void cgroup_freeze(struct cgroup *cgrp, bool freeze)
{
struct cgroup_subsys_state *css;
struct cgroup *dsct;
bool applied = false;
lockdep_assert_held(&cgroup_mutex);
/*
* Nothing changed? Just exit.
*/
if (cgrp->freezer.freeze == freeze)
return;
cgrp->freezer.freeze = freeze;
/*
* Propagate changes downwards the cgroup tree.
*/
css_for_each_descendant_pre(css, &cgrp->self) {
dsct = css->cgroup;
if (cgroup_is_dead(dsct))
continue;
if (freeze) {
dsct->freezer.e_freeze++;
/*
* Already frozen because of ancestor's settings?
*/
if (dsct->freezer.e_freeze > 1)
continue;
} else {
dsct->freezer.e_freeze--;
/*
* Still frozen because of ancestor's settings?
*/
if (dsct->freezer.e_freeze > 0)
continue;
WARN_ON_ONCE(dsct->freezer.e_freeze < 0);
}
/*
* Do change actual state: freeze or unfreeze.
*/
cgroup_do_freeze(dsct, freeze);
applied = true;
}
/*
* Even if the actual state hasn't changed, let's notify a user.
* The state can be enforced by an ancestor cgroup: the cgroup
* can already be in the desired state or it can be locked in the
* opposite state, so that the transition will never happen.
	 * In both cases it's better to notify the user that there is
* nothing to wait for.
*/
if (!applied) {
TRACE_CGROUP_PATH(notify_frozen, cgrp,
test_bit(CGRP_FROZEN, &cgrp->flags));
cgroup_file_notify(&cgrp->events_file);
}
}
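/*
 * Illustrative usage from userspace (not part of this file): on cgroup
 * v2 this is driven through the cgroup.freeze interface file, roughly:
 *
 *	# echo 1 > /sys/fs/cgroup/A/cgroup.freeze
 *	# grep frozen /sys/fs/cgroup/A/cgroup.events
 *	frozen 1
 *
 * Writing 0 thaws the whole subtree again.
 */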
| linux-master | kernel/cgroup/freezer.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"
#include <linux/sched/cputime.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
static DEFINE_SPINLOCK(cgroup_rstat_lock);
static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
{
return per_cpu_ptr(cgrp->rstat_cpu, cpu);
}
/**
* cgroup_rstat_updated - keep track of updated rstat_cpu
* @cgrp: target cgroup
* @cpu: cpu on which rstat_cpu was updated
*
* @cgrp's rstat_cpu on @cpu was updated. Put it on the parent's matching
* rstat_cpu->updated_children list. See the comment on top of
* cgroup_rstat_cpu definition for details.
*/
__bpf_kfunc void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
unsigned long flags;
/*
* Speculative already-on-list test. This may race leading to
* temporary inaccuracies, which is fine.
*
* Because @parent's updated_children is terminated with @parent
* instead of NULL, we can tell whether @cgrp is on the list by
* testing the next pointer for NULL.
*/
if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))
return;
raw_spin_lock_irqsave(cpu_lock, flags);
/* put @cgrp and all ancestors on the corresponding updated lists */
while (true) {
struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
struct cgroup *parent = cgroup_parent(cgrp);
struct cgroup_rstat_cpu *prstatc;
/*
* Both additions and removals are bottom-up. If a cgroup
* is already in the tree, all ancestors are.
*/
if (rstatc->updated_next)
break;
/* Root has no parent to link it to, but mark it busy */
if (!parent) {
rstatc->updated_next = cgrp;
break;
}
prstatc = cgroup_rstat_cpu(parent, cpu);
rstatc->updated_next = prstatc->updated_children;
prstatc->updated_children = cgrp;
cgrp = parent;
}
raw_spin_unlock_irqrestore(cpu_lock, flags);
}
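/*
 * Illustrative sketch (not from the original source): for root -> A -> B,
 * updating B's stats on a CPU links B into A's updated_children and A
 * into root's, bottom-up. A cgroup whose updated_children points to
 * itself has no updated children, and a NULL updated_next means "not on
 * any list", which is what the speculative test above relies on.
 */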
/**
* cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
* @pos: current position
 * @root: root of the tree to traverse
* @cpu: target cpu
*
* Walks the updated rstat_cpu tree on @cpu from @root. %NULL @pos starts
* the traversal and %NULL return indicates the end. During traversal,
* each returned cgroup is unlinked from the tree. Must be called with the
* matching cgroup_rstat_cpu_lock held.
*
* The only ordering guarantee is that, for a parent and a child pair
* covered by a given traversal, if a child is visited, its parent is
* guaranteed to be visited afterwards.
*/
static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
struct cgroup *root, int cpu)
{
struct cgroup_rstat_cpu *rstatc;
struct cgroup *parent;
if (pos == root)
return NULL;
/*
* We're gonna walk down to the first leaf and visit/remove it. We
	 * can pick any unvisited node as the starting point.
*/
if (!pos) {
pos = root;
/* return NULL if this subtree is not on-list */
if (!cgroup_rstat_cpu(pos, cpu)->updated_next)
return NULL;
} else {
pos = cgroup_parent(pos);
}
/* walk down to the first leaf */
while (true) {
rstatc = cgroup_rstat_cpu(pos, cpu);
if (rstatc->updated_children == pos)
break;
pos = rstatc->updated_children;
}
/*
* Unlink @pos from the tree. As the updated_children list is
* singly linked, we have to walk it to find the removal point.
* However, due to the way we traverse, @pos will be the first
* child in most cases. The only exception is @root.
*/
parent = cgroup_parent(pos);
if (parent) {
struct cgroup_rstat_cpu *prstatc;
struct cgroup **nextp;
prstatc = cgroup_rstat_cpu(parent, cpu);
nextp = &prstatc->updated_children;
while (*nextp != pos) {
struct cgroup_rstat_cpu *nrstatc;
nrstatc = cgroup_rstat_cpu(*nextp, cpu);
WARN_ON_ONCE(*nextp == parent);
nextp = &nrstatc->updated_next;
}
*nextp = rstatc->updated_next;
}
rstatc->updated_next = NULL;
return pos;
}
/*
* A hook for bpf stat collectors to attach to and flush their stats.
* Together with providing bpf kfuncs for cgroup_rstat_updated() and
* cgroup_rstat_flush(), this enables a complete workflow where bpf progs that
* collect cgroup stats can integrate with rstat for efficient flushing.
*
* A static noinline declaration here could cause the compiler to optimize away
 * the function. A global noinline declaration will keep the definition, but
 * the compiler may still optimize away the callsite. Therefore, __weak is
 * needed to ensure that the
* call is still emitted, by telling the compiler that we don't know what the
* function might eventually be.
*
* __diag_* below are needed to dismiss the missing prototype warning.
*/
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"kfuncs which will be used in BPF programs");
__weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
struct cgroup *parent, int cpu)
{
}
__diag_pop();
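/*
 * Illustrative BPF side (an assumption about typical usage, not part of
 * this file): a collector would attach to the hook with something like
 *
 *	SEC("fentry/bpf_rstat_flush")
 *	int BPF_PROG(flush, struct cgroup *cgrp, struct cgroup *parent, int cpu)
 *
 * and fold its per-cpu counters into per-cgroup totals there.
 */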
/* see cgroup_rstat_flush() */
static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
{
int cpu;
lockdep_assert_held(&cgroup_rstat_lock);
for_each_possible_cpu(cpu) {
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
cpu);
struct cgroup *pos = NULL;
unsigned long flags;
/*
* The _irqsave() is needed because cgroup_rstat_lock is
* spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
* this lock with the _irq() suffix only disables interrupts on
* a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
* interrupts on both configurations. The _irqsave() ensures
* that interrupts are always disabled and later restored.
*/
raw_spin_lock_irqsave(cpu_lock, flags);
while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
struct cgroup_subsys_state *css;
cgroup_base_stat_flush(pos, cpu);
bpf_rstat_flush(pos, cgroup_parent(pos), cpu);
rcu_read_lock();
list_for_each_entry_rcu(css, &pos->rstat_css_list,
rstat_css_node)
css->ss->css_rstat_flush(css, cpu);
rcu_read_unlock();
}
raw_spin_unlock_irqrestore(cpu_lock, flags);
/* play nice and yield if necessary */
if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
spin_unlock_irq(&cgroup_rstat_lock);
if (!cond_resched())
cpu_relax();
spin_lock_irq(&cgroup_rstat_lock);
}
}
}
/**
* cgroup_rstat_flush - flush stats in @cgrp's subtree
* @cgrp: target cgroup
*
* Collect all per-cpu stats in @cgrp's subtree into the global counters
* and propagate them upwards. After this function returns, all cgroups in
* the subtree have up-to-date ->stat.
*
* This also gets all cgroups in the subtree including @cgrp off the
* ->updated_children lists.
*
* This function may block.
*/
__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
{
might_sleep();
spin_lock_irq(&cgroup_rstat_lock);
cgroup_rstat_flush_locked(cgrp);
spin_unlock_irq(&cgroup_rstat_lock);
}
/**
* cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
* @cgrp: target cgroup
*
* Flush stats in @cgrp's subtree and prevent further flushes. Must be
* paired with cgroup_rstat_flush_release().
*
* This function may block.
*/
void cgroup_rstat_flush_hold(struct cgroup *cgrp)
__acquires(&cgroup_rstat_lock)
{
might_sleep();
spin_lock_irq(&cgroup_rstat_lock);
cgroup_rstat_flush_locked(cgrp);
}
/**
* cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
*/
void cgroup_rstat_flush_release(void)
__releases(&cgroup_rstat_lock)
{
spin_unlock_irq(&cgroup_rstat_lock);
}
int cgroup_rstat_init(struct cgroup *cgrp)
{
int cpu;
/* the root cgrp has rstat_cpu preallocated */
if (!cgrp->rstat_cpu) {
cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
if (!cgrp->rstat_cpu)
return -ENOMEM;
}
/* ->updated_children list is self terminated */
for_each_possible_cpu(cpu) {
struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
rstatc->updated_children = cgrp;
u64_stats_init(&rstatc->bsync);
}
return 0;
}
void cgroup_rstat_exit(struct cgroup *cgrp)
{
int cpu;
cgroup_rstat_flush(cgrp);
/* sanity check */
for_each_possible_cpu(cpu) {
struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
WARN_ON_ONCE(rstatc->updated_next))
return;
}
free_percpu(cgrp->rstat_cpu);
cgrp->rstat_cpu = NULL;
}
void __init cgroup_rstat_boot(void)
{
int cpu;
for_each_possible_cpu(cpu)
raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
}
/*
* Functions for cgroup basic resource statistics implemented on top of
* rstat.
*/
static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
struct cgroup_base_stat *src_bstat)
{
dst_bstat->cputime.utime += src_bstat->cputime.utime;
dst_bstat->cputime.stime += src_bstat->cputime.stime;
dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
#ifdef CONFIG_SCHED_CORE
dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
#endif
}
static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
struct cgroup_base_stat *src_bstat)
{
dst_bstat->cputime.utime -= src_bstat->cputime.utime;
dst_bstat->cputime.stime -= src_bstat->cputime.stime;
dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
#ifdef CONFIG_SCHED_CORE
dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
#endif
}
static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
struct cgroup *parent = cgroup_parent(cgrp);
struct cgroup_rstat_cpu *prstatc;
struct cgroup_base_stat delta;
unsigned seq;
/* Root-level stats are sourced from system-wide CPU stats */
if (!parent)
return;
/* fetch the current per-cpu values */
do {
seq = __u64_stats_fetch_begin(&rstatc->bsync);
delta = rstatc->bstat;
} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
/* propagate per-cpu delta to cgroup and per-cpu global statistics */
cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
cgroup_base_stat_add(&cgrp->bstat, &delta);
cgroup_base_stat_add(&rstatc->last_bstat, &delta);
cgroup_base_stat_add(&rstatc->subtree_bstat, &delta);
/* propagate cgroup and per-cpu global delta to parent (unless that's root) */
if (cgroup_parent(parent)) {
delta = cgrp->bstat;
cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
cgroup_base_stat_add(&parent->bstat, &delta);
cgroup_base_stat_add(&cgrp->last_bstat, &delta);
delta = rstatc->subtree_bstat;
prstatc = cgroup_rstat_cpu(parent, cpu);
cgroup_base_stat_sub(&delta, &rstatc->last_subtree_bstat);
cgroup_base_stat_add(&prstatc->subtree_bstat, &delta);
cgroup_base_stat_add(&rstatc->last_subtree_bstat, &delta);
}
}
static struct cgroup_rstat_cpu *
cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
{
struct cgroup_rstat_cpu *rstatc;
rstatc = get_cpu_ptr(cgrp->rstat_cpu);
*flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
return rstatc;
}
static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
struct cgroup_rstat_cpu *rstatc,
unsigned long flags)
{
u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
cgroup_rstat_updated(cgrp, smp_processor_id());
put_cpu_ptr(rstatc);
}
void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
struct cgroup_rstat_cpu *rstatc;
unsigned long flags;
rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}
void __cgroup_account_cputime_field(struct cgroup *cgrp,
enum cpu_usage_stat index, u64 delta_exec)
{
struct cgroup_rstat_cpu *rstatc;
unsigned long flags;
rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
switch (index) {
case CPUTIME_USER:
case CPUTIME_NICE:
rstatc->bstat.cputime.utime += delta_exec;
break;
case CPUTIME_SYSTEM:
case CPUTIME_IRQ:
case CPUTIME_SOFTIRQ:
rstatc->bstat.cputime.stime += delta_exec;
break;
#ifdef CONFIG_SCHED_CORE
case CPUTIME_FORCEIDLE:
rstatc->bstat.forceidle_sum += delta_exec;
break;
#endif
default:
break;
}
cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}
/*
* compute the cputime for the root cgroup by getting the per cpu data
* at a global level, then categorizing the fields in a manner consistent
* with how it is done by __cgroup_account_cputime_field for each bit of
* cpu time attributed to a cgroup.
*/
static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
{
struct task_cputime *cputime = &bstat->cputime;
int i;
memset(bstat, 0, sizeof(*bstat));
for_each_possible_cpu(i) {
struct kernel_cpustat kcpustat;
u64 *cpustat = kcpustat.cpustat;
u64 user = 0;
u64 sys = 0;
kcpustat_cpu_fetch(&kcpustat, i);
user += cpustat[CPUTIME_USER];
user += cpustat[CPUTIME_NICE];
cputime->utime += user;
sys += cpustat[CPUTIME_SYSTEM];
sys += cpustat[CPUTIME_IRQ];
sys += cpustat[CPUTIME_SOFTIRQ];
cputime->stime += sys;
cputime->sum_exec_runtime += user;
cputime->sum_exec_runtime += sys;
cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
#ifdef CONFIG_SCHED_CORE
bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
#endif
}
}
void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
u64 usage, utime, stime;
struct cgroup_base_stat bstat;
#ifdef CONFIG_SCHED_CORE
u64 forceidle_time;
#endif
if (cgroup_parent(cgrp)) {
cgroup_rstat_flush_hold(cgrp);
usage = cgrp->bstat.cputime.sum_exec_runtime;
cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
&utime, &stime);
#ifdef CONFIG_SCHED_CORE
forceidle_time = cgrp->bstat.forceidle_sum;
#endif
cgroup_rstat_flush_release();
} else {
root_cgroup_cputime(&bstat);
usage = bstat.cputime.sum_exec_runtime;
utime = bstat.cputime.utime;
stime = bstat.cputime.stime;
#ifdef CONFIG_SCHED_CORE
forceidle_time = bstat.forceidle_sum;
#endif
}
do_div(usage, NSEC_PER_USEC);
do_div(utime, NSEC_PER_USEC);
do_div(stime, NSEC_PER_USEC);
#ifdef CONFIG_SCHED_CORE
do_div(forceidle_time, NSEC_PER_USEC);
#endif
seq_printf(seq, "usage_usec %llu\n"
"user_usec %llu\n"
"system_usec %llu\n",
usage, utime, stime);
#ifdef CONFIG_SCHED_CORE
seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
#endif
}
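/*
 * Example output as produced by the seq_printf() calls above (values
 * illustrative; the last line appears only with CONFIG_SCHED_CORE):
 *
 *	usage_usec 10000
 *	user_usec 7000
 *	system_usec 3000
 *	core_sched.force_idle_usec 0
 */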
/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
BTF_SET8_START(bpf_rstat_kfunc_ids)
BTF_ID_FLAGS(func, cgroup_rstat_updated)
BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE)
BTF_SET8_END(bpf_rstat_kfunc_ids)
static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_rstat_kfunc_ids,
};
static int __init bpf_rstat_kfunc_init(void)
{
return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
&bpf_rstat_kfunc_set);
}
late_initcall(bpf_rstat_kfunc_init);
| linux-master | kernel/cgroup/rstat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Miscellaneous cgroup controller
*
* Copyright 2020 Google LLC
* Author: Vipin Sharma <[email protected]>
*/
#include <linux/limits.h>
#include <linux/cgroup.h>
#include <linux/errno.h>
#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/misc_cgroup.h>
#define MAX_STR "max"
#define MAX_NUM U64_MAX
/* Miscellaneous res name, keep it in sync with enum misc_res_type */
static const char *const misc_res_name[] = {
#ifdef CONFIG_KVM_AMD_SEV
/* AMD SEV ASIDs resource */
"sev",
/* AMD SEV-ES ASIDs resource */
"sev_es",
#endif
};
/* Root misc cgroup */
static struct misc_cg root_cg;
/*
 * Miscellaneous resource capacity for the entire machine. A capacity of 0
 * means the resource is not initialized or not present on the host.
 *
 * root_cg.max and capacity are independent of each other. root_cg.max can be
 * more than the actual capacity. We use the "Limits" resource distribution
 * model of cgroup for the miscellaneous controller.
*/
static u64 misc_res_capacity[MISC_CG_RES_TYPES];
/**
* parent_misc() - Get the parent of the passed misc cgroup.
* @cgroup: cgroup whose parent needs to be fetched.
*
* Context: Any context.
* Return:
* * struct misc_cg* - Parent of the @cgroup.
* * %NULL - If @cgroup is null or the passed cgroup does not have a parent.
*/
static struct misc_cg *parent_misc(struct misc_cg *cgroup)
{
return cgroup ? css_misc(cgroup->css.parent) : NULL;
}
/**
* valid_type() - Check if @type is valid or not.
* @type: misc res type.
*
* Context: Any context.
* Return:
* * true - If valid type.
* * false - If not valid type.
*/
static inline bool valid_type(enum misc_res_type type)
{
return type >= 0 && type < MISC_CG_RES_TYPES;
}
/**
* misc_cg_res_total_usage() - Get the current total usage of the resource.
* @type: misc res type.
*
* Context: Any context.
* Return: Current total usage of the resource.
*/
u64 misc_cg_res_total_usage(enum misc_res_type type)
{
if (valid_type(type))
return atomic64_read(&root_cg.res[type].usage);
return 0;
}
EXPORT_SYMBOL_GPL(misc_cg_res_total_usage);
/**
* misc_cg_set_capacity() - Set the capacity of the misc cgroup res.
* @type: Type of the misc res.
* @capacity: Supported capacity of the misc res on the host.
*
 * If capacity is 0 then charging a misc cgroup fails for that type.
*
* Context: Any context.
* Return:
* * %0 - Successfully registered the capacity.
* * %-EINVAL - If @type is invalid.
*/
int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
{
if (!valid_type(type))
return -EINVAL;
WRITE_ONCE(misc_res_capacity[type], capacity);
return 0;
}
EXPORT_SYMBOL_GPL(misc_cg_set_capacity);
/**
* misc_cg_cancel_charge() - Cancel the charge from the misc cgroup.
* @type: Misc res type in misc cg to cancel the charge from.
* @cg: Misc cgroup to cancel charge from.
* @amount: Amount to cancel.
*
* Context: Any context.
*/
static void misc_cg_cancel_charge(enum misc_res_type type, struct misc_cg *cg,
u64 amount)
{
WARN_ONCE(atomic64_add_negative(-amount, &cg->res[type].usage),
"misc cgroup resource %s became less than 0",
misc_res_name[type]);
}
/**
* misc_cg_try_charge() - Try charging the misc cgroup.
* @type: Misc res type to charge.
* @cg: Misc cgroup which will be charged.
* @amount: Amount to charge.
*
* Charge @amount to the misc cgroup. Caller must use the same cgroup during
* the uncharge call.
*
* Context: Any context.
* Return:
* * %0 - If successfully charged.
* * -EINVAL - If @type is invalid or misc res has 0 capacity.
 * * -EBUSY - If the max limit would be crossed or total usage would exceed the
* capacity.
*/
int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount)
{
struct misc_cg *i, *j;
int ret;
struct misc_res *res;
u64 new_usage;
if (!(valid_type(type) && cg && READ_ONCE(misc_res_capacity[type])))
return -EINVAL;
if (!amount)
return 0;
for (i = cg; i; i = parent_misc(i)) {
res = &i->res[type];
new_usage = atomic64_add_return(amount, &res->usage);
if (new_usage > READ_ONCE(res->max) ||
new_usage > READ_ONCE(misc_res_capacity[type])) {
ret = -EBUSY;
goto err_charge;
}
}
return 0;
err_charge:
for (j = i; j; j = parent_misc(j)) {
atomic64_inc(&j->res[type].events);
cgroup_file_notify(&j->events_file);
}
for (j = cg; j != i; j = parent_misc(j))
misc_cg_cancel_charge(type, j, amount);
misc_cg_cancel_charge(type, i, amount);
return ret;
}
EXPORT_SYMBOL_GPL(misc_cg_try_charge);
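/*
 * Illustrative pairing (not from the original source): a successful
 * charge must later be undone with misc_cg_uncharge() against the same
 * cgroup, e.g. for a single AMD SEV ASID:
 *
 *	if (!misc_cg_try_charge(MISC_CG_RES_SEV, cg, 1)) {
 *		// ... use the ASID ...
 *		misc_cg_uncharge(MISC_CG_RES_SEV, cg, 1);
 *	}
 *
 * MISC_CG_RES_SEV is assumed to be the enum misc_res_type entry that
 * corresponds to the "sev" name above.
 */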
/**
* misc_cg_uncharge() - Uncharge the misc cgroup.
* @type: Misc res type which was charged.
* @cg: Misc cgroup which will be uncharged.
* @amount: Charged amount.
*
* Context: Any context.
*/
void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount)
{
struct misc_cg *i;
if (!(amount && valid_type(type) && cg))
return;
for (i = cg; i; i = parent_misc(i))
misc_cg_cancel_charge(type, i, amount);
}
EXPORT_SYMBOL_GPL(misc_cg_uncharge);
/**
* misc_cg_max_show() - Show the misc cgroup max limit.
* @sf: Interface file
* @v: Arguments passed
*
* Context: Any context.
* Return: 0 to denote successful print.
*/
static int misc_cg_max_show(struct seq_file *sf, void *v)
{
int i;
struct misc_cg *cg = css_misc(seq_css(sf));
u64 max;
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
if (READ_ONCE(misc_res_capacity[i])) {
max = READ_ONCE(cg->res[i].max);
if (max == MAX_NUM)
seq_printf(sf, "%s max\n", misc_res_name[i]);
else
seq_printf(sf, "%s %llu\n", misc_res_name[i],
max);
}
}
return 0;
}
/**
* misc_cg_max_write() - Update the maximum limit of the cgroup.
* @of: Handler for the file.
* @buf: Data from the user. It should be either "max", 0, or a positive
* integer.
* @nbytes: Number of bytes of the data.
* @off: Offset in the file.
*
* User can pass data like:
* echo sev 23 > misc.max, OR
* echo sev max > misc.max
*
* Context: Any context.
* Return:
* * >= 0 - Number of bytes processed in the input.
* * -EINVAL - If buf is not valid.
 * * -ERANGE - If the number does not fit in a u64.
*/
static ssize_t misc_cg_max_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct misc_cg *cg;
u64 max;
int ret = 0, i;
enum misc_res_type type = MISC_CG_RES_TYPES;
char *token;
buf = strstrip(buf);
token = strsep(&buf, " ");
if (!token || !buf)
return -EINVAL;
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
if (!strcmp(misc_res_name[i], token)) {
type = i;
break;
}
}
if (type == MISC_CG_RES_TYPES)
return -EINVAL;
if (!strcmp(MAX_STR, buf)) {
max = MAX_NUM;
} else {
ret = kstrtou64(buf, 0, &max);
if (ret)
return ret;
}
cg = css_misc(of_css(of));
if (READ_ONCE(misc_res_capacity[type]))
WRITE_ONCE(cg->res[type].max, max);
else
ret = -EINVAL;
return ret ? ret : nbytes;
}
/**
* misc_cg_current_show() - Show the current usage of the misc cgroup.
* @sf: Interface file
* @v: Arguments passed
*
* Context: Any context.
* Return: 0 to denote successful print.
*/
static int misc_cg_current_show(struct seq_file *sf, void *v)
{
int i;
u64 usage;
struct misc_cg *cg = css_misc(seq_css(sf));
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
usage = atomic64_read(&cg->res[i].usage);
if (READ_ONCE(misc_res_capacity[i]) || usage)
seq_printf(sf, "%s %llu\n", misc_res_name[i], usage);
}
return 0;
}
/**
* misc_cg_capacity_show() - Show the total capacity of misc res on the host.
* @sf: Interface file
* @v: Arguments passed
*
* Only present in the root cgroup directory.
*
* Context: Any context.
* Return: 0 to denote successful print.
*/
static int misc_cg_capacity_show(struct seq_file *sf, void *v)
{
int i;
u64 cap;
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
cap = READ_ONCE(misc_res_capacity[i]);
if (cap)
seq_printf(sf, "%s %llu\n", misc_res_name[i], cap);
}
return 0;
}
static int misc_events_show(struct seq_file *sf, void *v)
{
struct misc_cg *cg = css_misc(seq_css(sf));
u64 events;
int i;
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
events = atomic64_read(&cg->res[i].events);
if (READ_ONCE(misc_res_capacity[i]) || events)
seq_printf(sf, "%s.max %llu\n", misc_res_name[i], events);
}
return 0;
}
/* Misc cgroup interface files */
static struct cftype misc_cg_files[] = {
{
.name = "max",
.write = misc_cg_max_write,
.seq_show = misc_cg_max_show,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "current",
.seq_show = misc_cg_current_show,
},
{
.name = "capacity",
.seq_show = misc_cg_capacity_show,
.flags = CFTYPE_ONLY_ON_ROOT,
},
{
.name = "events",
.flags = CFTYPE_NOT_ON_ROOT,
.file_offset = offsetof(struct misc_cg, events_file),
.seq_show = misc_events_show,
},
{}
};
/**
* misc_cg_alloc() - Allocate misc cgroup.
* @parent_css: Parent cgroup.
*
* Context: Process context.
* Return:
* * struct cgroup_subsys_state* - css of the allocated cgroup.
* * ERR_PTR(-ENOMEM) - No memory available to allocate.
*/
static struct cgroup_subsys_state *
misc_cg_alloc(struct cgroup_subsys_state *parent_css)
{
enum misc_res_type i;
struct misc_cg *cg;
if (!parent_css) {
cg = &root_cg;
} else {
cg = kzalloc(sizeof(*cg), GFP_KERNEL);
if (!cg)
return ERR_PTR(-ENOMEM);
}
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
WRITE_ONCE(cg->res[i].max, MAX_NUM);
atomic64_set(&cg->res[i].usage, 0);
}
return &cg->css;
}
/**
* misc_cg_free() - Free the misc cgroup.
* @css: cgroup subsys object.
*
* Context: Any context.
*/
static void misc_cg_free(struct cgroup_subsys_state *css)
{
kfree(css_misc(css));
}
/* Cgroup controller callbacks */
struct cgroup_subsys misc_cgrp_subsys = {
.css_alloc = misc_cg_alloc,
.css_free = misc_cg_free,
.legacy_cftypes = misc_cg_files,
.dfl_cftypes = misc_cg_files,
};
| linux-master | kernel/cgroup/misc.c |